repo_name | path | copies | size | content | license
---|---|---|---|---|---
williamFalcon/pytorch-lightning | tests/overrides/test_data_parallel.py | 1 | 6859 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock, Mock
import pytest
import torch
import torch.nn as nn
from torch.nn import DataParallel
from pytorch_lightning import LightningModule
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.overrides.data_parallel import (
LightningParallelModule,
python_scalar_to_tensor,
unsqueeze_scalar_tensor,
)
from pytorch_lightning.trainer.states import RunningStage
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
@pytest.mark.parametrize("wrapper_class", [LightningParallelModule, LightningDistributedModule])
@pytest.mark.parametrize(
"stage",
[
("training", "training_step"),
("testing", "test_step"),
("validating", "validation_step"),
("predicting", "predict_step"),
],
)
def test_lightning_wrapper_module_methods(wrapper_class, stage):
"""Test that the LightningWrapper redirects .forward() to the LightningModule methods."""
pl_module = MagicMock()
wrapped_module = wrapper_class(pl_module)
batch = torch.rand(5)
batch_idx = 3
prop, step = stage
pl_module.trainer.sanity_checking = False
for p in ("training", "testing", "validating", "predicting"):
setattr(pl_module.trainer, p, p == prop)
wrapped_module(batch, batch_idx)
getattr(pl_module, step).assert_called_with(batch, batch_idx)
@pytest.mark.parametrize(
"inp,expected",
[
[torch.tensor(1.0), torch.tensor([1.0])],
[torch.tensor([2.0]), torch.tensor([2.0])],
[torch.ones(3, 4, 5), torch.ones(3, 4, 5)],
],
)
def test_unsqueeze_scalar_tensor(inp, expected):
"""Test that the utility function unsqueezes only scalar tensors."""
assert torch.all(unsqueeze_scalar_tensor(inp).eq(expected))
@RunIf(min_gpus=2)
def test_lightning_parallel_module_unsqueeze_scalar():
"""Test that LightningParallelModule takes care of un-squeezeing 0-dim tensors."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
loss = output["loss"]
loss = loss.squeeze()
assert loss.dim() == 0
# PyTorch usually warns about 0-dim tensors returned in DP
return {"loss": loss}
model = TestModel()
model.trainer = Mock()
model.trainer.state.stage = RunningStage.TRAINING
batch = torch.rand(2, 32).cuda()
batch_idx = 0
wrapped_model = LightningParallelModule(model).cuda()
dp_module = DataParallel(wrapped_model, device_ids=[0, 1])
output = wrapped_model(batch, batch_idx)
assert output["loss"].dim() == 1
with pytest.warns(None) as record:
output = dp_module(batch, batch_idx)
assert output["loss"].dim() == 1
assert not record
@pytest.mark.parametrize(
"inp,expected", [[1.0, torch.tensor([1.0])], [2, torch.tensor([2.0])], [True, torch.tensor([True])]]
)
def test_python_scalar_to_tensor(inp, expected):
assert torch.all(python_scalar_to_tensor(inp).eq(expected))
@RunIf(min_gpus=1)
@pytest.mark.parametrize("device", [torch.device("cpu"), torch.device("cuda", 0)])
def test_lightning_parallel_module_python_scalar_conversion(device):
"""Test that LightningParallelModule can convert Python scalars to tensors."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
# PyTorch DP does not support Python scalars, Lightning converts them to tensors
output.update({"python scalar": 12.3})
return output
model = TestModel().to(device)
model.trainer = Mock()
model.trainer.state.stage = RunningStage.TRAINING
batch = torch.rand(2, 32).to(device)
batch_idx = 0
wrapped_model = LightningParallelModule(model)
output = wrapped_model(batch, batch_idx)
assert output["python scalar"] == torch.tensor([12.3], device=device)
@RunIf(min_gpus=2)
@pytest.mark.parametrize(
"nest, unnest",
[
(lambda x: x, lambda x: x),
(lambda x: dict(data=x), lambda x: x["data"]),
(lambda x: [x, (x, x)], lambda x: x[1][0]),
],
)
def test_lightning_parallel_module_device_access(nest, unnest):
"""Test that self.device returns the correct value in replicas of DataParallel."""
class DeviceAccessModel(LightningModule):
def __init__(self):
super().__init__()
self.layer = nn.Linear(2, 3)
def training_step(self, batch, batch_idx):
batch = unnest(batch)
assert batch.shape == torch.Size([1, 1])
assert self.device.index == batch.item()
assert self.device == self.layer.weight.device
return torch.tensor(1, device=self.device)
pl_module = DeviceAccessModel()
# required for redirecting the forward call to training_step
pl_module.trainer = Mock()
pl_module.trainer.state.stage = RunningStage.TRAINING
root_device = torch.device("cuda", 0)
wrapped_module = LightningParallelModule(pl_module).to(root_device)
model = DataParallel(wrapped_module, device_ids=[0, 1])
data = torch.tensor([0.0, 1.0], device=root_device).view(2, 1) # one value per gpu
data = data.to(root_device)
data = nest(data)
output = model(data, 0)
assert output.device == root_device
assert pl_module.device == root_device
assert torch.all(output.cpu().eq(torch.tensor([1, 1])))
@RunIf(min_gpus=2)
def test_lightning_parallel_module_device_access_warning():
"""Test that we show a warning when the device can't be inferred from the input."""
class DeviceAccessModel(LightningModule):
def training_step(self, batch, batch_idx):
pass
pl_module = DeviceAccessModel()
# required for redirecting the forward call to training_step
pl_module.trainer = Mock()
pl_module.trainer.state.stage = RunningStage.TRAINING
wrapped_module = LightningParallelModule(pl_module).cuda()
model = DataParallel(wrapped_module, device_ids=[0, 1])
data = dict(x=1) # contains no tensors
with pytest.warns(UserWarning, match="Could not determine on which device the inputs are."):
_ = model(data, 0)
| apache-2.0 |
holtjma/msbwt | MUS/CommandLineInterface.py | 1 | 13002 | '''
Created on Nov 1, 2013
@author: holtjma
'''
import argparse as ap
import logging
import os
import sys
import MSBWTGen
import util
from MUSCython import CompressToRLE
from MUSCython import GenericMerge
from MUSCython import MSBWTCompGenCython
from MUSCython import MSBWTGenCython
from MUSCython import MultimergeCython as Multimerge
from MUSCython import MultiStringBWTCython as MultiStringBWT
def initLogger():
'''
This code is taken from Matt's Suspenders for initializing a logger
'''
global logger
logger = logging.getLogger('root')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
def mainRun():
'''
This is the primary function for typical external users to run when the Command Line Interface is used
'''
#start up the logger
initLogger()
#attempt to parse the arguments
p = ap.ArgumentParser(description=util.DESC, formatter_class=ap.RawTextHelpFormatter)
#version data
p.add_argument('-V', '--version', action='version', version='%(prog)s' + \
' %s in MSBWT %s' % (util.VERSION, util.PKG_VERSION))
#TODO: do we want subparsers grouped by type or sorted by name? it's by type currently
sp = p.add_subparsers(dest='subparserID')
p2 = sp.add_parser('cffq', help='create a MSBWT from FASTQ files (pp + cfpp)')
p2.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')
p2.add_argument('-u', '--uniform', dest='areUniform', action='store_true', help='the input sequences have uniform length', default=False)
p2.add_argument('-c', '--compressed', dest='buildCompressed', action='store_true', help='build the RLE BWT (faster, less disk I/O)', default=False)
p2.add_argument('outBwtDir', type=util.newDirectory, help='the output MSBWT directory')
p2.add_argument('inputFastqs', nargs='+', type=util.readableFastqFile, help='the input FASTQ files')
p7 = sp.add_parser('pp', help='pre-process FASTQ files before BWT creation')
p7.add_argument('-u', '--uniform', dest='areUniform', action='store_true', help='the input sequences have uniform length', default=False)
p7.add_argument('outBwtDir', type=util.newDirectory, help='the output MSBWT directory')
p7.add_argument('inputFastqs', nargs='+', type=util.readableFastqFile, help='the input FASTQ files')
p3 = sp.add_parser('cfpp', help='create a MSBWT from pre-processed sequences and offsets')
p3.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')
p3.add_argument('-u', '--uniform', dest='areUniform', action='store_true', help='the input sequences have uniform length', default=False)
p3.add_argument('-c', '--compressed', dest='buildCompressed', action='store_true', help='build the RLE BWT (faster, less disk I/O)', default=False)
p3.add_argument('bwtDir', type=util.existingDirectory, help='the MSBWT directory to process')
p4 = sp.add_parser('merge', help='merge many MSBWTs into a single MSBWT')
p4.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')
p4.add_argument('outBwtDir', type=util.newDirectory, help='the output MSBWT directory')
p4.add_argument('inputBwtDirs', nargs='+', type=util.existingDirectory, help='input BWT directories to merge')
p5 = sp.add_parser('query', help='search for a sequence in an MSBWT, prints sequence and seqID')
p5.add_argument('inputBwtDir', type=util.existingDirectory, help='the BWT to query')
p5.add_argument('kmer', type=util.validKmer, help='the input k-mer to search for')
p5.add_argument('-d', '--dump-seqs', dest='dumpSeqs', action='store_true', help='print all sequences with the given kmer (default=False)', default=False)
p6 = sp.add_parser('massquery', help='search for many sequences in an MSBWT')
p6.add_argument('inputBwtDir', type=util.existingDirectory, help='the BWT to query')
p6.add_argument('kmerFile', help='a file with one k-mer per line')
p6.add_argument('outputFile', help='output file with counts per line')
p6.add_argument('-r', '--rev-comp', dest='reverseComplement', action='store_true', help='also search for each kmer\'s reverse complement', default=False)
p8 = sp.add_parser('compress', help='compress a MSBWT from byte/base to RLE')
p8.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')
p8.add_argument('srcDir', type=util.existingDirectory, help='the source directory for the BWT to compress')
p8.add_argument('dstDir', type=util.newDirectory, help='the destination directory')
p9 = sp.add_parser('decompress', help='decompress a MSBWT from RLE to byte/base')
p9.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')
p9.add_argument('srcDir', type=util.existingDirectory, help='the source directory for the BWT to compress')
p9.add_argument('dstDir', type=util.newDirectory, help='the destination directory')
p10 = sp.add_parser('convert', help='convert from a raw text input to RLE')
p10.add_argument('-i', metavar='inputTextFN', dest='inputTextFN', default=None, help='input text filename (default: stdin)')
p10.add_argument('dstDir', type=util.newDirectory, help='the destination directory')
args = p.parse_args()
if args.subparserID == 'cffq':
logger.info('Inputs:\t'+str(args.inputFastqs))
logger.info('Uniform:\t'+str(args.areUniform))
logger.info('Output:\t'+args.outBwtDir)
logger.info('Output Compressed:\t'+str(args.buildCompressed))
logger.info('Processes:\t'+str(args.numProcesses))
if args.numProcesses > 1:
logger.warning('Using multi-processing with slow disk accesses can lead to slower build times.')
print
if args.areUniform:
#if they are uniform, use the method developed by Bauer et al., it's likely short Illumina seq
if args.buildCompressed:
MultiStringBWT.createMSBWTCompFromFastq(args.inputFastqs, args.outBwtDir, args.numProcesses, args.areUniform, logger)
else:
MultiStringBWT.createMSBWTFromFastq(args.inputFastqs, args.outBwtDir, args.numProcesses, args.areUniform, logger)
else:
#if they aren't uniform, use the merge method by Holt et al., it's likely longer PacBio seq
if args.buildCompressed:
logger.error('No compressed builder for non-uniform datasets, compress after creation.')
else:
Multimerge.createMSBWTFromFastq(args.inputFastqs, args.outBwtDir, args.numProcesses, args.areUniform, logger)
elif args.subparserID == 'pp':
logger.info('Inputs:\t'+str(args.inputFastqs))
logger.info('Uniform:\t'+str(args.areUniform))
logger.info('Output:\t'+args.outBwtDir)
if args.areUniform:
#preprocess for Bauer et al. method
MultiStringBWT.preprocessFastqs(args.inputFastqs, args.outBwtDir, args.areUniform, logger)
else:
#preprocess for Holt et al. method
numProcs = 1
Multimerge.preprocessFastqs(args.inputFastqs, args.outBwtDir, numProcs, args.areUniform, logger)
elif args.subparserID == 'cfpp':
logger.info('BWT dir:\t'+args.bwtDir)
logger.info('Uniform:\t'+str(args.areUniform))
logger.info('Output Compressed:\t'+str(args.buildCompressed))
logger.info('Processes:\t'+str(args.numProcesses))
if args.numProcesses > 1:
logger.warning('Using multi-processing with slow disk accesses can lead to slower build times.')
print
seqFN = args.bwtDir+'/seqs.npy'
offsetFN = args.bwtDir+'/offsets.npy'
bwtFN = args.bwtDir+'/msbwt.npy'
if args.areUniform:
#process it using the column-wise Bauer et al. method
if args.buildCompressed:
MSBWTCompGenCython.createMsbwtFromSeqs(args.bwtDir, args.numProcesses, logger)
else:
MSBWTGenCython.createMsbwtFromSeqs(args.bwtDir, args.numProcesses, logger)
else:
#process it using the Holt et al. merge method
if args.buildCompressed:
logger.error('No compressed builder for non-uniform datasets, compress after creation.')
else:
Multimerge.interleaveLevelMerge(args.bwtDir, args.numProcesses, args.areUniform, logger)
elif args.subparserID == 'compress':
logger.info('Source Directory:'+args.srcDir)
logger.info('Dest Directory:'+args.dstDir)
logger.info('Processes:'+str(args.numProcesses))
if args.srcDir == args.dstDir:
raise Exception('Source and destination directories cannot be the same directory.')
print
MSBWTGen.compressBWT(args.srcDir+'/msbwt.npy', args.dstDir+'/comp_msbwt.npy', args.numProcesses, logger)
elif args.subparserID == 'decompress':
logger.info('Source Directory: '+args.srcDir)
logger.info('Dest Directory: '+args.dstDir)
logger.info('Processes: '+str(args.numProcesses))
print
MSBWTGen.decompressBWT(args.srcDir, args.dstDir, args.numProcesses, logger)
#TODO: remove if srcdir and dstdir are the same?
elif args.subparserID == 'merge':
logger.info('Inputs:\t'+str(args.inputBwtDirs))
logger.info('Output:\t'+args.outBwtDir)
logger.info('Processes:\t'+str(args.numProcesses))
if args.numProcesses > 1:
logger.warning('Multi-processing is not supported at this time, but will be included in a future release.')
numProcs = 1
#logger.warning('Using multi-processing with slow disk accesses can lead to slower build times.')
print
#MSBWTGen.mergeNewMSBWT(args.outBwtDir, args.inputBwtDirs, args.numProcesses, logger)
if len(args.inputBwtDirs) > 2:
#this is a deprecated method, it may still work if you feel daring
#MSBWTGenCython.mergeMsbwts(args.inputBwtDirs, args.outBwtDir, 1, logger)
logger.error('Merging more than two MSBWTs at once is not currently supported.')
else:
GenericMerge.mergeTwoMSBWTs(args.inputBwtDirs[0], args.inputBwtDirs[1], args.outBwtDir, numProcs, logger)
elif args.subparserID == 'query':
#this is the easiest thing we can do, don't dump the standard info, just do it
msbwt = MultiStringBWT.loadBWT(args.inputBwtDir, logger=logger)
#always print how many are found, users can parse it out if they want
r = msbwt.findIndicesOfStr(args.kmer)
print r[1]-r[0]
#dump the seqs if requested
if args.dumpSeqs:
for x in xrange(r[0], r[1]):
dInd = msbwt.getSequenceDollarID(x)
print msbwt.recoverString(dInd)[1:]+','+str(dInd)
elif args.subparserID == 'massquery':
logger.info('Input:\t'+str(args.inputBwtDir))
logger.info('Queries:\t'+str(args.kmerFile))
logger.info('Output:\t'+args.outputFile)
logger.info('Rev-comp:\t'+str(args.reverseComplement))
print
msbwt = MultiStringBWT.loadBWT(args.inputBwtDir, logger=logger)
output = open(args.outputFile, 'w+')
output.write('k-mer,counts')
if args.reverseComplement:
output.write(',revCompCounts\n')
else:
output.write('\n')
logger.info('Beginning queries...')
for line in open(args.kmerFile, 'r'):
kmer = line.strip('\n')
c = msbwt.countOccurrencesOfSeq(kmer)
if args.reverseComplement:
rc = msbwt.countOccurrencesOfSeq(MultiStringBWT.reverseComplement(kmer))
output.write(kmer+','+str(c)+','+str(rc)+'\n')
else:
output.write(kmer+','+str(c)+'\n')
logger.info('Queries complete.')
elif args.subparserID == 'convert':
if args.inputTextFN == None:
logger.info('Input: stdin')
else:
logger.info('Input: '+args.inputTextFN)
logger.info('Output: '+args.dstDir)
logger.info('Beginning conversion...')
CompressToRLE.compressInput(args.inputTextFN, args.dstDir)
logger.info('Finished conversion.')
else:
print args.subparserID+" is currently not implemented, please wait for a future release."
if __name__ == '__main__':
mainRun() | mit |
maramos/faq-bot | base.py | 1 | 2272 | import csv
import matplotlib.pyplot as mp
import nltk as nltk
import sklearn
import scipy as sp
import sys
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
json = open("tmp", "r")
class TfidfVectorizer(sklearn.feature_extraction.text.TfidfVectorizer):
def build_analyzer(self):
global stopwords
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc:(nltk.stem.RSLPStemmer().stem(w) for w in analyzer(doc) if w not in stopwords)
def convert(csv):
quest = []
answer = []
for i in csv:
quest.append(i.split(";")[0])
answer.append(i.split(";")[1])
dictcsv = {i:j for i, j in zip(quest, answer)}
return dictcsv
def get_quest(hash):
get_quest_saida = []
for i,j in hash.items():
get_quest_saida.append(stemmer.stem(i))
return get_quest_saida
def distance(v1, v2):
delta = (v1/sp.linalg.norm(v1.toarray()))-(v2/sp.linalg.norm(v2.toarray()))
return sp.linalg.norm(delta.toarray())
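# Note (added comment): distance() above is the Euclidean distance between the
# two L2-normalised TF-IDF vectors, so identical questions give 0.0 and
# questions sharing no stemmed terms give sqrt(2) ~= 1.414.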
def nearest_one(quests, new_quest):
best_doc = None
best_dist = sys.maxint
best_i = None
for i in range(0, n_samples):
quest = quests[i]
if quest==new_quest:
continue
dist = distance(X_train.getrow(i), vec.transform([new_quest]))
if dist < best_dist:
best_dist = dist
best_i = i
return best_i
def get_ans(new_quest):
nearest_question = nearest_one(FAQ.keys(),new_quest)
return FAQ.items()[nearest_question][1]
FAQ = convert(json)
stemmer = nltk.stem.RSLPStemmer()
stopwords = nltk.corpus.stopwords.words('portuguese')
#vec = sklearn.feature_extraction.text.CountVectorizer(min_df=1)
#vec = sklearn.feature_extraction.text.TfidfVectorizer(min_df=1)
vec = TfidfVectorizer(min_df=1)
X_train = vec.fit_transform(get_quest(FAQ))
n_samples, n_features = X_train.shape
def nt(text):
new_text = text
new_text_vec = vec.transform([new_text])
nearest_question = nearest_one(FAQ.keys(),new_text)
return FAQ.items()[nearest_question][1]
######
#new_text = "gostando"
#new_text_vec = vec.transform([new_text])
#nearest_question = nearest_one(FAQ.keys(),new_text)
#FAQ.items()[nearest_question][1]
######
| bsd-3-clause |
kchodorow/tensorflow | tensorflow/examples/learn/boston.py | 13 | 1945 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = tf.contrib.learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Predict and score
y_predicted = list(
regressor.predict(
scaler.transform(x_test), as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
ShawnSpooner/starcraft_agents | starcraft_agents/a2c_agent.py | 1 | 7346 | import numpy as np
from collections import namedtuple
from pysc2.agents import base_agent
from pysc2.lib import actions
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from starcraft_agents.a2c_model import A2CModel
from starcraft_agents.learning_agent import LearningAgent
from starcraft_agents.saved_actions import TrajectoryDataset
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import torchnet as tnt
class A2CAgent(LearningAgent):
"""The start of a basic A2C agent for learning agents."""
def __init__(self, screen_width, screen_height, horizon,
num_processes=2,
fully_conv=False,
expirement_name="default_expirement",
learning_rate=7e-4,
value_coef=1.0,
entropy_coef=1e-4,
in_channels=8,
continue_training=False,
summary=None):
super(A2CAgent, self).__init__(expirement_name)
num_functions = len(actions.FUNCTIONS)
self.model = A2CModel(num_functions=num_functions,
expirement_name=expirement_name,
screen_width=screen_width,
screen_height=screen_height).cuda()
self.screen_width = screen_width
self.screen_height = screen_height
self.summary = summary
self.in_channels = in_channels
self.horizon = horizon
self.num_processes = num_processes
self.max_grad = 0.5
self.entropy_coef = entropy_coef
self.value_coef = value_coef
self.gamma = 0.95
self.tau = 0.97
self.saved_actions = TrajectoryDataset(self.horizon,
self.num_processes,
screen_width,
screen_height)
if continue_training:
self.model.load_state_dict(torch.load(f"./models/{expirement_name}.pth"))
self.model.eval()
print(f"learning rate set to: {learning_rate}")
self.optimizer = optim.Adam(self.model.parameters(),
lr=learning_rate)
self.final_rewards = torch.zeros(1, 1)
self.setup_loggers()
def setup_loggers(self):
# visdom setup
self.loss_meter = tnt.meter.AverageValueMeter()
self.loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Train Loss'})
self.pi_loss_meter = tnt.meter.AverageValueMeter()
self.pi_loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Policy Loss'})
self.xy_loss_meter = tnt.meter.AverageValueMeter()
self.xy_loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'XY Loss'})
self.value_loss_meter = tnt.meter.AverageValueMeter()
self.value_loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Value Loss'})
self.reward_meter = tnt.meter.AverageValueMeter()
self.reward_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Batch Reward'})
self.entropy_meter = tnt.meter.AverageValueMeter()
self.entropy_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Entropy'})
self.adv_meter = tnt.meter.AverageValueMeter()
self.adv_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Advantage'})
self.episode_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': "Episode Score"})
self.episode_meter = tnt.meter.MovingAverageValueMeter(windowsize=3)
def finish_step(self):
self.saved_actions.step()
def reset_meters(self):
self.adv_meter.reset()
self.loss_meter.reset()
self.pi_loss_meter.reset()
self.value_loss_meter.reset()
self.entropy_meter.reset()
self.xy_loss_meter.reset()
def rollout(self):
self.reset_meters()
self.saved_actions.compute_advantages(self.gamma)
loader = DataLoader(self.saved_actions, batch_size=self.horizon, shuffle=True)
for screens, minimaps, games, actions, x1s, y1s, rewards, returns in loader:
values, lp, x_lp, y_lp, dist_entropy, spatial_entropy = self.model.evaluate_actions(
Variable(screens).cuda(),
Variable(minimaps).cuda(),
Variable(games).cuda(),
Variable(actions).cuda(),
Variable(x1s).cuda(),
Variable(y1s).cuda())
rewards_var = Variable(rewards).cuda()
returns_var = Variable(returns).cuda()
advs = (returns_var - values).data
advs_var = Variable(advs).cuda()
dist_entropy *= self.entropy_coef
spatial_entropy *= self.entropy_coef
pg_loss = ((lp + x_lp + y_lp) * advs_var).mean()
pg_loss -= dist_entropy
pg_loss -= spatial_entropy
vf_loss = (values - rewards_var).pow(2).mean() * self.value_coef
train_loss = pg_loss + vf_loss
self.optimizer.zero_grad()
train_loss.backward()
# clip after backward so that gradients exist before clipping
nn.utils.clip_grad_norm(self.model.parameters(), self.max_grad)
self.optimizer.step()
self.loss_meter.add(train_loss.data[0])
self.pi_loss_meter.add(pg_loss.data[0])
self.entropy_meter.add(dist_entropy.data[0] + spatial_entropy.data[0])
self.value_loss_meter.add(vf_loss.data[0])
self.reward_meter.add(rewards.sum())
self.adv_meter.add(advs.mean())
self.loss_logger.log(self.steps, self.loss_meter.value()[0])
self.pi_loss_logger.log(self.steps, self.pi_loss_meter.value()[0])
self.reward_logger.log(self.steps, self.reward_meter.value()[0])
self.entropy_logger.log(self.steps, self.entropy_meter.value()[0])
self.value_loss_logger.log(self.steps, self.value_loss_meter.value()[0])
self.adv_logger.log(self.steps, self.adv_meter.value()[0])
self.episode_logger.log(self.steps, self.episode_meter.value()[0])
| apache-2.0 |
pchaigno/grr | parsers/config_file.py | 4 | 32222 | #!/usr/bin/env python
"""Simple parsers for configuration files."""
import collections
import re
import logging
from grr.lib import lexer
from grr.lib import parsers
from grr.lib import utils
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import config_file as rdf_config_file
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import standard as rdf_standard
def AsIter(arg):
"""Encapsulates an argument in a tuple, if it's not already iterable."""
if isinstance(arg, basestring):
rslt = [arg]
elif isinstance(arg, collections.Iterable):
rslt = arg
elif not arg:
rslt = []
else:
rslt = [arg]
return tuple(rslt)
# Grr lexer implementation of ssv parser. Considered using
# https://github.com/Eugeny/reconfigure/blob/master/reconfigure/parsers/ssv.py
# but it doesn't seem to actually forward lookup.
class FieldParser(lexer.Lexer):
r"""A generalized field based parser that splits entries into fields.
Entries refer to distinct records within the text content, for example each
line of /etc/passwd or an ssh configuration attribute.
Fields are elements that make up the entry, for example the individual
parameters in /etc/passwd.
The parser supports:
- Flexible field based separators (e.g. spaces, commas, colons).
- Identification and removal of line comments. Inline comments (e.g. /*...*/)
are not supported.
- Line continuation detection.
- Multiline quotes.
The parser uses the following attributes as defaults:
- comments: #
- cont: \ (followed by any amount of whitespace)
- ml_quote: False (by default, quotes must close before newlines).
- quot: Both " and ' characters.
- sep: Whitespace
- term: Newlines.
To override default values, pass in appropriate keywords with a python
compatible regex string.
"""
def __init__(self, comments=r"#", cont=r"\\\s*\n", ml_quote=False,
quot=(r"\"", r"'"), sep=r"[ \t\f\v]+", term=r"[\r\n]",
verbose=0):
"""A generalized field-based parser. Handles whitespace, csv etc.
Args:
comments: Line comment patterns (e.g. "#").
cont: Continuation patterns (e.g. "\\").
ml_quote: Boolean flag to allow quoted strings to span lines.
quot: Quotation patterns (e.g. "\\"" or "'").
sep: Field separator patterns (e.g. "[\\s,]").
term: Entry termination patterns (e.g. "\\n").
verbose: Enable verbose mode for the lexer. Useful for debugging.
"""
super(FieldParser, self).__init__()
self.entries = []
self.fields = []
self.field = ""
self.comments = AsIter(comments)
self.cont = AsIter(cont)
self.ml_quote = AsIter(ml_quote)
self.quot = AsIter(quot)
self.sep = AsIter(sep)
self.term = AsIter(term)
self.verbose = verbose
self._GenStates()
def Reset(self):
super(FieldParser, self).Reset()
self.entries = []
self.fields = []
self.field = ""
def _GenStates(self):
"""Generate the lexer states."""
self.GenCommentState()
self.GenFwdState()
self.GenQuotedState()
self.GenCatchallState()
def _AddToken(self, state_regex, regex, actions, next_state):
self._tokens.append(lexer.Token(state_regex, regex, actions, next_state))
def GenCommentState(self):
if self.comments:
self._AddToken("COMMENT", r"\n", "PushBack,PopState", None)
self._AddToken("COMMENT", ".", None, None)
def GenFwdState(self):
"""Generates forwarding state rules.
The lexer will fast forward until there is string content. The
string content will be returned to the string processor.
"""
for c in self.cont:
self._AddToken("FWD", c, None, None)
for s in self.sep:
self._AddToken("FWD", s, None, None)
self._AddToken("FWD", ".", "PushBack,PopState", None)
def GenQuotedState(self):
"""Generate string matching state rules."""
for i, q in enumerate(self.quot):
label = "%s_STRING" % i
escaped = q.encode("string_escape")
self._AddToken(label, escaped, "PopState", None)
self._AddToken(label, q, "PopState", None)
if self.ml_quote:
self._AddToken(label, r"\n", None, None)
else:
self._AddToken(label, r"\n", "BadLine", None)
self._AddToken(label, ".", "AddToField", None)
def GenCatchallState(self):
"""Generate string matching state rules.
This sets up initial state handlers that cover both the 'INITIAL' state
and the intermediate content between fields.
The lexer acts on items with precedence:
- continuation characters: use the fast forward state rules.
- field separators: finalize processing the field.
- quotation characters: use the quotation state rules.
"""
for c in self.comments:
self._AddToken(".", c, "PushState,EndField", "COMMENT")
for c in self.cont:
self._AddToken(".", c, "PushState", "FWD")
for t in self.term:
self._AddToken(".", t, "EndEntry", None)
for s in self.sep:
self._AddToken(".", s, "EndField", None)
for i, q in enumerate(self.quot):
self._AddToken(".", q, "PushState", "%s_STRING" % i)
self._AddToken(".", ".", "AddToField", None)
def EndEntry(self, **_):
self.EndField()
if self.fields:
# Copy the fields into the processed entries.
self.entries.append(self.fields[:])
self.fields = []
def AddToField(self, string="", **_):
if string:
self.field += string
def EndField(self, **_):
if self.field:
self.fields.append(self.field[:])
self.field = ""
def BadLine(self, **_):
logging.debug("Skipped bad line in file at %s" % self.processed)
self.field = ""
def ParseEntries(self, data):
# Flush any old results.
self.Reset()
self.Feed(utils.SmartStr(data))
self.Close()
# In case there isn't a terminating field at the end of the feed, e.g. \n
self.EndEntry()
return self.entries
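# Illustrative usage sketch (not part of the original module): with the default
# whitespace separator and "#" comments, ParseEntries() should split a small
# config-style blob roughly as follows:
#   FieldParser().ParseEntries("one two three  # a comment\nfour five\n")
#   -> [['one', 'two', 'three'], ['four', 'five']]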
class KeyValueParser(FieldParser):
"""A generalized KeyValue parser that splits entries into key/value pairs.
Capabilities and parameters are identical to FieldParser, with one difference.
The parser also accepts the parameter "kv_sep"
Patterns specified in kv_sep are used to demarcate key/value processing.
kv_sep defaults to "="
"""
def __init__(self, comments=r"#", cont=r"\\\s*\n", kv_sep="=", ml_quote=False,
quot=(r"\"", r"'"), sep=r"[ \t\f\v]+", term=r"[\r\n]",
verbose=0):
"""A generalized key-value parser. Handles whitespace, csv etc.
Args:
comments: Line comment patterns (e.g. "#").
cont: Continuation patterns (e.g. "\\").
kv_sep: Key/Value separators (e.g. "=" or ":").
ml_quote: Boolean flag to allow quoted strings to span lines.
quot: Quotation patterns (e.g. "\\"" or "'").
sep: Field separator patterns (e.g. "[\\s,]").
term: Entry termination patterns (e.g. "\\n").
verbose: Enable verbose mode for the lexer. Useful for debugging.
"""
self.kv_sep = AsIter(kv_sep)
super(KeyValueParser, self).__init__(comments=comments, cont=cont,
ml_quote=ml_quote, quot=quot, sep=sep,
term=term, verbose=verbose)
self.key_field = ""
def _GenStates(self):
self.GenCommentState()
self.GenFwdState()
self.GenQuotedState()
self.GenMatchFirstState()
self.GenInitialState()
self.GenKeyState()
self.GenValueState()
self.GenCatchallState()
def GenMatchFirstState(self):
for i, q in enumerate(self.quot):
self._AddToken(".", q, "PushState", "%s_STRING" % i)
for c in self.cont:
self._AddToken(".", c, "PushState", "FWD")
def GenInitialState(self):
for c in self.comments:
self._AddToken("INITIAL", c, "PushState,EndField", "COMMENT")
for t in self.term:
self._AddToken("INITIAL", t, "EndField,EndEntry", None)
for c in self.sep:
self._AddToken("INITIAL", c, "PushState", "FWD")
for k in self.kv_sep:
self._AddToken("INITIAL", k, "BadLine", None)
self._AddToken("INITIAL", ".", "PushState,PushBack", "KEY")
def GenKeyState(self):
for c in self.comments:
self._AddToken("KEY", c, "EndKeyField,EndEntry,PopState,PushBack",
"COMMENT")
for t in self.term:
self._AddToken("KEY", t, "EndKeyField,EndEntry,PopState", None)
for k in self.kv_sep:
self._AddToken("KEY", k, "EndKeyField", "VALUE")
def GenValueState(self):
for c in self.comments:
self._AddToken("VALUE", c, "EndField,EndEntry,PopState,PushBack",
"COMMENT")
for t in self.term:
self._AddToken("VALUE", t, "EndField,EndEntry,PopState", None)
for s in self.sep:
self._AddToken("VALUE", s, "EndField", None)
def GenCatchallState(self):
self._AddToken(".", ".", "AddToField", None)
def EndKeyField(self, **_):
self.key_field = self.field
self.field = ""
def EndEntry(self, **_):
# Finalize processing for non-terminated entries. Key first, then fields.
if self.field and not self.key_field:
self.EndKeyField()
else:
self.EndField()
# Set up the entry.
key_field = self.key_field.strip()
if key_field:
self.entries.append({key_field: self.fields})
self.key_field = ""
self.fields = []
def ParseToOrderedDict(self, data):
result = collections.OrderedDict()
for field in self.ParseEntries(data):
result.update(field)
return result
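# Illustrative usage sketch (not part of the original module): with the default
# "=" key/value separator, keys map to lists of value fields, e.g.
#   KeyValueParser().ParseToOrderedDict("color = red\nsizes = 1 2\n")
#   -> OrderedDict([('color', ['red']), ('sizes', ['1', '2'])])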
class NfsExportsParser(parsers.FileParser, FieldParser):
"""Parser for NFS exports."""
output_types = ["NfsExport"]
supported_artifacts = ["NfsExportsFile"]
def Parse(self, unused_stat, file_obj, unused_knowledge_base):
for entry in self.ParseEntries(file_obj.read()):
if not entry:
continue
result = rdf_config_file.NfsExport()
result.share = entry[0]
for field in entry[1:]:
if field.startswith(("-", "(")):
result.defaults = field.strip("-()").split(",")
else:
client = rdf_config_file.NfsClient()
cfg = field.split("(", 1)
host = cfg[0]
if len(cfg) > 1:
options = cfg[1]
else:
options = None
client.host = host
if options:
client.options = options.strip("()").split(",")
result.clients.append(client)
yield result
class SshdConfigParser(parsers.FileParser):
"""Parser for sshd_config files."""
output_types = ["SshdConfig"]
supported_artifacts = ["SshdConfigFile", "SshdConfigCmd"]
# Specify the values that are boolean or integer. Anything else is a string.
_integers = ["clientalivecountmax",
"magicudsport",
"maxauthtries",
"maxsessions",
"port",
"protocol",
"serverkeybits",
"x11displayoffset"]
_booleans = ["allowagentforwarding",
"challengeresponseauthentication",
"gssapiauthentication",
"gssapicleanupcredentials",
"gssapikeyexchange",
"gssapistorecredentialsonrekey",
"gssapistrictacceptorcheck",
"hostbasedauthentication",
"ignorerhosts",
"ignoreuserknownhosts",
"kbdinteractiveauthentication",
"kerberosauthentication",
"passwordauthentication",
"permitemptypasswords",
"permitrootlogin",
"permittunnel",
"permituserenvironment",
"pubkeyauthentication",
"rhostsrsaauthentication",
"rsaauthentication",
"strictmodes",
"uselogin",
"usepam",
"x11forwarding",
"x11uselocalhost"]
# Valid ways that parameters can repeat
_repeated = {"acceptenv": r"[\n\s]+",
"allowgroups": r"[\s]+",
"allowusers": r"[\s]+",
"authenticationmethods": r"[\s]+",
"authorizedkeysfile": r"[\s]+",
"ciphers": r"[,]+",
"denygroups": r"[\s]+",
"denyusers": r"[\s]+",
"forcecommand": r"[\n]+",
"hostkey": r"[\n]+",
"kexalgorithms": r"[,]+",
"listenaddress": r"[\n]+",
"macs": r"[,]+",
"permitopen": r"[\s]+",
"port": r"[,\n]+",
"protocol": r"[,]+",
"subsystem": r"[\n]+"}
_true = ["yes", "true", "1"]
_match_keywords = [
"acceptenv", "allowagentforwarding", "allowgroups", "allowtcpforwarding",
"allowusers", "authenticationmethods", "authorizedkeyscommand",
"authorizedkeyscommanduser", "authorizedkeysfile",
"authorizedprincipalsfile", "banner", "chrootdirectory", "denygroups",
"denyusers", "forcecommand", "gatewayports", "gssapiauthentication",
"hostbasedauthentication", "hostbasedusesnamefrompacketonly",
"kbdinteractiveauthentication", "kerberosauthentication", "magicudspath",
"magicudsport", "maxauthtries", "maxsessions", "passwordauthentication",
"permitemptypasswords", "permitopen", "permitrootlogin",
"permittemphomedir", "permittty", "permittunnel", "pubkeyauthentication",
"rekeylimit", "rhostsrsaauthentication", "rsaauthentication",
"temphomedirpath", "x11displayoffset", "x11forwarding", "x11uselocalhost"]
def __init__(self):
super(SshdConfigParser, self).__init__()
self.Flush()
def Flush(self):
self.config = {}
self.matches = []
self.section = self.config
self.processor = self._ParseEntry
def ParseLine(self, line):
"""Extracts keyword/value settings from the sshd config.
The keyword is always the first string item.
Values are the remainder of the string. In cases where an sshd config
allows multiple values, these are split according to whatever separator(s)
sshd_config permits for that value.
Keywords and values are normalized. Keywords are converted to lowercase.
Values are converted into integers, booleans or strings. Strings are always
lowercased.
Args:
line: A line of the configuration file.
"""
kv = line.split(None, 1)
keyword = kv[0].lower()
# Safely set the argument string if it wasn't found.
values = kv[1:] or [""]
# Then split any parameters that are actually repeated items.
separators = self._repeated.get(keyword)
if separators:
repeated = []
for v in values:
repeated.extend(re.split(separators, v))
# Remove empty matches.
values = [v for v in repeated if v]
# Now convert the values to the right types.
if keyword in self._integers:
values = [int(v) for v in values]
elif keyword in self._booleans:
values = [v.lower() in self._true for v in values]
else:
values = [v.lower() for v in values]
# Only repeated arguments should be treated as a list.
if keyword not in self._repeated:
values = values[0]
# Switch sections for new match blocks.
if keyword == "match":
self._NewMatchSection(values)
# Add the keyword/values to the section.
self.processor(keyword, values)
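# Illustrative sketch (not part of the original parser): after feeding the
# lines "PermitRootLogin no" and "AcceptEnv LANG LC_*" through ParseLine(),
# the config dict should hold roughly:
#   {"permitrootlogin": False, "acceptenv": ["lang", "lc_*"]}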
def _ParseEntry(self, key, val):
"""Adds an entry for a configuration setting.
Args:
key: The name of the setting.
val: The value of the setting.
"""
if key in self._repeated:
setting = self.section.setdefault(key, [])
setting.extend(val)
else:
self.section.setdefault(key, val)
def _ParseMatchGrp(self, key, val):
"""Adds valid match group parameters to the configuration."""
if key in self._match_keywords:
self._ParseEntry(key, val)
def _NewMatchSection(self, val):
"""Create a new configuration section for each match clause.
Each match clause is added to the main config, and the criterion that will
trigger the match is recorded, as is the configuration.
Args:
val: The value following the 'match' keyword.
"""
section = {"criterion": val, "config": {}}
self.matches.append(section)
# Now add configuration items to config section of the match block.
self.section = section["config"]
# Switch to a match-specific processor on a new match_block.
self.processor = self._ParseMatchGrp
def Parse(self, stat, file_object, knowledge_base):
"""Parse the sshd configuration.
Process each of the lines in the configuration file.
Assembles an sshd_config file into a dictionary with the configuration
keyword as the key, and the configuration settings as value(s).
Args:
stat: unused
file_object: An open configuration file object.
knowledge_base: unused
Yields:
The configuration as an rdfvalue.
"""
_, _ = stat, knowledge_base
# Clean out any residual state.
self.Flush()
# for line in file_object:
lines = [l.strip() for l in file_object.read(100000).splitlines()]
for line in lines:
# Remove comments (will break if it includes a quoted/escaped #)
line = line.split("#")[0].strip()
if line:
self.ParseLine(line)
matches = []
for match in self.matches:
criterion, config = match["criterion"], match["config"]
block = rdf_config_file.SshdMatchBlock(criterion=criterion, config=config)
matches.append(block)
yield rdf_config_file.SshdConfig(config=self.config, matches=matches)
class MtabParser(parsers.FileParser, FieldParser):
"""Parser for mounted filesystem data acquired from /proc/mounts."""
output_types = ["Filesystem"]
supported_artifacts = ["LinuxProcMounts", "LinuxFstab"]
def Parse(self, unused_stat, file_obj, unused_knowledge_base):
for entry in self.ParseEntries(file_obj.read()):
if not entry:
continue
result = rdf_client.Filesystem()
result.device = entry[0].decode("string_escape")
result.mount_point = entry[1].decode("string_escape")
result.type = entry[2].decode("string_escape")
options = KeyValueParser(term=",").ParseToOrderedDict(entry[3])
# Keys without values get assigned [] by default. Because these keys are
# actually true, if declared, change any [] values to True.
for k, v in options.iteritems():
options[k] = v or [True]
result.options = rdf_protodict.AttributedDict(**options)
yield result
class MountCmdParser(parsers.CommandParser, FieldParser):
"""Parser for mounted filesystem data acquired from the mount command."""
output_types = ["Filesystem"]
supported_artifacts = ["LinuxMountCmd"]
mount_re = re.compile(r"(.*) on (.*) type (.*) \((.*)\)")
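# Example match (illustrative): "/dev/sda1 on / type ext4 (rw,relatime)"
# yields groups ("/dev/sda1", "/", "ext4", "rw,relatime").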
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
knowledge_base):
"""Parse the mount command output."""
_ = stderr, time_taken, args, knowledge_base # Unused.
self.CheckReturn(cmd, return_val)
result = rdf_protodict.AttributedDict()
for entry in self.ParseEntries(stdout):
line_str = " ".join(entry)
mount_rslt = self.mount_re.match(line_str)
if mount_rslt:
device, mount_point, fs_type, option_str = mount_rslt.groups()
result = rdf_client.Filesystem()
result.device = device
result.mount_point = mount_point
result.type = fs_type
# Parse these options as a dict as some items may be key/values.
# KeyValue parser uses OrderedDict as the native parser method. Use it.
options = KeyValueParser(term=",").ParseToOrderedDict(option_str)
# Keys without values get assigned [] by default. Because these keys are
# actually true, if declared, change any [] values to True.
for k, v in options.iteritems():
options[k] = v or [True]
result.options = rdf_protodict.AttributedDict(**options)
yield result
class RsyslogParser(parsers.FileParser, FieldParser):
"""Parser for syslog configurations."""
output_types = ["AttributedDict"]
supported_artifacts = ["LinuxRsyslogConfigs"]
process_together = True
log_rule_re = re.compile(r"([\w,\*]+)\.([\w,!=\*]+)")
destinations = collections.OrderedDict([
("TCP", re.compile(r"(?:@@)([^;]*)")),
("UDP", re.compile(r"(?:@)([^;]*)")),
("PIPE", re.compile(r"(?:\|)([^;]*)")),
("NONE", re.compile(r"(?:~)([^;]*)")),
("SCRIPT", re.compile(r"(?:\^)([^;]*)")),
("MODULE", re.compile(r"(?::om\w:)([^;]*)")),
("FILE", re.compile(r"-?(/[^;]*)")),
("WALL", re.compile(r"(\*)"))])
def _ParseAction(self, action):
"""Extract log configuration data from rsyslog actions.
Actions have the format:
<facility>/<severity> <type_def><destination>;<template>
e.g. *.* @@loghost.example.com.:514;RSYSLOG_ForwardFormat
Actions are selected by a type definition. These include:
"@@": TCP syslog
"@": UDP syslog
"|": Named pipe
"~": Drop to /dev/null
"^": Shell script
":om<string>:": An output module
Or a file path.
Args:
action: The action string from rsyslog.
Returns:
a rdfvalue.LogTarget message.
"""
rslt = rdf_config_file.LogTarget()
for dst_str, dst_re in self.destinations.iteritems():
dst = dst_re.match(action)
if dst:
rslt.transport = dst_str
rslt.destination = dst.group(1)
break
return rslt
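# Illustrative sketch (not part of the original parser): for the action string
# "@@loghost.example.com:514;RSYSLOG_ForwardFormat" the TCP pattern matches
# first, giving transport "TCP" and destination "loghost.example.com:514".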
def ParseMultiple(self, unused_stats, file_objs, unused_knowledge_base):
# TODO(user): review quoting and line continuation.
result = rdf_config_file.LogConfig()
for file_obj in file_objs:
for entry in self.ParseEntries(file_obj.read()):
directive = entry[0]
log_rule = self.log_rule_re.match(directive)
if log_rule and entry[1:]:
target = self._ParseAction(entry[1])
target.facility, target.priority = log_rule.groups()
result.targets.append(target)
return [result]
class PackageSourceParser(parsers.FileParser):
"""Common code for APT and YUM source list parsing."""
output_types = ["AttributedDict"]
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
def Parse(self, stat, file_obj, unused_knowledge_base):
uris_to_parse = self.FindPotentialURIs(file_obj)
uris = []
for url_to_parse in uris_to_parse:
url = rdf_standard.URI()
url.ParseFromString(url_to_parse)
# if no transport then url_to_parse wasn't actually a valid URL
# either host or path also have to exist for this to be a valid URL
if url.transport and (url.host or url.path):
uris.append(url)
filename = stat.pathspec.path
cfg = {"filename": filename, "uris": uris}
yield rdf_protodict.AttributedDict(**cfg)
def FindPotentialURIs(self, file_obj):
"""Stub Method to be overriden by APT and Yum source parsers."""
raise NotImplementedError("Please implement FindPotentialURIs.")
def ParseURIFromKeyValues(self, data, separator, uri_key):
"""Parse key/value formatted source listing and return potential URLs.
The fundamental shape of this format is as follows:
key: value # here : = separator
key : value
URI: [URL] # here URI = uri_key
[URL] # this is where it becomes trickey because [URL]
[URL] # can contain 'separator' specially if separator is :
key: value
The key uri_key is of interest to us and since the next line
in the config could contain another [URL], we need to keep track of context
when we hit uri_key to be able to check if the next line(s)
have more [URL].
Args:
data: unprocessed lines from a file
separator: how the key/value pairs are separated
uri_key: starting name of the key containing URI.
Returns:
A list of potential URLs found in data
"""
kv_entries = KeyValueParser(kv_sep=separator).ParseEntries(data)
spaced_entries = FieldParser().ParseEntries(data)
uris = []
check_uri_on_next_line = False
for kv_entry, sp_entry in zip(kv_entries, spaced_entries):
for k, v in kv_entry.iteritems():
# This line could be a URL if a) from key:value, value is empty OR
# b) if separator is : and first character of v starts with /.
if (check_uri_on_next_line and
(not v or (separator == ":" and
v and v[0].startswith("/")))):
uris.append(sp_entry[0])
else:
check_uri_on_next_line = False
if k.lower().startswith(uri_key) and v:
check_uri_on_next_line = True
uris.append(v[0]) # v is a list
return uris
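# Illustrative sketch (not part of the original parser): for Yum-style data
# such as "name=base\nbaseurl=http://mirror.example.com/centos/7/os/\n",
# ParseURIFromKeyValues(data, "=", "baseurl") should return
# ["http://mirror.example.com/centos/7/os/"].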
class APTPackageSourceParser(PackageSourceParser):
"""Parser for APT source lists to extract URIs only."""
supported_artifacts = ["APTSources"]
def FindPotentialURIs(self, file_obj):
"""Given a file, this will return all potenial APT source URIs."""
rfc822_format = "" # will contain all lines not in legacy format
uris_to_parse = []
for line in file_obj.read().splitlines(True):
# check if legacy style line - if it is then extract URL
m = re.search(r"^\s*deb(?:-\S+)?(?:\s+\[[^\]]*\])*\s+(\S+)(?:\s|$)", line)
if m:
uris_to_parse.append(m.group(1))
else:
rfc822_format += line
uris_to_parse.extend(self.ParseURIFromKeyValues(rfc822_format, ":", "uri"))
return uris_to_parse
class YumPackageSourceParser(PackageSourceParser):
"""Parser for Yum source lists to extract URIs only."""
supported_artifacts = ["YumSources"]
def FindPotentialURIs(self, file_obj):
"""Given a file, this will return all potenial Yum source URIs."""
return self.ParseURIFromKeyValues(file_obj.read(), "=", "baseurl")
class CronAtAllowDenyParser(parsers.FileParser):
"""Parser for /etc/cron.allow /etc/cron.deny /etc/at.allow & /etc/at.deny."""
output_types = ["AttributedDict"]
supported_artifacts = ["CronAtAllowDenyFiles"]
def Parse(self, stat, file_obj, unused_knowledge_base):
lines = set([l.strip() for l in file_obj.read(100000).splitlines()])
users = []
bad_lines = []
for line in lines:
if " " in line: # behaviour of At/Cron is undefined for lines
bad_lines.append(line) # with whitespace separated fields/usernames
elif line: # drop empty lines
users.append(line)
filename = stat.pathspec.path
cfg = {"filename": filename, "users": users}
yield rdf_protodict.AttributedDict(**cfg)
if bad_lines:
yield rdf_anomaly.Anomaly(type="PARSER_ANOMALY",
symptom="Dodgy entries in %s." % (filename),
reference_pathspec=stat.pathspec,
finding=bad_lines)
class NtpdParser(parsers.FileParser, FieldParser):
"""Parser for ntpd.conf file."""
output_types = ["NtpConfig"]
supported_artifacts = ["NtpConfFile"]
process_together = True
# The syntax is based on:
# https://www.freebsd.org/cgi/man.cgi?query=ntp.conf&sektion=5
# keywords with integer args.
_integers = set(["ttl", "hop"])
# keywords with floating point args.
_floats = set(["broadcastdelay", "calldelay"])
# keywords that have repeating args.
_repeated = set(["ttl", "hop"])
# keywords that set an option state, but can be "repeated" as well.
_boolean = set(["enable", "disable"])
# keywords that are keyed to their first argument, an address.
_address_based = set([
"trap", "fudge", "server", "restrict", "peer", "broadcast",
"manycastclient"])
# keywords that append/augment the config.
_accumulators = set(["includefile", "setvar"])
# keywords that can appear multiple times, accumulating data each time.
_duplicates = _address_based | _boolean | _accumulators
# All the expected keywords.
_match_keywords = _integers | _floats | _repeated | _duplicates | set([
"autokey", "revoke", "multicastclient", "driftfile", "broadcastclient",
"manycastserver", "includefile", "interface", "disable", "includefile",
"discard", "logconfig", "logfile", "tos", "tinker", "keys", "keysdir",
"requestkey", "trustedkey", "crypto", "control", "statsdir", "filegen"])
_defaults = {"auth": True, "bclient": False, "calibrate": False,
"kernel": False, "monitor": True, "ntp": True,
"pps": False, "stats": False}
def ParseLine(self, entries):
"""Extracts keyword/value settings from the ntpd config.
The keyword is always the first entry item.
Values are the remainder of the entries. In cases where an ntpd config
allows multiple values, these are split according to whitespace or
duplicate entries.
Keywords and values are normalized. Keywords are converted to lowercase.
Values are converted into integers, floats or strings. Strings are always
lowercased.
Args:
entries: A list of items making up a single line of a ntp.conf file.
"""
# If no entries were found, short circuit.
if not entries:
return
keyword = entries[0].lower()
# Set the argument string if it wasn't found.
values = entries[1:] or [""]
# Convert any types we need too.
if keyword in self._integers:
values = [int(v) for v in values]
if keyword in self._floats:
values = [float(v) for v in values]
if keyword not in self._repeated | self._duplicates:
# We have a plain and simple single key/value config line.
if isinstance(values[0], basestring):
self.config[keyword] = " ".join(values)
else:
self.config[keyword] = values
elif keyword in self._repeated:
# The keyword can have multiple single-word options, so add them as a list
# and overwrite previous settings.
self.config[keyword] = values
elif keyword in self._duplicates:
if keyword in self._address_based:
# If we have an address keyed keyword, join the keyword and address
# together to make the complete key for this data.
address = values[0].lower()
values = values[1:] or [""]
# Add/overwrite the address in this 'keyed' keywords dictionary.
existing_keyword_config = self.keyed.setdefault(keyword, [])
# Create a dict which stores the server name and the options.
# Flatten the remaining options into a single string.
existing_keyword_config.append({"address": address,
"options": " ".join(values)})
# Are we toggling an option?
elif keyword in self._boolean:
for option in values:
if keyword == "enable":
self.config[option] = True
else:
# As there are only two items in this set, we can assume disable.
self.config[option] = False
else:
# We have a non-keyed & non-boolean keyword, so add to the collected
# data so far. Order matters technically.
prev_settings = self.config.setdefault(keyword, [])
prev_settings.append(" ".join(values))
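# Illustrative sketch (not part of the original parser): after ParseLine() has
# seen the entries from "server ntp1.example.com iburst" and
# "driftfile /var/lib/ntp/drift", the parser state should hold roughly:
#   self.keyed["server"] == [{"address": "ntp1.example.com", "options": "iburst"}]
#   self.config["driftfile"] == "/var/lib/ntp/drift"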
def Parse(self, stat, file_object, knowledge_base):
"""Parse a ntp config into rdf."""
_, _ = stat, knowledge_base
# Clean out any residual state.
self.config = self._defaults.copy()
self.keyed = {}
# ntp.conf has no line continuation. Override the default 'cont' values
# then parse up the lines.
self.cont = ""
for line in self.ParseEntries(file_object.read(100000)):
self.ParseLine(line)
yield rdf_config_file.NtpConfig(
config=self.config,
server=self.keyed.get("server"),
restrict=self.keyed.get("restrict"),
fudge=self.keyed.get("fudge"),
trap=self.keyed.get("trap"),
peer=self.keyed.get("peer"),
broadcast=self.keyed.get("broadcast"),
manycastclient=self.keyed.get("manycastclient"))
def ParseMultiple(self, stats, file_objects, knowledge_base):
for s, f in zip(stats, file_objects):
for rslt in self.Parse(s, f, knowledge_base):
yield rslt
| apache-2.0 |
tensorflow/benchmarks | scripts/tf_cnn_benchmarks/coco_metric.py | 1 | 6366 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""COCO-style evaluation metrics.
Forked from reference model implementation.
COCO API: github.com/cocodataset/cocoapi/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import tempfile
from absl import flags
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import six
import tensorflow.compat.v1 as tf
import mlperf
import ssd_constants
FLAGS = flags.FLAGS
# https://github.com/cocodataset/cocoapi/issues/49
if six.PY3:
import pycocotools.coco
pycocotools.coco.unicode = str
def async_eval_runner(queue_predictions, queue_results, val_json_file):
"""Load intermediate eval results and get COCO metrics."""
while True:
message = queue_predictions.get()
if message == 'STOP': # poison pill
break
step, predictions = message
results = compute_map(predictions, val_json_file)
queue_results.put((step, results))
def compute_map(predictions, val_json_file):
"""Use model predictions to compute mAP.
Args:
predictions: a list of tuples returned by decoded_predictions function,
each containing the following elements:
image source_id, box coordinates in XYWH order, probability score, label
val_json_file: path to COCO annotation file
Returns:
A dictionary that maps all COCO metrics (keys) to their values
"""
if val_json_file.startswith("gs://"):
_, local_val_json = tempfile.mkstemp(suffix=".json")
tf.gfile.Remove(local_val_json)
tf.gfile.Copy(val_json_file, local_val_json)
atexit.register(tf.gfile.Remove, local_val_json)
else:
local_val_json = val_json_file
cocoGt = COCO(local_val_json)
cocoDt = cocoGt.loadRes(np.array(predictions))
E = COCOeval(cocoGt, cocoDt, iouType='bbox')
E.evaluate()
E.accumulate()
E.summarize()
print("Current AP: {:.5f}".format(E.stats[0]))
metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
# Prefix with "COCO" to group in TensorBoard.
return {"COCO/" + key: value for key, value in zip(metric_names, E.stats)}
def calc_iou(target, candidates):
target_tiled = np.tile(target[np.newaxis, :], (candidates.shape[0], 1))
# Left Top & Right Bottom
lt = np.maximum(target_tiled[:,:2], candidates[:,:2])
rb = np.minimum(target_tiled[:,2:], candidates[:,2:])
delta = np.maximum(rb - lt, 0)
intersect = delta[:,0] * delta[:,1]
  # Areas of the target box and of each candidate box.
  delta1 = target_tiled[:,2:] - target_tiled[:,:2]
  area1 = delta1[:,0] * delta1[:,1]
  delta2 = candidates[:,2:] - candidates[:,:2]
  area2 = delta2[:,0] * delta2[:,1]
iou = intersect/(area1 + area2 - intersect)
return iou
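# Illustrative check added for exposition (not part of the original module):
# for a target box (0, 0, 2, 2) and a single candidate (1, 1, 3, 3), both in
# corner encoding, the intersection is the unit square (1, 1)-(2, 2), so the
# expected IoU is 1 / (4 + 4 - 1) ~= 0.143.
def _calc_iou_example():
  target = np.array([0., 0., 2., 2.])
  candidates = np.array([[1., 1., 3., 3.]])
  return calc_iou(target, candidates)  # -> array([0.14285714])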
# TODO(haoyuzhang): Rewrite this NumPy based implementation to TensorFlow based
# implementation under ssd_model.py accuracy_function.
def decode_predictions(labels_and_predictions):
"""Decode predictions and remove unused boxes and labels."""
predictions = []
for example in labels_and_predictions:
source_id = int(example[ssd_constants.SOURCE_ID])
pred_box = example[ssd_constants.PRED_BOXES]
pred_scores = example[ssd_constants.PRED_SCORES]
locs, labels, probs = decode_single(
pred_box, pred_scores, ssd_constants.OVERLAP_CRITERIA,
ssd_constants.MAX_NUM_EVAL_BOXES, ssd_constants.MAX_NUM_EVAL_BOXES)
raw_height, raw_width, _ = example[ssd_constants.RAW_SHAPE]
for loc, label, prob in zip(locs, labels, probs):
# Ordering convention differs, hence [1], [0] rather than [0], [1]
x, y = loc[1] * raw_width, loc[0] * raw_height
w, h = (loc[3] - loc[1]) * raw_width, (loc[2] - loc[0]) * raw_height
predictions.append(
[source_id, x, y, w, h, prob, ssd_constants.CLASS_INV_MAP[label]])
mlperf.logger.log(key=mlperf.tags.NMS_THRESHOLD,
value=ssd_constants.OVERLAP_CRITERIA)
mlperf.logger.log(key=mlperf.tags.NMS_MAX_DETECTIONS,
value=ssd_constants.MAX_NUM_EVAL_BOXES)
return predictions
def decode_single(bboxes_in, scores_in, criteria, max_output, max_num=200):
# Reference to https://github.com/amdegroot/ssd.pytorch
bboxes_out = []
scores_out = []
labels_out = []
for i, score in enumerate(np.split(scores_in, scores_in.shape[1], 1)):
score = np.squeeze(score, 1)
# skip background
if i == 0:
continue
mask = score > ssd_constants.MIN_SCORE
if not np.any(mask):
continue
bboxes, score = bboxes_in[mask, :], score[mask]
score_idx_sorted = np.argsort(score)
score_sorted = score[score_idx_sorted]
score_idx_sorted = score_idx_sorted[-max_num:]
candidates = []
# perform non-maximum suppression
while len(score_idx_sorted):
idx = score_idx_sorted[-1]
bboxes_sorted = bboxes[score_idx_sorted, :]
bboxes_idx = bboxes[idx, :]
iou = calc_iou(bboxes_idx, bboxes_sorted)
score_idx_sorted = score_idx_sorted[iou < criteria]
candidates.append(idx)
bboxes_out.append(bboxes[candidates, :])
scores_out.append(score[candidates])
labels_out.extend([i]*len(candidates))
if len(scores_out) == 0:
tf.logging.info("No objects detected. Returning dummy values.")
return (
np.zeros(shape=(1, 4), dtype=np.float32),
np.zeros(shape=(1,), dtype=np.int32),
np.ones(shape=(1,), dtype=np.float32) * ssd_constants.DUMMY_SCORE,
)
bboxes_out = np.concatenate(bboxes_out, axis=0)
scores_out = np.concatenate(scores_out, axis=0)
labels_out = np.array(labels_out)
max_ids = np.argsort(scores_out)[-max_output:]
return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]
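# Illustrative note added for exposition (not part of the original module):
# decode_single returns at most `max_output` detections as a tuple of arrays
# shaped (N, 4), (N,) and (N,), and is invoked per image as in
# decode_predictions above, e.g.
#
#   boxes, labels, scores = decode_single(
#       pred_box, pred_scores, ssd_constants.OVERLAP_CRITERIA,
#       ssd_constants.MAX_NUM_EVAL_BOXES)
#
# where pred_box and pred_scores are placeholders for one example's outputs.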
| apache-2.0 |
google/qkeras | examples/example_mnist_ae.py | 1 | 3980 | # Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""uses po2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from collections import defaultdict
import tensorflow.keras.backend as K
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from qkeras import *
from qkeras.utils import model_save_quantized_weights
import numpy as np
import tensorflow.compat.v1 as tf
np.random.seed(42)
NB_EPOCH = 100
BATCH_SIZE = 64
VERBOSE = 1
NB_CLASSES = 10
OPTIMIZER = Adam(lr=0.0001, decay=0.000025)
VALIDATION_SPLIT = 0.1
train = 1
(x_train, y_train), (x_test, y_test) = mnist.load_data()
RESHAPED = 784
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train = x_train[..., np.newaxis]
x_test = x_test[..., np.newaxis]
x_train /= 256.0
x_test /= 256.0
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
print(y_train[0:10])
y_train = to_categorical(y_train, NB_CLASSES)
y_test = to_categorical(y_test, NB_CLASSES)
x = x_in = Input(
x_train.shape[1:-1] + (1,))
x = QConv2D(
32,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2D(
16,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2D(
8,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2DTranspose(
8,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2DTranspose(
16,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2DTranspose(
32,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2D(
1,
kernel_size=(3, 3),
padding="same",
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x_out = x
x = Activation("sigmoid")(x)
model = Model(inputs=[x_in], outputs=[x])
mo = Model(inputs=[x_in], outputs=[x_out])
model.summary()
model.compile(
loss="binary_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"])
if train:
history = model.fit(
x_train, x_train, batch_size=BATCH_SIZE,
epochs=NB_EPOCH, initial_epoch=1, verbose=VERBOSE,
validation_split=VALIDATION_SPLIT)
# Generate reconstructions
num_reco = 8
samples = x_test[:num_reco]
targets = y_test[:num_reco]
reconstructions = model.predict(samples)
for layer in model.layers:
for w, weight in enumerate(layer.get_weights()):
print(layer.name, w, weight.shape)
print_qstats(model)
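# Illustrative follow-up added for exposition (not part of the original
# example): the model_save_quantized_weights helper imported above could be
# used to export the quantized weights after training; the exact signature is
# assumed here and the file name is a placeholder.
#
#   model_save_quantized_weights(model, "mnist_ae_quantized.h5")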
| apache-2.0 |
Clyde-fare/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 220 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when the data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
vigilv/scikit-learn | sklearn/utils/random.py | 232 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
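# Illustrative usage sketch added for exposition (not part of the original
# module): draw a sparse (10, 1) matrix whose single output column takes the
# classes {0, 1, 2} with probabilities 0.5 / 0.25 / 0.25.
#
#   mat = random_choice_csc(10, [np.array([0, 1, 2])],
#                           class_probability=[np.array([0.5, 0.25, 0.25])],
#                           random_state=0)
#   mat.toarray().ravel()  # ten draws, roughly half of them zero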
| bsd-3-clause |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/data/mask_tokens_dataset.py | 1 | 6847 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from fairseq.data import data_utils, Dictionary
from . import BaseWrapperDataset, LRUCacheDataset
class MaskTokensDataset(BaseWrapperDataset):
"""
A wrapper Dataset for masked language modeling.
Input items are masked according to the specified masking probability.
Args:
dataset: Dataset to wrap.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of pad token in vocab
mask_idx: Id of mask token in vocab
return_masked_tokens: controls whether to return the non-masked tokens
(the default) or to return a tensor with the original masked token
IDs (and *pad_idx* elsewhere). The latter is useful as targets for
masked LM training.
seed: Seed for random number generator for reproducibility.
mask_prob: probability of replacing a token with *mask_idx*.
leave_unmasked_prob: probability that a masked token is unmasked.
random_token_prob: probability of replacing a masked token with a
random token from the vocabulary.
freq_weighted_replacement: sample random replacement words based on
word frequencies in the vocab.
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
bpe: BPE to use for whole-word masking.
"""
@classmethod
def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
"""Return the source and target datasets for masked LM training."""
dataset = LRUCacheDataset(dataset)
return (
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)),
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)),
)
def __init__(
self,
dataset: torch.utils.data.Dataset,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
return_masked_tokens: bool = False,
seed: int = 1,
mask_prob: float = 0.15,
leave_unmasked_prob: float = 0.1,
random_token_prob: float = 0.1,
freq_weighted_replacement: bool = False,
mask_whole_words: torch.Tensor = None,
):
assert 0.0 < mask_prob < 1.0
assert 0.0 <= random_token_prob <= 1.0
assert 0.0 <= leave_unmasked_prob <= 1.0
assert random_token_prob + leave_unmasked_prob <= 1.0
self.dataset = dataset
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.return_masked_tokens = return_masked_tokens
self.seed = seed
self.mask_prob = mask_prob
self.leave_unmasked_prob = leave_unmasked_prob
self.random_token_prob = random_token_prob
self.mask_whole_words = mask_whole_words
if random_token_prob > 0.0:
if freq_weighted_replacement:
weights = np.array(self.vocab.count)
else:
weights = np.ones(len(self.vocab))
weights[:self.vocab.nspecial] = 0
self.weights = weights / weights.sum()
self.epoch = 0
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=8)
def __getitem__(self, index: int):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
sz = len(item)
assert self.mask_idx not in item, \
'Dataset contains mask_idx (={}), this is not expected!'.format(
self.mask_idx,
)
if self.mask_whole_words is not None:
word_begins_mask = self.mask_whole_words.gather(0, item)
word_begins_idx = word_begins_mask.nonzero().view(-1)
sz = len(word_begins_idx)
words = np.split(word_begins_mask, word_begins_idx)[1:]
assert len(words) == sz
word_lens = list(map(len, words))
# decide elements to mask
mask = np.full(sz, False)
num_mask = int(
# add a random number for probabilistic rounding
self.mask_prob * sz + np.random.rand()
)
mask[np.random.choice(sz, num_mask, replace=False)] = True
if self.return_masked_tokens:
# exit early if we're just returning the masked tokens
# (i.e., the targets for masked LM training)
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.full(len(mask), self.pad_idx)
new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1]
return torch.from_numpy(new_item)
# decide unmasking and random replacement
rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob
if rand_or_unmask_prob > 0.0:
rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob)
if self.random_token_prob == 0.0:
unmask = rand_or_unmask
rand_mask = None
elif self.leave_unmasked_prob == 0.0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob
decision = np.random.rand(sz) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
else:
unmask = rand_mask = None
if unmask is not None:
mask = mask ^ unmask
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.copy(item)
new_item[mask] = self.mask_idx
if rand_mask is not None:
num_rand = rand_mask.sum()
if num_rand > 0:
if self.mask_whole_words is not None:
rand_mask = np.repeat(rand_mask, word_lens)
num_rand = rand_mask.sum()
new_item[rand_mask] = np.random.choice(
len(self.vocab),
num_rand,
p=self.weights,
)
return torch.from_numpy(new_item)
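# Illustrative usage sketch added for exposition (not part of the original
# module): the usual entry point is the classmethod above, which returns the
# masked source dataset together with the masked-token targets.
#
#   src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
#       token_dataset,
#       vocab,
#       pad_idx=vocab.pad(),
#       mask_idx=mask_idx,
#       seed=1,
#       mask_prob=0.15,
#   )
#
# Here token_dataset, vocab and mask_idx are placeholders for an indexed token
# dataset, a fairseq Dictionary and the id of the mask symbol.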
| bsd-3-clause |
jpata/ROOTDataHelpers | python/sklearn_to_tmva.py | 1 | 7582 | from sklearn.externals import six
from sklearn.tree import _tree
import numpy as np
def node_to_str(cls, tree, node_id, criterion, depth, kind, coef):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
cType = 1
IVar = 0
cut = 0.0
if tree.children_left[node_id] == _tree.TREE_LEAF:
IVar = -1
#print "Node", depth*" ", kind, value[0]
return '<Node pos="{0}" depth="{1}" NCoef="0" \
IVar="{2}" Cut="{3:.16E}" cType="1" \
res="{4:.16E}" rms="0.0e-00" \
purity="{5}" nType="-99">'.format(
kind, depth, IVar, 0.0, value[0] / cls.n_estimators * coef,
tree.impurity[node_id]
)
else:
IVar = tree.feature[node_id]
#print "Node", depth*" ", kind, IVar, tree.threshold[node_id], value[0]
return '<Node pos="{0}" depth="{1}" NCoef="0" \
IVar="{2}" Cut="{3:.16E}" cType="1" \
res="{4:.16E}" rms="0.0" \
purity="{5}" nType="0">'.format(
kind, depth, IVar, tree.threshold[node_id], value[0] / cls.n_estimators * coef,
tree.impurity[node_id]
)
def recurse(cls, outfile, t, coef, node_id=0, criterion="impurity", depth=0, kind="s"):
outfile.write(depth*" "+node_to_str(cls, t, node_id, criterion, depth, kind, coef) + '\n')
left_child = t.children_left[node_id]
right_child = t.children_right[node_id]
if depth == 10:
return
if left_child != _tree.TREE_LEAF:
recurse(cls, outfile, t, coef, left_child, criterion, depth+1, kind="l")
if right_child != _tree.TREE_LEAF:
recurse(cls, outfile, t, coef, right_child, criterion, depth+1, kind="r")
#if left_child == _tree.TREE_LEAF or right_child == _tree.TREE_LEAF:
outfile.write(depth*" "+"</Node>\n")
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
def gbr_to_tmva(cls, data, outfile_name, **kwargs):
# if not isinstance(cls, GradientBoostingRegressor):
# raise ValueError("Can only export GradientBoostingRegressor")
# if cls.loss != "huber":
# raise ValueError("TMVA assumes loss=huber")
#if cls.n_classes_ != 1:
# raise ValueError("Currently only two-class classification supported. (regression between 0 and 1).")
mva_name = kwargs.get("mva_name", "bdt")
coef = kwargs.get("coef", 10)
feature_names = data.columns.values
features_min = [data[fn].min() for fn in feature_names]
features_max = [data[fn].max() for fn in feature_names]
#Create list of variables
varstring = ""
for i in range(cls.n_features):
varstring += '<Variable VarIndex="{0}" Expression="{1}" Label="{1}" Title="{1}" Unit="" Internal="{1}" Type="F" Min="{2:.16E}" Max="{3:.16E}"/>\n'.format(
i, feature_names[i], features_min[i], features_max[i]
)
outfile = open(outfile_name, "w")
outfile.write(
"""
<?xml version="1.0"?>
<MethodSetup Method="BDT::{mva_name}">
<GeneralInfo>
<Info name="TMVA Release" value="4.2.0 [262656]"/>
<Info name="ROOT Release" value="6.02/05 [393733]"/>
<Info name="Creator" value="joosep"/>
<Info name="Date" value="Sun May 31 01:31:49 2015"/>
<Info name="Host" value="Linux cmsbuild13.cern.ch 2.6.32-504.8.1.el6.x86_64 #1 SMP Wed Jan 28 08:50:46 CET 2015 x86_64 x86_64 x86_64 GNU/Linux"/>
<Info name="Dir" value="/home/joosep/btv/CMSSW_7_4_0/src/RecoBTag/CMSCSTagger"/>
<Info name="Training events" value="1692351"/>
<Info name="TrainingTime" value="7.82964582e+03"/>
<Info name="AnalysisType" value="Classification"/>
</GeneralInfo>
<Options>
<Option name="V" modified="Yes">False</Option>
<Option name="VerbosityLevel" modified="Yes">Fatal</Option>
<Option name="VarTransform" modified="No">None</Option>
<Option name="H" modified="Yes">False</Option>
<Option name="CreateMVAPdfs" modified="No">False</Option>
<Option name="IgnoreNegWeightsInTraining" modified="No">False</Option>
<Option name="NTrees" modified="Yes">{ntrees}</Option>
<Option name="MaxDepth" modified="Yes">{maxdepth}</Option>
<Option name="MinNodeSize" modified="No">5%</Option>
<Option name="nCuts" modified="Yes">50</Option>
<Option name="BoostType" modified="Yes">Grad</Option>
<Option name="AdaBoostR2Loss" modified="No">quadratic</Option>
<Option name="UseBaggedBoost" modified="Yes">False</Option>
<Option name="Shrinkage" modified="Yes">{learnrate}</Option>
<Option name="AdaBoostBeta" modified="No">5.000000e-01</Option>
<Option name="UseRandomisedTrees" modified="No">False</Option>
<Option name="UseNvars" modified="Yes">{usenvars}</Option>
<Option name="UsePoissonNvars" modified="No">True</Option>
<Option name="BaggedSampleFraction" modified="No">6.000000e-01</Option>
<Option name="UseYesNoLeaf" modified="No">False</Option>
<Option name="NegWeightTreatment" modified="No">ignorenegweightsintraining</Option>
<Option name="Css" modified="No">1.000000e+00</Option>
<Option name="Cts_sb" modified="No">1.000000e+00</Option>
<Option name="Ctb_ss" modified="No">1.000000e+00</Option>
<Option name="Cbb" modified="No">1.000000e+00</Option>
<Option name="NodePurityLimit" modified="No">5.000000e-01</Option>
<Option name="SeparationType" modified="No">giniindex</Option>
<Option name="DoBoostMonitor" modified="Yes">False</Option>
<Option name="UseFisherCuts" modified="No">False</Option>
<Option name="MinLinCorrForFisher" modified="No">8.000000e-01</Option>
<Option name="UseExclusiveVars" modified="No">False</Option>
<Option name="DoPreselection" modified="No">False</Option>
<Option name="SigToBkgFraction" modified="No">1.000000e+00</Option>
<Option name="PruneMethod" modified="No">nopruning</Option>
<Option name="PruneStrength" modified="No">0.000000e+00</Option>
<Option name="PruningValFraction" modified="No">5.000000e-01</Option>
<Option name="nEventsMin" modified="No">0</Option>
<Option name="UseBaggedGrad" modified="No">False</Option>
<Option name="GradBaggingFraction" modified="No">6.000000e-01</Option>
<Option name="UseNTrainEvents" modified="No">0</Option>
<Option name="NNodesMax" modified="No">0</Option>
</Options>
<Variables NVar="{nvars}">
{varstring}
</Variables>
<Classes NClass="2">
<Class Name="Signal" Index="0"/>
<Class Name="Background" Index="1"/>
</Classes>
<Transformations NTransformations="0"/>
<MVAPdfs/>
<Weights NTrees="{ntrees}" AnalysisType="1">
""".format(**{
"mva_name": mva_name,
"ntrees":cls.n_estimators,
"maxdepth":cls.max_depth,
"maxdepth":cls.max_depth,
"usenvars":cls.max_features,
"nvars": cls.n_features,
"varstring": varstring,
"learnrate": cls.learning_rate
}
)
)
for itree, t in enumerate(cls.estimators_[:, 0]):
        outfile.write('<BinaryTree type="DecisionTree" boostWeight="1.0" itree="{0}">\n'.format(itree))
recurse(cls, outfile, t.tree_, coef)
outfile.write('</BinaryTree>\n')
outfile.write("""
</Weights>
</MethodSetup>
""")
outfile.close()
def evaluate_sklearn(cls, vals, coef=10):
ret = 0
for t in cls.estimators_[:,0]:
r = t.tree_.predict(np.array(vals, dtype="float32")) / cls.n_estimators * coef
ret += r[0,0]
return 2.0/(1.0+np.exp(-2.0*ret))-1
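# Illustrative usage sketch added for exposition (not part of the original
# module): export a fitted GradientBoostingClassifier/Regressor to TMVA XML
# and evaluate one row with the sklearn-side helper; clf and df below are
# placeholders for a fitted model and a pandas DataFrame of the input
# features, and the output file name is arbitrary.
#
#   gbr_to_tmva(clf, df, "weights.xml", mva_name="bdt", coef=10)
#   score = evaluate_sklearn(clf, df.values[:1], coef=10)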
| mit |
selective-inference/selective-inference | selectinf/randomized/slope.py | 3 | 10166 | from __future__ import print_function
import functools
import numpy as np
# sklearn imports
have_isotonic = False
try:
from sklearn.isotonic import IsotonicRegression
have_isotonic = True
except ImportError:
raise ValueError('unable to import isotonic regression from sklearn, SLOPE subgradient projection will not work')
# regreg imports
from regreg.atoms.slope import _basic_proximal_map
import regreg.api as rr
from ..constraints.affine import constraints
from .randomization import randomization
from ..base import restricted_estimator
from .query import gaussian_query
from .lasso import lasso
class slope(gaussian_query):
def __init__(self,
loglike,
slope_weights,
ridge_term,
randomizer,
perturb=None):
r"""
Create a new post-selection object for the SLOPE problem
Parameters
----------
loglike : `regreg.smooth.glm.glm`
A (negative) log-likelihood as implemented in `regreg`.
slope_weights : np.ndarray
SLOPE weights for L-1 penalty. If a float,
it is broadcast to all features.
ridge_term : float
How big a ridge term to add?
randomizer : object
Randomizer -- contains representation of randomization density.
perturb : np.ndarray
Random perturbation subtracted as a linear
term in the objective function.
"""
self.loglike = loglike
self.nfeature = p = self.loglike.shape[0]
if np.asarray(slope_weights).shape == ():
slope_weights = np.ones(loglike.shape) * slope_weights
self.slope_weights = np.asarray(slope_weights)
self.randomizer = randomizer
self.ridge_term = ridge_term
self.penalty = rr.slope(slope_weights, lagrange=1.)
self._initial_omega = perturb # random perturbation
def _solve_randomized_problem(self,
perturb=None,
solve_args={'tol': 1.e-12, 'min_its': 50}):
p = self.nfeature
# take a new perturbation if supplied
if perturb is not None:
self._initial_omega = perturb
if self._initial_omega is None:
self._initial_omega = self.randomizer.sample()
quad = rr.identity_quadratic(self.ridge_term, 0, -self._initial_omega, 0)
problem = rr.simple_problem(self.loglike, self.penalty)
initial_soln = problem.solve(quad, **solve_args)
initial_subgrad = -(self.loglike.smooth_objective(initial_soln, 'grad') +
quad.objective(initial_soln, 'grad'))
return initial_soln, initial_subgrad
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None):
self.initial_soln, self.initial_subgrad = self._solve_randomized_problem(perturb=perturb, solve_args=solve_args)
p = self.initial_soln.shape[0]
# now we have to work out SLOPE details, clusters, etc.
active_signs = np.sign(self.initial_soln)
active = self._active = active_signs != 0
        self._overall = overall = active > 0
self._inactive = inactive = ~self._overall
_active_signs = active_signs.copy()
self.selection_variable = {'sign': _active_signs,
'variables': self._overall}
indices = np.argsort(-np.fabs(self.initial_soln))
sorted_soln = self.initial_soln[indices]
initial_scalings = np.sort(np.unique(np.fabs(self.initial_soln[active])))[::-1]
self.observed_opt_state = initial_scalings
self._unpenalized = np.zeros(p, np.bool)
_beta_unpenalized = restricted_estimator(self.loglike, self._overall, solve_args=solve_args)
beta_bar = np.zeros(p)
beta_bar[overall] = _beta_unpenalized
self._beta_full = beta_bar
self.num_opt_var = self.observed_opt_state.shape[0]
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar))
_hessian_active = np.dot(X.T, X[:, active] * W[:, None])
_score_linear_term = -_hessian_active
self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))
self.observed_score_state = _score_linear_term.dot(_beta_unpenalized)
self.observed_score_state[inactive] += self.loglike.smooth_objective(beta_bar, 'grad')[inactive]
cur_indx_array = []
cur_indx_array.append(0)
cur_indx = 0
pointer = 0
signs_cluster = []
for j in range(p - 1):
if np.abs(sorted_soln[j + 1]) != np.abs(sorted_soln[cur_indx]):
cur_indx_array.append(j + 1)
cur_indx = j + 1
sign_vec = np.zeros(p)
sign_vec[np.arange(j + 1 - cur_indx_array[pointer]) + cur_indx_array[pointer]] = \
np.sign(self.initial_soln[indices[np.arange(j + 1 - cur_indx_array[pointer]) + cur_indx_array[pointer]]])
signs_cluster.append(sign_vec)
pointer = pointer + 1
if sorted_soln[j + 1] == 0:
break
signs_cluster = np.asarray(signs_cluster).T
if signs_cluster.size == 0:
return active_signs
else:
X_clustered = X[:, indices].dot(signs_cluster)
_opt_linear_term = X.T.dot(X_clustered)
_, prec = self.randomizer.cov_prec
opt_linear, opt_offset = (_opt_linear_term, self.initial_subgrad)
# now make the constraints
self._setup = True
A_scaling_0 = -np.identity(self.num_opt_var)
A_scaling_1 = -np.identity(self.num_opt_var)[:(self.num_opt_var - 1), :]
for k in range(A_scaling_1.shape[0]):
A_scaling_1[k, k + 1] = 1
A_scaling = np.vstack([A_scaling_0, A_scaling_1])
b_scaling = np.zeros(2 * self.num_opt_var - 1)
self._setup_sampler(A_scaling,
b_scaling,
opt_linear,
opt_offset)
return active_signs
# Targets of inference
# and covariance with score representation
# are same as LASSO
@staticmethod
def gaussian(X,
Y,
slope_weights,
sigma=1.,
quadratic=None,
ridge_term=0.,
randomizer_scale=None):
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
return slope(loglike,
np.asarray(slope_weights) / sigma ** 2,
ridge_term,
randomizer)
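# Illustrative usage sketch (not part of the original module): X and Y below
# are placeholders for a design matrix and response vector of matching length,
# and the decreasing weight sequence follows the usual SLOPE convention.
#
#   weights = np.linspace(3., 1., X.shape[1])
#   conv = slope.gaussian(X, Y, slope_weights=weights)
#   active_signs = conv.fit()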
# Projection onto selected subgradients of SLOPE
def _projection_onto_selected_subgradients(prox_arg,
weights,
ordering,
cluster_sizes,
active_signs,
last_value_zero=True):
"""
Compute the projection of a point onto the set of
subgradients of the SLOPE penalty with a given
clustering of the solution and signs of the variables.
This is a projection onto a lower dimensional set. The dimension
of this set is p -- the dimensions of the `prox_arg` minus
the number of unique values in `ordered_clustering` + 1 if the
last value of the solution was zero (i.e. solution was sparse).
Parameters
----------
prox_arg : np.ndarray(p, np.float)
Point to project
weights : np.ndarray(p, np.float)
Weights of the SLOPE penalty.
ordering : np.ndarray(p, np.int)
Order of original argument to SLOPE prox.
First entry corresponds to largest argument of SLOPE prox.
cluster_sizes : sequence
Sizes of clusters, starting with
largest in absolute value.
active_signs : np.ndarray(p, np.int)
Signs of non-zero coefficients.
last_value_zero : bool
Is the last solution value equal to 0?
"""
result = np.zeros_like(prox_arg)
ordered_clustering = []
cur_idx = 0
for cluster_size in cluster_sizes:
ordered_clustering.append([ordering[j + cur_idx] for j in range(cluster_size)])
cur_idx += cluster_size
# Now, run appropriate SLOPE prox on each cluster
cur_idx = 0
for i, cluster in enumerate(ordered_clustering):
prox_subarg = np.array([prox_arg[j] for j in cluster])
# If the value of the soln to the prox was non-zero
# then we solve a SLOPE of size 1 smaller than the cluster
# If the cluster size is 1, the value is just
# the corresponding signed weight
if i < len(ordered_clustering) - 1 or not last_value_zero:
if len(cluster) == 1:
result[cluster[0]] = weights[cur_idx] * active_signs[cluster[0]]
else:
indices = [j + cur_idx for j in range(len(cluster))]
cluster_weights = weights[indices]
ir = IsotonicRegression()
_ir_result = ir.fit_transform(np.arange(len(cluster)), cluster_weights[::-1])[::-1]
result[indices] = -np.multiply(active_signs[indices], _ir_result/2.)
else:
indices = np.array([j + cur_idx for j in range(len(cluster))])
cluster_weights = weights[indices]
slope_prox = _basic_proximal_map(prox_subarg, cluster_weights)
result[indices] = prox_subarg - slope_prox
cur_idx += len(cluster)
return result
| bsd-3-clause |
vigilv/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
    Returns y_i, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
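    # Illustrative usage sketch (not part of the original module): the full
    # label set must be supplied on the first call to partial_fit only;
    # X_batch/y_batch and y_all below are placeholders for mini-batches and
    # the complete target vector.
    #
    #   clf = SGDClassifier(loss="log")
    #   clf.partial_fit(X_batch, y_batch, classes=np.unique(y_all))
    #   clf.partial_fit(X_next_batch, y_next_batch)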
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in appendix B of:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
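if __name__ == "__main__":
    # Illustrative sketch (editor addition, not part of the original module): the
    # predict_proba / predict_log_proba properties above are only exposed for the
    # probabilistic losses 'log' and 'modified_huber'; with the default 'hinge'
    # loss, merely accessing the attribute raises AttributeError via _check_proba.
    # Runs only when this file is executed directly.
    import numpy as np
    from sklearn.linear_model import SGDClassifier

    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([1, 1, 2, 2])

    log_clf = SGDClassifier(loss="log", random_state=0).fit(X, y)
    print(log_clf.predict_proba([[-0.8, -1.]]))    # shape (1, 2), rows sum to 1

    hinge_clf = SGDClassifier(loss="hinge", random_state=0).fit(X, y)
    try:
        hinge_clf.predict_proba                    # property access itself raises
    except AttributeError as exc:
        print(exc)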
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
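if __name__ == "__main__":
    # Illustrative sketch (editor addition, not part of the original module):
    # out-of-core regression with partial_fit. Each call performs exactly one pass
    # over the chunk it is given (n_iter is forced to 1 inside partial_fit), so
    # multiple epochs are emulated by streaming chunks repeatedly.
    import numpy as np
    from sklearn.linear_model import SGDRegressor

    rng = np.random.RandomState(0)
    reg = SGDRegressor(random_state=0)
    for _ in range(5):                     # 5 "epochs" of streamed data
        for _ in range(10):                # 10 chunks per epoch
            X_chunk = rng.randn(20, 3)
            y_chunk = X_chunk.dot([1.0, -2.0, 0.5]) + 0.01 * rng.randn(20)
            reg.partial_fit(X_chunk, y_chunk)
    print(reg.coef_, reg.intercept_)       # should end up near [1.0, -2.0, 0.5]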
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time, and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
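if __name__ == "__main__":
    # Illustrative sketch (editor addition, not part of the original module):
    # partial_fit rejects class_weight='balanced' (see BaseSGDClassifier.partial_fit
    # above); the workaround suggested by its error message is to precompute the
    # weights with compute_class_weight and pass them as an explicit dict.
    import numpy as np
    from sklearn.linear_model import SGDClassifier
    from sklearn.utils import compute_class_weight

    rng = np.random.RandomState(0)
    X = rng.randn(200, 4)
    y = (rng.rand(200) < 0.1).astype(int)          # imbalanced binary labels
    classes = np.unique(y)

    weights = compute_class_weight('balanced', classes, y)
    clf = SGDClassifier(class_weight=dict(zip(classes, weights)), random_state=0)
    clf.partial_fit(X, y, classes=classes)
    print(clf.coef_.shape, clf.intercept_)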
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
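if __name__ == "__main__":
    # Illustrative sketch (editor addition, not part of the original module):
    # multi-class problems are fitted one-vs-all through fit_binary above, so the
    # resulting coef_ has one row per class; n_jobs controls how many of these
    # binary fits run in parallel.
    import numpy as np
    from sklearn.linear_model import SGDClassifier

    rng = np.random.RandomState(0)
    X = rng.randn(300, 4)
    y = rng.randint(0, 3, size=300)                # three classes

    clf = SGDClassifier(n_jobs=1, random_state=0).fit(X, y)
    print(clf.coef_.shape, clf.intercept_.shape)   # (3, 4) and (3,)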
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in ascending order; the largest class id is the positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time, and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in appendix B of:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
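if __name__ == "__main__":
    # Illustrative sketch (editor addition, not part of the original module): the
    # 'constant' and 'invscaling' schedules require eta0 > 0, while the default
    # 'optimal' schedule ignores eta0 entirely (see _validate_params above, which
    # runs in __init__).
    from sklearn.linear_model import SGDClassifier

    SGDClassifier(learning_rate="constant", eta0=0.1)       # valid
    try:
        SGDClassifier(learning_rate="constant", eta0=0.0)   # raises in __init__
    except ValueError as exc:
        print(exc)                                          # "eta0 must be > 0"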
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
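if __name__ == "__main__":
    # Illustrative sketch (editor addition, not part of the original module): with
    # average=10, plain SGD weights are used until 10 samples have been seen and
    # the averaged weights are then stored in coef_ / intercept_ (see the
    # average_sgd branch in _fit_regressor above).
    import numpy as np
    from sklearn.linear_model import SGDRegressor

    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    y = X.dot([0.5, -1.0, 2.0]) + 0.01 * rng.randn(100)

    avg_reg = SGDRegressor(average=10, random_state=0).fit(X, y)
    print(avg_reg.coef_)            # averaged weights
    print(avg_reg.average_coef_)    # the same averaged weights, exposed explicitly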
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time, and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 290 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded, unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
prakhar2b/Weekend-Projects | Machine-Learning/predict.py | 2 | 1833 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author : Prakhar Pratyush <prakhar@paralleldots.com>
#
######################################################################
import torch
from torch.utils.data import Dataset, DataLoader
from utils import file_stats, to_tensor, create_word_vocab, create_weights
import numpy as np
import pickle
restaurant_test_file = 'Data/Restaurant/restaurant_test.json'
###################################################################
##################### Vocab Initialization ########################
###################################################################
words = []
word2idx = {}
words.append('<pad>')
word2idx['<pad>'] = 0
###################################################################
class vocabDataset(Dataset):
def __init__(self, vocab, transform=None):
self.vocab = vocab
self.transform = transform
def __len__(self):
return len(self.vocab)
def __getitem__(self, idx):
return self.vocab[idx]
def main():
global words
global word2idx
test_file = restaurant_test_file
# Load Model
path = "Saved_Models/PD8/10epoch_5"
model = torch.load(path)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
print("\nTest Set :")
#test_vocab = file_stats(test_file, test_file=True)
with open("test_pd_data.pickle", "rb") as f:
test_vocab = pickle.load(f)
file_stats(test_vocab, return_vocab=False)
words, word2idx = create_word_vocab(test_vocab, words, word2idx)
weights_matrix = create_weights(words, word2idx)
test_vocab = to_tensor(test_vocab, word2idx)
test_set = vocabDataset(test_vocab)
test_batch = DataLoader(test_set, batch_size=32, shuffle=True)
acc, cm = model.test(test_batch, weights_matrix)
print(cm)
print(f'Test Acc: {acc*100:.2f}%')
if __name__ == '__main__':
main() | mit |
williamFalcon/pytorch-lightning | tests/plugins/environments/test_lightning_environment.py | 1 | 3053 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
from pytorch_lightning.plugins.environments import LightningEnvironment
@mock.patch.dict(os.environ, {})
def test_default_attributes():
"""Test the default attributes when no environment variables are set."""
env = LightningEnvironment()
assert not env.creates_children()
assert env.master_address() == "127.0.0.1"
assert isinstance(env.master_port(), int)
assert env.world_size() == 1
assert env.local_rank() == 0
assert env.node_rank() == 0
@mock.patch.dict(os.environ, {"MASTER_ADDR": "1.2.3.4", "MASTER_PORT": "500", "LOCAL_RANK": "2", "NODE_RANK": "3"})
def test_attributes_from_environment_variables():
"""Test that the default cluster environment takes the attributes from the environment variables."""
env = LightningEnvironment()
assert env.master_address() == "1.2.3.4"
assert env.master_port() == 500
assert env.world_size() == 1
assert env.global_rank() == 0
assert env.local_rank() == 2
assert env.node_rank() == 3
env.set_global_rank(100)
assert env.global_rank() == 100
env.set_world_size(100)
assert env.world_size() == 100
@pytest.mark.parametrize(
"environ, creates_children", [({}, False), (dict(LOCAL_RANK="2"), True), (dict(NODE_RANK="1"), False)]
)
def test_manual_user_launch(environ, creates_children):
"""Test that the environment switches to manual user mode when LOCAL_RANK env variable detected."""
with mock.patch.dict(os.environ, environ):
env = LightningEnvironment()
assert env.creates_children() == creates_children
@mock.patch.dict(os.environ, {"GROUP_RANK": "1"})
def test_node_rank_from_group_rank():
"""Test that the GROUP_RANK substitutes NODE_RANK."""
env = LightningEnvironment()
assert "NODE_RANK" not in os.environ
assert env.node_rank() == 1
@mock.patch.dict(os.environ, {})
def test_random_master_port():
"""Test randomly chosen master port when no master port was given by user."""
env = LightningEnvironment()
port = env.master_port()
assert isinstance(port, int)
# repeated calls do not generate a new port number
assert env.master_port() == port
@mock.patch.dict(os.environ, {"WORLD_SIZE": "1"})
def test_teardown():
"""Test that the GROUP_RANK substitutes NODE_RANK."""
env = LightningEnvironment()
assert "WORLD_SIZE" in os.environ
env.teardown()
assert "WORLD_SIZE" not in os.environ
| apache-2.0 |
thomasantony/CarND-Projects | Exercises/Term1/transfer-learning-lab/run_bottleneck.py | 1 | 3874 | from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Flatten, Input, AveragePooling2D
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.datasets import cifar10
from skimage.transform import resize
import numpy as np
import pickle
import tensorflow as tf
import keras.backend as K
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'cifar10', "Make bottleneck features for this dataset, one of 'cifar10' or 'traffic'")
flags.DEFINE_string('network', 'resnet', "The model to bottleneck, one of 'vgg', 'inception', or 'resnet'")
flags.DEFINE_integer('batch_size', 16, 'The batch size for the generator')
batch_size = FLAGS.batch_size
h, w, ch = 224, 224, 3
if FLAGS.network == 'inception':
h, w, ch = 299, 299, 3
from keras.applications.inception_v3 import preprocess_input
img_placeholder = tf.placeholder("uint8", (None, 32, 32, 3))
resize_op = tf.image.resize_images(img_placeholder, (h, w), method=0)
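# gen() is a generator factory: the inner _f() yields (X_batch, y_batch)
# tuples indefinitely, resizing each image batch with the TensorFlow resize
# op above, applying the network-specific preprocessing, and wrapping back
# to the start of the data once it has been exhausted.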
def gen(session, data, labels, batch_size):
def _f():
start = 0
end = start + batch_size
n = data.shape[0]
while True:
X_batch = session.run(resize_op, {img_placeholder: data[start:end]})
X_batch = preprocess_input(X_batch)
            y_batch = labels[start:end]
start += batch_size
end += batch_size
if start >= n:
start = 0
end = batch_size
print(start, end)
yield (X_batch, y_batch)
return _f
def create_model():
input_tensor = Input(shape=(h, w, ch))
if FLAGS.network == 'vgg':
model = VGG16(input_tensor=input_tensor, include_top=False)
x = model.output
x = AveragePooling2D((7,7))(x)
model = Model(model.input, x)
elif FLAGS.network == 'inception':
model = InceptionV3(input_tensor=input_tensor, include_top=False)
x = model.output
x = AveragePooling2D((8, 8), strides=(8, 8))(x)
model = Model(model.input, x)
else:
model = ResNet50(input_tensor=input_tensor, include_top=False)
return model
def main(_):
if FLAGS.dataset == 'cifar10':
(X_train, y_train), (_, _) = cifar10.load_data()
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
else:
with open('data/train.p', mode='rb') as f:
train = pickle.load(f)
X_train, X_val, y_train, y_val = train_test_split(train['features'], train['labels'], test_size=0.33, random_state=0)
train_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_train')
validation_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_validation')
print("Resizing to", (w, h, ch))
print("Saving to ...")
print(train_output_file)
print(validation_output_file)
with tf.Session() as sess:
K.set_session(sess)
K.set_learning_phase(1)
model = create_model()
print('Bottleneck training')
train_gen = gen(sess, X_train, y_train, batch_size)
bottleneck_features_train = model.predict_generator(train_gen(), X_train.shape[0])
data = {'features': bottleneck_features_train, 'labels': y_train}
pickle.dump(data, open(train_output_file, 'wb'))
print('Bottleneck validation')
val_gen = gen(sess, X_val, y_val, batch_size)
bottleneck_features_validation = model.predict_generator(val_gen(), X_val.shape[0])
data = {'features': bottleneck_features_validation, 'labels': y_val}
pickle.dump(data, open(validation_output_file, 'wb'))
if __name__ == '__main__':
tf.app.run()
| mit |
vigilv/scikit-learn | benchmarks/bench_20newsgroups.py | 370 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
mhugo/QGIS | tests/src/python/test_qgsserver_accesscontrol.py | 12 | 10565 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Stephane Brunner'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import os
from shutil import copyfile
from math import sqrt
from utilities import unitTestDataPath
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
from qgis.server import QgsServer, QgsAccessControlFilter, QgsServerRequest, QgsBufferServerRequest, QgsBufferServerResponse
from qgis.core import QgsRenderChecker, QgsApplication
from qgis.PyQt.QtCore import QSize
import tempfile
from test_qgsserver import QgsServerTestBase
import base64
XML_NS = \
'service="WFS" version="1.0.0" ' \
'xmlns:wfs="http://www.opengis.net/wfs" ' \
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
'xmlns:ogc="http://www.opengis.net/ogc" ' \
'xmlns="http://www.opengis.net/wfs" updateSequence="0" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" ' \
'xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.0.0/WFS-capabilities.xsd" ' \
'xmlns:gml="http://www.opengis.net/gml" ' \
'xmlns:ows="http://www.opengis.net/ows" '
class RestrictedAccessControl(QgsAccessControlFilter):
""" Used to have restriction access """
# Be able to deactivate the access control to have a reference point
_active = False
def __init__(self, server_iface):
super(QgsAccessControlFilter, self).__init__(server_iface)
def layerFilterExpression(self, layer):
""" Return an additional expression filter """
if not self._active:
return super(RestrictedAccessControl, self).layerFilterExpression(layer)
return "$id = 1" if layer.name() == "Hello" else None
def layerFilterSubsetString(self, layer):
""" Return an additional subset string (typically SQL) filter """
if not self._active:
return super(RestrictedAccessControl, self).layerFilterSubsetString(layer)
if layer.name() == "Hello_SubsetString":
return "pk = 1"
elif layer.name() == "Hello_Project_SubsetString":
return "pkuid = 6 or pkuid = 7"
elif layer.name() == "Hello_Filter_SubsetString":
return "pkuid = 6 or pkuid = 7"
else:
return None
def layerPermissions(self, layer):
""" Return the layer rights """
if not self._active:
return super(RestrictedAccessControl, self).layerPermissions(layer)
rh = self.serverInterface().requestHandler()
rights = QgsAccessControlFilter.LayerPermissions()
# Used to test WFS transactions
if rh.parameterMap().get("LAYER_PERM") == "no":
return rights
# Used to test the WCS
if rh.parameterMap().get("TEST") == "dem":
rights.canRead = layer.name() != "dem"
else:
rights.canRead = layer.name() not in ("Country", "Hello_OnOff")
if layer.name() == "db_point":
rights.canRead = rights.canInsert = rights.canUpdate = rights.canDelete = True
return rights
def authorizedLayerAttributes(self, layer, attributes):
""" Return the authorised layer attributes """
if not self._active:
return super(RestrictedAccessControl, self).authorizedLayerAttributes(layer, attributes)
if "color" in attributes: # spellok
attributes.remove("color") # spellok
return attributes
def allowToEdit(self, layer, feature):
""" Are we authorise to modify the following geometry """
if not self._active:
return super(RestrictedAccessControl, self).allowToEdit(layer, feature)
return feature.attribute("color") in ["red", "yellow"]
def cacheKey(self):
return "r" if self._active else "f"
class TestQgsServerAccessControl(QgsServerTestBase):
@classmethod
def _execute_request(cls, qs, requestMethod=QgsServerRequest.GetMethod, data=None):
if data is not None:
data = data.encode('utf-8')
request = QgsBufferServerRequest(qs, requestMethod, {}, data)
response = QgsBufferServerResponse()
cls._server.handleRequest(request, response)
headers = []
rh = response.headers()
rk = sorted(rh.keys())
for k in rk:
headers.append(("%s: %s" % (k, rh[k])).encode('utf-8'))
return b"\n".join(headers) + b"\n\n", bytes(response.body())
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls._app = QgsApplication([], False)
cls._server = QgsServer()
cls._execute_request("")
cls._server_iface = cls._server.serverInterface()
cls._accesscontrol = RestrictedAccessControl(cls._server_iface)
cls._server_iface.registerAccessControl(cls._accesscontrol, 100)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
del cls._server
cls._app.exitQgis()
def setUp(self):
super().setUp()
self.testdata_path = unitTestDataPath("qgis_server_accesscontrol")
data_file = os.path.join(self.testdata_path, "helloworld.db")
self.assertTrue(os.path.isfile(data_file), 'Could not find data file "{}"'.format(data_file))
copyfile(data_file, os.path.join(self.testdata_path, "_helloworld.db"))
for k in ["QUERY_STRING", "QGIS_PROJECT_FILE"]:
if k in os.environ:
del os.environ[k]
self.projectPath = os.path.join(self.testdata_path, "project_grp.qgs")
self.assertTrue(os.path.isfile(self.projectPath), 'Could not find project file "{}"'.format(self.projectPath))
def tearDown(self):
copyfile(os.path.join(self.testdata_path, "_helloworld.db"), os.path.join(self.testdata_path, "helloworld.db"))
def _handle_request(self, restricted, query_string, **kwargs):
self._accesscontrol._active = restricted
qs = "?" + query_string if query_string is not None else ''
result = self._result(self._execute_request(qs, **kwargs))
return result
def _result(self, data):
headers = {}
for line in data[0].decode('UTF-8').split("\n"):
if line != "":
header = line.split(":")
self.assertEqual(len(header), 2, line)
headers[str(header[0])] = str(header[1]).strip()
return data[1], headers
def _get_fullaccess(self, query_string):
result = self._handle_request(False, query_string)
return result
def _get_restricted(self, query_string):
result = self._handle_request(True, query_string)
return result
def _post_fullaccess(self, data, query_string=None):
self._server.putenv("QGIS_PROJECT_FILE", self.projectPath)
result = self._handle_request(False, query_string, requestMethod=QgsServerRequest.PostMethod, data=data)
self._server.putenv("QGIS_PROJECT_FILE", '')
return result
def _post_restricted(self, data, query_string=None):
self._server.putenv("QGIS_PROJECT_FILE", self.projectPath)
result = self._handle_request(True, query_string, requestMethod=QgsServerRequest.PostMethod, data=data)
self._server.putenv("QGIS_PROJECT_FILE", '')
return result
def _img_diff(self, image, control_image, max_diff, max_size_diff=QSize(), outputJpg=False):
extFile = 'png'
if outputJpg:
extFile = 'jpg'
temp_image = os.path.join(tempfile.gettempdir(), "%s_result.%s" % (control_image, extFile))
with open(temp_image, "wb") as f:
f.write(image)
control = QgsRenderChecker()
control.setControlPathPrefix("qgis_server_accesscontrol")
control.setControlName(control_image)
control.setRenderedImage(temp_image)
if max_size_diff.isValid():
control.setSizeTolerance(max_size_diff.width(), max_size_diff.height())
return control.compareImages(control_image), control.report()
def _img_diff_error(self, response, headers, image, max_diff=10, max_size_diff=QSize()):
super()._img_diff_error(response, headers, image, max_diff=max_diff,
max_size_diff=max_size_diff,
unittest_data_path='qgis_server_accesscontrol')
def _geo_img_diff(self, image_1, image_2):
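        # Returns 0 on Windows (comparison unsupported there, see #13061),
        # 1000 if the produced and expected rasters differ in size, and
        # otherwise the square root of the summed squared per-pixel
        # differences between the two rasters.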
if os.name == 'nt':
# Not supported on Windows due to #13061
return 0
with open(os.path.join(tempfile.gettempdir(), image_2), "wb") as f:
f.write(image_1)
image_1 = gdal.Open(os.path.join(tempfile.gettempdir(), image_2), GA_ReadOnly)
assert image_1, "No output image written: " + image_2
image_2 = gdal.Open(os.path.join(self.testdata_path, "results", image_2), GA_ReadOnly)
assert image_1, "No expected image found:" + image_2
if image_1.RasterXSize != image_2.RasterXSize or image_1.RasterYSize != image_2.RasterYSize:
image_1 = None
image_2 = None
return 1000 # wrong size
square_sum = 0
for x in range(image_1.RasterXSize):
for y in range(image_1.RasterYSize):
square_sum += (image_1.ReadAsArray()[x][y] - image_2.ReadAsArray()[x][y]) ** 2
# Explicitly close GDAL datasets
image_1 = None
image_2 = None
return sqrt(square_sum)
def _test_colors(self, colors):
for id, color in list(colors.items()):
response, headers = self._post_fullaccess(
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="db_point" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>gid</ogc:PropertyName>
<ogc:Literal>{id}</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(id=id, xml_ns=XML_NS)
)
self.assertTrue(
str(response).find("<qgs:color>{color}</qgs:color>".format(color=color)) != -1,
"Wrong color in result\n%s" % response)
| gpl-2.0 |
jorge2703/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/benchmark/dummy_model.py | 1 | 2971 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data import Dictionary
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
@register_model('dummy_model')
class DummyModel(FairseqLanguageModel):
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
@staticmethod
def add_args(parser):
parser.add_argument('--num-layers', type=int, default=24)
parser.add_argument('--embed-dim', type=int, default=1024)
@classmethod
def build_model(cls, args, task):
encoder = DummyEncoder(
num_embed=len(task.target_dictionary),
embed_dim=args.embed_dim,
num_layers=args.num_layers,
)
return cls(args, encoder)
def forward(self, src_tokens, masked_tokens=None, **kwargs):
return self.decoder(src_tokens, masked_tokens=masked_tokens)
class DummyEncoder(FairseqDecoder):
def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24):
super().__init__(Dictionary())
self.embed = nn.Embedding(
num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0
)
self.layers_a = nn.ModuleList([
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 3*embed_dim), # q, k, v input projection
nn.Linear(3*embed_dim, embed_dim), # skip self-attention
nn.Linear(embed_dim, embed_dim), # output projection
nn.Dropout(),
)
for i in range(num_layers)
])
self.layers_b = nn.ModuleList([
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 4*embed_dim), # FFN
nn.ReLU(),
nn.Linear(4*embed_dim, embed_dim), # FFN
nn.Dropout(0.1),
)
for i in range(num_layers)
])
self.out_proj = nn.Linear(embed_dim, num_embed)
def forward(self, tokens, masked_tokens=None):
x = self.embed(tokens)
for layer_a, layer_b in zip(self.layers_a, self.layers_b):
x = x + layer_a(x)
x = x + layer_b(x)
x = self.out_proj(x)
if masked_tokens is not None:
x = x[masked_tokens]
return (x,)
def max_positions(self):
return 1024
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
@register_model_architecture('dummy_model', 'dummy_model')
def base_architecture(args):
pass
| bsd-3-clause |
CompPhysics/MachineLearning | doc/src/SupportVMachines/Programs/bootstrap.py | 2 | 1048 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.tree import DecisionTreeRegressor
from sklearn.datasets import load_breast_cancer
# Load the data
cancer = load_breast_cancer()
maxdepth = 6
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
score = np.zeros(maxdepth)
depth = np.zeros(maxdepth)
for degree in range(1,maxdepth):
model = DecisionTreeRegressor(max_depth=degree)
model.fit(X_train_scaled,y_train)
y_pred = model.predict(X_test_scaled)
depth[degree] = degree
score[degree] = model.score(X_test_scaled,y_pred)
print('Max Tree Depth:', degree)
print('Score:', score[degree])
plt.xlim(1,maxdepth)
plt.plot(depth, score, label='Score')
plt.legend()
plt.show()
| cc0-1.0 |
CompPhysics/MachineLearning | doc/src/week39/codes/test11.py | 1 | 5673 | ##Make synthetic data
n = 1000
np.random.seed(20)
x1 = np.random.rand(n)
x2 = np.random.rand(n)
X = designMatrix(x1, x2, 4)
y = franke(x1, x2)
##Train-validation-test samples.
# We choose / play with hyper-parameters on the validation data and then test predictions on the test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=1) # 0.25 x 0.8 = 0.2
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_val = scaler.transform(X_val)
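# The scaler centres the constant intercept column to zero, so the first
# column of each design matrix is reset to one below to keep an explicit
# intercept term.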
X_train[:, 0] = 1
X_test[:, 0] = 1
X_val[:, 0] = 1
linreg = linregOwn(method='ols')
#print('Invert OLS:', linreg.fit(X_train, y_train))
beta = SGD(X_train, y_train, learning_rate=0.07)
#print('SGD OLS:', beta)
linreg = linregOwn(method='ridge')
#print('Invert Ridge:', linreg.fit(X_train, y_train, lambda_= 0.01))
beta = SGD(X_train, y_train, learning_rate=0.0004, method='ridge')
#print('SGD Ridge:', beta)
sgdreg = SGDRegressor(max_iter = 100, penalty=None, eta0=0.1)
sgdreg.fit(X_train[:, 1:],y_train.ravel())
#print('sklearn:', sgdreg.coef_)
#print('sklearn intercept:', sgdreg.intercept_)
def plot_MSE(method = 'ridge', scheme = None):
eta = np.logspace(-5, -3, 10)
lambda_ = np.logspace(-5, -1, 10)
MSE_ols = []
MSE_ridge = []
if scheme == 'joint':
if method == 'ridge':
for lmbd in lambda_:
for i in eta:
beta = SGD(X_train, y_train, learning_rate=i, lambda_ = lmbd, method = method)
mse_ols_test, mse_ridge_test = compute_test_mse(X_val, y_val, lambda_ = lmbd, beta = beta)
MSE_ridge.append(mse_ridge_test)
fig = plt.figure()
ax = fig.gca(projection='3d') ##get current axis
lambda_ = np.ravel(lambda_)
eta = np.ravel(eta)
ax.zaxis.set_major_locator(LinearLocator(5))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.03f'))
ax.plot_trisurf(lambda_, eta, MSE_ridge, cmap='viridis', edgecolor='none')
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$\eta$')
ax.set_title(r'MSE Ridge')
ax.view_init(30, 60)
plt.show()
if scheme == 'separate':
if method == 'ols':
eta = np.logspace(-5, 0, 10)
for i in eta:
beta = SGD(X_train, y_train, learning_rate=i, lambda_ = 0.01, method = method)
mse_ols_test, mse_ridge_test = compute_test_mse(X_val, y_val, beta = beta)
MSE_ols.append(mse_ols_test)
print('The learning rate {} performs best for the OLS' .format(eta[MSE_ols.index(min(MSE_ols))]))
print('Corresponding minimum MSE for OLS: {}'.format(min(MSE_ols)))
plt.semilogx(eta, MSE_ols)
plt.xlabel(r'Learning rate, $\eta$')
plt.ylabel('MSE OLS')
plt.title('Stochastic Gradient Descent')
plt.show()
if scheme == 'separate':
if method == 'ridge':
eta = np.logspace(-5, 0, 10)
for i in eta:
beta = SGD(X_train, y_train, learning_rate=i, lambda_ = 0.01, method = method)
mse_ols_test, mse_ridge_test = compute_test_mse(X_val, y_val, beta = beta)
MSE_ols.append(mse_ridge_test)
print('The learning rate {} performs best for Ridge' .format(eta[MSE_ols.index(min(MSE_ols))]))
print('Corresponding minimum MSE for Ridge: {}'.format(min(MSE_ols)))
plt.plot(eta, MSE_ols)
plt.xlabel(r'Learning rate, $\eta$')
plt.ylabel('MSE Ridge')
plt.title('Stochastic Gradient Descent')
plt.show()
# plot_MSE(method='ridge', scheme = 'joint')
# plot_MSE(method='ols', scheme = 'separate')
# plot_MSE(method='ridge', scheme = 'separate')
####Predict OLS, Ridge on test data after tuning learning rate and lambda on validation data
def plot_scatter(y_true, method = 'ols'):
if method == 'ols':
beta = SGD(X_train, y_train, learning_rate=0.07, lambda_ = 0, method = method, n_epochs=300)
if method == 'ridge':
beta = SGD(X_train, y_train, learning_rate=0.0001, lambda_ = 0, method = method, n_epochs=300)
y_pred = np.dot(X_test, beta)
mse_ols_test, mse_ridge_test = compute_test_mse(X_test, y_true, beta = beta)
print('Test MSE OLS: {}' .format(mse_ols_test))
print('Test MSE Ridge: {}' .format(mse_ridge_test))
a = plt.axes(aspect='equal')
    plt.scatter(y_true, y_true, color='blue', label="True values")
    plt.scatter(y_true, y_pred, color='red', label="Predicted values")
plt.xlabel('True y values')
plt.ylabel('Predicted y')
plt.title(f"Prediction - {method}")
plt.legend()
# if method == 'ols':
# plt.savefig(os.path.join(os.path.dirname(__file__), 'Plots', 'ols_reg_pred.png'), transparent=True, bbox_inches='tight')
# if method == 'ridge':
# plt.savefig(os.path.join(os.path.dirname(__file__), 'Plots', 'ridge_reg_pred.png'), transparent=True, bbox_inches='tight')
plt.show()
plot_scatter(y_test, method='ols')
plot_scatter(y_test, method='ridge')
| cc0-1.0 |
vigilv/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
jorge2703/scikit-learn | examples/preprocessing/plot_function_transformer.py | 160 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
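# A named module-level function is used for the column selection above; a
# lambda would also work for in-memory use, but a plain function has the
# advantage that the resulting pipeline can be pickled with the standard
# library pickle module.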
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
microsoft/onnxruntime | onnxruntime/test/python/transformers/test_parity_decoder_attention.py | 1 | 20955 | # --------------------------------------------------------------------------
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import math
import os
from typing import Dict, List, Optional, Tuple
import numpy
import torch
from torch import Tensor, nn
from torch.nn import functional as F
torch.manual_seed(0)
"""
This is an example of exporting BART decoder attention with huggingface v3.5.1
def my_bart_attention_forward(
self,
query,
key: Tensor,
key_padding_mask: Optional[Tensor],
layer_state: Optional[List[Tensor]],
attn_mask: Optional[Tensor] = None,
output_attentions: bool=False,
use_past=torch.tensor(False),
):
static_kv: bool = self.encoder_decoder_attention
q_weight = self.q_proj.weight.transpose(0,1)
q_weight = q_weight.reshape(self.embed_dim, self.embed_dim)
kv_weight = torch.stack((self.k_v_proj.k_proj.weight.transpose(0,1), self.k_v_proj.v_proj.weight.transpose(0,1)), dim=1)
kv_weight = kv_weight.reshape(self.embed_dim, 2 * self.embed_dim)
bias = torch.stack((self.q_proj.bias, self.k_v_proj.k_proj.bias, self.k_v_proj.v_proj.bias), dim=0)
bias = bias.reshape(3 * self.embed_dim)
self_p_k, self_p_v, enc_dec_p_k, enc_dec_p_v = layer_state
if static_kv:
key_cache, value_cache = enc_dec_p_k, enc_dec_p_v
else:
key_cache, value_cache = self_p_k, self_p_v
if not static_kv:
key_padding_mask = torch.tensor(False)
attn_output, new_key_cache, new_value_cache = torch.ops.onnxruntime.DecoderAttention(
query,
key,
q_weight,
kv_weight,
bias,
key_padding_mask,
key_cache,
value_cache,
torch.tensor(static_kv), #static_kv
use_past, #use_past
torch.tensor(True), #has_layer_state
torch.tensor(static_kv), #has_key_padding_mask
self.num_heads)
if not use_past:
if self.encoder_decoder_attention:
layer_state[2] = new_key_cache
layer_state[3] = new_value_cache
else:
layer_state[0] = new_key_cache
layer_state[1] = new_value_cache
else:
if not self.encoder_decoder_attention:
layer_state[0] = new_key_cache
layer_state[1] = new_value_cache
attn_output = self.out_proj(attn_output)
return attn_output, None, layer_state
"""
class Config:
batch_size = 0
sequence_length = 0
kv_sequence_length = 0
num_heads = 0
head_size = 0
embed_dim = 0
def __init__(self, b, s, s2, n, h):
self.batch_size = b
self.sequence_length = s
self.kv_sequence_length = s2
self.num_heads = n
self.head_size = h
self.embed_dim = self.num_heads * self.head_size
class AttentionProjection(nn.Module):
def __init__(self, num_heads, head_dim, embed_dim, bias=True):
super().__init__()
self.num_heads = num_heads
self.head_dim = head_dim
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def shape_state(self, state, batch_size):
return state.view(batch_size * self.num_heads, -1, self.head_dim)
def shape_proj(self, proj, batch_size):
return proj.view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
def forward(
self,
query,
key,
layer_state: Optional[List[Tensor]],
encoder_decoder_attention: bool,
use_past=torch.tensor(False),
):
bsz = torch._shape_as_tensor(query)[1]
if layer_state is None or not use_past:
if not encoder_decoder_attention:
k = self.k_proj(query)
v = self.v_proj(query)
k = self.shape_proj(k, bsz)
v = self.shape_proj(v, bsz)
else:
k = self.k_proj(key)
v = self.v_proj(key)
k = self.shape_proj(k, bsz)
v = self.shape_proj(v, bsz)
else:
self_p_k, self_p_v, enc_dec_p_k, enc_dec_p_v = layer_state
if not encoder_decoder_attention:
k = self.k_proj(query)
v = self.v_proj(query)
k = self.shape_proj(k, bsz)
v = self.shape_proj(v, bsz)
k = torch.cat([self.shape_state(self_p_k, bsz), k], dim=1)
v = torch.cat([self.shape_state(self_p_v, bsz), v], dim=1)
else:
k = self.shape_state(enc_dec_p_k, bsz)
v = self.shape_state(enc_dec_p_v, bsz)
return k, v
class AttentionForONNX(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
encoder_decoder_attention=False, # otherwise self_attention
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.encoder_decoder_attention = encoder_decoder_attention
self.k_v_proj = torch.jit.script(AttentionProjection(num_heads, self.head_dim, embed_dim, bias))
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
def _shape(self, tensor, seq_len, bsz):
return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
def forward(
self,
query,
key: Tensor,
key_padding_mask: Optional[Tensor] = None,
layer_state: Optional[List[Tensor]] = None,
attn_mask: Optional[Tensor] = None,
output_attentions: bool = False,
use_past=torch.tensor(False),
has_key_padding_mask: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time(SeqLen) x Batch x Channel"""
static_kv: bool = self.encoder_decoder_attention
tgt_len, bsz, embed_dim = query.size()
        # get here for encoder-decoder (cross) attention because of static_kv
k, v = self.k_v_proj(query, key, layer_state, self.encoder_decoder_attention, use_past)
q = self.q_proj(query) * self.scaling
q = self._shape(q, tgt_len, bsz)
# Update cache
if layer_state is not None:
cached_shape = (
bsz,
self.num_heads,
-1,
self.head_dim,
) # bsz must be first for reorder_cache
if static_kv:
# cross-attn
new_key_cache = k.view(*cached_shape)
new_value_cache = v.view(*cached_shape)
else:
# self-attn
new_key_cache = k.view(*cached_shape)
new_value_cache = v.view(*cached_shape)
src_len = k.size(1)
assert key_padding_mask is None or key_padding_mask.shape == (bsz, src_len)
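        # q: (bsz * num_heads, tgt_len, head_dim); k, v: (bsz * num_heads, src_len, head_dim),
        # so the bmm below yields attention weights of shape (bsz * num_heads, tgt_len, src_len)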
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
if has_key_padding_mask: # don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
attn_weights = attn_weights.masked_fill(reshaped, float("-inf"))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_probs = attn_weights
assert v is not None
attn_output = torch.bmm(attn_probs, v)
assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, new_key_cache, new_value_cache
def ORT_forward(
self,
query,
key: Tensor,
key_padding_mask: Optional[Tensor] = None,
layer_state: Optional[List[Tensor]] = None,
attn_mask: Optional[Tensor] = None,
output_attentions: bool = False,
use_past=torch.tensor(False),
has_key_padding_mask: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time(SeqLen) x Batch x Channel"""
# For readability
        static_kv = self.encoder_decoder_attention
        has_layer_state = layer_state is not None
        use_past_cache = bool(use_past)
q_weight = self.q_proj.weight.transpose(0, 1)
q_weight = q_weight.reshape(self.embed_dim, self.embed_dim)
kv_weight = torch.stack(
(
self.k_v_proj.k_proj.weight.transpose(0, 1),
self.k_v_proj.v_proj.weight.transpose(0, 1),
),
dim=1,
)
kv_weight = kv_weight.reshape(self.embed_dim, 2 * self.embed_dim)
bias = torch.stack(
(self.q_proj.bias, self.k_v_proj.k_proj.bias, self.k_v_proj.v_proj.bias),
dim=0,
)
bias = bias.reshape(3 * self.embed_dim)
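        # the q/k/v projection biases are packed into a single (3 * embed_dim,) tensor,
        # matching the "bias" initializer of the fused DecoderAttention graph built below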
onnx_model_str = create_decoder_attention_graph(
query,
key,
q_weight,
kv_weight,
bias,
self.num_heads,
static_kv,
use_past_cache,
has_layer_state,
has_key_padding_mask,
)
self_p_k, self_p_v, enc_dec_p_k, enc_dec_p_v = layer_state
if self.encoder_decoder_attention:
key_cache, value_cache = enc_dec_p_k, enc_dec_p_v
else:
key_cache, value_cache = self_p_k, self_p_v
ort_inputs = {
"query": numpy.ascontiguousarray(query.cpu().numpy()),
"key": numpy.ascontiguousarray(key.cpu().numpy()),
"key_padding_mask": numpy.ascontiguousarray(key_padding_mask.cpu().numpy()),
"key_cache": numpy.ascontiguousarray(key_cache.detach().cpu().numpy()),
"value_cache": numpy.ascontiguousarray(value_cache.detach().cpu().numpy()),
}
from onnxruntime import InferenceSession, SessionOptions
sess_options = SessionOptions()
ort_session = InferenceSession(onnx_model_str, sess_options, providers=["CUDAExecutionProvider"])
ort_output = ort_session.run(None, ort_inputs)
output, new_key_cache, new_value_cache = ort_output
output = torch.tensor(output)
attn_output = self.out_proj(output)
return attn_output, torch.tensor(new_key_cache), torch.tensor(new_value_cache)
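    # Note: ORT_forward runs only the fused attention (QKV projections, masking, softmax, context)
    # through the ONNX DecoderAttention op; the final out_proj is applied in PyTorch, so the eager
    # and ORT paths share identical output-projection weights for the parity comparison.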
def create_decoder_attention_graph(
query,
key,
q_weight,
kv_weight,
bias,
num_heads_,
static_kv,
use_past,
has_layer_state,
has_key_padding_mask,
):
from onnx import TensorProto, helper
S, B, NH = query.size()
S2 = key.size()[0]
N = num_heads_
H = int(NH / N)
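    # S/S2: query/key sequence lengths, B: batch size, NH: hidden size, N: number of heads, H: size per head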
nodes = [
helper.make_node(
"DecoderAttention",
[
"query",
"key",
"q_weight",
"kv_weight",
"bias",
"key_padding_mask",
"key_cache",
"value_cache",
"static_kv",
"use_past",
"has_layer_state",
"has_key_padding_mask",
],
["output", "new_key_cache", "new_value_cache"],
"DecoderAttention_0",
num_heads=num_heads_,
domain="com.microsoft",
),
]
initializers = [
helper.make_tensor("q_weight", TensorProto.FLOAT, [NH, NH], q_weight.flatten().tolist()),
helper.make_tensor("kv_weight", TensorProto.FLOAT, [NH, 2 * NH], kv_weight.flatten().tolist()),
helper.make_tensor("bias", TensorProto.FLOAT, [3 * NH], bias.flatten().tolist()),
helper.make_tensor("static_kv", TensorProto.BOOL, [1], [static_kv]),
helper.make_tensor("use_past", TensorProto.BOOL, [1], [use_past]),
helper.make_tensor("has_layer_state", TensorProto.BOOL, [1], [has_layer_state]),
helper.make_tensor("has_key_padding_mask", TensorProto.BOOL, [1], [has_key_padding_mask]),
]
graph = helper.make_graph(
nodes,
"DecoderAttention_Graph",
[
helper.make_tensor_value_info("query", TensorProto.FLOAT, [S, B, NH]),
helper.make_tensor_value_info("key", TensorProto.FLOAT, [S2, B, NH]),
helper.make_tensor_value_info("key_padding_mask", TensorProto.BOOL, [B, "mask_len"]),
helper.make_tensor_value_info("key_cache", TensorProto.FLOAT, [B, N, "cache_len", H]),
helper.make_tensor_value_info("value_cache", TensorProto.FLOAT, [B, N, "cache_len", H]),
],
[
helper.make_tensor_value_info("output", TensorProto.FLOAT, [S, B, NH]),
helper.make_tensor_value_info("new_key_cache", TensorProto.FLOAT, [B, N, "new_cache_len", H]),
helper.make_tensor_value_info("new_value_cache", TensorProto.FLOAT, [B, N, "new_cache_len", H]),
],
initializers,
)
model = helper.make_model(graph)
return model.SerializeToString()
def create_inputs(
config: Config,
has_layer_state: bool,
use_past: bool,
encoder_decoder_attention: bool,
):
query = torch.normal(
mean=0.0,
std=0.1,
size=(config.sequence_length, config.batch_size, config.embed_dim),
).to(torch.float32)
key = torch.normal(
mean=0.0,
std=0.1,
size=(config.kv_sequence_length, config.batch_size, config.embed_dim),
).to(torch.float32)
key_length = None
if not has_layer_state or not use_past:
if not encoder_decoder_attention:
key_length = config.sequence_length
else:
key_length = config.kv_sequence_length
else:
if not encoder_decoder_attention:
key_length = config.sequence_length + config.kv_sequence_length
else:
key_length = config.kv_sequence_length
key_padding_mask = torch.normal(mean=0.0, std=0.1, size=(config.batch_size, key_length)) > 0
    # The following line ensures that not all mask values are True
key_padding_mask[0][0] = False
cache = torch.normal(
mean=0.0,
std=0.1,
size=(
config.batch_size,
config.num_heads,
config.kv_sequence_length,
config.head_size,
),
).to(torch.float32)
layer_state = [cache, cache, cache, cache]
return query, key, key_padding_mask, layer_state, torch.tensor(use_past)
def parity_check(
config,
has_layer_state,
use_past,
static_kv,
has_key_padding_mask,
rtol=1e-4,
atol=1e-4,
):
query, key, key_padding_mask, layer_state, use_past = create_inputs(config, has_layer_state, use_past, static_kv)
attn = AttentionForONNX(config.embed_dim, config.num_heads, encoder_decoder_attention=static_kv)
attn_output, new_key_cache, new_value_cache = attn.forward(
query,
key,
key_padding_mask,
layer_state,
None,
False,
use_past,
has_key_padding_mask,
)
attn_output_ort, new_key_cache_ort, new_value_cache_ort = attn.ORT_forward(
query,
key,
key_padding_mask,
layer_state,
None,
False,
use_past,
has_key_padding_mask,
)
attn_output_ort_1, _, _ = attn.ORT_forward(
query,
key,
key_padding_mask,
layer_state,
None,
False,
use_past,
has_key_padding_mask,
)
print(
" B:",
config.batch_size,
" S:",
config.sequence_length,
" S*:",
config.kv_sequence_length,
" h:",
config.embed_dim,
" has_layer_state:",
has_layer_state,
" use_past:",
use_past,
" static_kv:",
static_kv,
" has_key_padding_mask:",
has_key_padding_mask,
"[attn_output, randomness, key, value] parity:",
numpy.allclose(
attn_output.detach().numpy(),
attn_output_ort.detach().numpy(),
rtol=rtol,
atol=atol,
equal_nan=True,
),
numpy.allclose(
attn_output_ort_1.detach().numpy(),
attn_output_ort.detach().numpy(),
rtol=rtol,
atol=atol,
equal_nan=True,
),
numpy.allclose(
new_key_cache.detach().numpy(),
new_key_cache_ort.detach().numpy(),
rtol=rtol,
atol=atol,
equal_nan=True,
),
numpy.allclose(
new_value_cache.detach().numpy(),
new_value_cache_ort.detach().numpy(),
rtol=rtol,
atol=atol,
equal_nan=True,
),
)
if __name__ == "__main__":
for b in [1, 32, 128]:
for s in [1, 2, 128]:
for s2 in [1, 64, 256]:
for n in [8]:
for h in [64]:
config = Config(b, s, s2, n, h)
parity_check(
config,
has_layer_state=True,
use_past=True,
static_kv=True,
has_key_padding_mask=False,
)
parity_check(
config,
has_layer_state=True,
use_past=True,
static_kv=False,
has_key_padding_mask=False,
)
parity_check(
config,
has_layer_state=True,
use_past=False,
static_kv=True,
has_key_padding_mask=False,
)
parity_check(
config,
has_layer_state=True,
use_past=False,
static_kv=False,
has_key_padding_mask=False,
)
parity_check(
config,
has_layer_state=True,
use_past=True,
static_kv=True,
has_key_padding_mask=True,
)
parity_check(
config,
has_layer_state=True,
use_past=True,
static_kv=False,
has_key_padding_mask=True,
)
parity_check(
config,
has_layer_state=True,
use_past=False,
static_kv=True,
has_key_padding_mask=True,
)
parity_check(
config,
has_layer_state=True,
use_past=False,
static_kv=False,
has_key_padding_mask=True,
)
| mit |
vigilv/scikit-learn | examples/preprocessing/plot_function_transformer.py | 160 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
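# A minimal sketch (not part of the original example): the same column drop can be written as a
# parameterised selector, assuming the `kw_args` argument of FunctionTransformer is available in
# this scikit-learn version. The function and argument names below are illustrative only.
#
#     def drop_leading_columns(X, n_dropped=1):
#         return X[:, n_dropped:]
#
#     selector = FunctionTransformer(drop_leading_columns, kw_args={'n_dropped': 1})
#     pipeline = make_pipeline(PCA(), selector)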
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
williamFalcon/pytorch-lightning | pytorch_lightning/trainer/connectors/checkpoint_connector.py | 1 | 20162 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from pathlib import Path
from typing import Any, Dict, Optional, Union
import torch
from torchmetrics import Metric
import pytorch_lightning as pl
from pytorch_lightning.loops.fit_loop import FitLoop
from pytorch_lightning.utilities import _OMEGACONF_AVAILABLE, rank_zero_deprecation, rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.cloud_io import atomic_save, get_filesystem
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.upgrade_checkpoint import KEYS_MAPPING as DEPRECATED_CHECKPOINT_KEYS
if _OMEGACONF_AVAILABLE:
from omegaconf import Container
class CheckpointConnector:
def __init__(self, trainer, resume_from_checkpoint: Optional[Union[str, Path]] = None):
self.trainer = trainer
self.resume_checkpoint_path = resume_from_checkpoint
self._loaded_checkpoint = {}
@property
def hpc_resume_path(self) -> Optional[str]:
dir_path_hpc = str(self.trainer.weights_save_path)
max_version = self.max_ckpt_version_in_folder(dir_path_hpc, "hpc_ckpt_")
if max_version is not None:
return os.path.join(dir_path_hpc, f"hpc_ckpt_{max_version}.ckpt")
def resume_start(self) -> None:
"""
Attempts to pre-load the checkpoint file to memory, with the source path determined in this priority:
1. from HPC weights if found
2. from `resume_from_checkpoint` file if provided
3. don't restore
Raises:
FileNotFoundError: If the path to the checkpoint file is provided but the file does not exist.
"""
self.resume_checkpoint_path = self.hpc_resume_path or self.resume_checkpoint_path
checkpoint_path = self.resume_checkpoint_path
if not checkpoint_path:
return
rank_zero_info(f"Restoring states from the checkpoint path at {checkpoint_path}")
self._loaded_checkpoint = self.trainer.training_type_plugin.load_checkpoint(checkpoint_path)
def resume_end(self) -> None:
"""Signal the connector that all states have resumed and memory for the checkpoint object can be released."""
if self.resume_checkpoint_path:
rank_zero_info(f"Restored all states from the checkpoint file at {self.resume_checkpoint_path}")
self.resume_checkpoint_path = None
self._loaded_checkpoint = {}
# clear cache after restore
torch.cuda.empty_cache()
# wait for all to catch up
self.trainer.training_type_plugin.barrier("CheckpointConnector.resume_end")
def restore(self, checkpoint_path: Optional[Union[Path, str]] = None) -> None:
"""
Attempt to restore everything at once from a 'PyTorch-Lightning checkpoint' file
through file-read and state-restore, in this priority:
1. from HPC weights if found
2. from `resume_from_checkpoint` file if provided
3. don't restore
All restored states are listed in return value description of `dump_checkpoint`.
Args:
checkpoint_path: Path to a PyTorch Lightning checkpoint file.
"""
self.resume_checkpoint_path = checkpoint_path
self.resume_start()
# restore module states
self.restore_datamodule()
self.restore_model()
# restore callback states
self.restore_callbacks()
# restore training state
self.restore_training_state()
self.resume_end()
def restore_datamodule(self) -> None:
"""Calls hooks on the datamodule to give it a chance to restore its state from the checkpoint."""
if not self._loaded_checkpoint:
return
datamodule = self.trainer.datamodule
if datamodule is not None:
datamodule.on_load_checkpoint(self._loaded_checkpoint)
def restore_model(self) -> None:
"""
        Restores a model's weights from a PyTorch Lightning checkpoint. Hooks are called first to give
the LightningModule a chance to modify the contents, then finally the model gets updated with
the loaded weights.
"""
if not self._loaded_checkpoint:
return
model = self.trainer.lightning_module
# hook: give user access to checkpoint if needed.
model.on_load_checkpoint(self._loaded_checkpoint)
# call hpc specific hook
if self.hpc_resume_path is not None:
model.on_hpc_load(self._loaded_checkpoint)
# restore model state_dict
self.trainer.training_type_plugin.load_model_state_dict(self._loaded_checkpoint)
# reset metrics states on non-rank 0 as all states have been accumulated on rank 0 via syncing on checkpointing.
if not self.trainer.is_global_zero:
for module in self.trainer.lightning_module.modules():
if isinstance(module, Metric):
module.reset()
def restore_model_weights(self, checkpoint_path: Optional[Union[str, Path]]) -> None:
"""Restore only the model weights."""
checkpoint = self._loaded_checkpoint
if checkpoint_path is not None:
checkpoint = self.trainer.training_type_plugin.load_checkpoint(checkpoint_path)
self.trainer.lightning_module.on_load_checkpoint(checkpoint)
self.trainer.training_type_plugin.load_model_state_dict(checkpoint)
def restore_training_state(self) -> None:
"""
Restore the trainer state from the pre-loaded checkpoint. This includes the precision settings, loop progress,
optimizer states and learning rate scheduler states.
"""
if not self._loaded_checkpoint:
return
# restore precision plugin (scaler etc.)
self.trainer.precision_plugin.on_load_checkpoint(self._loaded_checkpoint)
# restore loops and their progress
self.restore_loops()
self.restore_optimizers_and_schedulers()
def restore_callbacks(self) -> None:
"""Restores all callbacks from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
if any(key in self._loaded_checkpoint for key in DEPRECATED_CHECKPOINT_KEYS):
raise ValueError(
"The checkpoint you're attempting to load follows an"
" outdated schema. You can upgrade to the current schema by running"
" `python -m pytorch_lightning.utilities.upgrade_checkpoint --file model.ckpt`"
" where `model.ckpt` is your checkpoint file."
)
self.trainer.on_load_checkpoint(self._loaded_checkpoint)
def restore_loops(self) -> None:
"""
Restores the loop progress from the pre-loaded checkpoint.
Calls hooks on the loops to give it a chance to restore its state from the checkpoint.
"""
if not self._loaded_checkpoint:
return
self.trainer.fit_loop.global_step = self._loaded_checkpoint["global_step"]
self.trainer.fit_loop.current_epoch = self._loaded_checkpoint["epoch"]
        # crash if max_epochs is lower than the current epoch from the checkpoint
if (
FitLoop._is_max_limit_enabled(self.trainer.max_epochs)
and self.trainer.current_epoch > self.trainer.max_epochs
):
raise MisconfigurationException(
f"You restored a checkpoint with current_epoch={self.trainer.current_epoch},"
f" but you have set Trainer(max_epochs={self.trainer.max_epochs})."
)
# Division deals with global step stepping once per accumulated batch
# Inequality deals with different global step for odd vs even num_training_batches
self.trainer.accumulate_grad_batches = self.trainer.accumulation_scheduler.get_accumulate_grad_batches(
self.trainer.current_epoch
)
n_accum = 1 if self.trainer.accumulate_grad_batches is None else self.trainer.accumulate_grad_batches
expected_steps = self.trainer.num_training_batches / n_accum
if self.trainer.num_training_batches != 0 and self.trainer.global_step % expected_steps > 1:
rank_zero_warn(
"You're resuming from a checkpoint that ended mid-epoch."
" Training will start from the beginning of the next epoch."
" This can cause unreliable results if further training is done,"
" consider using an end of epoch checkpoint."
)
state_dict = self._loaded_checkpoint.get("loops")
if state_dict:
self.trainer.fit_loop.load_state_dict(state_dict["fit_loop"])
self.trainer.validate_loop.load_state_dict(state_dict["validate_loop"])
self.trainer.test_loop.load_state_dict(state_dict["test_loop"])
self.trainer.predict_loop.load_state_dict(state_dict["predict_loop"])
def restore_optimizers_and_schedulers(self) -> None:
"""Restores the optimizers and learning rate scheduler states from the pre-loaded checkpoint."""
if (
not self._loaded_checkpoint
or not self.trainer.training_type_plugin.lightning_restore_optimizer_and_schedulers
):
return
# validation
if "optimizer_states" not in self._loaded_checkpoint or "lr_schedulers" not in self._loaded_checkpoint:
raise KeyError(
"Trying to restore training state but checkpoint contains only the model."
" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."
)
self.restore_optimizers()
self.restore_lr_schedulers()
def restore_optimizers(self) -> None:
"""Restores the optimizer states from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
# restore the optimizers
self.trainer.training_type_plugin.load_optimizer_state_dict(self._loaded_checkpoint)
for optimizer in self.trainer.optimizers:
# move optimizer to GPU 1 weight at a time
# avoids OOM
if self.trainer.root_gpu is not None:
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda(self.trainer.root_gpu)
def restore_lr_schedulers(self) -> None:
"""Restores the learning rate scheduler states from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
# restore the lr schedulers
lr_schedulers = self._loaded_checkpoint["lr_schedulers"]
for scheduler, lrs_state in zip(self.trainer.lr_schedulers, lr_schedulers):
scheduler["scheduler"].load_state_dict(lrs_state)
# ----------------------------------
# PRIVATE OPS
# ----------------------------------
def hpc_save(self, folderpath: str, logger):
# make sure the checkpoint folder exists
folderpath = str(folderpath) # because the tests pass a path object
fs = get_filesystem(folderpath)
fs.makedirs(folderpath, exist_ok=True)
# save logger to make sure we get all the metrics
logger.save()
max_suffix = self.max_ckpt_version_in_folder(folderpath)
ckpt_number = (max_suffix if max_suffix is not None else 0) + 1
fs.makedirs(folderpath, exist_ok=True)
filepath = os.path.join(folderpath, f"hpc_ckpt_{ckpt_number}.ckpt")
# give model a chance to do something on hpc_save
model = self.trainer.lightning_module
checkpoint = self.dump_checkpoint()
model.on_hpc_save(checkpoint)
# do the actual save
# TODO: fix for anything with multiprocess DP, DDP, DDP2
try:
atomic_save(checkpoint, filepath)
except AttributeError as err:
if pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:
del checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
rank_zero_warn(f"warning, `hyper_parameters` dropped from checkpoint. An attribute is not picklable {err}")
atomic_save(checkpoint, filepath)
return filepath
def dump_checkpoint(self, weights_only: bool = False) -> dict:
"""Creating a model checkpoint dictionary object from various component states.
Args:
weights_only: saving model weights only
Return:
structured dictionary: {
'epoch': training epoch
'global_step': training global step
'pytorch-lightning_version': The version of PyTorch Lightning that produced this checkpoint
'callbacks': "callback specific state"[] # if not weights_only
'optimizer_states': "PT optim's state_dict"[] # if not weights_only
'lr_schedulers': "PT sched's state_dict"[] # if not weights_only
'native_amp_scaling_state': PT amp's state_dict # if not weights_only and use native amp
'amp_scaling_state': Apex's state_dict # if not weights_only and use apex amp
'state_dict': Model's state_dict (e.g. network weights)
CHECKPOINT_HYPER_PARAMS_NAME:
CHECKPOINT_HYPER_PARAMS_KEY:
CHECKPOINT_HYPER_PARAMS_TYPE:
something_cool_i_want_to_save: anything you define through model.on_save_checkpoint
LightningDataModule.__class__.__name__: pl DataModule's state
}
"""
# dump epoch/global_step/pytorch-lightning_version
current_epoch = self.trainer.current_epoch
global_step = self.trainer.global_step
has_reached_max_steps = self.trainer.max_steps and self.trainer.max_steps <= global_step
global_step += 1
if not has_reached_max_steps:
current_epoch += 1
model = self.trainer.lightning_module
checkpoint = {
"epoch": current_epoch,
"global_step": global_step,
"pytorch-lightning_version": pl.__version__,
"state_dict": self._get_lightning_module_state_dict(),
}
if _fault_tolerant_training():
checkpoint["loops"] = self._get_loops_state_dict()
if not weights_only:
# dump callbacks
checkpoint["callbacks"] = self.trainer.on_save_checkpoint(checkpoint)
optimizer_states = []
for i, optimizer in enumerate(self.trainer.optimizers):
# Rely on accelerator to dump optimizer state
optimizer_state = self.trainer.accelerator.optimizer_state(optimizer)
optimizer_states.append(optimizer_state)
checkpoint["optimizer_states"] = optimizer_states
# dump lr schedulers
lr_schedulers = []
for scheduler in self.trainer.lr_schedulers:
lr_schedulers.append(scheduler["scheduler"].state_dict())
checkpoint["lr_schedulers"] = lr_schedulers
self.trainer.precision_plugin.on_save_checkpoint(checkpoint)
# dump hyper-parameters
if model.hparams:
if hasattr(model, "_hparams_name"):
checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_NAME] = model._hparams_name
# dump arguments
if _OMEGACONF_AVAILABLE and isinstance(model.hparams, Container):
checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = model.hparams
checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_TYPE] = type(model.hparams)
else:
checkpoint[pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = dict(model.hparams)
# give the model a chance to dump a few things
model.on_save_checkpoint(checkpoint)
if self.trainer.datamodule is not None:
self.trainer.datamodule.on_save_checkpoint(checkpoint)
return checkpoint
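    # Illustrative sketch only (not part of the connector): a checkpoint produced by
    # dump_checkpoint() can be inspected directly with torch; "example.ckpt" and `model`
    # are placeholders for a checkpoint path and a LightningModule instance.
    #
    #     ckpt = torch.load("example.ckpt", map_location="cpu")
    #     print(ckpt["epoch"], ckpt["global_step"], ckpt["pytorch-lightning_version"])
    #     model.load_state_dict(ckpt["state_dict"])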
def hpc_load(self, checkpoint_path: str) -> None:
"""
Attempts to restore the full training and model state from a HPC checkpoint file.
        .. deprecated:: v1.4
Will be removed in v1.6. Use :meth:`restore` instead.
"""
rank_zero_deprecation(
"`CheckpointConnector.hpc_load()` was deprecated in v1.4 and will be removed in v1.6."
" Use `CheckpointConnector.restore()` instead."
)
self.restore(checkpoint_path)
def max_ckpt_version_in_folder(self, dir_path: Union[str, Path], name_key: str = "ckpt_") -> Optional[int]:
"""List up files in `dir_path` with `name_key`, then yield maximum suffix number.
Args:
dir_path: path of directory which may contain files whose name include `name_key`
name_key: file name prefix
Returns:
            None if no corresponding file exists, else the maximum suffix number
"""
# check directory existence
fs = get_filesystem(dir_path)
if not fs.exists(dir_path):
return None
# check corresponding file existence
files = [os.path.basename(f["name"]) for f in fs.listdir(dir_path)]
files = [x for x in files if name_key in x]
if len(files) == 0:
return None
# extract suffix number
ckpt_vs = []
for name in files:
name = name.split(name_key)[-1]
name = re.sub("[^0-9]", "", name)
ckpt_vs.append(int(name))
return max(ckpt_vs)
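    # e.g. a folder containing "hpc_ckpt_1.ckpt" and "hpc_ckpt_3.ckpt" yields 3 for name_key="hpc_ckpt_"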
def get_max_ckpt_path_from_folder(self, folder_path: Union[str, Path]) -> str:
"""Get path of maximum-epoch checkpoint in the folder."""
max_suffix = self.max_ckpt_version_in_folder(folder_path)
ckpt_number = max_suffix if max_suffix is not None else 0
return f"{folder_path}/hpc_ckpt_{ckpt_number}.ckpt"
def save_checkpoint(self, filepath, weights_only: bool = False) -> None:
"""Save model/training states as a checkpoint file through state-dump and file-write.
Args:
filepath: write-target file's path
weights_only: saving model weights only
"""
_checkpoint = self.dump_checkpoint(weights_only)
self.trainer.accelerator.save_checkpoint(_checkpoint, filepath)
def _get_lightning_module_state_dict(self) -> Dict[str, torch.Tensor]:
metrics = (
[m for m in self.trainer.lightning_module.modules() if isinstance(m, Metric)]
if _fault_tolerant_training()
else []
)
for metric in metrics:
metric.persistent(True)
metric.sync()
state_dict = self.trainer.accelerator.lightning_module_state_dict()
for metric in metrics:
# sync can be a no-op (e.g. on cpu) so `unsync` would raise a user error exception if we don't check
if metric._is_synced:
metric.unsync()
return state_dict
def _get_loops_state_dict(self) -> Dict[str, Any]:
return {
"fit_loop": self.trainer.fit_loop.state_dict(),
"validate_loop": self.trainer.validate_loop.state_dict(),
"test_loop": self.trainer.test_loop.state_dict(),
"predict_loop": self.trainer.predict_loop.state_dict(),
}
| apache-2.0 |
williamFalcon/pytorch-lightning | pytorch_lightning/trainer/optimizers.py | 1 | 10658 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import optim
from torch.optim.optimizer import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class TrainerOptimizersMixin(ABC):
_lightning_optimizers: Optional[List[LightningOptimizer]]
def init_optimizers(self, model: Optional["pl.LightningModule"]) -> Tuple[List, List, List]:
pl_module = self.lightning_module or model
self._lightning_optimizers = None
optim_conf = self.call_hook("configure_optimizers", pl_module=pl_module)
if optim_conf is None:
rank_zero_warn(
"`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
UserWarning,
)
optim_conf = _MockOptimizer()
optimizers, lr_schedulers, optimizer_frequencies = [], [], []
monitor = None
# single output, single optimizer
if isinstance(optim_conf, Optimizer):
optimizers = [optim_conf]
# two lists, optimizer + lr schedulers
elif (
isinstance(optim_conf, (list, tuple))
and len(optim_conf) == 2
and isinstance(optim_conf[0], list)
and all(isinstance(opt, Optimizer) for opt in optim_conf[0])
):
opt, sch = optim_conf
optimizers = opt
lr_schedulers = sch if isinstance(sch, list) else [sch]
# single dictionary
elif isinstance(optim_conf, dict):
optimizers = [optim_conf["optimizer"]]
monitor = optim_conf.get("monitor", None)
lr_schedulers = [optim_conf["lr_scheduler"]] if "lr_scheduler" in optim_conf else []
# multiple dictionaries
elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):
optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
scheduler_dict = (
lambda scheduler, opt_idx: dict(scheduler, opt_idx=opt_idx)
if isinstance(scheduler, dict)
else {"scheduler": scheduler, "opt_idx": opt_idx}
)
lr_schedulers = [
scheduler_dict(opt_dict["lr_scheduler"], opt_idx)
for opt_idx, opt_dict in enumerate(optim_conf)
if "lr_scheduler" in opt_dict
]
optimizer_frequencies = [
opt_dict["frequency"] for opt_dict in optim_conf if opt_dict.get("frequency", None) is not None
]
# assert that if frequencies are present, they are given for all optimizers
if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):
raise ValueError("A frequency must be given to each optimizer.")
# single list or tuple, multiple optimizer
elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizer) for opt in optim_conf):
optimizers = list(optim_conf)
# unknown configuration
else:
raise MisconfigurationException(
"Unknown configuration for model optimizers."
" Output from `model.configure_optimizers()` should either be:\n"
" * `torch.optim.Optimizer`\n"
" * [`torch.optim.Optimizer`]\n"
" * ([`torch.optim.Optimizer`], [`torch.optim.lr_scheduler`])\n"
' * {"optimizer": `torch.optim.Optimizer`, (optional) "lr_scheduler": `torch.optim.lr_scheduler`}\n'
' * A list of the previously described dict format, with an optional "frequency" key (int)'
)
is_manual_optimization = not pl_module.automatic_optimization
lr_schedulers = self.configure_schedulers(lr_schedulers, monitor, is_manual_optimization)
_validate_scheduler_optimizer(optimizers, lr_schedulers)
return optimizers, lr_schedulers, optimizer_frequencies
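    # Illustrative sketch only (not part of this mixin): one accepted `configure_optimizers`
    # return shape, matching the dictionary format validated above. The optimizer, scheduler and
    # monitored metric name below are placeholders.
    #
    #     def configure_optimizers(self):
    #         optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
    #         scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    #         return {"optimizer": optimizer,
    #                 "lr_scheduler": {"scheduler": scheduler, "monitor": "val_loss"}}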
def convert_to_lightning_optimizers(self):
def _convert_to_lightning_optimizer(trainer, optimizer):
if not isinstance(optimizer, LightningOptimizer):
optimizer = LightningOptimizer(optimizer)
optimizer._on_trainer_init(trainer)
return optimizer
self._lightning_optimizers = {
opt_idx: _convert_to_lightning_optimizer(self, opt) for opt_idx, opt in enumerate(self.optimizers)
}
def configure_schedulers(
self, schedulers: list, monitor: Optional[str], is_manual_optimization: bool
) -> List[Dict[str, Any]]:
"""Convert each scheduler into dict structure with relevant information"""
lr_schedulers = []
default_config = _get_default_scheduler_config()
for scheduler in schedulers:
if is_manual_optimization:
if isinstance(scheduler, dict):
invalid_keys = {"interval", "frequency", "reduce_on_plateau", "monitor", "strict"}
keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys]
if keys_to_warn:
rank_zero_warn(
f"The lr scheduler dict contains the key(s) {keys_to_warn}, but the keys will be ignored."
" You need to call `lr_scheduler.step()` manually in manual optimization.",
RuntimeWarning,
)
scheduler = {key: scheduler[key] for key in scheduler if key not in invalid_keys}
lr_schedulers.append({**default_config, **scheduler})
else:
lr_schedulers.append({**default_config, "scheduler": scheduler})
else:
if isinstance(scheduler, dict):
# check provided keys
extra_keys = [k for k in scheduler.keys() if k not in default_config.keys()]
if extra_keys:
rank_zero_warn(f"Found unsupported keys in the lr scheduler dict: {extra_keys}", RuntimeWarning)
if "scheduler" not in scheduler:
raise MisconfigurationException(
'The lr scheduler dict must have the key "scheduler" with its item being an lr scheduler'
)
if "interval" in scheduler and scheduler["interval"] not in ("step", "epoch"):
raise MisconfigurationException(
'The "interval" key in lr scheduler dict must be "step" or "epoch"'
f' but is "{scheduler["interval"]}"'
)
scheduler["reduce_on_plateau"] = isinstance(
scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau
)
if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
raise MisconfigurationException(
"The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
' For example: {"optimizer": optimizer, "lr_scheduler":'
' {"scheduler": scheduler, "monitor": "your_loss"}}'
)
lr_schedulers.append({**default_config, **scheduler})
elif isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
if monitor is None:
raise MisconfigurationException(
"`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
" scheduler is used. For example:"
' {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
)
lr_schedulers.append(
{**default_config, "scheduler": scheduler, "reduce_on_plateau": True, "monitor": monitor}
)
elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):
lr_schedulers.append({**default_config, "scheduler": scheduler})
else:
raise ValueError(f'The provided lr scheduler "{scheduler}" is invalid')
return lr_schedulers
class _MockOptimizer(Optimizer):
"""The `_MockOptimizer` will be used inplace of an optimizer in the event that `None`
is returned from `configure_optimizers`.
"""
def __init__(self):
super().__init__([torch.zeros(1)], {})
def add_param_group(self, param_group):
pass # Do Nothing
def load_state_dict(self, state_dict):
pass # Do Nothing
def state_dict(self):
return {} # Return Empty
def step(self, closure=None):
if closure is not None:
closure()
def zero_grad(self):
pass # Do Nothing
def __repr__(self):
return "No Optimizer"
def _validate_scheduler_optimizer(optimizers, lr_schedulers):
if any(sch["scheduler"].optimizer not in optimizers for sch in lr_schedulers):
raise MisconfigurationException(
"Some schedulers are attatched with an optimizer that wasn't returned from `configure_optimizers`."
)
def _get_default_scheduler_config() -> Dict[str, Any]:
return {
"scheduler": None,
"name": None, # no custom name
"interval": "epoch", # after epoch is over
"frequency": 1, # every epoch/batch
"reduce_on_plateau": False, # most often not ReduceLROnPlateau scheduler
"monitor": None, # value to monitor for ReduceLROnPlateau
"strict": True, # enforce that the monitor exists for ReduceLROnPlateau
"opt_idx": None, # necessary to store opt_idx when optimizer frequencies are specified
}
| apache-2.0 |
williamFalcon/pytorch-lightning | tests/helpers/test_datasets.py | 1 | 1238 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import cloudpickle
import pytest
from tests import _PATH_DATASETS
from tests.helpers.datasets import AverageDataset, MNIST, TrialMNIST
@pytest.mark.parametrize(
"dataset_cls,args",
[(MNIST, dict(root=_PATH_DATASETS)), (TrialMNIST, dict(root=_PATH_DATASETS)), (AverageDataset, {})],
)
def test_pickling_dataset_mnist(tmpdir, dataset_cls, args):
mnist = dataset_cls(**args)
mnist_pickled = pickle.dumps(mnist)
pickle.loads(mnist_pickled)
# assert vars(mnist) == vars(mnist_loaded)
mnist_pickled = cloudpickle.dumps(mnist)
cloudpickle.loads(mnist_pickled)
# assert vars(mnist) == vars(mnist_loaded)
| apache-2.0 |
CAB-LAB/cablab-cubeio | esdl/cube_provider.py | 2 | 22425 | import glob
import os.path
import time
from abc import ABCMeta, abstractmethod, abstractproperty
from datetime import datetime
import gridtools.resampling as gtr
import netCDF4
import numpy as np
from typing import Tuple, Dict, Any
from .cube_config import CubeConfig
from .util import Config, NetCDFDatasetCache, aggregate_images, temporal_weight
def _get_us_method(var_attributes):
return gtr.__dict__['US_' + var_attributes.get('us_method', 'NEAREST')]
def _get_ds_method(var_attributes):
return gtr.__dict__['DS_' + var_attributes.get('ds_method', 'MEAN')]
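# These helpers map the optional 'us_method'/'ds_method' variable attributes (e.g. 'MEAN', 'NEAREST')
# to the gridtools US_*/DS_* resampling constants, defaulting to nearest-neighbour upsampling and
# mean downsampling.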
class CubeSourceProvider(metaclass=ABCMeta):
"""
An abstract interface for objects representing data source providers for the data cube.
Cube source providers are passed to the :py:meth:`Cube.update` method.
:param cube_config: Specifies the fixed layout and conventions used for the cube.
:param name: The provider's registration name.
"""
def __init__(self, cube_config: CubeConfig, name: str):
if not cube_config:
raise ValueError('cube_config expected')
if not name:
raise ValueError('name expected')
self._name = name
self._cube_config = cube_config
@property
def name(self) -> str:
""" The provider's registration name. """
return self._name
@property
def cube_config(self) -> CubeConfig:
""" The data cube's configuration. """
return self._cube_config
@abstractmethod
def prepare(self):
"""
Called by a Cube instance's :py:meth:`update` method before any other provider methods are called.
Provider instances should prepare themselves w.r.t. the given cube configuration *cube_config*.
"""
pass
@abstractproperty
def temporal_coverage(self) -> Tuple[datetime, datetime]:
"""
Return the start and end time of the available source data.
:return: A tuple of datetime.datetime instances (start_time, end_time).
"""
return None
@abstractproperty
def spatial_coverage(self) -> Tuple[int, int, int, int]:
"""
Return the spatial coverage as a rectangle represented by a tuple of integers (x, y, width, height) in the
cube's image coordinates.
:return: A tuple of integers (x, y, width, height) in the cube's image coordinates.
"""
return None
@abstractproperty
def variable_descriptors(self) -> Dict[str, Dict[str, Any]]:
"""
Return a dictionary which maps target(!) variable names to a dictionary of target attribute values.
The following attributes have a special meaning and shall or should be provided:
* ``data_type``: A numpy data type. Mandatory attribute.
* ``fill_value``: The value used for indicating missing grid cells. Mandatory attribute.
* ``source_name``: the name of the variable in the source (files).
Optional, defaults to the target variable's name.
* ``scale_factor``: See CF conventions. Optional, defaults to one (``1.0``).
* ``add_offset``: See CF conventions. Optional, defaults to zero (``0.0``).
* ``units``: See CF conventions. Optional.
* ``standard_name``: See CF conventions. Optional.
* ``long_name``: See CF conventions. Optional.
:return: dictionary of variable names to attribute dictionaries
"""
return None
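        # Illustrative sketch only (the variable name, units and values are made-up placeholders):
        #     {'precipitation': {'data_type': numpy.float32,
        #                        'fill_value': -9999.0,
        #                        'source_name': 'precip',
        #                        'units': 'mm/day',
        #                        'ds_method': 'MEAN'}}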
@abstractmethod
def compute_variable_images(self, period_start: datetime, period_end: datetime) -> Dict[str, np.ndarray]:
"""
Return variable name to variable image mapping of all provided variables.
Each image is a numpy array with the shape (height, width) derived from the :py:meth:`get_spatial_coverage`
method.
The images must be computed (by aggregation or interpolation or copy) from the source data in the given
time period *period_start* <= source_data_time < *period_end* and taking into account other data cube
configuration settings.
The method is called by a Cube instance's :py:meth:`update` method for all possible time periods in the time
range given by the :py:meth:`get_temporal_coverage` method. The times given are adjusted w.r.t. the cube's
reference time and temporal resolution.
:param period_start: The period start time as a datetime.datetime instance
:param period_end: The period end time as a datetime.datetime instance
:return: A dictionary variable name --> image. Each image must be numpy array-like object of shape
(grid_height, grid_width) as given by the :py:class:`CubeConfig`.
Return ``None`` if no such variables exists for the given target time range.
"""
return None
@abstractmethod
def close(self):
"""
Called by the cube's :py:meth:`update` method after all images have been retrieved and the provider is no
longer used.
"""
pass
class BaseCubeSourceProvider(CubeSourceProvider, metaclass=ABCMeta):
"""
A partial implementation of the :py:class:`CubeSourceProvider` interface that computes its output image data
using weighted averages. The weights are computed according to the overlap of source time ranges and a
requested target time range.
:param cube_config: Specifies the fixed layout and conventions used for the cube.
:param name: The provider's registration name.
"""
def __init__(self, cube_config: CubeConfig, name: str):
super(BaseCubeSourceProvider, self).__init__(cube_config, name)
self._source_time_ranges = None
def prepare(self):
"""
Calls **compute_source_time_ranges** and assigns the return value to the field **source_time_ranges**.
"""
self._source_time_ranges = self.compute_source_time_ranges()
@property
def source_time_ranges(self):
return self._source_time_ranges
@property
def spatial_coverage(self):
"""
Return the spatial grid coverage given in the Cube's configuration (default).
:return: A tuple of integers (x, y, width, height) in the cube's image coordinates.
"""
return 0, 0, self.cube_config.grid_width, self.cube_config.grid_height
@property
def temporal_coverage(self) -> (datetime, datetime):
"""
Return the temporal coverage derived from the value returned by **compute_source_time_ranges()**.
"""
if len(self._source_time_ranges) > 0:
return self._source_time_ranges[0][0], self._source_time_ranges[-1][1]
else:
raise KeyError("No datasets are available for the specified temporal coverage. "
"Consider changing the start_time or end_time in cube.config")
@abstractmethod
def compute_source_time_ranges(self) -> list or None:
"""
Return a sorted list of all time ranges of every source file.
Items in this list must be 4-element tuples of the form
(time_start: datetime, time_stop: datetime, file: str, time_index: int).
The method is called from the **prepare()** method in order to pre-compute all available time ranges.
This method must be implemented by derived classes.
"""
return None
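        # Illustrative sketch only (paths and dates are placeholders):
        #     [(datetime(2001, 1, 1), datetime(2001, 1, 9), '/path/to/source_2001_01.nc', 0), ...]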
def compute_variable_images(self, period_start: datetime, period_end: datetime):
"""
For each source time range that has an overlap with the given target time range compute a weight
according to the overlapping range. Pass these weights as source index to weight mapping
to **compute_variable_images_from_sources(index_to_weight)** and return the result.
:return: A dictionary variable name --> image. Each image must be numpy array-like object of shape
(grid_height, grid_width) as given by the **CubeConfig**.
Return ``None`` if no such variables exists for the given target time range.
"""
source_time_ranges = self._source_time_ranges
if len(source_time_ranges) == 0:
return None
index_to_weight = dict()
for i in range(len(source_time_ranges)):
source_start_time, source_end_time = source_time_ranges[i][0:2]
weight = temporal_weight(source_start_time, source_end_time,
period_start, period_end)
if weight > 0.0:
index_to_weight[i] = weight
if not index_to_weight:
return None
self.log('computing images for time range %s to %s from %d source(s)...' % (period_start, period_end,
len(index_to_weight)))
t1 = time.time()
result = self.compute_variable_images_from_sources(index_to_weight)
t2 = time.time()
self.log('images computed for %s, took %f seconds' % (str(list(result.keys())), t2 - t1))
return result
@abstractmethod
def compute_variable_images_from_sources(self, index_to_weight: Dict[int, float]):
"""
Compute the target images for all variables from the sources with the given time indices to weights mapping.
The time indices in *index_to_weight* are guaranteed to point into the time ranges list returned by
py:meth:`compute_source_time_ranges`.
The weight values in *index_to_weight* are float values computed from the overlap of source time ranges with
a requested target time range.
:param index_to_weight: A dictionary mapping time indexes --> weight values.
:return: A dictionary variable name --> image. Each image must be numpy array-like object of shape
(grid_height, grid_width) as specified by the cube's layout configuration **CubeConfig**.
Return ``None`` if no such variables exists for the given target time range.
"""
pass
def log(self, message: str):
"""
Log a *message*.
:param message: The message to be logged.
"""
print('%s: %s' % (self.name, message))
def _get_file_and_time_index(self, var_index: int):
"""
Return the file path and time dimension index as tuple.
To be used by derived classes only.
"""
return self._source_time_ranges[var_index][2:4]
class BaseStaticCubeSourceProvider(CubeSourceProvider, metaclass=ABCMeta):
"""
A CubeSourceProvider that
* uses a NetCDF source dataset read from a given *dir_path*;
* performs only spatial resampling.
:param cube_config: Specifies the fixed layout and conventions used for the cube.
:param name: The provider's registration name.
"""
def __init__(self, cube_config: CubeConfig, name: str):
super(BaseStaticCubeSourceProvider, self).__init__(cube_config, name)
self._variable_images_computed = False
def prepare(self):
"""Clear the flag that indicates that the static sources have been processed."""
self._variable_images_computed = False
@property
def spatial_coverage(self):
"""
Return the spatial grid coverage given in the Cube's configuration (default).
:return: A tuple of integers (x, y, width, height) in the cube's image coordinates.
"""
return 0, 0, self.cube_config.grid_width, self.cube_config.grid_height
@property
def temporal_coverage(self) -> (datetime, datetime):
"""
Return the temporal coverage derived from the value returned by **compute_source_time_ranges()**.
"""
return self.cube_config.start_time, self.cube_config.end_time
def compute_variable_images(self, period_start: datetime, period_end: datetime) -> Dict[str, np.ndarray]:
if self._variable_images_computed:
return None
dataset = self.open_dataset()
try:
var_descriptors = self.variable_descriptors
target_var_images = dict()
for var_name, var_attributes in var_descriptors.items():
source_name = var_attributes.get('source_name', var_name)
var_image = self.get_dataset_image(dataset, source_name)
var_image = self.transform_source_image(var_image)
var_image = gtr.resample_2d(var_image,
self.cube_config.grid_width,
self.cube_config.grid_height,
ds_method=_get_ds_method(var_attributes),
us_method=_get_us_method(var_attributes),
fill_value=var_attributes.get('fill_value', np.nan))
if var_image.shape[1] / var_image.shape[0] != 2.0:
print("Warning: wrong size ratio of image in '%s'. Expected 2, got %f" % (
self.get_dataset_file_path(dataset),
var_image.shape[1] / var_image.shape[0]))
target_var_images[var_name] = var_image
finally:
self.close_dataset(dataset)
self._variable_images_computed = True
return target_var_images
@abstractmethod
def open_dataset(self) -> object:
"""
Open the single dataset and return its representation.
:return: a dataset object
"""
@abstractmethod
def close_dataset(self, dataset: object):
"""
Close *dataset*.
:param dataset: the dataset returned by :py:meth:`open_dataset`
"""
@abstractmethod
def get_dataset_file_path(self, dataset: object) -> str:
"""
Get the file path for *dataset*.
:param dataset: the dataset returned by :py:meth:`open_dataset`
:return: a file path
"""
@abstractmethod
def get_dataset_image(self, dataset: object, name: str):
"""
Get a 2D-image for *dataset* for the given variable *name*.
:param dataset: the dataset returned by :py:meth:`open_dataset`.
:param name: the variable name.
:return: a 2D-image
"""
def transform_source_image(self, source_image):
"""
        Does nothing but return the source image. Override to implement transformations if needed.
:param source_image: 2D image
:return: *source_image*
"""
return source_image
def close(self):
"""Does nothing. Override to implement any required close operation."""
pass
class NetCDFStaticCubeSourceProvider(BaseStaticCubeSourceProvider, metaclass=ABCMeta):
"""
A CubeSourceProvider that
* Uses a NetCDF source dataset read from a given **dir_path**
* Performs only spatial resampling
:param cube_config: Specifies the fixed layout and conventions used for the cube.
:param name: The provider's registration name.
:param dir_path: Source directory to read the single file from. If relative path,
it will be resolved against the **cube_sources_root** path of the
global ESDL configuration (**esdl.util.Config.instance()**).
"""
def __init__(self, cube_config: CubeConfig, name: str, dir_path: str):
super(NetCDFStaticCubeSourceProvider, self).__init__(cube_config, name)
if dir_path is None:
raise ValueError('dir_path expected')
if not os.path.isabs(dir_path):
self._dir_path = Config.instance().get_cube_source_path(dir_path)
else:
self._dir_path = dir_path
self.cube_config.static_data = True
@property
def dir_path(self):
return self._dir_path
def open_dataset(self):
file_paths = glob.glob(os.path.join(self._dir_path, '*.nc'))
if not file_paths:
raise ValueError('No *.nc file found in %s' % self._dir_path)
file = file_paths[0]
return netCDF4.Dataset(file)
def close_dataset(self, dataset):
dataset.close()
def get_dataset_file_path(self, dataset):
return dataset.filepath
def get_dataset_image(self, dataset, var_name):
variable = dataset.variables[var_name]
if len(variable.shape) == 3:
var_image = variable[0, :, :]
elif len(variable.shape) == 2:
var_image = variable[:, :]
else:
raise ValueError("unexpected shape for variable '%s'" % var_name)
return var_image
class NetCDFCubeSourceProvider(BaseCubeSourceProvider, metaclass=ABCMeta):
"""
A BaseCubeSourceProvider that
* Uses NetCDF source datasets read from a given **dir_path**
* Performs temporal aggregation first and then spatial resampling
:param cube_config: Specifies the fixed layout and conventions used for the cube.
:param name: The provider's registration name.
:param dir_path: Source directory to read the files from. If relative path,
it will be resolved against the **cube_sources_root** path of the
global ESDL configuration (**esdl.util.Config.instance()**).
:param resampling_order: The order in which resampling is performed. One of 'time_first', 'space_first'.
"""
def __init__(self, cube_config: CubeConfig, name: str, dir_path: str, resampling_order: str):
super(NetCDFCubeSourceProvider, self).__init__(cube_config, name)
if dir_path is None:
raise ValueError('dir_path expected')
valid_resampling_order = ('time_first', 'space_first')
if resampling_order is None:
resampling_order = valid_resampling_order[0]
if resampling_order not in valid_resampling_order:
raise ValueError('resampling_order must be one of %s' % str(valid_resampling_order))
if not os.path.isabs(dir_path):
self._dir_path = Config.instance().get_cube_source_path(dir_path)
else:
self._dir_path = dir_path
self._resampling_order = resampling_order
self._dataset_cache = NetCDFDatasetCache(name)
self._old_indices = None
@property
def dir_path(self):
return self._dir_path
@property
def dataset_cache(self):
return self._dataset_cache
def compute_variable_images_from_sources(self, index_to_weight):
new_indices = self.close_unused_open_files(index_to_weight)
var_descriptors = self.variable_descriptors
target_var_images = dict()
for var_name, var_attributes in var_descriptors.items():
source_var_images = [None] * len(new_indices)
source_weights = [None] * len(new_indices)
var_image_index = 0
for i in new_indices:
file, time_index = self._get_file_and_time_index(i)
source_name = var_attributes.get('source_name', var_name)
variable = self._dataset_cache.get_dataset(file).variables[source_name]
if len(variable.shape) == 3:
var_image = variable[time_index, :, :]
elif len(variable.shape) == 2:
var_image = variable[:, :]
else:
raise ValueError("unexpected shape for variable '%s'" % var_name)
var_image = self.transform_source_image(var_image)
if self._resampling_order == 'space_first':
var_image = gtr.resample_2d(var_image,
self.cube_config.grid_width,
self.cube_config.grid_height,
ds_method=_get_ds_method(var_attributes),
us_method=_get_us_method(var_attributes),
fill_value=var_attributes.get('fill_value', np.nan))
if var_image.shape[1] / var_image.shape[0] != 2.0:
print("Warning: wrong size ratio of image in '%s'. Expected 2, got %f" % (
file, var_image.shape[1] / var_image.shape[0]))
source_var_images[var_image_index] = var_image
source_weights[var_image_index] = index_to_weight[i]
var_image_index += 1
if len(new_indices) > 1:
# Temporal aggregation
var_image = aggregate_images(source_var_images, weights=source_weights)
else:
# Temporal aggregation not required
var_image = source_var_images[0]
# Spatial resampling
if self._resampling_order == 'time_first':
var_image = gtr.resample_2d(var_image,
self.cube_config.grid_width,
self.cube_config.grid_height,
ds_method=_get_ds_method(var_attributes),
us_method=_get_us_method(var_attributes),
fill_value=var_attributes.get('fill_value', np.nan))
target_var_images[var_name] = var_image
return target_var_images
def transform_source_image(self, source_image):
"""
Returns the source image. Override to implement transformations if needed.
:param source_image: 2D image
:return: source_image
"""
return source_image
def close_unused_open_files(self, index_to_weight):
"""
        Close all datasets that won't be used anymore w.r.t. the given **index_to_weight** dictionary passed to the
**compute_variable_images_from_sources()** method.
:param index_to_weight: A dictionary mapping time indexes --> weight values.
:return: set of time indexes into currently active files w.r.t. the given **index_to_weight** parameter.
"""
new_indices = set(index_to_weight.keys())
if self._old_indices:
unused_indices = self._old_indices - new_indices
for i in unused_indices:
file, _ = self._get_file_and_time_index(i)
self._dataset_cache.close_dataset(file)
self._old_indices = new_indices
return new_indices
def close(self):
self._dataset_cache.close_all_datasets()
| gpl-3.0 |
ipashchenko/ml4vs | ml4vs/knn.py | 1 | 5696 | # -*- coding: utf-8 -*-
import os
import hyperopt
import pprint
import numpy as np
from sklearn.cross_validation import StratifiedKFold
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from data_load import load_data, load_data_tgt
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
# names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
# 'Npts', 'CSSD']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X, y, df, features_names, delta = load_data([file_0, file_1], names,
names_to_delete)
kfold = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=1)
def objective(space):
pprint.pprint(space)
clf = KNeighborsClassifier(n_neighbors=space['n_neighbors'],
weights='distance', n_jobs=2)
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
# auc = np.mean(cross_val_score(pipeline, X, y, cv=kfold, scoring='roc_auc',
# verbose=1, n_jobs=4))
y_preds = cross_val_predict(pipeline, X, y, cv=kfold, n_jobs=4)
CMs = list()
for train_idx, test_idx in kfold:
CMs.append(confusion_matrix(y[test_idx], y_preds[test_idx]))
CM = np.sum(CMs, axis=0)
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
print "TP = {}".format(TP)
print "FP = {}".format(FP)
print "FN = {}".format(FN)
f1 = 2. * TP / (2. * TP + FP + FN)
print "F1 : ", f1
return{'loss': 1-f1, 'status': STATUS_OK}
space = {'n_neighbors': hp.qloguniform("n_neighbors", 0, 6.55, 1)}
trials = Trials()
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=100,
trials=trials)
print hyperopt.space_eval(space, best)
best_pars = hyperopt.space_eval(space, best)
# # Load blind test data
# file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
# file_tgt = os.path.join(data_dir, file_tgt)
# X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names, names_to_delete,
# delta)
#
# # Fit model on all training data
# clf = KNeighborsClassifier(n_neighbors=best_pars['n_neighbors'],
# weights=best_pars['weights'], n_jobs=2)
# estimators = list()
# estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
# axis=0, verbose=2)))
# estimators.append(('scaler', StandardScaler()))
# estimators.append(('clf', clf))
# pipeline = Pipeline(estimators)
# pipeline.fit(X, y)
#
# # Predict classes on new data
# y_probs = pipeline.predict_proba(X_tgt)[:, 1]
# idx = y_probs > 0.5
# idx_ = y_probs < 0.5
# knn_no = list(df_orig['star_ID'][idx_])
# print("Found {} variables".format(np.count_nonzero(idx)))
#
# with open('knn_results.txt', 'w') as fo:
# for line in list(df_orig['star_ID'][idx]):
# fo.write(line + '\n')
#
# # Check F1
# with open('clean_list_of_new_variables.txt', 'r') as fo:
# news = fo.readlines()
# news = [line.strip().split(' ')[1] for line in news]
# news = set(news)
#
# with open('knn_results.txt', 'r') as fo:
# knn = fo.readlines()
# knn = [line.strip().split('_')[4].split('.')[0] for line in knn]
# knn = set(knn)
#
# print "Among new vars found {}".format(len(news.intersection(knn)))
#
# with open('candidates_50perc_threshold.txt', 'r') as fo:
# c50 = fo.readlines()
# c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
#
# with open('variables_not_in_catalogs.txt', 'r') as fo:
# not_in_cat = fo.readlines()
# nic = [line.strip().split(' ')[1] for line in not_in_cat]
#
# # Catalogue variables
# cat_vars = set(c50).difference(set(nic))
# # Non-catalogue variable
# noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST'
# not in line])
#
# # All variables
# all_vars = news.union(cat_vars).union(noncat_vars)
# knn_no = set([line.strip().split('_')[4].split('.')[0] for line in knn_no])
#
# found_bad = '181193' in knn
# print "Found known variable : ", found_bad
#
# FN = len(knn_no.intersection(all_vars))
# TP = len(all_vars.intersection(knn))
# TN = len(knn_no) - FN
# FP = len(knn) - TP
# recall = float(TP) / (TP + FN)
# precision = float(TP) / (TP + FP)
# F1 = 2 * precision * recall / (precision + recall)
# print "precision: {}".format(precision)
# print "recall: {}".format(recall)
# print "F1: {}".format(F1)
# print "TN={}, FP={}".format(TN, FP)
# print "FN={}, TP={}".format(FN, TP)
| mit |
williamFalcon/pytorch-lightning | tests/models/test_cpu.py | 1 | 9334 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
def test_cpu_slurm_save_load(tmpdir):
"""Verify model save/load/checkpoint on CPU."""
model = BoringModel()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
version = logger.version
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
limit_train_batches=0.2,
limit_val_batches=0.2,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
real_global_step = trainer.global_step
# training complete
assert trainer.state.finished, "cpu model failed to complete"
# predict with trained model before saving
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
model.eval()
pred_before_saving = model(batch)
# test HPC saving
# simulate snapshot on slurm
saved_filepath = trainer.checkpoint_connector.hpc_save(trainer.weights_save_path, logger)
assert os.path.exists(saved_filepath)
# new logger file to get meta
logger = tutils.get_default_logger(tmpdir, version=version)
model = BoringModel()
class _StartCallback(Callback):
# set the epoch start hook so we can predict before the model does the full training
def on_train_epoch_start(self, trainer, model):
assert trainer.global_step == real_global_step and trainer.global_step > 0
# predict with loaded model to make sure answers are the same
mode = model.training
model.eval()
new_pred = model(batch)
assert torch.eq(pred_before_saving, new_pred).all()
model.train(mode)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[_StartCallback(), ModelCheckpoint(dirpath=tmpdir)],
)
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
def test_early_stopping_cpu_model(tmpdir):
class ModelTrainVal(BoringModel):
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args, **kwargs)
self.log("val_loss", output["x"])
return output
tutils.reset_seed()
stopping = EarlyStopping(monitor="val_loss", min_delta=0.1)
trainer_options = dict(
callbacks=[stopping],
default_root_dir=tmpdir,
gradient_clip_val=1.0,
overfit_batches=0.20,
track_grad_norm=2,
progress_bar_refresh_rate=0,
accumulate_grad_batches=2,
limit_train_batches=0.1,
limit_val_batches=0.1,
)
model = ModelTrainVal()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
# test freeze on cpu
model.freeze()
model.unfreeze()
@RunIf(skip_windows=True)
def test_multi_cpu_model_ddp(tmpdir):
"""Make sure DDP works."""
tutils.set_random_master_port()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=None,
num_processes=2,
accelerator="ddp_cpu",
)
dm = ClassifDataModule()
model = ClassificationModel()
tpipes.run_model_test(trainer_options, model, data=dm, on_gpu=False)
def test_lbfgs_cpu_model(tmpdir):
"""Test each of the trainer options. Testing LBFGS optimizer"""
class ModelSpecifiedOptimizer(BoringModel):
def __init__(self, optimizer_name, learning_rate):
super().__init__()
self.optimizer_name = optimizer_name
self.learning_rate = learning_rate
self.save_hyperparameters()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
progress_bar_refresh_rate=0,
weights_summary="top",
limit_train_batches=0.2,
limit_val_batches=0.2,
)
model = ModelSpecifiedOptimizer(optimizer_name="LBFGS", learning_rate=0.004)
tpipes.run_model_test_without_loggers(trainer_options, model, min_acc=0.01)
def test_default_logger_callbacks_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
gradient_clip_val=1.0,
overfit_batches=0.20,
progress_bar_refresh_rate=0,
limit_train_batches=0.01,
limit_val_batches=0.01,
)
model = BoringModel()
tpipes.run_model_test_without_loggers(trainer_options, model, min_acc=0.01)
# test freeze on cpu
model.freeze()
model.unfreeze()
def test_running_test_after_fitting(tmpdir):
"""Verify test() on fitted model."""
class ModelTrainValTest(BoringModel):
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args, **kwargs)
self.log("val_loss", output["x"])
return output
def test_step(self, *args, **kwargs):
output = super().test_step(*args, **kwargs)
self.log("test_loss", output["y"])
return output
model = ModelTrainValTest()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.2,
limit_test_batches=0.2,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
trainer.test()
# test we have good test accuracy
tutils.assert_ok_model_acc(trainer, key="test_loss", thr=0.5)
def test_running_test_no_val(tmpdir):
"""Verify `test()` works on a model with no `val_dataloader`. It performs
train and test only"""
class ModelTrainTest(BoringModel):
def val_dataloader(self):
pass
def test_step(self, *args, **kwargs):
output = super().test_step(*args, **kwargs)
self.log("test_loss", output["y"])
return output
model = ModelTrainTest()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
limit_test_batches=0.2,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
trainer.test()
# test we have good test accuracy
tutils.assert_ok_model_acc(trainer, key="test_loss")
def test_simple_cpu(tmpdir):
"""Verify continue training session on CPU."""
model = BoringModel()
# fit model
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=20)
trainer.fit(model)
# training complete
assert trainer.state.finished, "amp + ddp model failed to complete"
def test_cpu_model(tmpdir):
"""Make sure model trains on CPU."""
trainer_options = dict(
default_root_dir=tmpdir, progress_bar_refresh_rate=0, max_epochs=1, limit_train_batches=4, limit_val_batches=4
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
def test_all_features_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
gradient_clip_val=1.0,
overfit_batches=0.20,
track_grad_norm=2,
progress_bar_refresh_rate=0,
accumulate_grad_batches=2,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.4,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, min_acc=0.01)
| apache-2.0 |
williamFalcon/pytorch-lightning | pytorch_lightning/overrides/distributed.py | 1 | 5942 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Any, Iterator, List, Optional
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import BatchSampler, DistributedSampler, Sampler
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
class LightningDistributedModule(_LightningModuleWrapperBase):
def __init__(self, pl_module: "pl.LightningModule") -> None:
"""
Wraps the user's LightningModule and redirects the forward call to the appropriate
method, either ``training_step``, ``validation_step``, ``test_step`` or ``predict``.
This class is used in combination with :class:`~torch.nn.parallel.DistributedDataParallel` as
shown in the example.
Example:
ddp_model = torch.nn.parallel.DistributedDataParallel(
module=LightningDistributedModule(lightning_module),
device_ids=[local_rank],
...
)
Args:
pl_module: the model to wrap
"""
super().__init__(pl_module)
def _find_tensors(obj): # pragma: no-cover
r"""
Recursively find all tensors contained in the specified object.
"""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
# In manual_optimization, we need to call reducer prepare_for_backward.
# Note: Keep track of Pytorch DDP and update if there is a change
# https://github.com/pytorch/pytorch/blob/v1.7.1/torch/nn/parallel/distributed.py#L626-L638
def prepare_for_backward(model: DistributedDataParallel, output: Any):
# `prepare_for_backward` is `DistributedDataParallel` specific.
if not isinstance(model, DistributedDataParallel):
return
if torch.is_grad_enabled() and model.require_backward_grad_sync:
model.require_forward_param_sync = True
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
# unused parameters. Only if `find_unused_parameters` is set.
if model.find_unused_parameters:
model.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
model.reducer.prepare_for_backward([])
else:
model.require_forward_param_sync = False
class UnrepeatedDistributedSampler(DistributedSampler):
"""
A fork of the pytorch DistributedSampler that doesn't repeat data, instead
allowing the number of batches per process to be off-by-one from each other.
This makes this sampler usable for predictions (it's deterministic and
doesn't require shuffling). It is potentially unsafe to use this sampler for
training, because during training the DistributedDataParallel syncs buffers
on each forward pass, so it could freeze if one of the processes runs one
fewer batch. During prediction, buffers are only synced on the first batch,
so this is safe to use as long as each process runs at least one batch. We
verify this in an assert.
Taken from https://github.com/jpuigcerver/PyLaia/blob/v1.0.0/laia/data/unpadded_distributed_sampler.py
and https://github.com/pytorch/pytorch/issues/25162#issuecomment-634146002
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.num_samples = len(range(self.rank, len(self.dataset), self.num_replicas))
self.total_size = len(self.dataset)
# If any process has at least one batch, every other process needs to
# have at least one batch, or the DistributedDataParallel could lock up.
assert self.num_samples >= 1 or self.total_size == 0
def __iter__(self) -> Iterator[List[int]]:
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
class IndexBatchSamplerWrapper:
"""This class is used to wrap a :class:`torch.utils.data.BatchSampler` and capture its indices."""
def __init__(self, sampler: BatchSampler) -> None:
self._sampler = sampler
self.batch_indices: Optional[List[int]] = None
def __iter__(self) -> Iterator[List[int]]:
for batch in self._sampler:
self.batch_indices = batch
yield batch
def __len__(self) -> int:
return len(self._sampler)
@property
def drop_last(self) -> bool:
return self._sampler.drop_last
@property
def batch_size(self) -> int:
return self._sampler.batch_size
@property
def sampler(self) -> Sampler:
return self._sampler.sampler
| apache-2.0 |
jorge2703/scikit-learn | examples/svm/plot_svm_kernels.py | 326 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
CompPhysics/MachineLearning | doc/src/LectureNotes/_build/jupyter_execute/chapter10.py | 1 | 103230 | # Recurrent Neural Networks
[Overview video](https://www.youtube.com/watch?v=SEnXr6v2ifU&ab_channel=AlexanderAmini).
See also lecture on Thursday October 22 and examples from [week 42](https://compphysics.github.io/MachineLearning/doc/pub/week42/html/week42.html).
[IN5400 at UiO Lecture](https://www.uio.no/studier/emner/matnat/ifi/IN5400/v20/material/week10/in5400_2020_week10_recurrent_neural_network.pdf)
[CS231 at Stanford Lecture](https://www.youtube.com/watch?v=6niqTuYFZLQ&list=PLzUTmXVwsnXod6WNdg57Yc3zFx_f-RYsq&index=10&ab_channel=StanfordUniversitySchoolofEngineering)
## Recurrent neural networks: Overarching view
Until now our focus has been on feedforward neural networks, including
convolutional neural networks. The output or the activations
flow only in one direction, from the input layer to the output layer.
A recurrent neural network (RNN) looks very much like a feedforward
neural network, except that it also has connections pointing
backward.
RNNs are used to analyze time series data such as stock prices, and
tell you when to buy or sell. In autonomous driving systems, they can
anticipate car trajectories and help avoid accidents. More generally,
they can work on sequences of arbitrary lengths, rather than on
fixed-sized inputs like all the nets we have discussed so far. For
example, they can take sentences, documents, or audio samples as
input, making them extremely useful for natural language processing
systems such as automatic translation and speech-to-text.
## Set up of an RNN
Text to come.
## A simple example
%matplotlib inline
# Start importing packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, SimpleRNN, LSTM, GRU
from tensorflow.keras import optimizers
from tensorflow.keras import regularizers
from tensorflow.keras.utils import to_categorical
# convert into dataset matrix
def convertToMatrix(data, step):
X, Y =[], []
for i in range(len(data)-step):
d=i+step
X.append(data[i:d,])
Y.append(data[d,])
return np.array(X), np.array(Y)
step = 4
N = 1000
Tp = 800
t=np.arange(0,N)
x=np.sin(0.02*t)+2*np.random.rand(N)
df = pd.DataFrame(x)
df.head()
plt.plot(df)
plt.show()
values=df.values
train,test = values[0:Tp,:], values[Tp:N,:]
# add step elements into train and test
test = np.append(test,np.repeat(test[-1,],step))
train = np.append(train,np.repeat(train[-1,],step))
trainX,trainY =convertToMatrix(train,step)
testX,testY =convertToMatrix(test,step)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(SimpleRNN(units=32, input_shape=(1,step), activation="relu"))
model.add(Dense(8, activation="relu"))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
model.summary()
model.fit(trainX,trainY, epochs=100, batch_size=16, verbose=2)
trainPredict = model.predict(trainX)
testPredict= model.predict(testX)
predicted=np.concatenate((trainPredict,testPredict),axis=0)
trainScore = model.evaluate(trainX, trainY, verbose=0)
print(trainScore)
index = df.index.values
plt.plot(index,df)
plt.plot(index,predicted)
plt.axvline(df.index[Tp], c="r")
plt.show()
## An extrapolation example
The following code provides an example of how recurrent neural
networks can be used to extrapolate to unknown values of physics data
sets. Specifically, the data sets used in this program come from
a quantum mechanical many-body calculation of energies as functions of the number of particles.
# For matrices and calculations
import numpy as np
# For machine learning (backend for keras)
import tensorflow as tf
# User-friendly machine learning library
# Front end for TensorFlow
import tensorflow.keras
# Different methods from Keras needed to create an RNN
# This is not necessary but it shortened function calls
# that need to be used in the code.
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Input
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, SimpleRNN, LSTM, GRU
# For timing the code
from timeit import default_timer as timer
# For plotting
import matplotlib.pyplot as plt
# The data set
datatype='VaryDimension'
X_tot = np.arange(2, 42, 2)
y_tot = np.array([-0.03077640549, -0.08336233266, -0.1446729567, -0.2116753732, -0.2830637392, -0.3581341341, -0.436462435, -0.5177783846,
-0.6019067271, -0.6887363571, -0.7782028952, -0.8702784034, -0.9649652536, -1.062292565, -1.16231451,
-1.265109911, -1.370782966, -1.479465113, -1.591317992, -1.70653767])
## Formatting the Data
The way the recurrent neural networks are trained in this program
differs from how machine learning algorithms are usually trained.
Typically a machine learning algorithm is trained by learning the
relationship between the x data and the y data. In this program, the
recurrent neural network will be trained to recognize the relationship
in a sequence of y values. This type of data formatting is
typically used in time series forecasting, but it can also be used in any
extrapolation (time series forecasting is just a specific type of
extrapolation along the time axis). This method of data formatting
does not use the x data and assumes that the y data are evenly spaced.
For a standard machine learning algorithm, the training data has the
form of (x,y) so the machine learning algorithm learns to associate a
y value with a given x value. This is useful when the test data has x
values within the same range as the training data. However, for this
application, the x values of the test data are outside of the x values
of the training data and the traditional method of training a machine
learning algorithm does not work as well. For this reason, the
recurrent neural network is trained on sequences of y values of the
form ((y1, y2), y3), so that the network is concerned with learning
the pattern of the y data and not the relation between the x and y
data. As long as the pattern of y data outside of the training region
stays relatively stable compared to what was inside the training
region, this method of training can produce accurate extrapolations to
y values far removed from the training data set.
<!-- -->
<!-- The idea behind formatting the data in this way comes from [this resource](https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/) and [this one](https://fairyonice.github.io/Understand-Keras%27s-RNN-behind-the-scenes-with-a-sin-wave-example.html). -->
<!-- -->
<!-- The following method takes in a y data set and formats it so the "x data" are of the form (y1, y2) and the "y data" are of the form y3, with extra brackets added in to make the resulting arrays compatible with both Keras and Tensorflow. -->
<!-- -->
<!-- Note: Using a sequence length of two is not required for time series forecasting so any length of sequence could be used (for example instead of ((y1, y2) y3) you could change the length of sequence to be 4 and the resulting data points would have the form ((y1, y2, y3, y4), y5)). While the following method can be used to create a data set of any sequence length, the remainder of the code expects the length of sequence to be 2. This is because the data sets are very small and the longer the sequence the fewer resulting data points. -->
# FORMAT_DATA
def format_data(data, length_of_sequence = 2):
"""
Inputs:
data(a numpy array): the data that will be the inputs to the recurrent neural
network
length_of_sequence (an int): the number of elements in one iteration of the
sequence pattern. For a function approximator use length_of_sequence = 2.
Returns:
rnn_input (a 3D numpy array): the input data for the recurrent neural network. Its
dimensions are length of data - length of sequence, length of sequence,
dimension of data
rnn_output (a numpy array): the training data for the neural network
Formats data to be used in a recurrent neural network.
"""
X, Y = [], []
for i in range(len(data)-length_of_sequence):
# Get the next length_of_sequence elements
a = data[i:i+length_of_sequence]
# Get the element that immediately follows that
b = data[i+length_of_sequence]
# Reshape so that each data point is contained in its own array
a = np.reshape (a, (len(a), 1))
X.append(a)
Y.append(b)
rnn_input = np.array(X)
rnn_output = np.array(Y)
return rnn_input, rnn_output
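As a quick sanity check of this formatting (using a short, hypothetical toy sequence that is not part of the data set analyzed here), a sequence length of 2 applied to the values 1 through 5 should yield the pairs ((1, 2), 3), ((2, 3), 4) and ((3, 4), 5):
# Hypothetical toy check of format_data, not part of the original analysis.
toy_y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
toy_input, toy_output = format_data(toy_y, length_of_sequence=2)
print(toy_input.shape)   # (3, 2, 1): three samples, each a sequence of two y values
print(toy_output)        # [3. 4. 5.]: the y value following each sequence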
# ## Defining the Recurrent Neural Network Using Keras
#
# The following method defines a simple recurrent neural network in keras consisting of one input layer, one hidden layer, and one output layer.
def rnn(length_of_sequences, batch_size = None, stateful = False):
"""
Inputs:
length_of_sequences (an int): the number of y values in "x data". This is determined
when the data is formatted
batch_size (an int): Default value is None. See Keras documentation of SimpleRNN.
stateful (a boolean): Default value is False. See Keras documentation of SimpleRNN.
Returns:
model (a Keras model): The recurrent neural network that is built and compiled by this
method
Builds and compiles a recurrent neural network with one hidden layer and returns the model.
"""
# Number of neurons in the input and output layers
in_out_neurons = 1
# Number of neurons in the hidden layer
hidden_neurons = 200
# Define the input layer
inp = Input(batch_shape=(batch_size,
length_of_sequences,
in_out_neurons))
# Define the hidden layer as a simple RNN layer with a set number of neurons and add it to
# the network immediately after the input layer
rnn = SimpleRNN(hidden_neurons,
return_sequences=False,
stateful = stateful,
name="RNN")(inp)
# Define the output layer as a dense neural network layer (standard neural network layer)
#and add it to the network immediately after the hidden layer.
dens = Dense(in_out_neurons,name="dense")(rnn)
# Create the machine learning model starting with the input layer and ending with the
# output layer
model = Model(inputs=[inp],outputs=[dens])
# Compile the machine learning model using the mean squared error function as the loss
# function and an Adams optimizer.
model.compile(loss="mean_squared_error", optimizer="adam")
return model
## Predicting New Points With A Trained Recurrent Neural Network
def test_rnn (x1, y_test, plot_min, plot_max):
"""
Inputs:
x1 (a list or numpy array): The complete x component of the data set
y_test (a list or numpy array): The complete y component of the data set
plot_min (an int or float): the smallest x value used in the training data
plot_max (an int or float): the largest x value used in the training data
Returns:
None.
Uses a trained recurrent neural network model to predict future points in the
series. Computes the MSE of the predicted data set from the true data set, saves
the predicted data set to a csv file, and plots the predicted and true data sets
while also displaying the data range used for training.
"""
# Add the training data as the first dim points in the predicted data array as these
# are known values.
y_pred = y_test[:dim].tolist()
# Generate the first input to the trained recurrent neural network using the last two
# points of the training data. Based on how the network was trained this means that it
# will predict the first point in the data set after the training data. All of the
# brackets are necessary for Tensorflow.
next_input = np.array([[[y_test[dim-2]], [y_test[dim-1]]]])
# Save the very last point in the training data set. This will be used later.
last = [y_test[dim-1]]
# Iterate until the complete data set is created.
for i in range (dim, len(y_test)):
# Predict the next point in the data set using the previous two points.
next = model.predict(next_input)
# Append just the number of the predicted data set
y_pred.append(next[0][0])
# Create the input that will be used to predict the next data point in the data set.
next_input = np.array([[last, next[0]]], dtype=np.float64)
last = next
# Print the mean squared error between the known data set and the predicted data set.
print('MSE: ', np.square(np.subtract(y_test, y_pred)).mean())
# Save the predicted data set as a csv file for later use
name = datatype + 'Predicted'+str(dim)+'.csv'
np.savetxt(name, y_pred, delimiter=',')
# Plot the known data set and the predicted data set. The red box represents the region that was used
# for the training data.
fig, ax = plt.subplots()
ax.plot(x1, y_test, label="true", linewidth=3)
ax.plot(x1, y_pred, 'g-.',label="predicted", linewidth=4)
ax.legend()
# Created a red region to represent the points used in the training data.
ax.axvspan(plot_min, plot_max, alpha=0.25, color='red')
plt.show()
# Check to make sure the data set is complete
assert len(X_tot) == len(y_tot)
# This is the number of points that will be used in as the training data
dim=12
# Separate the training data from the whole data set
X_train = X_tot[:dim]
y_train = y_tot[:dim]
# Generate the training data for the RNN, using a sequence of 2
rnn_input, rnn_training = format_data(y_train, 2)
# Create a recurrent neural network in Keras and produce a summary of the
# machine learning model
model = rnn(length_of_sequences = rnn_input.shape[1])
model.summary()
# Start the timer. Want to time training+testing
start = timer()
# Fit the model using the training data generated above, with 150 training iterations and a 5%
# validation split. Setting verbose to True prints information about each training iteration.
hist = model.fit(rnn_input, rnn_training, batch_size=None, epochs=150,
verbose=True,validation_split=0.05)
for label in ["loss","val_loss"]:
plt.plot(hist.history[label],label=label)
plt.ylabel("loss")
plt.xlabel("epoch")
plt.title("The final validation loss: {}".format(hist.history["val_loss"][-1]))
plt.legend()
plt.show()
# Use the trained neural network to predict more points of the data set
test_rnn(X_tot, y_tot, X_tot[0], X_tot[dim-1])
# Stop the timer and calculate the total time needed.
end = timer()
print('Time: ', end-start)
## Other Things to Try
Changing the size of the recurrent neural network and its parameters
can drastically change the results you get from the model. The below
code takes the simple recurrent neural network from above and adds a
second hidden layer, changes the number of neurons in the hidden
layer, and explicitly declares the activation function of the hidden
layers to be a sigmoid function. The loss function and optimizer can
also be changed but are kept the same as the above network. These
parameters can be tuned to provide the optimal result from the
network. For some ideas on how to improve the performance of a
recurrent neural network, see [these tips for training recurrent neural networks](https://danijar.com/tips-for-training-recurrent-neural-networks).
def rnn_2layers(length_of_sequences, batch_size = None, stateful = False):
"""
Inputs:
length_of_sequences (an int): the number of y values in "x data". This is determined
when the data is formatted
batch_size (an int): Default value is None. See Keras documentation of SimpleRNN.
stateful (a boolean): Default value is False. See Keras documentation of SimpleRNN.
Returns:
model (a Keras model): The recurrent neural network that is built and compiled by this
method
Builds and compiles a recurrent neural network with two hidden layers and returns the model.
"""
# Number of neurons in the input and output layers
in_out_neurons = 1
# Number of neurons in the hidden layer, increased from the first network
hidden_neurons = 500
# Define the input layer
inp = Input(batch_shape=(batch_size,
length_of_sequences,
in_out_neurons))
# Create two hidden layers instead of one hidden layer. Explicitly set the activation
# function to be the sigmoid function (the default value is hyperbolic tangent)
rnn1 = SimpleRNN(hidden_neurons,
return_sequences=True, # This needs to be True if another hidden layer is to follow
stateful = stateful, activation = 'sigmoid',
name="RNN1")(inp)
rnn2 = SimpleRNN(hidden_neurons,
return_sequences=False, activation = 'sigmoid',
stateful = stateful,
name="RNN2")(rnn1)
# Define the output layer as a dense neural network layer (standard neural network layer)
#and add it to the network immediately after the hidden layer.
dens = Dense(in_out_neurons,name="dense")(rnn2)
# Create the machine learning model starting with the input layer and ending with the
# output layer
model = Model(inputs=[inp],outputs=[dens])
# Compile the machine learning model using the mean squared error function as the loss
# function and an Adams optimizer.
model.compile(loss="mean_squared_error", optimizer="adam")
return model
# Check to make sure the data set is complete
assert len(X_tot) == len(y_tot)
# This is the number of points that will be used in as the training data
dim=12
# Separate the training data from the whole data set
X_train = X_tot[:dim]
y_train = y_tot[:dim]
# Generate the training data for the RNN, using a sequence of 2
rnn_input, rnn_training = format_data(y_train, 2)
# Create a recurrent neural network in Keras and produce a summary of the
# machine learning model
model = rnn_2layers(length_of_sequences = 2)
model.summary()
# Start the timer. Want to time training+testing
start = timer()
# Fit the model using the training data generated above, with 150 training iterations and a 5%
# validation split. Setting verbose to True prints information about each training iteration.
hist = model.fit(rnn_input, rnn_training, batch_size=None, epochs=150,
verbose=True,validation_split=0.05)
# This section plots the training loss and the validation loss as a function of training iteration.
# This is not required for analyzing the couple cluster data but can help determine if the network is
# being overtrained.
for label in ["loss","val_loss"]:
plt.plot(hist.history[label],label=label)
plt.ylabel("loss")
plt.xlabel("epoch")
plt.title("The final validation loss: {}".format(hist.history["val_loss"][-1]))
plt.legend()
plt.show()
# Use the trained neural network to predict more points of the data set
test_rnn(X_tot, y_tot, X_tot[0], X_tot[dim-1])
# Stop the timer and calculate the total time needed.
end = timer()
print('Time: ', end-start)
## Other Types of Recurrent Neural Networks
Besides a simple recurrent neural network layer, there are two other
commonly used types of recurrent neural network layers: Long Short
Term Memory (LSTM) and Gated Recurrent Unit (GRU). For a short
introduction to these layers see <https://medium.com/mindboard/lstm-vs-gru-experimental-comparison-955820c21e8b>.
The first network created below is similar to the previous network,
but it replaces the SimpleRNN layers with LSTM layers. The second
network below has two hidden layers made up of GRUs, which are
preceded by two dense (feedforward) neural network layers. These
dense layers "preprocess" the data before it reaches the recurrent
layers. This architecture has been shown to improve the performance
of recurrent neural networks (see the link above and also
<https://arxiv.org/pdf/1807.02857.pdf>).
def lstm_2layers(length_of_sequences, batch_size = None, stateful = False):
"""
Inputs:
length_of_sequences (an int): the number of y values in "x data". This is determined
when the data is formatted
batch_size (an int): Default value is None. See Keras documentation of SimpleRNN.
stateful (a boolean): Default value is False. See Keras documentation of SimpleRNN.
Returns:
model (a Keras model): The recurrent neural network that is built and compiled by this
method
Builds and compiles a recurrent neural network with two LSTM hidden layers and returns the model.
"""
# Number of neurons on the input/output layer and the number of neurons in the hidden layer
in_out_neurons = 1
hidden_neurons = 250
# Input Layer
inp = Input(batch_shape=(batch_size,
length_of_sequences,
in_out_neurons))
# Hidden layers (in this case they are LSTM layers instead if SimpleRNN layers)
rnn= LSTM(hidden_neurons,
return_sequences=True,
stateful = stateful,
name="RNN", use_bias=True, activation='tanh')(inp)
rnn1 = LSTM(hidden_neurons,
return_sequences=False,
stateful = stateful,
name="RNN1", use_bias=True, activation='tanh')(rnn)
# Output layer
dens = Dense(in_out_neurons,name="dense")(rnn1)
# Define the model
model = Model(inputs=[inp],outputs=[dens])
# Compile the model
model.compile(loss='mean_squared_error', optimizer='adam')
# Return the model
return model
def dnn2_gru2(length_of_sequences, batch_size = None, stateful = False):
"""
Inputs:
length_of_sequences (an int): the number of y values in "x data". This is determined
when the data is formatted
batch_size (an int): Default value is None. See Keras documentation of SimpleRNN.
stateful (a boolean): Default value is False. See Keras documentation of SimpleRNN.
Returns:
model (a Keras model): The recurrent neural network that is built and compiled by this
method
Builds and compiles a recurrent neural network with four hidden layers (two dense followed by
two GRU layers) and returns the model.
"""
# Number of neurons on the input/output layers and hidden layers
in_out_neurons = 1
hidden_neurons = 250
# Input layer
inp = Input(batch_shape=(batch_size,
length_of_sequences,
in_out_neurons))
# Hidden Dense (feedforward) layers
dnn = Dense(hidden_neurons/2, activation='relu', name='dnn')(inp)
dnn1 = Dense(hidden_neurons/2, activation='relu', name='dnn1')(dnn)
# Hidden GRU layers
rnn1 = GRU(hidden_neurons,
return_sequences=True,
stateful = stateful,
name="RNN1", use_bias=True)(dnn1)
rnn = GRU(hidden_neurons,
return_sequences=False,
stateful = stateful,
name="RNN", use_bias=True)(rnn1)
# Output layer
dens = Dense(in_out_neurons,name="dense")(rnn)
# Define the model
model = Model(inputs=[inp],outputs=[dens])
# Compile the model
model.compile(loss='mean_squared_error', optimizer='adam')
# Return the model
return model
# Check to make sure the data set is complete
assert len(X_tot) == len(y_tot)
# This is the number of points that will be used in as the training data
dim=12
# Separate the training data from the whole data set
X_train = X_tot[:dim]
y_train = y_tot[:dim]
# Generate the training data for the RNN, using a sequence of 2
rnn_input, rnn_training = format_data(y_train, 2)
# Create a recurrent neural network in Keras and produce a summary of the
# machine learning model
# Change the method name to reflect which network you want to use
model = dnn2_gru2(length_of_sequences = 2)
model.summary()
# Start the timer. Want to time training+testing
start = timer()
# Fit the model using the training data generated above, with 150 training iterations and a 5%
# validation split. Setting verbose to True prints information about each training iteration.
hist = model.fit(rnn_input, rnn_training, batch_size=None, epochs=150,
verbose=True,validation_split=0.05)
# This section plots the training loss and the validation loss as a function of training iteration.
# This is not required for analyzing the couple cluster data but can help determine if the network is
# being overtrained.
for label in ["loss","val_loss"]:
plt.plot(hist.history[label],label=label)
plt.ylabel("loss")
plt.xlabel("epoch")
plt.title("The final validation loss: {}".format(hist.history["val_loss"][-1]))
plt.legend()
plt.show()
# Use the trained neural network to predict more points of the data set
test_rnn(X_tot, y_tot, X_tot[0], X_tot[dim-1])
# Stop the timer and calculate the total time needed.
end = timer()
print('Time: ', end-start)
# ### Training Recurrent Neural Networks in the Standard Way (i.e. learning the relationship between the X and Y data)
#
# # Finally, comparing the performance of a recurrent neural network using the standard data formatting to the performance of the network with time sequence data formatting shows the benefit of this type of data formatting with extrapolation.
# Check to make sure the data set is complete
assert len(X_tot) == len(y_tot)
# This is the number of points that will be used in as the training data
dim=12
# Separate the training data from the whole data set
X_train = X_tot[:dim]
y_train = y_tot[:dim]
# Reshape the data for Keras specifications
X_train = X_train.reshape((dim, 1))
y_train = y_train.reshape((dim, 1))
# Create a recurrent neural network in Keras and produce a summary of the
# machine learning model
# Set the sequence length to 1 for regular data formatting
model = rnn(length_of_sequences = 1)
model.summary()
# Start the timer. Want to time training+testing
start = timer()
# Fit the model using the training data generated above, with 150 training iterations and a 5%
# validation split. Setting verbose to True prints information about each training iteration.
hist = model.fit(X_train, y_train, batch_size=None, epochs=150,
verbose=True,validation_split=0.05)
# This section plots the training loss and the validation loss as a function of training iteration.
# This is not required for analyzing the couple cluster data but can help determine if the network is
# being overtrained.
for label in ["loss","val_loss"]:
plt.plot(hist.history[label],label=label)
plt.ylabel("loss")
plt.xlabel("epoch")
plt.title("The final validation loss: {}".format(hist.history["val_loss"][-1]))
plt.legend()
plt.show()
# Use the trained neural network to predict the remaining data points
X_pred = X_tot[dim:]
X_pred = X_pred.reshape((len(X_pred), 1))
y_model = model.predict(X_pred)
y_pred = np.concatenate((y_tot[:dim], y_model.flatten()))
# Plot the known data set and the predicted data set. The red box represents the region that was used
# for the training data.
fig, ax = plt.subplots()
ax.plot(X_tot, y_tot, label="true", linewidth=3)
ax.plot(X_tot, y_pred, 'g-.',label="predicted", linewidth=4)
ax.legend()
# Created a red region to represent the points used in the training data.
ax.axvspan(X_tot[0], X_tot[dim], alpha=0.25, color='red')
plt.show()
# Stop the timer and calculate the total time needed.
end = timer()
print('Time: ', end-start)
# Solving ODEs with Deep Learning
The Universal Approximation Theorem states that a neural network with a
single hidden layer, together with one input and one output layer, can
approximate any continuous function to any given precision.
## Ordinary Differential Equations
An ordinary differential equation (ODE) is an equation involving a function of a single variable and its derivatives.
In general, an ordinary differential equation looks like
<!-- Equation labels as ordinary links -->
<div id="ode"></div>
$$
\begin{equation} \label{ode} \tag{1}
f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right) = 0
\end{equation}
$$
where $g(x)$ is the function to find, and $g^{(n)}(x)$ is the $n$-th derivative of $g(x)$.
The $f\left(x, g(x), g'(x), g''(x), \, \dots \, , g^{(n)}(x)\right)$ is just a way to write that there is an expression involving $x$ and $g(x), \ g'(x), \ g''(x), \, \dots \, , \text{ and } g^{(n)}(x)$ on the left side of the equality sign in ([1](#ode)).
The highest order of derivative, that is the value of $n$, determines the order of the equation.
The equation is referred to as an $n$-th order ODE.
Along with ([1](#ode)), some additional conditions on the function $g(x)$ are typically given
for the solution to be unique.
## The trial solution
Let the trial solution $g_t(x)$ be
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
g_t(x) = h_1(x) + h_2(x,N(x,P))
\label{_auto1} \tag{2}
\end{equation}
$$
where $h_1(x)$ is a function that makes $g_t(x)$ satisfy a given set
of conditions, $N(x,P)$ a neural network with weights and biases
described by $P$ and $h_2(x, N(x,P))$ some expression involving the
neural network. The role of the function $h_2(x, N(x,P))$, is to
ensure that the output from $N(x,P)$ is zero when $g_t(x)$ is
evaluated at the values of $x$ where the given conditions must be
satisfied. The function $h_1(x)$ alone should make $g_t(x)$ satisfy
the conditions.
But what about the network $N(x,P)$?
As described previously, an optimization method can be used to adjust the parameters of a neural network, that is, its weights and biases, through backward propagation.
## Minimization process
For the minimization to be defined, we need to have a cost function at hand to minimize.
It is given that $f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right)$ should be equal to zero in ([1](#ode)).
We can choose to consider the mean squared error as the cost function for an input $x$.
Since we are looking at one input, the cost function is just $f$ squared.
The cost function $C\left(x, P\right)$ can therefore be expressed as
$$
C\left(x, P\right) = \big(f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right)\big)^2
$$
If $N$ inputs are given as a vector $\boldsymbol{x}$ with elements $x_i$ for $i = 1,\dots,N$,
the cost function becomes
<!-- Equation labels as ordinary links -->
<div id="cost"></div>
$$
\begin{equation} \label{cost} \tag{3}
C\left(\boldsymbol{x}, P\right) = \frac{1}{N} \sum_{i=1}^N \big(f\left(x_i, \, g(x_i), \, g'(x_i), \, g''(x_i), \, \dots \, , \, g^{(n)}(x_i)\right)\big)^2
\end{equation}
$$
The neural net should then find the parameters $P$ that minimizes the cost function in
([3](#cost)) for a set of $N$ training samples $x_i$.
## Minimizing the cost function using gradient descent and automatic differentiation
To perform the minimization using gradient descent, the gradient of $C\left(\boldsymbol{x}, P\right)$ is needed.
It might happen that finding an analytical expression for the gradient of $C(\boldsymbol{x}, P)$ from ([3](#cost)) gets too messy, depending on which cost function one desires to use.
Luckily, there exist libraries that do the job for us through automatic differentiation.
Automatic differentiation computes derivatives to essentially machine precision, without resorting to finite-difference approximations or deriving the expressions by hand.
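As a minimal sketch of what such a library offers (the use of the autograd package here is an assumption made for illustration; any automatic differentiation library would do), the derivative of a simple function can be obtained without deriving it by hand:
# Minimal automatic differentiation sketch, assuming the autograd package is installed.
import autograd.numpy as anp
from autograd import grad
def f(x):
    return anp.exp(-2.0*x)
df = grad(f)                    # df(x) evaluates f'(x) by automatic differentiation
print(df(1.0))                  # approximately -2*exp(-2) = -0.2707
print(-2.0*anp.exp(-2.0))       # the analytical derivative, for comparison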
## Example: Exponential decay
An exponential decay of a quantity $g(x)$ is described by the equation
<!-- Equation labels as ordinary links -->
<div id="solve_expdec"></div>
$$
\begin{equation} \label{solve_expdec} \tag{4}
g'(x) = -\gamma g(x)
\end{equation}
$$
with $g(0) = g_0$ for some chosen initial value $g_0$.
The analytical solution of ([4](#solve_expdec)) is
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
g(x) = g_0 \exp\left(-\gamma x\right)
\label{_auto2} \tag{5}
\end{equation}
$$
Having an analytical solution at hand, it is possible to use it to compare how well a neural network finds a solution of ([4](#solve_expdec)).
## The function to solve for
The program will use a neural network to solve
<!-- Equation labels as ordinary links -->
<div id="solveode"></div>
$$
\begin{equation} \label{solveode} \tag{6}
g'(x) = -\gamma g(x)
\end{equation}
$$
where $g(0) = g_0$ with $\gamma$ and $g_0$ being some chosen values.
In this example, $\gamma = 2$ and $g_0 = 10$.
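For later comparison with the network's output it is convenient to wrap the analytical solution in a small helper function; the sketch below simply encodes the closed-form expression with the values of $\gamma$ and $g_0$ chosen above.
# Closed-form solution of the exponential decay, used only for comparison plots.
def g_analytic(x, gamma=2.0, g0=10.0):
    return g0*np.exp(-gamma*x)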
## The trial solution
To begin with, a trial solution $g_t(t)$ must be chosen. A general trial solution for ordinary differential equations could be
$$
g_t(x, P) = h_1(x) + h_2(x, N(x, P))
$$
with $h_1(x)$ ensuring that $g_t(x)$ satisfies some conditions and $h_2(x,N(x, P))$ an expression involving $x$ and the output from the neural network $N(x,P)$ with $P $ being the collection of the weights and biases for each layer. For now, it is assumed that the network consists of one input layer, one hidden layer, and one output layer.
## Setup of Network
In this network, there are no weights and bias at the input layer, so $P = \{ P_{\text{hidden}}, P_{\text{output}} \}$.
If there are $N_{\text{hidden} }$ neurons in the hidden layer, then $P_{\text{hidden}}$ is a $N_{\text{hidden} } \times (1 + N_{\text{input}})$ matrix, given that there are $N_{\text{input}}$ neurons in the input layer.
The first column in $P_{\text{hidden} }$ represents the bias for each neuron in the hidden layer and the second column represents the weights for each neuron in the hidden layer from the input layer.
If there are $N_{\text{output} }$ neurons in the output layer, then $P_{\text{output}} $ is a $N_{\text{output} } \times (1 + N_{\text{hidden} })$ matrix.
Its first column represents the bias of each neuron and the remaining columns represents the weights to each neuron.
It is given that $g(0) = g_0$. The trial solution must fulfill this condition to be a proper solution of ([6](#solveode)). A possible way to ensure that $g_t(0, P) = g_0$ is to let $h_2(x, N(x,P)) = x \cdot N(x,P)$ and $h_1(x) = g_0$. This gives the following trial solution:
<!-- Equation labels as ordinary links -->
<div id="trial"></div>
$$
\begin{equation} \label{trial} \tag{7}
g_t(x, P) = g_0 + x \cdot N(x, P)
\end{equation}
$$
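In code, this trial solution can be written down directly; the sketch below keeps the network abstract and passes it in as a function argument (a concrete feed-forward version is sketched later in this section), which makes it explicit that the initial condition holds no matter what the network outputs.
# Sketch of the trial solution g_t(x, P) = g0 + x*N(x, P). The network N is kept
# abstract here and supplied as a function argument.
g0 = 10.0
def g_trial(x, N, params):
    return g0 + x*N(x, params)
# The initial condition is satisfied by construction, independently of the network:
print(g_trial(0.0, lambda x, params: 123.4, None))   # prints 10.0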
## Reformulating the problem
We wish that our neural network manages to minimize a given cost function.
A reformulation of our equation, ([6](#solveode)), must therefore be done,
such that it describes the problem a neural network can solve for.
The neural network must find the set of weights and biases $P$ such that the trial solution in ([7](#trial)) satisfies ([6](#solveode)).
The trial solution
$$
g_t(x, P) = g_0 + x \cdot N(x, P)
$$
has been chosen such that it already solves the condition $g(0) = g_0$. What remains, is to find $P$ such that
<!-- Equation labels as ordinary links -->
<div id="nnmin"></div>
$$
\begin{equation} \label{nnmin} \tag{8}
g_t'(x, P) = - \gamma g_t(x, P)
\end{equation}
$$
is fulfilled *as well as possible*.
## More technicalities
The left hand side and right hand side of ([8](#nnmin)) must be computed separately, and then the neural network must choose weights and biases, contained in $P$, such that the two sides are as equal as possible.
This means that the absolute or squared difference between the sides must be as close to zero as possible, ideally equal to zero.
In this case, the squared difference turns out to be an appropriate measure of how erroneous the trial solution is with respect to the parameters $P$ of the neural network.
This gives the following cost function our neural network must solve for:
$$
\min_{P}\Big\{ \big(g_t'(x, P) - ( -\gamma g_t(x, P) \big)^2 \Big\}
$$
(the notation $\min_{P}\{ f(x, P) \}$ means that we desire to find $P$ that yields the minimum of $f(x, P)$)
or, in terms of weights and biases for the hidden and output layer in our network:
$$
\min_{P_{\text{hidden} }, \ P_{\text{output} }}\Big\{ \big(g_t'(x, \{ P_{\text{hidden} }, P_{\text{output} }\}) - ( -\gamma g_t(x, \{ P_{\text{hidden} }, P_{\text{output} }\}) \big)^2 \Big\}
$$
for an input value $x$.
## More details
If the neural network evaluates $g_t(x, P)$ at more values for $x$, say $N$ values $x_i$ for $i = 1, \dots, N$, then the *total* error to minimize becomes
<!-- Equation labels as ordinary links -->
<div id="min"></div>
$$
\begin{equation} \label{min} \tag{9}
\min_{P}\Big\{\frac{1}{N} \sum_{i=1}^N \big(g_t'(x_i, P) - ( -\gamma g_t(x_i, P) \big)^2 \Big\}
\end{equation}
$$
Letting $\boldsymbol{x}$ be a vector with elements $x_i$ and $C(\boldsymbol{x}, P) = \frac{1}{N} \sum_i \big(g_t'(x_i, P) - ( -\gamma g_t(x_i, P) \big)^2$ denote the cost function, the minimization problem that our network must solve, becomes
$$
\min_{P} C(\boldsymbol{x}, P)
$$
In terms of $P_{\text{hidden} }$ and $P_{\text{output} }$, this could also be expressed as
$$
\min_{P_{\text{hidden} }, \ P_{\text{output} }} C(\boldsymbol{x}, \{P_{\text{hidden} }, P_{\text{output} }\})
$$
## A possible implementation of a neural network
For simplicity, it is assumed that the input is an array $\boldsymbol{x} = (x_1, \dots, x_N)$ with $N$ elements. It is at these points the neural network should find $P$ such that it fulfills ([9](#min)).
First, the neural network must feed forward the inputs.
This means that $\boldsymbol{x}$ must be passed through an input layer, a hidden layer and an output layer. The input layer, in this case, does not need to process the data any further.
The input layer will consist of $N_{\text{input} }$ neurons, passing its element to each neuron in the hidden layer. The number of neurons in the hidden layer will be $N_{\text{hidden} }$.
## Technicalities
For the $i$-th neuron in the hidden layer with weight $w_i^{\text{hidden} }$ and bias $b_i^{\text{hidden} }$, the weighting from the $j$-th neuron at the input layer is:
$$
\begin{aligned}
z_{i,j}^{\text{hidden}} &= b_i^{\text{hidden}} + w_i^{\text{hidden}}x_j \\
&=
\begin{pmatrix}
b_i^{\text{hidden}} & w_i^{\text{hidden}}
\end{pmatrix}
\begin{pmatrix}
1 \\
x_j
\end{pmatrix}
\end{aligned}
$$
## Final technicalities I
The result after weighting the inputs at the $i$-th hidden neuron can be written as a vector:
$$
\begin{aligned}
\boldsymbol{z}_{i}^{\text{hidden}} &= \Big( b_i^{\text{hidden}} + w_i^{\text{hidden}}x_1 , \ b_i^{\text{hidden}} + w_i^{\text{hidden}} x_2, \ \dots \, , \ b_i^{\text{hidden}} + w_i^{\text{hidden}} x_N\Big) \\
&=
\begin{pmatrix}
b_i^{\text{hidden}} & w_i^{\text{hidden}}
\end{pmatrix}
\begin{pmatrix}
1 & 1 & \dots & 1 \\
x_1 & x_2 & \dots & x_N
\end{pmatrix} \\
&= \boldsymbol{p}_{i, \text{hidden}}^T X
\end{aligned}
$$
## Final technicalities II
The vector $\boldsymbol{p}_{i, \text{hidden}}^T$ constitutes each row in $P_{\text{hidden} }$, which contains the weights for the neural network to minimize according to ([9](#min)).
After having found $\boldsymbol{z}_{i}^{\text{hidden}} $ for every $i$-th neuron within the hidden layer, the vector will be sent to an activation function $a_i(\boldsymbol{z})$.
In this example, the sigmoid function has been chosen to be the activation function for each hidden neuron:
$$
f(z) = \frac{1}{1 + \exp{(-z)}}
$$
It is possible to use other activation functions for the hidden layer also.
The output $\boldsymbol{x}_i^{\text{hidden}}$ from each $i$-th hidden neuron is:
$$
\boldsymbol{x}_i^{\text{hidden} } = f\big( \boldsymbol{z}_{i}^{\text{hidden}} \big)
$$
The outputs $\boldsymbol{x}_i^{\text{hidden} } $ are then sent to the output layer.
The output layer consists of one neuron in this case, and combines the
outputs from the neurons in the hidden layer using some weights $w_i^{\text{output}}$
and biases $b_i^{\text{output}}$.
## Final technicalities III
The procedure of weighting the output of neuron $j$ in the hidden layer into the $i$-th neuron in the output layer is similar to that for the hidden layer described previously.
$$
\begin{aligned}
z_{1,j}^{\text{output}} & =
\begin{pmatrix}
b_1^{\text{output}} & \boldsymbol{w}_1^{\text{output}}
\end{pmatrix}
\begin{pmatrix}
1 \\
\boldsymbol{x}_j^{\text{hidden}}
\end{pmatrix}
\end{aligned}
$$
## Final technicalities IV
Expressing $z_{1,j}^{\text{output}}$ as a vector gives the following way of weighting the inputs from the hidden layer:
$$
\boldsymbol{z}_{1}^{\text{output}} =
\begin{pmatrix}
b_1^{\text{output}} & \boldsymbol{w}_1^{\text{output}}
\end{pmatrix}
\begin{pmatrix}
1 & 1 & \dots & 1 \\
\boldsymbol{x}_1^{\text{hidden}} & \boldsymbol{x}_2^{\text{hidden}} & \dots & \boldsymbol{x}_N^{\text{hidden}}
\end{pmatrix}
$$
In this case we seek a continuous range of values since we are approximating a function. This means that after computing $\boldsymbol{z}_{1}^{\text{output}}$ the neural network has finished its feed forward step, and $\boldsymbol{z}_{1}^{\text{output}}$ is the final output of the network.
## Back propagation
The next step is to decide how the parameters should be changed such that they minimize the cost function.
The chosen cost function for this problem is
$$
C(\boldsymbol{x}, P) = \frac{1}{N} \sum_i \big(g_t'(x_i, P) - ( -\gamma g_t(x_i, P)) \big)^2
$$
In order to minimize the cost function, an optimization method must be chosen.
Here, gradient descent with a constant step size has been chosen.
## Gradient descent
The idea of the gradient descent algorithm is to update the parameters
in the direction in which the cost function decreases, towards a minimum.
In general, for a cost function $C(\boldsymbol{x}, \boldsymbol{\omega})$ defined by some
parameters $\boldsymbol{\omega}$, the update goes as follows:
$$
\boldsymbol{\omega}_{\text{new} } = \boldsymbol{\omega} - \lambda \nabla_{\boldsymbol{\omega}} C(\boldsymbol{x}, \boldsymbol{\omega})
$$
for a number of iterations, or until $\big|\big| \boldsymbol{\omega}_{\text{new} } - \boldsymbol{\omega} \big|\big|$ becomes smaller than some given tolerance.
The value of $\lambda$ decides how large a step the algorithm takes
in the direction of the negative gradient $-\nabla_{\boldsymbol{\omega}} C(\boldsymbol{x}, \boldsymbol{\omega})$.
The notation $\nabla_{\boldsymbol{\omega}}$ expresses the gradient with respect
to the elements in $\boldsymbol{\omega}$.
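As a concrete illustration of the update rule, here is a minimal gradient descent loop on a toy quadratic cost; the cost, the step size and the number of iterations are arbitrary choices for this sketch, not values used later.

```python
import autograd.numpy as np
from autograd import grad

def C(w):
    # toy cost with minimum at w = (3, 3, 3, 3)
    return np.sum((w - 3.0)**2)

dC = grad(C)              # gradient of C with respect to w
w = np.zeros(4)
lmb = 0.1                 # the step size lambda
for _ in range(100):
    w = w - lmb*dC(w)     # the update rule above

print(w)                  # close to [3. 3. 3. 3.]
```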
In our case, we have to minimize the cost function $C(\boldsymbol{x}, P)$ with
respect to the two sets of weights and biases, that is for the hidden
layer $P_{\text{hidden} }$ and for the output layer $P_{\text{output} }$.
This means that $P_{\text{hidden} }$ and $P_{\text{output} }$ are updated by
$$
\begin{aligned}
P_{\text{hidden},\text{new}} &= P_{\text{hidden}} - \lambda \nabla_{P_{\text{hidden}}} C(\boldsymbol{x}, P) \\
P_{\text{output},\text{new}} &= P_{\text{output}} - \lambda \nabla_{P_{\text{output}}} C(\boldsymbol{x}, P)
\end{aligned}
$$
## The code for solving the ODE
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# Assuming one input, hidden, and output layer
def neural_network(params, x):
    # Find the weights (and biases) for the hidden and output layer.
# Assume that params is a list of parameters for each layer.
# The biases are the first element for each array in params,
    # and the weights are the remaining elements in each array in params.
w_hidden = params[0]
w_output = params[1]
    # Assumes the input x is a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
## Hidden layer:
# Add a row of ones to include bias
x_input = np.concatenate((np.ones((1,num_values)), x_input ), axis = 0)
z_hidden = np.matmul(w_hidden, x_input)
x_hidden = sigmoid(z_hidden)
## Output layer:
# Include bias:
x_hidden = np.concatenate((np.ones((1,num_values)), x_hidden ), axis = 0)
z_output = np.matmul(w_output, x_hidden)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial(x,params, g0 = 10):
return g0 + x*neural_network(params,x)
# The right side of the ODE:
def g(x, g_trial, gamma = 2):
return -gamma*g_trial
# The cost function:
def cost_function(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
# Solve the exponential decay ODE using neural network with one input, hidden, and output layer
def solve_ode_neural_network(x, num_neurons_hidden, num_iter, lmb):
## Set up initial weights and biases
# For the hidden layer
p0 = npr.randn(num_neurons_hidden, 2 )
# For the output layer
p1 = npr.randn(1, num_neurons_hidden + 1 ) # +1 since bias is included
P = [p0, p1]
print('Initial cost: %g'%cost_function(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
        # cost_grad now consists of two arrays:
# one for the gradient w.r.t P_hidden and
# one for the gradient w.r.t P_output
cost_grad = cost_function_grad(P, x)
P[0] = P[0] - lmb * cost_grad[0]
P[1] = P[1] - lmb * cost_grad[1]
print('Final cost: %g'%cost_function(P, x))
return P
def g_analytic(x, gamma = 2, g0 = 10):
return g0*np.exp(-gamma*x)
# Solve the given problem
if __name__ == '__main__':
    # Set a seed such that the weights and biases are initialized
    # with the same values for every run.
npr.seed(15)
    ## Decide the values of the arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = 10
num_iter = 10000
lmb = 0.001
# Use the network
P = solve_ode_neural_network(x, num_hidden_neurons, num_iter, lmb)
# Print the deviation from the trial solution and true solution
res = g_trial(x,P)
res_analytical = g_analytic(x)
print('Max absolute difference: %g'%np.max(np.abs(res - res_analytical)))
# Plot the results
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
## The network with one input layer, specified number of hidden layers, and one output layer
It is also possible to extend the construction of our network into a more general one, allowing the network to contain more than one hidden layer.
The number of neurons within each hidden layer is given as a list of integers in the program below.
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# The neural network with one input layer and one output layer,
# but with number of hidden layers specified by the user.
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consists of
# parameters to all the hidden
# layers AND the output layer.
    # Assumes the input x is a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referencing to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial_deep(x,params, g0 = 10):
return g0 + x*deep_neural_network(params, x)
# The right side of the ODE:
def g(x, g_trial, gamma = 2):
return -gamma*g_trial
# The same cost function as before, but calls deep_neural_network instead.
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(deep_neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
# Solve the exponential decay ODE using neural network with one input and one output layer,
# but with specified number of hidden layers from the user.
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# The number of elements in the list num_hidden_neurons thus represents
# the number of hidden layers.
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
def g_analytic(x, gamma = 2, g0 = 10):
return g0*np.exp(-gamma*x)
# Solve the given problem
if __name__ == '__main__':
npr.seed(15)
    ## Decide the values of the arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = np.array([10,10])
num_iter = 10000
lmb = 0.001
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
res = g_trial_deep(x,P)
res_analytical = g_analytic(x)
plt.figure(figsize=(10,10))
plt.title('Performance of a deep neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','dnn'])
plt.ylabel('g(x)')
plt.show()
## Example: Population growth
A logistic model of population growth assumes that a population converges toward an equilibrium.
The population growth can be modeled by
<!-- Equation labels as ordinary links -->
<div id="log"></div>
$$
\begin{equation} \label{log} \tag{10}
g'(t) = \alpha g(t)(A - g(t))
\end{equation}
$$
where $g(t)$ is the population density at time $t$, $\alpha > 0$ the growth rate and $A > 0$ is the maximum population number in the environment.
Also, at $t = 0$ the population has the size $g(0) = g_0$, where $g_0$ is some chosen constant.
In this example, a network similar to the one used for the exponential decay with Autograd has been used to solve the equation. However, as the implementation might suffer from e.g. numerical instability
and high execution time (this might be more apparent in the examples solving PDEs),
using a library like TensorFlow is recommended.
Here, we stay with the simpler approach and, for comparison, also implement the simple forward Euler method.
## Setting up the problem
Here, we will model a population $g(t)$ in an environment having carrying capacity $A$.
The population follows the model
<!-- Equation labels as ordinary links -->
<div id="solveode_population"></div>
$$
\begin{equation} \label{solveode_population} \tag{11}
g'(t) = \alpha g(t)(A - g(t))
\end{equation}
$$
where $g(0) = g_0$.
In this example, we let $\alpha = 2$, $A = 1$, and $g_0 = 1.2$.
## The trial solution
We will get a slightly different trial solution, as the boundary conditions are different
compared to the case for exponential decay.
A possible trial solution satisfying the condition $g(0) = g_0$ could be
$$
h_1(t) = g_0 + t \cdot N(t,P)
$$
with $N(t,P)$ being the output from the neural network with weights and biases for each layer collected in the set $P$.
The analytical solution is
$$
g(t) = \frac{Ag_0}{g_0 + (A - g_0)\exp(-\alpha A t)}
$$
## The program using Autograd
The network will be similar to the one used in the exponential decay example, but with some small modifications for our problem.
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# Function to get the parameters.
# Done such that one can easily change the parameters to one's liking.
def get_parameters():
alpha = 2
A = 1
g0 = 1.2
return alpha, A, g0
def deep_neural_network(P, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(P) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being an one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referencing to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = P[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = P[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
# The right side of the ODE
func = f(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
# The right side of the ODE:
def f(x, g_trial):
alpha,A, g0 = get_parameters()
return alpha*g_trial*(A - g_trial)
# The trial solution using the deep neural network:
def g_trial_deep(x, params):
alpha,A, g0 = get_parameters()
return g0 + x*deep_neural_network(params,x)
# The analytical solution:
def g_analytic(t):
alpha,A, g0 = get_parameters()
return A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
    ## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
    ## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
if __name__ == '__main__':
npr.seed(4155)
    ## Decide the values of the arguments to the function to solve
Nt = 10
T = 1
t = np.linspace(0,T, Nt)
## Set up the initial parameters
num_hidden_neurons = [100, 50, 25]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(t,P)
g_analytical = g_analytic(t)
    # Find the maximum absolute difference between the solutions:
diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))
print("The max absolute difference between the solutions is: %g"%diff_ag)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(t, g_analytical)
plt.plot(t, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('t')
plt.ylabel('g(t)')
plt.show()
## Using forward Euler to solve the ODE
A straightforward way of solving an ODE numerically is to use Euler's method.
Euler's method uses a Taylor series to approximate the value of a function $f$ at a step $\Delta x$ from $x$:
$$
f(x + \Delta x) \approx f(x) + \Delta x f'(x)
$$
In our case, using Euler's method to approximate the value of $g$ at a step $\Delta t$ from $t$ yields
$$
\begin{aligned}
g(t + \Delta t) &\approx g(t) + \Delta t g'(t) \\
&= g(t) + \Delta t \big(\alpha g(t)(A - g(t))\big)
\end{aligned}
$$
along with the condition that $g(0) = g_0$.
Let $t_i = i \cdot \Delta t$ with $\Delta t = \frac{T}{N_t-1}$, where $T$ is the final time our solver must solve for and $N_t$ is the number of values of $t \in [0, T]$, for $i = 0, \dots, N_t-1$.
For $i \geq 1$, we have that
$$
\begin{aligned}
t_i &= i\Delta t \\
&= (i - 1)\Delta t + \Delta t \\
&= t_{i-1} + \Delta t
\end{aligned}
$$
Now, if $g_i = g(t_i)$ then
<!-- Equation labels as ordinary links -->
<div id="odenum"></div>
$$
\begin{equation}
\begin{aligned}
g_i &= g(t_i) \\
&= g(t_{i-1} + \Delta t) \\
&\approx g(t_{i-1}) + \Delta t \big(\alpha g(t_{i-1})(A - g(t_{i-1}))\big) \\
&= g_{i-1} + \Delta t \big(\alpha g_{i-1}(A - g_{i-1})\big)
\end{aligned}
\end{equation} \label{odenum} \tag{12}
$$
for $i \geq 1$, with $g_0 = g(t_0) = g(0)$ given as the initial condition.
Equation ([12](#odenum)) could be implemented in the following way,
extending the program that uses the network using Autograd:
# Assume that all function definitions from the example program using Autograd
# are located here.
if __name__ == '__main__':
npr.seed(4155)
    ## Decide the values of the arguments to the function to solve
Nt = 10
T = 1
t = np.linspace(0,T, Nt)
## Set up the initial parameters
num_hidden_neurons = [100,50,25]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(t,P)
g_analytical = g_analytic(t)
    # Find the maximum absolute difference between the solutions:
diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))
print("The max absolute difference between the solutions is: %g"%diff_ag)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(t, g_analytical)
plt.plot(t, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('t')
plt.ylabel('g(t)')
    ## Find an approximation to the function using forward Euler
alpha, A, g0 = get_parameters()
dt = T/(Nt - 1)
# Perform forward Euler to solve the ODE
g_euler = np.zeros(Nt)
g_euler[0] = g0
for i in range(1,Nt):
g_euler[i] = g_euler[i-1] + dt*(alpha*g_euler[i-1]*(A - g_euler[i-1]))
# Print the errors done by each method
diff1 = np.max(np.abs(g_euler - g_analytical))
diff2 = np.max(np.abs(g_dnn_ag[0,:] - g_analytical))
print('Max absolute difference between Euler method and analytical: %g'%diff1)
print('Max absolute difference between deep neural network and analytical: %g'%diff2)
# Plot results
plt.figure(figsize=(10,10))
plt.plot(t,g_euler)
plt.plot(t,g_analytical)
plt.plot(t,g_dnn_ag[0,:])
plt.legend(['euler','analytical','dnn'])
plt.xlabel('Time t')
plt.ylabel('g(t)')
plt.show()
## Example: Solving the one dimensional Poisson equation
The Poisson equation for $g(x)$ in one dimension is
<!-- Equation labels as ordinary links -->
<div id="poisson"></div>
$$
\begin{equation} \label{poisson} \tag{13}
-g''(x) = f(x)
\end{equation}
$$
where $f(x)$ is a given function for $x \in (0,1)$.
The conditions that $g(x)$ is chosen to fulfill, are
$$
\begin{align*}
g(0) &= 0 \\
g(1) &= 0
\end{align*}
$$
This equation can be solved numerically using programs based on e.g. Autograd or TensorFlow.
The results from the networks can then be compared to the analytical solution.
In addition, it could be interesting to see how a typical method for numerically solving second order ODEs compares to the neural networks.
## The specific equation to solve for
Here, the function $g(x)$ to solve for follows the equation
$$
-g''(x) = f(x),\qquad x \in (0,1)
$$
where $f(x)$ is a given function, along with the chosen conditions
<!-- Equation labels as ordinary links -->
<div id="cond"></div>
$$
\begin{aligned}
g(0) = g(1) = 0
\end{aligned}\label{cond} \tag{14}
$$
In this example, we consider the case when $f(x) = (3x + x^2)\exp(x)$.
For this case, a possible trial solution satisfying the conditions could be
$$
g_t(x) = x \cdot (1-x) \cdot N(P,x)
$$
The analytical solution for this problem is
$$
g(x) = x(1 - x)\exp(x)
$$
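Before training a network, it is easy to spot-check numerically that this analytical solution indeed satisfies $-g''(x) = f(x)$; the sketch below uses a central difference for $g''$ at an arbitrarily chosen point and step size.

```python
import numpy as np

def g(x):
    return x*(1 - x)*np.exp(x)

def f(x):
    return (3*x + x**2)*np.exp(x)

x0, dx = 0.4, 1e-4
g_dd = (g(x0 + dx) - 2*g(x0) + g(x0 - dx))/dx**2   # central difference for g''
print(abs(-g_dd - f(x0)))                          # small, of order dx**2
```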
## Solving the equation using Autograd
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being an one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referencing to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
    ## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
    ## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
## Set up the cost function specified for this Poisson equation:
# The right side of the ODE
def f(x):
return (3*x + x**2)*np.exp(x)
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the trial function
d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)
right_side = f(x)
err_sqr = (-d2_g_t - right_side)**2
cost_sum = np.sum(err_sqr)
return cost_sum/np.size(err_sqr)
# The trial solution:
def g_trial_deep(x,P):
return x*(1-x)*deep_neural_network(P,x)
# The analytic solution;
def g_analytic(x):
return x*(1-x)*np.exp(x)
if __name__ == '__main__':
npr.seed(4155)
    ## Decide the values of the arguments to the function to solve
Nx = 10
x = np.linspace(0,1, Nx)
## Set up the initial parameters
num_hidden_neurons = [200,100]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(x,P)
g_analytical = g_analytic(x)
    # Find the maximum absolute difference between the solutions:
max_diff = np.max(np.abs(g_dnn_ag - g_analytical))
print("The max absolute difference between the solutions is: %g"%max_diff)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, g_analytical)
plt.plot(x, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
## Comparing with a numerical scheme
It is possible to solve the Poisson equation numerically by using a Taylor series to approximate the second derivative.
Using a Taylor series, the second derivative can be expressed as
$$
g''(x) = \frac{g(x + \Delta x) - 2g(x) + g(x-\Delta x)}{\Delta x^2} + E_{\Delta x}(x)
$$
where $\Delta x$ is a small step size and $E_{\Delta x}(x)$ is the error term.
Neglecting the error term gives an approximation to the second derivative:
<!-- Equation labels as ordinary links -->
<div id="approx"></div>
$$
\begin{equation} \label{approx} \tag{15}
g''(x) \approx \frac{g(x + \Delta x) - 2g(x) + g(x-\Delta x)}{\Delta x^2}
\end{equation}
$$
If $x_i = i \Delta x = x_{i-1} + \Delta x$ and $g_i = g(x_i)$ for $i = 1,\dots N_x - 2$ with $N_x$ being the number of values for $x$, ([15](#approx)) becomes
$$
\begin{aligned}
g''(x_i) &\approx \frac{g(x_i + \Delta x) - 2g(x_i) + g(x_i -\Delta x)}{\Delta x^2} \\
&= \frac{g_{i+1} - 2g_i + g_{i-1}}{\Delta x^2}
\end{aligned}
$$
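A quick numerical check of this central-difference approximation, using $g(x) = \sin(x)$ (so that $g''(x) = -\sin(x)$) as an arbitrary test function:

```python
import numpy as np

g = np.sin
x0, dx = 0.7, 1e-3
approx = (g(x0 + dx) - 2*g(x0) + g(x0 - dx))/dx**2
exact = -np.sin(x0)
print(abs(approx - exact))    # small, of order dx**2
```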
Since we know from our problem that
$$
\begin{aligned}
-g''(x) &= f(x) \\
&= (3x + x^2)\exp(x)
\end{aligned}
$$
along with the conditions $g(0) = g(1) = 0$,
the following scheme can be used to find an approximate solution for $g(x)$ numerically:
<!-- Equation labels as ordinary links -->
<div id="odesys"></div>
$$
\begin{equation}
\begin{aligned}
-\Big( \frac{g_{i+1} - 2g_i + g_{i-1}}{\Delta x^2} \Big) &= f(x_i) \\
-g_{i+1} + 2g_i - g_{i-1} &= \Delta x^2 f(x_i)
\end{aligned}
\end{equation} \label{odesys} \tag{16}
$$
for $i = 1, \dots, N_x - 2$ where $g_0 = g_{N_x - 1} = 0$ and $f(x_i) = (3x_i + x_i^2)\exp(x_i)$, which is given for our specific problem.
The equation can be rewritten into a matrix equation:
$$
\begin{aligned}
\begin{pmatrix}
2 & -1 & 0 & \dots & 0 \\
-1 & 2 & -1 & \dots & 0 \\
\vdots & & \ddots & & \vdots \\
0 & \dots & -1 & 2 & -1 \\
0 & \dots & 0 & -1 & 2\\
\end{pmatrix}
\begin{pmatrix}
g_1 \\
g_2 \\
\vdots \\
g_{N_x - 3} \\
g_{N_x - 2}
\end{pmatrix}
&=
\Delta x^2
\begin{pmatrix}
f(x_1) \\
f(x_2) \\
\vdots \\
f(x_{N_x - 3}) \\
f(x_{N_x - 2})
\end{pmatrix} \\
\boldsymbol{A}\boldsymbol{g} &= \boldsymbol{f},
\end{aligned}
$$
which makes it possible to solve for the vector $\boldsymbol{g}$.
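The program further below builds $\boldsymbol{A}$ with an explicit loop; an equivalent and more compact construction of the same tridiagonal matrix is sketched here, with `Nx` denoting the total number of grid points as in that program.

```python
import numpy as np

Nx = 10                   # total number of grid points
n = Nx - 2                # number of interior points
A = 2*np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)   # tridiagonal matrix with 2 on the diagonal and -1 off it
```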
## Setting up the code
We can then compare the result from this numerical scheme with the output from our network using Autograd:
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being an one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referencing to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
    ## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
    ## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
## Set up the cost function specified for this Poisson equation:
# The right side of the ODE
def f(x):
return (3*x + x**2)*np.exp(x)
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the trial function
d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)
right_side = f(x)
err_sqr = (-d2_g_t - right_side)**2
cost_sum = np.sum(err_sqr)
return cost_sum/np.size(err_sqr)
# The trial solution:
def g_trial_deep(x,P):
return x*(1-x)*deep_neural_network(P,x)
# The analytic solution;
def g_analytic(x):
return x*(1-x)*np.exp(x)
if __name__ == '__main__':
npr.seed(4155)
    ## Decide the values of the arguments to the function to solve
Nx = 10
x = np.linspace(0,1, Nx)
## Set up the initial parameters
num_hidden_neurons = [200,100]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(x,P)
g_analytical = g_analytic(x)
    # Find the maximum absolute difference between the solutions:
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, g_analytical)
plt.plot(x, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
## Perform the computation using the numerical scheme
dx = 1/(Nx - 1)
# Set up the matrix A
A = np.zeros((Nx-2,Nx-2))
A[0,0] = 2
A[0,1] = -1
for i in range(1,Nx-3):
A[i,i-1] = -1
A[i,i] = 2
A[i,i+1] = -1
A[Nx - 3, Nx - 4] = -1
A[Nx - 3, Nx - 3] = 2
# Set up the vector f
f_vec = dx**2 * f(x[1:-1])
# Solve the equation
g_res = np.linalg.solve(A,f_vec)
g_vec = np.zeros(Nx)
g_vec[1:-1] = g_res
# Print the differences between each method
max_diff1 = np.max(np.abs(g_dnn_ag - g_analytical))
max_diff2 = np.max(np.abs(g_vec - g_analytical))
print("The max absolute difference between the analytical solution and DNN Autograd: %g"%max_diff1)
print("The max absolute difference between the analytical solution and numerical scheme: %g"%max_diff2)
# Plot the results
plt.figure(figsize=(10,10))
plt.plot(x,g_vec)
plt.plot(x,g_analytical)
plt.plot(x,g_dnn_ag[0,:])
plt.legend(['numerical scheme','analytical','dnn'])
plt.show()
## Partial Differential Equations
A partial differential equation (PDE) is an equation whose solution
is a function of multiple variables. The equation may involve all kinds
of combinations of which variables the function is differentiated with
respect to.
In general, a partial differential equation for a function $g(x_1,\dots,x_N)$ with $N$ variables may be expressed as
<!-- Equation labels as ordinary links -->
<div id="PDE"></div>
$$
\begin{equation} \label{PDE} \tag{17}
f\left(x_1, \, \dots \, , x_N, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1}, \dots , \frac{\partial g(x_1,\dots,x_N) }{\partial x_N}, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(x_1,\dots,x_N) }{\partial x_N^n} \right) = 0
\end{equation}
$$
where $f$ is an expression involving all kinds of possible mixed derivatives of $g(x_1,\dots,x_N)$ up to an order $n$. In order for the solution to be unique, some additional conditions must also be given.
## Type of problem
The problem our network must solve is similar to the ODE case.
We must have a trial solution $g_t$ at hand.
For instance, the trial solution could be expressed as
$$
\begin{align*}
g_t(x_1,\dots,x_N) = h_1(x_1,\dots,x_N) + h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P))
\end{align*}
$$
where $h_1(x_1,\dots,x_N)$ is a function that ensures $g_t(x_1,\dots,x_N)$ satisfies some given conditions.
The neural network $N(x_1,\dots,x_N,P)$ has weights and biases described by $P$ and $h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P))$ is an expression using the output from the neural network in some way.
The role of the function $h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P))$, is to ensure that the output of $N(x_1,\dots,x_N,P)$ is zero when $g_t(x_1,\dots,x_N)$ is evaluated at the values of $x_1,\dots,x_N$ where the given conditions must be satisfied. The function $h_1(x_1,\dots,x_N)$ should alone make $g_t(x_1,\dots,x_N)$ satisfy the conditions.
## Network requirements
The network then tries to minimize the cost function following the
same ideas as described for the ODE case, but now with more than one
variable to consider. The concept remains the same: find a set
of parameters $P$ such that the expression $f$ in ([17](#PDE)) is as
close to zero as possible.
As for the ODE case, the cost function is the mean squared error that
the network must try to minimize. The cost function for the network to
minimize is
$$
C\left(x_1, \dots, x_N, P\right) = \left( f\left(x_1, \, \dots \, , x_N, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1}, \dots , \frac{\partial g(x_1,\dots,x_N) }{\partial x_N}, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(x_1,\dots,x_N) }{\partial x_N^n} \right) \right)^2
$$
## More details
If we let $\boldsymbol{x} = \big( x_1, \dots, x_N \big)$ be an array containing the values for $x_1, \dots, x_N$ respectively, the cost function can be reformulated into the following:
$$
C\left(\boldsymbol{x}, P\right) = \left( f\left( \boldsymbol{x}, \frac{\partial g(\boldsymbol{x}) }{\partial x_1}, \dots , \frac{\partial g(\boldsymbol{x}) }{\partial x_N}, \frac{\partial g(\boldsymbol{x}) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(\boldsymbol{x}) }{\partial x_N^n} \right) \right)^2
$$
If we also have $M$ different sets of values for $x_1, \dots, x_N$, that is $\boldsymbol{x}_i = \big(x_1^{(i)}, \dots, x_N^{(i)}\big)$ for $i = 1,\dots,M$ being the rows in matrix $X$, the cost function can be generalized into
$$
C\left(X, P \right) = \sum_{i=1}^M \left( f\left( \boldsymbol{x}_i, \frac{\partial g(\boldsymbol{x}_i) }{\partial x_1}, \dots , \frac{\partial g(\boldsymbol{x}_i) }{\partial x_N}, \frac{\partial g(\boldsymbol{x}_i) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(\boldsymbol{x}_i) }{\partial x_N^n} \right) \right)^2.
$$
## Example: The diffusion equation
In one spatial dimension, the equation reads
$$
\frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2}
$$
where a possible choice of conditions are
$$
\begin{align*}
g(0,t) &= 0 ,\qquad t \geq 0 \\
g(1,t) &= 0, \qquad t \geq 0 \\
g(x,0) &= u(x),\qquad x\in [0,1]
\end{align*}
$$
with $u(x)$ being some given function.
## Defining the problem
For this case, we want to find $g(x,t)$ such that
<!-- Equation labels as ordinary links -->
<div id="diffonedim"></div>
$$
\begin{equation}
\frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2}
\end{equation} \label{diffonedim} \tag{18}
$$
and
$$
\begin{align*}
g(0,t) &= 0 ,\qquad t \geq 0 \\
g(1,t) &= 0, \qquad t \geq 0 \\
g(x,0) &= u(x),\qquad x\in [0,1]
\end{align*}
$$
with $u(x) = \sin(\pi x)$.
First, let us set up the deep neural network.
The deep neural network will follow the same structure as discussed in the examples solving the ODEs.
First, we will look into how Autograd could be used in a network tailored to solve for bivariate functions.
## Setting up the network using Autograd
The only change needed here is to extend our network such that
functions of multiple variables are correctly handled. In this case
we have two variables in our function to solve for, that is time $t$
and position $x$. The variables will be represented by a
one-dimensional array in the program. The program will evaluate the
network at each possible pair $(x,t)$, given an array for the desired
$x$-values and $t$-values to approximate the solution at.
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# x is now a point and a 1D numpy array; make it a column vector
num_coordinates = np.size(x,0)
x = x.reshape(num_coordinates,-1)
num_points = np.size(x,1)
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assume that the input layer does nothing to the input x
x_input = x
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output[0][0]
## Setting up the network using Autograd; The trial solution
The cost function must then iterate through the given arrays
containing values for $x$ and $t$, define a point $(x,t)$ at which the deep
neural network and the trial solution are evaluated, and then find
the Jacobian of the trial solution.
A possible trial solution for this PDE is
$$
g_t(x,t) = h_1(x,t) + x(1-x)tN(x,t,P)
$$
with $h_1(x,t)$ being a function ensuring that $g_t(x,t)$ satisfies our given conditions, and $N(x,t,P)$ being the output from the deep neural network using weights and biases for each layer from $P$.
To fulfill the conditions, $h_1(x,t)$ could be:
$$
h_1(x,t) = (1-t)\Big(u(x) - \big((1-x)u(0) + x u(1)\big)\Big) = (1-t)u(x) = (1-t)\sin(\pi x)
$$
since $u(0) = u(1) = 0$ and $u(x) = \sin(\pi x)$.
## Why the jacobian?
The Jacobian is used because the program must find the derivative of
the trial solution with respect to $x$ and $t$.
This gives the necessity of computing the Jacobian matrix, as we want
to evaluate the gradient with respect to $x$ and $t$ (note that the
Jacobian of a scalar-valued multivariate function is simply its
gradient).
In Autograd, the differentiation is by default done with respect to
the first input argument of your Python function. Since the point is
an array representing $x$ and $t$, the Jacobian is calculated using
the values of $x$ and $t$.
To find the second derivatives with respect to $x$ and $t$, the
Jacobian can be applied a second time. The result is the Hessian
matrix, which contains all the possible second-order
mixed derivatives of $g(x,t)$.
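A minimal demonstration of how Autograd's `jacobian` and `hessian` behave for a scalar function of a point $(x,t)$; the function `h` below is an arbitrary example, chosen only to show which entries hold which derivatives.

```python
import autograd.numpy as np
from autograd import jacobian, hessian

def h(point):
    x, t = point
    return x**2*t

point = np.array([1.0, 2.0])
dh = jacobian(h)(point)    # gradient: [dh/dx, dh/dt] = [2*x*t, x**2] = [4., 1.]
d2h = hessian(h)(point)    # 2x2 matrix of second derivatives

print(dh[1])               # derivative with respect to t
print(d2h[0][0])           # second derivative with respect to x
```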
# Set up the trial function:
def u(x):
return np.sin(np.pi*x)
def g_trial(point,P):
x,t = point
return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)
# The right side of the ODE:
def f(point):
return 0.
# The cost function:
def cost_function(P, x, t):
cost_sum = 0
g_t_jacobian_func = jacobian(g_trial)
g_t_hessian_func = hessian(g_trial)
for x_ in x:
for t_ in t:
point = np.array([x_,t_])
g_t = g_trial(point,P)
g_t_jacobian = g_t_jacobian_func(point,P)
g_t_hessian = g_t_hessian_func(point,P)
g_t_dt = g_t_jacobian[1]
g_t_d2x = g_t_hessian[0][0]
func = f(point)
err_sqr = ( (g_t_dt - g_t_d2x) - func)**2
cost_sum += err_sqr
return cost_sum
## Setting up the network using Autograd; The full program
Having set up the network, along with the trial solution and cost function, we can now see how the deep neural network performs by comparing the results to the analytical solution.
The analytical solution of our problem is
$$
g(x,t) = \exp(-\pi^2 t)\sin(\pi x)
$$
A possible way to implement a neural network solving the PDE, is given below.
Be aware, though, that it is fairly slow for the parameters used.
A better result is possible, but requires more iterations, and thus a longer time to complete.
Indeed, the program below is not optimal in its implementation, but rather serves as an example of how to implement and use a neural network to solve a PDE.
Using TensorFlow results in a much better execution time. Try it!
import autograd.numpy as np
from autograd import jacobian,hessian,grad
import autograd.numpy.random as npr
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
## Set up the network
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# x is now a point and a 1D numpy array; make it a column vector
num_coordinates = np.size(x,0)
x = x.reshape(num_coordinates,-1)
num_points = np.size(x,1)
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assume that the input layer does nothing to the input x
x_input = x
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output[0][0]
## Define the trial solution and cost function
def u(x):
return np.sin(np.pi*x)
def g_trial(point,P):
x,t = point
return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)
# The right side of the ODE:
def f(point):
return 0.
# The cost function:
def cost_function(P, x, t):
cost_sum = 0
g_t_jacobian_func = jacobian(g_trial)
g_t_hessian_func = hessian(g_trial)
for x_ in x:
for t_ in t:
point = np.array([x_,t_])
g_t = g_trial(point,P)
g_t_jacobian = g_t_jacobian_func(point,P)
g_t_hessian = g_t_hessian_func(point,P)
g_t_dt = g_t_jacobian[1]
g_t_d2x = g_t_hessian[0][0]
func = f(point)
err_sqr = ( (g_t_dt - g_t_d2x) - func)**2
cost_sum += err_sqr
return cost_sum /( np.size(x)*np.size(t) )
## For comparison, define the analytical solution
def g_analytic(point):
x,t = point
return np.exp(-np.pi**2*t)*np.sin(np.pi*x)
## Set up a function for training the network to solve for the equation
def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):
    ## Set up initial weights and biases
    N_hidden = np.size(num_neurons)
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
    P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since each input is a point (x,t), +1 to include bias
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: ',cost_function(P, x, t))
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
cost_grad = cost_function_grad(P, x , t)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_grad[l]
print('Final cost: ',cost_function(P, x, t))
return P
if __name__ == '__main__':
### Use the neural network:
npr.seed(15)
    ## Decide the values of the arguments to the function to solve
Nx = 10; Nt = 10
x = np.linspace(0, 1, Nx)
t = np.linspace(0,1,Nt)
## Set up the parameters for the network
num_hidden_neurons = [100, 25]
num_iter = 250
lmb = 0.01
P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)
## Store the results
g_dnn_ag = np.zeros((Nx, Nt))
G_analytical = np.zeros((Nx, Nt))
for i,x_ in enumerate(x):
for j, t_ in enumerate(t):
point = np.array([x_, t_])
g_dnn_ag[i,j] = g_trial(point,P)
G_analytical[i,j] = g_analytic(point)
    # Find the max difference between the analytical and the computed solution
diff_ag = np.abs(g_dnn_ag - G_analytical)
print('Max absolute difference between the analytical solution and the network: %g'%np.max(diff_ag))
## Plot the solutions in two dimensions, that being in position and time
T,X = np.meshgrid(t,x)
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))
s = ax.plot_surface(T,X,g_dnn_ag,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Analytical solution')
s = ax.plot_surface(T,X,G_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Difference')
s = ax.plot_surface(T,X,diff_ag,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
## Take some slices of the 3D plots just to see the solutions at particular times
indx1 = 0
indx2 = int(Nt/2)
indx3 = Nt-1
t1 = t[indx1]
t2 = t[indx2]
t3 = t[indx3]
# Slice the results from the DNN
res1 = g_dnn_ag[:,indx1]
res2 = g_dnn_ag[:,indx2]
res3 = g_dnn_ag[:,indx3]
# Slice the analytical results
res_analytical1 = G_analytical[:,indx1]
res_analytical2 = G_analytical[:,indx2]
res_analytical3 = G_analytical[:,indx3]
# Plot the slices
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t1)
plt.plot(x, res1)
plt.plot(x,res_analytical1)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t2)
plt.plot(x, res2)
plt.plot(x,res_analytical2)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t3)
plt.plot(x, res3)
plt.plot(x,res_analytical3)
plt.legend(['dnn','analytical'])
plt.show()
## Example: Solving the wave equation with Neural Networks
The wave equation is
$$
\frac{\partial^2 g(x,t)}{\partial t^2} = c^2\frac{\partial^2 g(x,t)}{\partial x^2}
$$
with $c$ being the specified wave speed.
Here, the chosen conditions are
$$
\begin{align*}
g(0,t) &= 0 \\
g(1,t) &= 0 \\
g(x,0) &= u(x) \\
\frac{\partial g(x,t)}{\partial t} \Big |_{t = 0} &= v(x)
\end{align*}
$$
where $\frac{\partial g(x,t)}{\partial t} \Big |_{t = 0}$ means the derivative of $g(x,t)$ with respect to $t$ is evaluated at $t = 0$, and $u(x)$ and $v(x)$ being given functions.
## The problem to solve for
The wave equation to solve for, is
<!-- Equation labels as ordinary links -->
<div id="wave"></div>
$$
\begin{equation} \label{wave} \tag{19}
\frac{\partial^2 g(x,t)}{\partial t^2} = c^2 \frac{\partial^2 g(x,t)}{\partial x^2}
\end{equation}
$$
where $c$ is the given wave speed.
The chosen conditions for this equation are
<!-- Equation labels as ordinary links -->
<div id="condwave"></div>
$$
\begin{aligned}
g(0,t) &= 0, &t \geq 0 \\
g(1,t) &= 0, &t \geq 0 \\
g(x,0) &= u(x), &x\in[0,1] \\
\frac{\partial g(x,t)}{\partial t}\Big |_{t = 0} &= v(x), &x \in [0,1]
\end{aligned} \label{condwave} \tag{20}
$$
In this example, let $c = 1$ and $u(x) = \sin(\pi x)$ and $v(x) = -\pi\sin(\pi x)$.
## The trial solution
Setting up the network is done in a similar manner as for the example of solving the diffusion equation.
The only things we have to change are the trial solution, such that it satisfies the conditions from ([20](#condwave)), and the cost function.
The trial solution becomes slightly different since we have other conditions than in the example of solving the diffusion equation. Here, a possible trial solution $g_t(x,t)$ is
$$
g_t(x,t) = h_1(x,t) + x(1-x)t^2N(x,t,P)
$$
where
$$
h_1(x,t) = (1-t^2)u(x) + tv(x)
$$
Note that this trial solution satisfies the conditions only if $u(0) = v(0) = u(1) = v(1) = 0$, which is the case in this example.
## The analytical solution
The analytical solution for our specific problem, is
$$
g(x,t) = \sin(\pi x)\cos(\pi t) - \sin(\pi x)\sin(\pi t)
$$
## Solving the wave equation - the full program using Autograd
import autograd.numpy as np
from autograd import hessian,grad
import autograd.numpy.random as npr
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
## Set up the trial function:
def u(x):
return np.sin(np.pi*x)
def v(x):
return -np.pi*np.sin(np.pi*x)
def h1(point):
x,t = point
return (1 - t**2)*u(x) + t*v(x)
def g_trial(point,P):
x,t = point
return h1(point) + x*(1-x)*t**2*deep_neural_network(P,point)
## Define the cost function
def cost_function(P, x, t):
cost_sum = 0
g_t_hessian_func = hessian(g_trial)
for x_ in x:
for t_ in t:
point = np.array([x_,t_])
g_t_hessian = g_t_hessian_func(point,P)
g_t_d2x = g_t_hessian[0][0]
g_t_d2t = g_t_hessian[1][1]
err_sqr = ( (g_t_d2t - g_t_d2x) )**2
cost_sum += err_sqr
return cost_sum / (np.size(t) * np.size(x))
## The neural network
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# x is now a point and a 1D numpy array; make it a column vector
num_coordinates = np.size(x,0)
x = x.reshape(num_coordinates,-1)
num_points = np.size(x,1)
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assume that the input layer does nothing to the input x
x_input = x
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
        # From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output[0][0]
## The analytical solution
def g_analytic(point):
x,t = point
return np.sin(np.pi*x)*np.cos(np.pi*t) - np.sin(np.pi*x)*np.sin(np.pi*t)
def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):
    ## Set up initial weights and biases
    N_hidden = np.size(num_neurons)
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
    P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since each input is a point (x,t), +1 to include bias
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: ',cost_function(P, x, t))
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
cost_grad = cost_function_grad(P, x , t)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_grad[l]
print('Final cost: ',cost_function(P, x, t))
return P
if __name__ == '__main__':
### Use the neural network:
npr.seed(15)
    ## Decide the values of the arguments to the function to solve
Nx = 10; Nt = 10
x = np.linspace(0, 1, Nx)
t = np.linspace(0,1,Nt)
## Set up the parameters for the network
num_hidden_neurons = [50,20]
num_iter = 1000
lmb = 0.01
P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)
## Store the results
res = np.zeros((Nx, Nt))
res_analytical = np.zeros((Nx, Nt))
for i,x_ in enumerate(x):
for j, t_ in enumerate(t):
point = np.array([x_, t_])
res[i,j] = g_trial(point,P)
res_analytical[i,j] = g_analytic(point)
diff = np.abs(res - res_analytical)
print("Max difference between analytical and solution from nn: %g"%np.max(diff))
## Plot the solutions in two dimensions, that being in position and time
T,X = np.meshgrid(t,x)
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))
s = ax.plot_surface(T,X,res,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Analytical solution')
s = ax.plot_surface(T,X,res_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Difference')
s = ax.plot_surface(T,X,diff,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
## Take some slices of the 3D plots just to see the solutions at particular times
indx1 = 0
indx2 = int(Nt/2)
indx3 = Nt-1
t1 = t[indx1]
t2 = t[indx2]
t3 = t[indx3]
# Slice the results from the DNN
res1 = res[:,indx1]
res2 = res[:,indx2]
res3 = res[:,indx3]
# Slice the analytical results
res_analytical1 = res_analytical[:,indx1]
res_analytical2 = res_analytical[:,indx2]
res_analytical3 = res_analytical[:,indx3]
# Plot the slices
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t1)
plt.plot(x, res1)
plt.plot(x,res_analytical1)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t2)
plt.plot(x, res2)
plt.plot(x,res_analytical2)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t3)
plt.plot(x, res3)
plt.plot(x,res_analytical3)
plt.legend(['dnn','analytical'])
plt.show()
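A minimal sketch of the logistic activation assumed by deep_neural_network above; the actual definitions of sigmoid, g_trial and cost_function appear earlier in this document, so this is only a hedged reminder of their typical form:
import autograd.numpy as np  # autograd's numpy wrapper, so grad() can differentiate through it
def sigmoid(z):
    # elementwise logistic activation
    return 1.0/(1.0 + np.exp(-z))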
## Resources on differential equations and deep learning
1. [Artificial neural networks for solving ordinary and partial differential equations by I.E. Lagaris et al](https://pdfs.semanticscholar.org/d061/df393e0e8fbfd0ea24976458b7d42419040d.pdf)
2. [Neural networks for solving differential equations by A. Honchar](https://becominghuman.ai/neural-networks-for-solving-differential-equations-fa230ac5e04c)
3. [Solving differential equations using neural networks by M.M Chiaramonte and M. Kiener](http://cs229.stanford.edu/proj2013/ChiaramonteKiener-SolvingDifferentialEquationsUsingNeuralNetworks.pdf)
4. [Introduction to Partial Differential Equations by A. Tveito, R. Winther](https://www.springer.com/us/book/9783540225515) | cc0-1.0 |
Clyde-fare/scikit-learn | examples/model_selection/plot_validation_curve.py | 228 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
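# A hedged sketch of the same call against the newer scikit-learn layout, where
# validation_curve moved from sklearn.learning_curve to sklearn.model_selection
# (keyword names assumed from current releases):
from sklearn.model_selection import validation_curve as validation_curve_new
train_scores_new, test_scores_new = validation_curve_new(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)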
| bsd-3-clause |
williamFalcon/pytorch-lightning | pytorch_lightning/tuner/batch_size_scaling.py | 1 | 10893 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import logging
import os
from typing import Optional, Tuple
import pytorch_lightning as pl
from pytorch_lightning.loggers.base import DummyLogger
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.data import has_len
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import garbage_collection_cuda, is_oom_error
from pytorch_lightning.utilities.parsing import lightning_getattr, lightning_hasattr, lightning_setattr
log = logging.getLogger(__name__)
def scale_batch_size(
trainer: "pl.Trainer",
model: "pl.LightningModule",
mode: str = "power",
steps_per_trial: int = 3,
init_val: int = 2,
max_trials: int = 25,
batch_arg_name: str = "batch_size",
) -> Optional[int]:
"""See :meth:`~pytorch_lightning.tuner.tuning.Tuner.scale_batch_size`"""
if trainer.fast_dev_run:
rank_zero_warn("Skipping batch size scaler since fast_dev_run is enabled.", UserWarning)
return
if not lightning_hasattr(model, batch_arg_name):
        raise MisconfigurationException(f"Field {batch_arg_name} not found in either `model` or `model.hparams`")
if hasattr(model, batch_arg_name) and hasattr(model, "hparams") and batch_arg_name in model.hparams:
rank_zero_warn(
f"Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!"
f" `model.{batch_arg_name}` will be used as the initial batch size for scaling."
" If this is not the intended behavior, please remove either one."
)
if hasattr(model.train_dataloader, "patch_loader_code"):
raise MisconfigurationException(
"The batch scaling feature cannot be used with dataloaders passed directly to `.fit()`."
" Please disable the feature or incorporate the dataloader into the model."
)
# Arguments we adjust during the batch size finder, save for restoring
__scale_batch_dump_params(trainer)
# Set to values that are required by the algorithm
__scale_batch_reset_params(trainer, model, steps_per_trial)
# Save initial model, that is loaded after batch size is found
save_path = os.path.join(trainer.default_root_dir, "scale_batch_size_temp_model.ckpt")
trainer.save_checkpoint(str(save_path))
if trainer.progress_bar_callback:
trainer.progress_bar_callback.disable()
# Initially we just double in size until an OOM is encountered
new_size, _ = _adjust_batch_size(trainer, batch_arg_name, value=init_val) # initially set to init_val
if mode == "power":
new_size = _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials)
elif mode == "binsearch":
new_size = _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials)
else:
raise ValueError("mode in method `scale_batch_size` could either be `power` or `binsearch`")
garbage_collection_cuda()
log.info(f"Finished batch size finder, will continue with full run using batch size {new_size}")
# Restore initial state of model
if trainer.is_global_zero:
trainer.checkpoint_connector.restore(str(save_path))
fs = get_filesystem(str(save_path))
if fs.exists(save_path):
fs.rm(save_path)
# Finish by resetting variables so trainer is ready to fit model
__scale_batch_restore_params(trainer)
if trainer.progress_bar_callback:
trainer.progress_bar_callback.enable()
return new_size
def __scale_batch_dump_params(trainer: "pl.Trainer") -> None:
# Prevent going into infinite loop
trainer.__dumped_params = {
"auto_lr_find": trainer.auto_lr_find,
"current_epoch": trainer.current_epoch,
"max_steps": trainer.max_steps,
"weights_summary": trainer.weights_summary,
"logger": trainer.logger,
"callbacks": trainer.callbacks,
"checkpoint_callback": trainer.checkpoint_callback,
"auto_scale_batch_size": trainer.auto_scale_batch_size,
"limit_train_batches": trainer.limit_train_batches,
"model": trainer.model,
}
def __scale_batch_reset_params(trainer: "pl.Trainer", model: "pl.LightningModule", steps_per_trial: int) -> None:
trainer.auto_scale_batch_size = None # prevent recursion
trainer.auto_lr_find = False # avoid lr find being called multiple times
trainer.fit_loop.current_epoch = 0
trainer.fit_loop.max_steps = steps_per_trial # take few steps
trainer.weights_summary = None # not needed before full run
trainer.logger = DummyLogger()
trainer.callbacks = [] # not needed before full run
trainer.limit_train_batches = 1.0
trainer.optimizers, trainer.lr_schedulers = [], [] # required for saving
trainer.model = model # required for saving
def __scale_batch_restore_params(trainer: "pl.Trainer") -> None:
trainer.auto_lr_find = trainer.__dumped_params["auto_lr_find"]
trainer.fit_loop.current_epoch = trainer.__dumped_params["current_epoch"]
trainer.fit_loop.max_steps = trainer.__dumped_params["max_steps"]
trainer.weights_summary = trainer.__dumped_params["weights_summary"]
trainer.logger = trainer.__dumped_params["logger"]
trainer.callbacks = trainer.__dumped_params["callbacks"]
trainer.auto_scale_batch_size = trainer.__dumped_params["auto_scale_batch_size"]
trainer.limit_train_batches = trainer.__dumped_params["limit_train_batches"]
trainer.model = trainer.__dumped_params["model"]
del trainer.__dumped_params
def _run_power_scaling(
trainer: "pl.Trainer", model: "pl.LightningModule", new_size: int, batch_arg_name: str, max_trials: int
) -> int:
"""Batch scaling mode where the size is doubled at each iteration until an OOM error is encountered."""
for _ in range(max_trials):
garbage_collection_cuda()
trainer.fit_loop.global_step = 0 # reset after each try
try:
# Try fit
trainer.tuner._run(model)
# Double in size
new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc="succeeded")
except RuntimeError as exception:
# Only these errors should trigger an adjustment
if is_oom_error(exception):
                # If we fail in power mode, halve the size and return
garbage_collection_cuda()
new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc="failed")
break
else:
raise # some other error not memory related
if changed:
# Force the train dataloader to reset as the batch size has changed
trainer.reset_train_dataloader(model)
else:
break
return new_size
def _run_binsearch_scaling(
trainer: "pl.Trainer", model: "pl.LightningModule", new_size: int, batch_arg_name: str, max_trials: int
) -> int:
"""Batch scaling mode where the size is initially is doubled at each iteration
until an OOM error is encountered. Hereafter, the batch size is further
refined using a binary search"""
low = 1
high = None
count = 0
while True:
garbage_collection_cuda()
trainer.fit_loop.global_step = 0 # reset after each try
try:
# Try fit
trainer.tuner._run(model)
count += 1
if count > max_trials:
break
            # Successful run: raise the lower bound and try a larger size
low = new_size
if high:
if high - low <= 1:
break
midval = (high + low) // 2
new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc="succeeded")
else:
new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc="succeeded")
if changed:
# Force the train dataloader to reset as the batch size has changed
trainer.reset_train_dataloader(model)
else:
break
except RuntimeError as exception:
# Only these errors should trigger an adjustment
if is_oom_error(exception):
                # On OOM, tighten the upper bound and continue the binary search
garbage_collection_cuda()
high = new_size
midval = (high + low) // 2
new_size, _ = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc="failed")
if high - low <= 1:
break
else:
raise # some other error not memory related
return new_size
def _adjust_batch_size(
trainer: "pl.Trainer",
batch_arg_name: str = "batch_size",
factor: float = 1.0,
value: Optional[int] = None,
desc: Optional[str] = None,
) -> Tuple[int, bool]:
"""Helper function for adjusting the batch size.
Args:
trainer: instance of pytorch_lightning.Trainer
batch_arg_name: name of the field where batch_size is stored.
factor: value which the old batch size is multiplied by to get the
new batch size
value: if a value is given, will override the batch size with this value.
Note that the value of `factor` will not have an effect in this case
desc: either `succeeded` or `failed`. Used purely for logging
Returns:
The new batch size for the next trial and a bool that signals whether the
new value is different than the previous batch size.
"""
model = trainer.lightning_module
batch_size = lightning_getattr(model, batch_arg_name)
new_size = value if value is not None else int(batch_size * factor)
if desc:
log.info(f"Batch size {batch_size} {desc}, trying batch size {new_size}")
if not _is_valid_batch_size(new_size, trainer.train_dataloader):
new_size = min(new_size, len(trainer.train_dataloader.dataset))
changed = new_size != batch_size
lightning_setattr(model, batch_arg_name, new_size)
return new_size, changed
def _is_valid_batch_size(current_size, dataloader):
return not has_len(dataloader) or current_size <= len(dataloader)
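# A hedged usage sketch of the batch-size finder above: in this version of
# Lightning it is driven via the Trainer's `auto_scale_batch_size` flag together
# with `trainer.tune()`, and the model is assumed to expose a `batch_size`
# attribute (or `hparams.batch_size`) that its dataloaders read.
def _example_tune_batch_size(model: "pl.LightningModule") -> int:
    trainer = pl.Trainer(auto_scale_batch_size="binsearch", max_epochs=1)
    trainer.tune(model)  # calls scale_batch_size() internally
    return lightning_getattr(model, "batch_size")  # the tuned value is written back to the model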
| apache-2.0 |
jorge2703/scikit-learn | examples/svm/plot_iris.py | 223 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
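# The meshgrid/predict/reshape idiom in the loop above is the core of the plot;
# a small reusable sketch (the helper name is illustrative, not part of the
# original example):
def decision_surface(clf, X, h=.02):
    # return xx, yy, Z ready for plt.contourf for a fitted two-feature classifier
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return xx, yy, Z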
| bsd-3-clause |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/storage/_format.py | 1 | 4437 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Table transformer for storage commands"""
from azure.cli.core.commands.transform import build_table_output
from knack.log import get_logger
logger = get_logger(__name__)
def transform_container_list(result):
return build_table_output(result, [
('Name', 'name'),
('Lease Status', 'properties.leaseStatus'),
('Last Modified', 'properties.lastModified')
])
def transform_container_show(result):
return build_table_output(result, [
('Name', 'name'),
('Lease Status', 'properties.lease.status'),
('Last Modified', 'properties.lastModified')
])
def transform_blob_output(result):
return build_table_output(result, [
('Name', 'name'),
('Blob Type', 'properties.blobType'),
('Blob Tier', 'properties.blobTier'),
('Length', 'properties.contentLength'),
('Content Type', 'properties.contentSettings.contentType'),
('Last Modified', 'properties.lastModified'),
('Snapshot', 'snapshot')
])
def transform_share_list(result):
return build_table_output(result, [
('Name', 'name'),
('Quota', 'properties.quota'),
('Last Modified', 'properties.lastModified')
])
def transform_file_output(result):
""" Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. """
from collections import OrderedDict
new_result = []
iterable = result if isinstance(result, list) else result.get('items', result)
for item in iterable:
new_entry = OrderedDict()
entity_type = item['type'] # type property is added by transform_file_directory_result
is_dir = entity_type == 'dir'
new_entry['Name'] = item['name'] + '/' if is_dir else item['name']
new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength']
new_entry['Type'] = item['type']
new_entry['Last Modified'] = item['properties']['lastModified'] or ' '
new_result.append(new_entry)
return sorted(new_result, key=lambda k: k['Name'])
def transform_entity_show(result):
from collections import OrderedDict
timestamp = result.pop('Timestamp')
result.pop('etag')
# Reassemble the output
new_result = OrderedDict()
new_result['Partition'] = result.pop('PartitionKey')
new_result['Row'] = result.pop('RowKey')
for key in sorted(result.keys()):
new_result[key] = result[key]
new_result['Timestamp'] = timestamp
return new_result
def transform_message_show(result):
from collections import OrderedDict
ordered_result = []
for item in result:
new_result = OrderedDict()
new_result['MessageId'] = item.pop('id')
new_result['Content'] = item.pop('content')
new_result['InsertionTime'] = item.pop('insertionTime')
new_result['ExpirationTime'] = item.pop('expirationTime')
for key in sorted(item.keys()):
new_result[key] = item[key]
ordered_result.append(new_result)
return ordered_result
def transform_boolean_for_table(result):
for key in result:
result[key] = str(result[key])
return result
def transform_file_directory_result(result):
"""
    Transform the result returned from the file and directory listing API.
    This transformer adds and removes properties from the File and Directory objects in the
    given list in order to align their properties and offer a clearer view of the file and
    directory listing.
"""
from ._transformers import transform_share_directory_json_output, transform_share_file_json_output
return_list = []
for each in result:
if getattr(each, 'is_directory', None):
setattr(each, 'type', 'dir')
each = transform_share_directory_json_output(each)
else:
setattr(each, 'type', 'file')
each = transform_share_file_json_output(each)
return_list.append(each)
return return_list
def transform_metadata_show(result):
return result.metadata
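# A hedged illustration of transform_entity_show on a hand-made entity dict
# (the field values below are invented for demonstration):
def _example_entity_show():
    sample = {'PartitionKey': 'p1', 'RowKey': 'r1', 'etag': 'W/"0x1"',
              'Timestamp': '2021-01-01T00:00:00Z', 'Color': 'blue'}
    shown = transform_entity_show(sample)
    # keys come out as Partition, Row, the remaining fields sorted, then Timestamp
    return list(shown) == ['Partition', 'Row', 'Color', 'Timestamp']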
| mit |
microsoft/onnxruntime | samples/python/training/orttrainer/pytorch_transformer/pt_train.py | 1 | 3379 | import argparse
import torch
import torch.nn as nn
from pt_model import TransformerModel
from utils import get_batch, prepare_data
def train(model, data_source, device, epoch, args, bptt=35):
total_loss = 0.0
model.train()
for batch, i in enumerate(range(0, data_source.size(0) - 1, bptt)):
data, targets = get_batch(data_source, i)
optimizer.zero_grad()
output = model(data)
loss = criterion(output.view(-1, 28785), targets)
loss.backward()
optimizer.step()
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
print(
"epoch {:3d} | {:5d}/{:5d} batches | loss {:5.2f}".format(
epoch, batch, len(data_source) // bptt, cur_loss
)
)
total_loss = 0
def evaluate(model, data_source, criterion, bptt=35):
total_loss = 0.0
model.eval()
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i)
output = model(data)
output_flat = output.view(-1, 28785)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description="PyTorch TransformerModel example")
parser.add_argument(
"--batch-size", type=int, default=20, metavar="N", help="input batch size for training (default: 20)"
)
parser.add_argument(
"--test-batch-size", type=int, default=20, metavar="N", help="input batch size for testing (default: 20)"
)
parser.add_argument("--epochs", type=int, default=2, metavar="N", help="number of epochs to train (default: 2)")
parser.add_argument("--lr", type=float, default=0.001, metavar="LR", help="learning rate (default: 0.001)")
parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training")
parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument(
"--log-interval",
type=int,
default=200,
metavar="N",
help="how many batches to wait before logging training status (default: 200)",
)
# Basic setup
args = parser.parse_args()
if not args.no_cuda and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
torch.manual_seed(args.seed)
# Model
criterion = nn.CrossEntropyLoss()
lr = 0.001
model = TransformerModel(28785, 200, 2, 200, 2, 0.2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# Preparing data
train_data, val_data, test_data = prepare_data(device, args.batch_size, args.test_batch_size)
# Train
for epoch in range(1, args.epochs + 1):
train(model, train_data, device, epoch, args)
val_loss = evaluate(model, val_data, criterion)
print("-" * 89)
print("| end of epoch {:3d} | valid loss {:5.2f} | ".format(epoch, val_loss))
print("-" * 89)
# Evaluate
test_loss = evaluate(model, test_data, criterion)
print("=" * 89)
print("| End of training | test loss {:5.2f}".format(test_loss))
print("=" * 89)
| mit |
vigilv/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 226 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
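# A hedged variant of the staged-deviance loop above that relies on the public
# staged_predict API and mean_squared_error instead of the private clf.loss_:
test_score_alt = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
    test_score_alt[i] = mean_squared_error(y_test, y_pred)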
| bsd-3-clause |
jorge2703/scikit-learn | examples/plot_digits_pipe.py | 249 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
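# A hedged follow-up: the settings chosen by the grid search can be read off the
# fitted search object directly (in later scikit-learn releases GridSearchCV
# lives in sklearn.model_selection, but these attributes are unchanged):
print("chosen n_components:", estimator.best_estimator_.named_steps['pca'].n_components)
print("best parameters:", estimator.best_params_)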
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/linear_model/plot_theilsen.py | 231 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
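# A hedged addendum: the fitted slopes and intercepts of the three estimators can
# also be compared numerically (RANSAC keeps its fitted base model on estimator_):
for name, estimator in estimators:
    fitted = estimator.estimator_ if name == 'RANSAC' else estimator
    print(name, fitted.coef_, fitted.intercept_)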
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/tests/test_qda.py | 154 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 9 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
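# A hedged note for newer scikit-learn releases, where sklearn.qda.QDA was folded
# into sklearn.discriminant_analysis as QuadraticDiscriminantAnalysis; the priors
# and reg_param arguments carry over unchanged:
def test_qda_new_import():
    from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
    with ignore_warnings():
        y_pred = clf.fit(X2, y).predict(X2)
    assert_array_equal(y_pred, y)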
| bsd-3-clause |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/benchmark/dummy_mt.py | 1 | 3605 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task('dummy_mt')
class DummyMTTask(FairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--dict-size', default=49996, type=int)
parser.add_argument('--dataset-size', default=100000, type=int)
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
seq = torch.arange(args.tokens_per_sample + 1) + dictionary.pad() + 1
self.dummy_src = seq[:-1]
self.dummy_tgt = seq[1:]
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task. """
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol('word{}'.format(i))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.args.max_sentences is not None:
bsz = self.args.max_sentences
else:
bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample)
tgt = torch.stack([self.dummy_tgt for _ in range(bsz)])
self.datasets[split] = DummyDataset(
{
'id': 1,
'net_input': {
'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]),
'src_lengths': torch.full(
(bsz, ), self.args.tokens_per_sample, dtype=torch.long
),
'prev_output_tokens': tgt.clone(),
},
'target': tgt,
'nsentences': bsz,
'ntokens': bsz * self.args.tokens_per_sample,
},
num_items=self.args.dataset_size,
item_size=self.args.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
| bsd-3-clause |
a-doumoulakis/tensorflow | tensorflow/contrib/learn/python/learn/datasets/text_datasets.py | 122 | 2703 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile
DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'
def maybe_download_dbpedia(data_dir):
"""Download if DBpedia data is not present."""
train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
archive_path = base.maybe_download(
'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
tfile = tarfile.open(archive_path, 'r:*')
tfile.extractall(data_dir)
def load_dbpedia(size='small', test_with_fake_data=False):
"""Get DBpedia datasets from CSV files."""
if not test_with_fake_data:
data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
maybe_download_dbpedia(data_dir)
train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')
if size == 'small':
# Reduce the size of original data by a factor of 1000.
base.shrink_csv(train_path, 1000)
base.shrink_csv(test_path, 1000)
train_path = train_path.replace('train.csv', 'train_small.csv')
test_path = test_path.replace('test.csv', 'test_small.csv')
else:
module_path = os.path.dirname(__file__)
train_path = os.path.join(module_path, 'data', 'text_train.csv')
test_path = os.path.join(module_path, 'data', 'text_test.csv')
train = base.load_csv_without_header(
train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
test = base.load_csv_without_header(
test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
return base.Datasets(train=train, validation=None, test=test)
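# A hedged usage sketch: with size='small' the downloaded CSVs are shrunk by a
# factor of 1000 before being read, and the returned namedtuple exposes train and
# test splits with .data and .target arrays.
def _example_load_dbpedia():
  dbpedia = load_dbpedia(size='small')
  return len(dbpedia.train.data), len(dbpedia.test.data)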
| apache-2.0 |
CompPhysics/MachineLearning | doc/src/DecisionTrees/Programs/dtcancer.py | 2 | 1507 | import os
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
from IPython.display import Image
from pydot import graph_from_dot_data
import pandas as pd
import numpy as np
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
cancer = load_breast_cancer()
X = pd.DataFrame(cancer.data, columns=cancer.feature_names)
print(X)
y = pd.Categorical.from_codes(cancer.target, cancer.target_names)
y = pd.get_dummies(y)
print(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
tree_clf = DecisionTreeClassifier(max_depth=5)
tree_clf.fit(X_train, y_train)
export_graphviz(
tree_clf,
out_file="DataFiles/cancer.dot",
feature_names=cancer.feature_names,
class_names=cancer.target_names,
rounded=True,
filled=True
)
cmd = 'dot -Tpng DataFiles/cancer.dot -o DataFiles/cancer.png'
os.system(cmd)
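# A hedged alternative to shelling out to the dot binary: the pydot helper
# imported above (graph_from_dot_data) can render the exported file directly.
with open("DataFiles/cancer.dot") as f:
    (pydot_graph,) = graph_from_dot_data(f.read())
pydot_graph.write_png("DataFiles/cancer_pydot.png")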
| cc0-1.0 |
yukisakurai/hhntup | higgstautau/trigger/matching.py | 4 | 7006 | from rootpy.tree.filtering import EventFilter
from math import *
from . import utils as triggerutils
from . import log; log = log[__name__]
from .. import utils
from ..units import GeV
from .. import datasets
class TauTriggerMatchIndex(EventFilter):
"""
Match reco taus to trigger taus. If there are more than two EF taus, take
the leading two.
"""
def __init__(self,
config,
datatype,
year,
dR=0.2,
passthrough=False,
**kwargs):
super(TauTriggerMatchIndex, self).__init__(
passthrough=passthrough,
**kwargs)
if not passthrough:
self.config = config
self.dR = dR
year %= 1000
self.year = year
"""
WARNING: possible bias if matching between MC and data differs
"""
if datatype == datasets.DATA:
if year == 11:
self.passes = self.passes_data11
elif year == 12:
self.passes = self.passes_data12
else:
raise ValueError(
"No data trigger matching defined for year %d" %
year)
elif datatype == datasets.MC:
if year == 11:
self.passes = self.passes_mc11
elif year == 12:
self.passes = self.passes_mc12
else:
raise ValueError(
"No MC trigger matching defined for year %d" %
year)
else:
raise ValueError(
"No trigger matching defined for datatype %d" %
datatype)
def passes_mc11(self, event):
"""
Matching performed during trigger emulation with CoEPPTrigTool
"""
event.taus.select(lambda tau: tau.trigger_match_index > -1)
self.match_sanity(event)
return len(event.taus) >= 2
def passes_data11(self, event):
if 177986 <= event.RunNumber <= 187815: # Periods B-K
trigger = 'EF_tau29_medium1_tau20_medium1'
elif 188902 <= event.RunNumber <= 191933: # Periods L-M
trigger = 'EF_tau29T_medium1_tau20T_medium1'
else:
raise ValueError("No trigger defined for run %i" % event.RunNumber)
self.match_index(event, trigger)
event.taus.select(lambda tau: tau.trigger_match_index > -1)
self.match_sanity(event)
return len(event.taus) >= 2
def passes_mc12(self, event):
self.match_index(event, 'EF_tau29Ti_medium1_tau20Ti_medium1')
event.taus.select(lambda tau: tau.trigger_match_index > -1)
self.match_sanity(event)
return len(event.taus) >= 2
def passes_data12(self, event):
self.match_index(event, 'EF_tau29Ti_medium1_tau20Ti_medium1')
event.taus.select(lambda tau: tau.trigger_match_index > -1)
self.match_sanity(event)
return len(event.taus) >= 2
def match_index(self, event, trigger):
# get indices of trigger taus associated with this trigger
trigger_idx = triggerutils.get_tau_trigger_obj_idx(
self.config,
event,
trigger)
# trigger_idx can contain 3 indices
# will need to take the leading two
taus = list(event.taus)
# for each EF tau find closest matching reco tau
for EF_idx in trigger_idx:
trigger_tau = event.taus_EF.getitem(EF_idx)
closest_dR = 99999
closest_tau = None
for tau in taus:
dR = utils.dR(
trigger_tau.eta, trigger_tau.phi,
tau.eta, tau.phi)
if dR < self.dR and dR < closest_dR:
closest_dR = dR
closest_tau = tau
if closest_tau is not None:
closest_tau.trigger_match_index = EF_idx
# remove match from future matches (greedy match)
taus.remove(closest_tau)
def match_sanity(self, event):
# sanity check
if len(event.taus) < 3:
return
print '-' * 20
print "Run: %d, Event %d" % (event.RunNumber, event.EventNumber)
fmt = "%d: match index: %d reco pT: %f EF pT: %f"
for i, tau in enumerate(event.taus):
print fmt % (i, tau.trigger_match_index, tau.pt,
event.taus_EF.getitem(tau.trigger_match_index).pt)
print "dR with all other taus:"
for j, tau2 in enumerate(event.taus):
if tau2 != tau:
print j, utils.dR(
tau.eta, tau.phi,
tau2.eta, tau2.phi)
print '='
class TauTriggerMatchThreshold(EventFilter):
"""
Match previously matched reco taus to thresholds of the trigger
"""
def __init__(self,
datatype,
tree,
passthrough=False,
**kwargs):
self.datatype = datatype
self.tree = tree
super(TauTriggerMatchThreshold, self).__init__(
passthrough=passthrough,
**kwargs)
def passes(self, event):
if self.datatype == datasets.EMBED:
assert len(event.taus) == 2
assert event.taus[0].pt >= event.taus[1].pt
# taus are already sorted in descending order by pT by TauLeadSublead
tau1, tau2 = event.taus
tau1.trigger_match_thresh = 29
tau2.trigger_match_thresh = 20
else:
self.match_threshold(event, (29, 20))
return True
def match_threshold(self, event, thresholds):
"""
thresholds must be in descending order
TODO: Use the info stored in the D3PD for 2012:
trig_EF_tau_EF_tau29Ti_medium1_tau20Ti_medium1
if(trig_EF_tau_EF_tau20Ti_medium1==1) tau_trigger_match_thresh = 20
if(trig_EF_tau_EF_tau29Ti_medium1==1) tau_trigger_match_thresh = 29
"""
assert len(event.taus) == 2
assert len(thresholds) == 2
# assume only matched taus remain in event.taus
taus = [(tau, event.taus_EF.getitem(tau.trigger_match_index)) for
tau in event.taus]
# sort by pT of EF tau
taus = sorted(taus, key=lambda tau: tau[1].pt, reverse=True)
# assign thresholds in descending order
for i in xrange(len(taus)):
taus[i][0].trigger_match_thresh = thresholds[i]
# sanity check THIS SOMETIMES FAILS!
if taus[i][1].pt < thresholds[i] * GeV:
log.warning("EF pT %f less than trigger threshold %f" % (
taus[i][1].pt, thresholds[i] * GeV))
self.tree.tau_trigger_match_error = True
| gpl-3.0 |
Clyde-fare/scikit-learn | examples/linear_model/plot_ridge_path.py | 253 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
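# A hedged cross-check: with fit_intercept=False each ridge solution satisfies
# the normal equations (X.T X + alpha * I) w = X.T y, so any point on the path
# can be reproduced directly:
a0 = alphas[0]
w0 = np.linalg.solve(X.T.dot(X) + a0 * np.eye(X.shape[1]), X.T.dot(y))
# w0 agrees with coefs[0] up to numerical precision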
| bsd-3-clause |
psf/black | src/black/trans.py | 1 | 82244 | """
String transformers that can split and merge strings.
"""
import re
import sys
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from typing import (
Any,
Callable,
ClassVar,
Collection,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
if sys.version_info < (3, 8):
from typing_extensions import Final, Literal
else:
from typing import Literal, Final
from mypy_extensions import trait
from black.brackets import BracketMatchError
from black.comments import contains_pragma_comment
from black.lines import Line, append_leaves
from black.mode import Feature
from black.nodes import (
CLOSING_BRACKETS,
OPENING_BRACKETS,
STANDALONE_COMMENT,
is_empty_lpar,
is_empty_par,
is_empty_rpar,
parent_type,
replace_child,
syms,
)
from black.rusty import Err, Ok, Result
from black.strings import (
assert_is_leaf_string,
get_string_prefix,
has_triple_quotes,
normalize_string_quotes,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
class CannotTransform(Exception):
"""Base class for errors raised by Transformers."""
# types
T = TypeVar("T")
LN = Union[Leaf, Node]
Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]]
Index = int
NodeType = int
ParserState = int
StringID = int
TResult = Result[T, CannotTransform] # (T)ransform Result
TMatchResult = TResult[Index]
def TErr(err_msg: str) -> Err[CannotTransform]:
"""(T)ransform Err
Convenience function used when working with the TResult type.
"""
cant_transform = CannotTransform(err_msg)
return Err(cant_transform)
def hug_power_op(line: Line, features: Collection[Feature]) -> Iterator[Line]:
"""A transformer which normalizes spacing around power operators."""
# Performance optimization to avoid unnecessary Leaf clones and other ops.
for leaf in line.leaves:
if leaf.type == token.DOUBLESTAR:
break
else:
raise CannotTransform("No doublestar token was found in the line.")
def is_simple_lookup(index: int, step: Literal[1, -1]) -> bool:
# Brackets and parentheses indicate calls, subscripts, etc. ...
# basically stuff that doesn't count as "simple". Only a NAME lookup
# or dotted lookup (eg. NAME.NAME) is OK.
if step == -1:
disallowed = {token.RPAR, token.RSQB}
else:
disallowed = {token.LPAR, token.LSQB}
while 0 <= index < len(line.leaves):
current = line.leaves[index]
if current.type in disallowed:
return False
if current.type not in {token.NAME, token.DOT} or current.value == "for":
# If the current token isn't disallowed, we'll assume this is simple as
# only the disallowed tokens are semantically attached to this lookup
# expression we're checking. Also, stop early if we hit the 'for' bit
# of a comprehension.
return True
index += step
return True
def is_simple_operand(index: int, kind: Literal["base", "exponent"]) -> bool:
        # An operand is considered "simple" if it's a NAME, a numeric CONSTANT, a simple
# lookup (see above), with or without a preceding unary operator.
start = line.leaves[index]
if start.type in {token.NAME, token.NUMBER}:
return is_simple_lookup(index, step=(1 if kind == "exponent" else -1))
if start.type in {token.PLUS, token.MINUS, token.TILDE}:
if line.leaves[index + 1].type in {token.NAME, token.NUMBER}:
# step is always one as bases with a preceding unary op will be checked
# for simplicity starting from the next token (so it'll hit the check
# above).
return is_simple_lookup(index + 1, step=1)
return False
new_line = line.clone()
should_hug = False
for idx, leaf in enumerate(line.leaves):
new_leaf = leaf.clone()
if should_hug:
new_leaf.prefix = ""
should_hug = False
should_hug = (
(0 < idx < len(line.leaves) - 1)
and leaf.type == token.DOUBLESTAR
and is_simple_operand(idx - 1, kind="base")
and line.leaves[idx - 1].value != "lambda"
and is_simple_operand(idx + 1, kind="exponent")
)
if should_hug:
new_leaf.prefix = ""
# We have to be careful to make a new line properly:
# - bracket related metadata must be maintained (handled by Line.append)
# - comments need to copied over, updating the leaf IDs they're attached to
new_line.append(new_leaf, preformatted=True)
for comment_leaf in line.comments_after(leaf):
new_line.append(comment_leaf, preformatted=True)
yield new_line
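# A hedged illustration of the transformation above, mirroring Black's documented
# power-operator style: the operator is hugged only when both operands are
# "simple" (names, numbers, dotted lookups, optionally behind a unary op).
#
#     i = i ** 2         ->  i = i**2
#     j = -(2 ** x)      ->  j = -(2**x)
#     k = a[0] ** b.c    is left unchanged, because a[0] is not a simple base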
class StringTransformer(ABC):
"""
An implementation of the Transformer protocol that relies on its
subclasses overriding the template methods `do_match(...)` and
`do_transform(...)`.
This Transformer works exclusively on strings (for example, by merging
or splitting them).
The following sections can be found among the docstrings of each concrete
StringTransformer subclass.
Requirements:
Which requirements must be met of the given Line for this
StringTransformer to be applied?
Transformations:
If the given Line meets all of the above requirements, which string
transformations can you expect to be applied to it by this
StringTransformer?
Collaborations:
What contractual agreements does this StringTransformer have with other
        StringTransformers? Such collaborations should be eliminated/minimized
as much as possible.
"""
__name__: Final = "StringTransformer"
# Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
# `abc.ABC`.
def __init__(self, line_length: int, normalize_strings: bool) -> None:
self.line_length = line_length
self.normalize_strings = normalize_strings
@abstractmethod
def do_match(self, line: Line) -> TMatchResult:
"""
Returns:
* Ok(string_idx) such that `line.leaves[string_idx]` is our target
string, if a match was able to be made.
OR
* Err(CannotTransform), if a match was not able to be made.
"""
@abstractmethod
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
"""
Yields:
* Ok(new_line) where new_line is the new transformed line.
OR
* Err(CannotTransform) if the transformation failed for some reason. The
`do_match(...)` template method should usually be used to reject
the form of the given Line, but in some cases it is difficult to
know whether or not a Line meets the StringTransformer's
requirements until the transformation is already midway.
Side Effects:
This method should NOT mutate @line directly, but it MAY mutate the
Line's underlying Node structure. (WARNING: If the underlying Node
structure IS altered, then this method should NOT be allowed to
yield an CannotTransform after that point.)
"""
def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]:
"""
StringTransformer instances have a call signature that mirrors that of
the Transformer type.
Raises:
CannotTransform(...) if the concrete StringTransformer class is unable
to transform @line.
"""
# Optimization to avoid calling `self.do_match(...)` when the line does
# not contain any string.
if not any(leaf.type == token.STRING for leaf in line.leaves):
raise CannotTransform("There are no strings in this line.")
match_result = self.do_match(line)
if isinstance(match_result, Err):
cant_transform = match_result.err()
raise CannotTransform(
f"The string transformer {self.__class__.__name__} does not recognize"
" this line as one that it can transform."
) from cant_transform
string_idx = match_result.ok()
for line_result in self.do_transform(line, string_idx):
if isinstance(line_result, Err):
cant_transform = line_result.err()
raise CannotTransform(
"StringTransformer failed while attempting to transform string."
) from cant_transform
line = line_result.ok()
yield line
@dataclass
class CustomSplit:
"""A custom (i.e. manual) string split.
A single CustomSplit instance represents a single substring.
Examples:
Consider the following string:
```
"Hi there friend."
" This is a custom"
f" string {split}."
```
This string will correspond to the following three CustomSplit instances:
```
CustomSplit(False, 16)
CustomSplit(False, 17)
CustomSplit(True, 16)
```
"""
has_prefix: bool
break_idx: int
@trait
class CustomSplitMapMixin:
"""
This mixin class is used to map merged strings to a sequence of
CustomSplits, which will then be used to re-split the strings iff none of
the resultant substrings go over the configured max line length.
"""
_Key: ClassVar = Tuple[StringID, str]
_CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
tuple
)
@staticmethod
def _get_key(string: str) -> "CustomSplitMapMixin._Key":
"""
Returns:
A unique identifier that is used internally to map @string to a
group of custom splits.
"""
return (id(string), string)
def add_custom_splits(
self, string: str, custom_splits: Iterable[CustomSplit]
) -> None:
"""Custom Split Map Setter Method
Side Effects:
Adds a mapping from @string to the custom splits @custom_splits.
"""
key = self._get_key(string)
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
def pop_custom_splits(self, string: str) -> List[CustomSplit]:
"""Custom Split Map Getter Method
Returns:
* A list of the custom splits that are mapped to @string, if any
exist.
OR
* [], otherwise.
Side Effects:
Deletes the mapping between @string and its associated custom
splits (which are returned to the caller).
"""
key = self._get_key(string)
custom_splits = self._CUSTOM_SPLIT_MAP[key]
del self._CUSTOM_SPLIT_MAP[key]
return list(custom_splits)
def has_custom_splits(self, string: str) -> bool:
"""
Returns:
True iff @string is associated with a set of custom splits.
"""
key = self._get_key(string)
return key in self._CUSTOM_SPLIT_MAP
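# A minimal usage sketch of the custom split map API above (illustrative;
# `mixin` is any CustomSplitMapMixin instance and `merged` an assumed string):
#
#     mixin.add_custom_splits(merged, [CustomSplit(False, 16)])
#     assert mixin.has_custom_splits(merged)
#     splits = mixin.pop_custom_splits(merged)    # returns the splits and
#     assert not mixin.has_custom_splits(merged)  # deletes the mapping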
class StringMerger(StringTransformer, CustomSplitMapMixin):
"""StringTransformer that merges strings together.
Requirements:
(A) The line contains adjacent strings such that ALL of the validation checks
listed in StringMerger.__validate_msg(...)'s docstring pass.
OR
(B) The line contains a string which uses line continuation backslashes.
Transformations:
        Depending on which of the two requirements above were met, either:
(A) The string group associated with the target string is merged.
OR
(B) All line-continuation backslashes are removed from the target string.
Collaborations:
StringMerger provides custom split information to StringSplitter.
"""
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
if (
leaf.type == token.STRING
and is_valid_index(i + 1)
and LL[i + 1].type == token.STRING
):
return Ok(i)
if leaf.type == token.STRING and "\\\n" in leaf.value:
return Ok(i)
return TErr("This line has no strings that need merging.")
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
new_line = line
rblc_result = self._remove_backslash_line_continuation_chars(
new_line, string_idx
)
if isinstance(rblc_result, Ok):
new_line = rblc_result.ok()
msg_result = self._merge_string_group(new_line, string_idx)
if isinstance(msg_result, Ok):
new_line = msg_result.ok()
if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
msg_cant_transform = msg_result.err()
rblc_cant_transform = rblc_result.err()
cant_transform = CannotTransform(
"StringMerger failed to merge any strings in this line."
)
# Chain the errors together using `__cause__`.
msg_cant_transform.__cause__ = rblc_cant_transform
cant_transform.__cause__ = msg_cant_transform
yield Err(cant_transform)
else:
yield Ok(new_line)
@staticmethod
def _remove_backslash_line_continuation_chars(
line: Line, string_idx: int
) -> TResult[Line]:
"""
Merge strings that were split across multiple lines using
line-continuation backslashes.
Returns:
Ok(new_line), if @line contains backslash line-continuation
characters.
OR
Err(CannotTransform), otherwise.
"""
LL = line.leaves
string_leaf = LL[string_idx]
if not (
string_leaf.type == token.STRING
and "\\\n" in string_leaf.value
and not has_triple_quotes(string_leaf.value)
):
return TErr(
f"String leaf {string_leaf} does not contain any backslash line"
" continuation characters."
)
new_line = line.clone()
new_line.comments = line.comments.copy()
append_leaves(new_line, line, LL)
new_string_leaf = new_line.leaves[string_idx]
new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")
return Ok(new_line)
def _merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]:
"""
Merges string group (i.e. set of adjacent strings) where the first
string in the group is `line.leaves[string_idx]`.
Returns:
Ok(new_line), if ALL of the validation checks found in
__validate_msg(...) pass.
OR
Err(CannotTransform), otherwise.
"""
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
vresult = self._validate_msg(line, string_idx)
if isinstance(vresult, Err):
return vresult
# If the string group is wrapped inside an Atom node, we must make sure
# to later replace that Atom with our new (merged) string leaf.
atom_node = LL[string_idx].parent
# We will place BREAK_MARK in between every two substrings that we
# merge. We will then later go through our final result and use the
# various instances of BREAK_MARK we find to add the right values to
# the custom split map.
BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
QUOTE = LL[string_idx].value[-1]
def make_naked(string: str, string_prefix: str) -> str:
"""Strip @string (i.e. make it a "naked" string)
Pre-conditions:
* assert_is_leaf_string(@string)
Returns:
A string that is identical to @string except that
@string_prefix has been stripped, the surrounding QUOTE
characters have been removed, and any remaining QUOTE
characters have been escaped.
"""
assert_is_leaf_string(string)
RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
naked_string = string[len(string_prefix) + 1 : -1]
naked_string = re.sub(
"(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
)
return naked_string
# Holds the CustomSplit objects that will later be added to the custom
# split map.
custom_splits = []
# Temporary storage for the 'has_prefix' part of the CustomSplit objects.
prefix_tracker = []
# Sets the 'prefix' variable. This is the prefix that the final merged
# string will have.
next_str_idx = string_idx
prefix = ""
while (
not prefix
and is_valid_index(next_str_idx)
and LL[next_str_idx].type == token.STRING
):
prefix = get_string_prefix(LL[next_str_idx].value).lower()
next_str_idx += 1
# The next loop merges the string group. The final string will be
# contained in 'S'.
#
# The following convenience variables are used:
#
# S: string
# NS: naked string
# SS: next string
# NSS: naked next string
S = ""
NS = ""
num_of_strings = 0
next_str_idx = string_idx
while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
num_of_strings += 1
SS = LL[next_str_idx].value
next_prefix = get_string_prefix(SS).lower()
# If this is an f-string group but this substring is not prefixed
# with 'f'...
if "f" in prefix and "f" not in next_prefix:
# Then we must escape any braces contained in this substring.
SS = re.sub(r"(\{|\})", r"\1\1", SS)
NSS = make_naked(SS, next_prefix)
has_prefix = bool(next_prefix)
prefix_tracker.append(has_prefix)
S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
NS = make_naked(S, prefix)
next_str_idx += 1
# Take a note on the index of the non-STRING leaf.
non_string_idx = next_str_idx
S_leaf = Leaf(token.STRING, S)
if self.normalize_strings:
S_leaf.value = normalize_string_quotes(S_leaf.value)
# Fill the 'custom_splits' list with the appropriate CustomSplit objects.
temp_string = S_leaf.value[len(prefix) + 1 : -1]
for has_prefix in prefix_tracker:
mark_idx = temp_string.find(BREAK_MARK)
assert (
mark_idx >= 0
), "Logic error while filling the custom string breakpoint cache."
temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
if atom_node is not None:
# If not all children of the atom node are merged (this can happen
# when there is a standalone comment in the middle) ...
if non_string_idx - string_idx < len(atom_node.children):
# We need to replace the old STRING leaves with the new string leaf.
first_child_idx = LL[string_idx].remove()
for idx in range(string_idx + 1, non_string_idx):
LL[idx].remove()
if first_child_idx is not None:
atom_node.insert_child(first_child_idx, string_leaf)
else:
# Else replace the atom node with the new string leaf.
replace_child(atom_node, string_leaf)
# Build the final line ('new_line') that this method will later return.
new_line = line.clone()
for i, leaf in enumerate(LL):
if i == string_idx:
new_line.append(string_leaf)
if string_idx <= i < string_idx + num_of_strings:
for comment_leaf in line.comments_after(LL[i]):
new_line.append(comment_leaf, preformatted=True)
continue
append_leaves(new_line, line, [leaf])
self.add_custom_splits(string_leaf.value, custom_splits)
return Ok(new_line)
@staticmethod
def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
"""Validate (M)erge (S)tring (G)roup
Transform-time string validation logic for __merge_string_group(...).
Returns:
* Ok(None), if ALL validation checks (listed below) pass.
OR
* Err(CannotTransform), if any of the following are true:
                - The target string group contains an "inner" stand-alone
                  comment (i.e. a stand-alone comment with a string leaf both
                  before and after it).
- The target string is not in a string group (i.e. it has no
adjacent strings).
- The string group has more than one inline comment.
- The string group has an inline comment that appears to be a pragma.
- The set of all string prefixes in the string group is of
length greater than one and is not equal to {"", "f"}.
- The string group consists of raw strings.
"""
# We first check for "inner" stand-alone comments (i.e. stand-alone
# comments that have a string leaf before them AND after them).
for inc in [1, -1]:
i = string_idx
found_sa_comment = False
is_valid_index = is_valid_index_factory(line.leaves)
while is_valid_index(i) and line.leaves[i].type in [
token.STRING,
STANDALONE_COMMENT,
]:
if line.leaves[i].type == STANDALONE_COMMENT:
found_sa_comment = True
elif found_sa_comment:
return TErr(
"StringMerger does NOT merge string groups which contain "
"stand-alone comments."
)
i += inc
num_of_inline_string_comments = 0
set_of_prefixes = set()
num_of_strings = 0
for leaf in line.leaves[string_idx:]:
if leaf.type != token.STRING:
                # If the string group is trailed by a comma, we count the
                # comments trailing the comma as one of the string group's
                # comments.
if leaf.type == token.COMMA and id(leaf) in line.comments:
num_of_inline_string_comments += 1
break
if has_triple_quotes(leaf.value):
return TErr("StringMerger does NOT merge multiline strings.")
num_of_strings += 1
prefix = get_string_prefix(leaf.value).lower()
if "r" in prefix:
return TErr("StringMerger does NOT merge raw strings.")
set_of_prefixes.add(prefix)
if id(leaf) in line.comments:
num_of_inline_string_comments += 1
if contains_pragma_comment(line.comments[id(leaf)]):
return TErr("Cannot merge strings which have pragma comments.")
if num_of_strings < 2:
return TErr(
f"Not enough strings to merge (num_of_strings={num_of_strings})."
)
if num_of_inline_string_comments > 1:
return TErr(
f"Too many inline string comments ({num_of_inline_string_comments})."
)
if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
return TErr(f"Too many different prefixes ({set_of_prefixes}).")
return Ok(None)
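# Illustrative before/after sketch of what StringMerger does (not taken from
# the module itself): adjacent string literals are merged into a single leaf,
# and the original boundaries are recorded as custom splits for StringSplitter.
#
#     # before
#     x = "Hello " "there " "friend."
#     # after
#     x = "Hello there friend."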
class StringParenStripper(StringTransformer):
"""StringTransformer that strips surrounding parentheses from strings.
Requirements:
The line contains a string which is surrounded by parentheses and:
- The target string is NOT the only argument to a function call.
- The target string is NOT a "pointless" string.
- If the target string contains a PERCENT, the brackets are not
preceded or followed by an operator with higher precedence than
PERCENT.
Transformations:
The parentheses mentioned in the 'Requirements' section are stripped.
Collaborations:
StringParenStripper has its own inherent usefulness, but it is also
relied on to clean up the parentheses created by StringParenWrapper (in
the event that they are no longer needed).
"""
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
for idx, leaf in enumerate(LL):
# Should be a string...
if leaf.type != token.STRING:
continue
# If this is a "pointless" string...
if (
leaf.parent
and leaf.parent.parent
and leaf.parent.parent.type == syms.simple_stmt
):
continue
# Should be preceded by a non-empty LPAR...
if (
not is_valid_index(idx - 1)
or LL[idx - 1].type != token.LPAR
or is_empty_lpar(LL[idx - 1])
):
continue
# That LPAR should NOT be preceded by a function name or a closing
# bracket (which could be a function which returns a function or a
# list/dictionary that contains a function)...
if is_valid_index(idx - 2) and (
LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
):
continue
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
next_idx = string_parser.parse(LL, string_idx)
# if the leaves in the parsed string include a PERCENT, we need to
# make sure the initial LPAR is NOT preceded by an operator with
            # precedence higher than or equal to PERCENT
if is_valid_index(idx - 2):
# mypy can't quite follow unless we name this
before_lpar = LL[idx - 2]
if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
(
before_lpar.type
in {
token.STAR,
token.AT,
token.SLASH,
token.DOUBLESLASH,
token.PERCENT,
token.TILDE,
token.DOUBLESTAR,
token.AWAIT,
token.LSQB,
token.LPAR,
}
)
or (
# only unary PLUS/MINUS
before_lpar.parent
and before_lpar.parent.type == syms.factor
and (before_lpar.type in {token.PLUS, token.MINUS})
)
):
continue
# Should be followed by a non-empty RPAR...
if (
is_valid_index(next_idx)
and LL[next_idx].type == token.RPAR
and not is_empty_rpar(LL[next_idx])
):
# That RPAR should NOT be followed by anything with higher
# precedence than PERCENT
if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
token.DOUBLESTAR,
token.LSQB,
token.LPAR,
token.DOT,
}:
continue
return Ok(string_idx)
return TErr("This line has no strings wrapped in parens.")
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
LL = line.leaves
string_parser = StringParser()
rpar_idx = string_parser.parse(LL, string_idx)
for leaf in (LL[string_idx - 1], LL[rpar_idx]):
if line.comments_after(leaf):
yield TErr(
"Will not strip parentheses which have comments attached to them."
)
return
new_line = line.clone()
new_line.comments = line.comments.copy()
try:
append_leaves(new_line, line, LL[: string_idx - 1])
except BracketMatchError:
# HACK: I believe there is currently a bug somewhere in
# right_hand_split() that is causing brackets to not be tracked
# properly by a shared BracketTracker.
append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)
string_leaf = Leaf(token.STRING, LL[string_idx].value)
LL[string_idx - 1].remove()
replace_child(LL[string_idx], string_leaf)
new_line.append(string_leaf)
append_leaves(
new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :]
)
LL[rpar_idx].remove()
yield Ok(new_line)
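# Illustrative before/after sketch of what StringParenStripper does (not taken
# from the module itself): parentheses that wrap only a string (and its
# trailer) are removed.
#
#     # before
#     value = ("a string that does not need wrapping")
#     # after
#     value = "a string that does not need wrapping"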
class BaseStringSplitter(StringTransformer):
"""
Abstract class for StringTransformers which transform a Line's strings by splitting
them or placing them on their own lines where necessary to avoid going over
the configured line length.
Requirements:
* The target string value is responsible for the line going over the
line length limit. It follows that after all of black's other line
split methods have been exhausted, this line (or one of the resulting
lines after all line splits are performed) would still be over the
line_length limit unless we split this string.
AND
* The target string is NOT a "pointless" string (i.e. a string that has
no parent or siblings).
AND
* The target string is not followed by an inline comment that appears
to be a pragma.
AND
* The target string is not a multiline (i.e. triple-quote) string.
"""
STRING_OPERATORS: Final = [
token.EQEQUAL,
token.GREATER,
token.GREATEREQUAL,
token.LESS,
token.LESSEQUAL,
token.NOTEQUAL,
token.PERCENT,
token.PLUS,
token.STAR,
]
@abstractmethod
def do_splitter_match(self, line: Line) -> TMatchResult:
"""
BaseStringSplitter asks its clients to override this method instead of
`StringTransformer.do_match(...)`.
Follows the same protocol as `StringTransformer.do_match(...)`.
Refer to `help(StringTransformer.do_match)` for more information.
"""
def do_match(self, line: Line) -> TMatchResult:
match_result = self.do_splitter_match(line)
if isinstance(match_result, Err):
return match_result
string_idx = match_result.ok()
vresult = self._validate(line, string_idx)
if isinstance(vresult, Err):
return vresult
return match_result
def _validate(self, line: Line, string_idx: int) -> TResult[None]:
"""
        Checks that @line meets all of the requirements listed in this class's
docstring. Refer to `help(BaseStringSplitter)` for a detailed
description of those requirements.
Returns:
* Ok(None), if ALL of the requirements are met.
OR
* Err(CannotTransform), if ANY of the requirements are NOT met.
"""
LL = line.leaves
string_leaf = LL[string_idx]
max_string_length = self._get_max_string_length(line, string_idx)
if len(string_leaf.value) <= max_string_length:
return TErr(
"The string itself is not what is causing this line to be too long."
)
if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
token.STRING,
token.NEWLINE,
]:
return TErr(
f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
" no parent)."
)
if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
line.comments[id(line.leaves[string_idx])]
):
return TErr(
"Line appears to end with an inline pragma comment. Splitting the line"
" could modify the pragma's behavior."
)
if has_triple_quotes(string_leaf.value):
return TErr("We cannot split multiline strings.")
return Ok(None)
def _get_max_string_length(self, line: Line, string_idx: int) -> int:
"""
Calculates the max string length used when attempting to determine
whether or not the target string is responsible for causing the line to
go over the line length limit.
WARNING: This method is tightly coupled to both StringSplitter and
(especially) StringParenWrapper. There is probably a better way to
accomplish what is being done here.
Returns:
            max_string_length: such that `len(line.leaves[string_idx].value) >
            max_string_length` implies that the target string IS responsible
for causing this line to exceed the line length limit.
"""
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
# We use the shorthand "WMA4" in comments to abbreviate "We must
# account for". When giving examples, we use STRING to mean some/any
# valid string.
#
# Finally, we use the following convenience variables:
#
# P: The leaf that is before the target string leaf.
# N: The leaf that is after the target string leaf.
# NN: The leaf that is after N.
# WMA4 the whitespace at the beginning of the line.
offset = line.depth * 4
if is_valid_index(string_idx - 1):
p_idx = string_idx - 1
if (
LL[string_idx - 1].type == token.LPAR
and LL[string_idx - 1].value == ""
and string_idx >= 2
):
# If the previous leaf is an empty LPAR placeholder, we should skip it.
p_idx -= 1
P = LL[p_idx]
if P.type in self.STRING_OPERATORS:
# WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
offset += len(str(P)) + 1
if P.type == token.COMMA:
# WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
offset += 3
if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
# This conditional branch is meant to handle dictionary keys,
# variable assignments, 'return STRING' statement lines, and
# 'else STRING' ternary expression lines.
# WMA4 a single space.
offset += 1
# WMA4 the lengths of any leaves that came before that space,
# but after any closing bracket before that space.
for leaf in reversed(LL[: p_idx + 1]):
offset += len(str(leaf))
if leaf.type in CLOSING_BRACKETS:
break
if is_valid_index(string_idx + 1):
N = LL[string_idx + 1]
if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
# If the next leaf is an empty RPAR placeholder, we should skip it.
N = LL[string_idx + 2]
if N.type == token.COMMA:
# WMA4 a single comma at the end of the string (e.g `STRING,`).
offset += 1
if is_valid_index(string_idx + 2):
NN = LL[string_idx + 2]
if N.type == token.DOT and NN.type == token.NAME:
# This conditional branch is meant to handle method calls invoked
# off of a string literal up to and including the LPAR character.
# WMA4 the '.' character.
offset += 1
if (
is_valid_index(string_idx + 3)
and LL[string_idx + 3].type == token.LPAR
):
# WMA4 the left parenthesis character.
offset += 1
# WMA4 the length of the method's name.
offset += len(NN.value)
has_comments = False
for comment_leaf in line.comments_after(LL[string_idx]):
if not has_comments:
has_comments = True
# WMA4 two spaces before the '#' character.
offset += 2
# WMA4 the length of the inline comment.
offset += len(comment_leaf.value)
max_string_length = self.line_length - offset
return max_string_length
@staticmethod
def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the "prefer paren wrap" statement
requirements listed in the 'Requirements' section of the StringParenWrapper
class's docstring.
OR
None, otherwise.
"""
# The line must start with a string.
if LL[0].type != token.STRING:
return None
# If the string is surrounded by commas (or is the first/last child)...
prev_sibling = LL[0].prev_sibling
next_sibling = LL[0].next_sibling
if not prev_sibling and not next_sibling and parent_type(LL[0]) == syms.atom:
# If it's an atom string, we need to check the parent atom's siblings.
parent = LL[0].parent
assert parent is not None # For type checkers.
prev_sibling = parent.prev_sibling
next_sibling = parent.next_sibling
if (not prev_sibling or prev_sibling.type == token.COMMA) and (
not next_sibling or next_sibling.type == token.COMMA
):
return 0
return None
def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
"""
Yields spans corresponding to expressions in a given f-string.
Spans are half-open ranges (left inclusive, right exclusive).
Assumes the input string is a valid f-string, but will not crash if the input
string is invalid.
"""
stack: List[int] = [] # our curly paren stack
i = 0
while i < len(s):
if s[i] == "{":
# if we're in a string part of the f-string, ignore escaped curly braces
if not stack and i + 1 < len(s) and s[i + 1] == "{":
i += 2
continue
stack.append(i)
i += 1
continue
if s[i] == "}":
if not stack:
i += 1
continue
j = stack.pop()
# we've made it back out of the expression! yield the span
if not stack:
yield (j, i + 1)
i += 1
continue
# if we're in an expression part of the f-string, fast forward through strings
# note that backslashes are not legal in the expression portion of f-strings
if stack:
delim = None
if s[i : i + 3] in ("'''", '"""'):
delim = s[i : i + 3]
elif s[i] in ("'", '"'):
delim = s[i]
if delim:
i += len(delim)
while i < len(s) and s[i : i + len(delim)] != delim:
i += 1
i += len(delim)
continue
i += 1
def fstring_contains_expr(s: str) -> bool:
return any(iter_fexpr_spans(s))
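# A few concrete examples for the two helpers above. The spans are half-open
# and are computed over the full leaf value, including the prefix and quotes:
#
#     list(iter_fexpr_spans('f"{x}!{y}"'))     # -> [(2, 5), (6, 9)]
#     fstring_contains_expr('f"{x}!{y}"')      # -> True
#     fstring_contains_expr('f"{{literal}}"')  # -> False (escaped braces only)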
class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
"""
StringTransformer that splits "atom" strings (i.e. strings which exist on
lines by themselves).
Requirements:
* The line consists ONLY of a single string (possibly prefixed by a
string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE
a trailing comma.
AND
* All of the requirements listed in BaseStringSplitter's docstring.
Transformations:
The string mentioned in the 'Requirements' section is split into as
many substrings as necessary to adhere to the configured line length.
In the final set of substrings, no substring should be smaller than
MIN_SUBSTR_SIZE characters.
The string will ONLY be split on spaces (i.e. each new substring should
start with a space). Note that the string will NOT be split on a space
which is escaped with a backslash.
If the string is an f-string, it will NOT be split in the middle of an
f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
else bar()} is an f-expression).
If the string that is being split has an associated set of custom split
records and those custom splits will NOT result in any line going over
the configured line length, those custom splits are used. Otherwise the
string is split as late as possible (from left-to-right) while still
adhering to the transformation rules listed above.
Collaborations:
StringSplitter relies on StringMerger to construct the appropriate
CustomSplit objects and add them to the custom split map.
"""
MIN_SUBSTR_SIZE: Final = 6
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
if self._prefer_paren_wrap_match(LL) is not None:
return TErr("Line needs to be wrapped in parens first.")
is_valid_index = is_valid_index_factory(LL)
idx = 0
# The first two leaves MAY be the 'not in' keywords...
if (
is_valid_index(idx)
and is_valid_index(idx + 1)
and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME]
and str(LL[idx]) + str(LL[idx + 1]) == "not in"
):
idx += 2
# Else the first leaf MAY be a string operator symbol or the 'in' keyword...
elif is_valid_index(idx) and (
LL[idx].type in self.STRING_OPERATORS
or LL[idx].type == token.NAME
and str(LL[idx]) == "in"
):
idx += 1
# The next/first leaf MAY be an empty LPAR...
if is_valid_index(idx) and is_empty_lpar(LL[idx]):
idx += 1
# The next/first leaf MUST be a string...
if not is_valid_index(idx) or LL[idx].type != token.STRING:
return TErr("Line does not start with a string.")
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# That string MAY be followed by an empty RPAR...
if is_valid_index(idx) and is_empty_rpar(LL[idx]):
idx += 1
# That string / empty RPAR leaf MAY be followed by a comma...
if is_valid_index(idx) and LL[idx].type == token.COMMA:
idx += 1
# But no more leaves are allowed...
if is_valid_index(idx):
return TErr("This line does not end with a string.")
return Ok(string_idx)
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
LL = line.leaves
QUOTE = LL[string_idx].value[-1]
is_valid_index = is_valid_index_factory(LL)
insert_str_child = insert_str_child_factory(LL[string_idx])
prefix = get_string_prefix(LL[string_idx].value).lower()
# We MAY choose to drop the 'f' prefix from substrings that don't
# contain any f-expressions, but ONLY if the original f-string
# contains at least one f-expression. Otherwise, we will alter the AST
# of the program.
drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr(
LL[string_idx].value
)
first_string_line = True
string_op_leaves = self._get_string_operator_leaves(LL)
string_op_leaves_length = (
sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1
if string_op_leaves
else 0
)
def maybe_append_string_operators(new_line: Line) -> None:
"""
Side Effects:
If @line starts with a string operator and this is the first
line we are constructing, this function appends the string
operator to @new_line and replaces the old string operator leaf
in the node structure. Otherwise this function does nothing.
"""
maybe_prefix_leaves = string_op_leaves if first_string_line else []
for i, prefix_leaf in enumerate(maybe_prefix_leaves):
replace_child(LL[i], prefix_leaf)
new_line.append(prefix_leaf)
ends_with_comma = (
is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
)
def max_last_string() -> int:
"""
Returns:
The max allowed length of the string value used for the last
line we will construct.
"""
result = self.line_length
result -= line.depth * 4
result -= 1 if ends_with_comma else 0
result -= string_op_leaves_length
return result
# --- Calculate Max Break Index (for string value)
# We start with the line length limit
max_break_idx = self.line_length
# The last index of a string of length N is N-1.
max_break_idx -= 1
# Leading whitespace is not present in the string value (e.g. Leaf.value).
max_break_idx -= line.depth * 4
if max_break_idx < 0:
yield TErr(
f"Unable to split {LL[string_idx].value} at such high of a line depth:"
f" {line.depth}"
)
return
# Check if StringMerger registered any custom splits.
custom_splits = self.pop_custom_splits(LL[string_idx].value)
# We use them ONLY if none of them would produce lines that exceed the
# line limit.
use_custom_breakpoints = bool(
custom_splits
and all(csplit.break_idx <= max_break_idx for csplit in custom_splits)
)
# Temporary storage for the remaining chunk of the string line that
# can't fit onto the line currently being constructed.
rest_value = LL[string_idx].value
def more_splits_should_be_made() -> bool:
"""
Returns:
True iff `rest_value` (the remaining string value from the last
                split) should be split again.
"""
if use_custom_breakpoints:
return len(custom_splits) > 1
else:
return len(rest_value) > max_last_string()
string_line_results: List[Ok[Line]] = []
while more_splits_should_be_made():
if use_custom_breakpoints:
# Custom User Split (manual)
csplit = custom_splits.pop(0)
break_idx = csplit.break_idx
else:
# Algorithmic Split (automatic)
max_bidx = max_break_idx - string_op_leaves_length
maybe_break_idx = self._get_break_idx(rest_value, max_bidx)
if maybe_break_idx is None:
# If we are unable to algorithmically determine a good split
# and this string has custom splits registered to it, we
# fall back to using them--which means we have to start
# over from the beginning.
if custom_splits:
rest_value = LL[string_idx].value
string_line_results = []
first_string_line = True
use_custom_breakpoints = True
continue
# Otherwise, we stop splitting here.
break
break_idx = maybe_break_idx
# --- Construct `next_value`
next_value = rest_value[:break_idx] + QUOTE
# HACK: The following 'if' statement is a hack to fix the custom
# breakpoint index in the case of either: (a) substrings that were
# f-strings but will have the 'f' prefix removed OR (b) substrings
# that were not f-strings but will now become f-strings because of
# redundant use of the 'f' prefix (i.e. none of the substrings
# contain f-expressions but one or more of them had the 'f' prefix
# anyway; in which case, we will prepend 'f' to _all_ substrings).
#
# There is probably a better way to accomplish what is being done
# here...
#
# If this substring is an f-string, we _could_ remove the 'f'
# prefix, and the current custom split did NOT originally use a
# prefix...
if (
next_value != self._normalize_f_string(next_value, prefix)
and use_custom_breakpoints
and not csplit.has_prefix
):
# Then `csplit.break_idx` will be off by one after removing
# the 'f' prefix.
break_idx += 1
next_value = rest_value[:break_idx] + QUOTE
if drop_pointless_f_prefix:
next_value = self._normalize_f_string(next_value, prefix)
# --- Construct `next_leaf`
next_leaf = Leaf(token.STRING, next_value)
insert_str_child(next_leaf)
self._maybe_normalize_string_quotes(next_leaf)
# --- Construct `next_line`
next_line = line.clone()
maybe_append_string_operators(next_line)
next_line.append(next_leaf)
string_line_results.append(Ok(next_line))
rest_value = prefix + QUOTE + rest_value[break_idx:]
first_string_line = False
yield from string_line_results
if drop_pointless_f_prefix:
rest_value = self._normalize_f_string(rest_value, prefix)
rest_leaf = Leaf(token.STRING, rest_value)
insert_str_child(rest_leaf)
# NOTE: I could not find a test case that verifies that the following
# line is actually necessary, but it seems to be. Otherwise we risk
# not normalizing the last substring, right?
self._maybe_normalize_string_quotes(rest_leaf)
last_line = line.clone()
maybe_append_string_operators(last_line)
# If there are any leaves to the right of the target string...
if is_valid_index(string_idx + 1):
# We use `temp_value` here to determine how long the last line
# would be if we were to append all the leaves to the right of the
# target string to the last string line.
temp_value = rest_value
for leaf in LL[string_idx + 1 :]:
temp_value += str(leaf)
if leaf.type == token.LPAR:
break
# Try to fit them all on the same line with the last substring...
if (
len(temp_value) <= max_last_string()
or LL[string_idx + 1].type == token.COMMA
):
last_line.append(rest_leaf)
append_leaves(last_line, line, LL[string_idx + 1 :])
yield Ok(last_line)
# Otherwise, place the last substring on one line and everything
# else on a line below that...
else:
last_line.append(rest_leaf)
yield Ok(last_line)
non_string_line = line.clone()
append_leaves(non_string_line, line, LL[string_idx + 1 :])
yield Ok(non_string_line)
# Else the target string was the last leaf...
else:
last_line.append(rest_leaf)
last_line.comments = line.comments.copy()
yield Ok(last_line)
def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
"""
Yields:
All ranges of @string which, if @string were to be split there,
would result in the splitting of an \\N{...} expression (which is NOT
allowed).
"""
# True - the previous backslash was unescaped
# False - the previous backslash was escaped *or* there was no backslash
previous_was_unescaped_backslash = False
it = iter(enumerate(string))
for idx, c in it:
if c == "\\":
previous_was_unescaped_backslash = not previous_was_unescaped_backslash
continue
if not previous_was_unescaped_backslash or c != "N":
previous_was_unescaped_backslash = False
continue
previous_was_unescaped_backslash = False
begin = idx - 1 # the position of backslash before \N{...}
for idx, c in it:
if c == "}":
end = idx
break
else:
# malformed nameescape expression?
# should have been detected by AST parsing earlier...
raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
yield begin, end
def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
"""
Yields:
All ranges of @string which, if @string were to be split there,
would result in the splitting of an f-expression (which is NOT
allowed).
"""
if "f" not in get_string_prefix(string).lower():
return
yield from iter_fexpr_spans(string)
def _get_illegal_split_indices(self, string: str) -> Set[Index]:
illegal_indices: Set[Index] = set()
iterators = [
self._iter_fexpr_slices(string),
self._iter_nameescape_slices(string),
]
for it in iterators:
for begin, end in it:
illegal_indices.update(range(begin, end + 1))
return illegal_indices
def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]:
"""
This method contains the algorithm that StringSplitter uses to
determine which character to split each string at.
Args:
@string: The substring that we are attempting to split.
@max_break_idx: The ideal break index. We will return this value if it
meets all the necessary conditions. In the likely event that it
doesn't we will try to find the closest index BELOW @max_break_idx
that does. If that fails, we will expand our search by also
considering all valid indices ABOVE @max_break_idx.
Pre-Conditions:
* assert_is_leaf_string(@string)
* 0 <= @max_break_idx < len(@string)
Returns:
break_idx, if an index is able to be found that meets all of the
            conditions listed in the 'Transformations' section of this class's
docstring.
OR
None, otherwise.
"""
is_valid_index = is_valid_index_factory(string)
assert is_valid_index(max_break_idx)
assert_is_leaf_string(string)
_illegal_split_indices = self._get_illegal_split_indices(string)
def breaks_unsplittable_expression(i: Index) -> bool:
"""
Returns:
True iff returning @i would result in the splitting of an
unsplittable expression (which is NOT allowed).
"""
return i in _illegal_split_indices
def passes_all_checks(i: Index) -> bool:
"""
Returns:
True iff ALL of the conditions listed in the 'Transformations'
                section of this class's docstring would be met by returning @i.
"""
is_space = string[i] == " "
is_not_escaped = True
j = i - 1
while is_valid_index(j) and string[j] == "\\":
is_not_escaped = not is_not_escaped
j -= 1
is_big_enough = (
len(string[i:]) >= self.MIN_SUBSTR_SIZE
and len(string[:i]) >= self.MIN_SUBSTR_SIZE
)
return (
is_space
and is_not_escaped
and is_big_enough
and not breaks_unsplittable_expression(i)
)
# First, we check all indices BELOW @max_break_idx.
break_idx = max_break_idx
while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
break_idx -= 1
if not passes_all_checks(break_idx):
# If that fails, we check all indices ABOVE @max_break_idx.
#
# If we are able to find a valid index here, the next line is going
# to be longer than the specified line length, but it's probably
# better than doing nothing at all.
break_idx = max_break_idx + 1
while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
break_idx += 1
if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
return None
return break_idx
def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
if self.normalize_strings:
leaf.value = normalize_string_quotes(leaf.value)
def _normalize_f_string(self, string: str, prefix: str) -> str:
"""
Pre-Conditions:
* assert_is_leaf_string(@string)
Returns:
* If @string is an f-string that contains no f-expressions, we
return a string identical to @string except that the 'f' prefix
has been stripped and all double braces (i.e. '{{' or '}}') have
been normalized (i.e. turned into '{' or '}').
OR
* Otherwise, we return @string.
"""
assert_is_leaf_string(string)
if "f" in prefix and not fstring_contains_expr(string):
new_prefix = prefix.replace("f", "")
temp = string[len(prefix) :]
temp = re.sub(r"\{\{", "{", temp)
temp = re.sub(r"\}\}", "}", temp)
new_string = temp
return f"{new_prefix}{new_string}"
else:
return string
def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
LL = list(leaves)
string_op_leaves = []
i = 0
while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
string_op_leaves.append(prefix_leaf)
i += 1
return string_op_leaves
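# Illustrative before/after sketch of what StringSplitter does (assuming a
# line length small enough to force a split): an "atom" string is split on a
# space into implicitly concatenated substrings, each on its own line, with
# every substring after the first starting with the space it was split on.
#
#     # before (a single over-long string inside parentheses)
#     "This is a very long sentence that will not fit on one line."
#     # after
#     "This is a very long sentence"
#     " that will not fit on one line."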
class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
"""
StringTransformer that wraps strings in parens and then splits at the LPAR.
Requirements:
All of the requirements listed in BaseStringSplitter's docstring in
addition to the requirements listed below:
* The line is a return/yield statement, which returns/yields a string.
OR
* The line is part of a ternary expression (e.g. `x = y if cond else
z`) such that the line starts with `else <string>`, where <string> is
some string.
OR
* The line is an assert statement, which ends with a string.
OR
* The line is an assignment statement (e.g. `x = <string>` or `x +=
<string>`) such that the variable is being assigned the value of some
string.
OR
* The line is a dictionary key assignment where some valid key is being
assigned the value of some string.
OR
* The line starts with an "atom" string that prefers to be wrapped in
parens. It's preferred to be wrapped when the string is surrounded by
commas (or is the first/last child).
Transformations:
The chosen string is wrapped in parentheses and then split at the LPAR.
We then have one line which ends with an LPAR and another line that
starts with the chosen string. The latter line is then split again at
the RPAR. This results in the RPAR (and possibly a trailing comma)
being placed on its own line.
NOTE: If any leaves exist to the right of the chosen string (except
for a trailing comma, which would be placed after the RPAR), those
leaves are placed inside the parentheses. In effect, the chosen
string is not necessarily being "wrapped" by parentheses. We can,
however, count on the LPAR being placed directly before the chosen
string.
In other words, StringParenWrapper creates "atom" strings. These
can then be split again by StringSplitter, if necessary.
Collaborations:
In the event that a string line split by StringParenWrapper is
changed such that it no longer needs to be given its own line,
StringParenWrapper relies on StringParenStripper to clean up the
parentheses it created.
For "atom" strings that prefers to be wrapped in parens, it requires
StringSplitter to hold the split until the string is wrapped in parens.
"""
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
if line.leaves[-1].type in OPENING_BRACKETS:
return TErr(
"Cannot wrap parens around a line that ends in an opening bracket."
)
string_idx = (
self._return_match(LL)
or self._else_match(LL)
or self._assert_match(LL)
or self._assign_match(LL)
or self._dict_match(LL)
or self._prefer_paren_wrap_match(LL)
)
if string_idx is not None:
string_value = line.leaves[string_idx].value
# If the string has no spaces...
if " " not in string_value:
# And will still violate the line length limit when split...
max_string_length = self.line_length - ((line.depth + 1) * 4)
if len(string_value) > max_string_length:
# And has no associated custom splits...
if not self.has_custom_splits(string_value):
# Then we should NOT put this string on its own line.
return TErr(
"We do not wrap long strings in parentheses when the"
" resultant line would still be over the specified line"
" length and can't be split further by StringSplitter."
)
return Ok(string_idx)
return TErr("This line does not contain any non-atomic strings.")
@staticmethod
def _return_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the return/yield statement
            requirements listed in the 'Requirements' section of this class's
docstring.
OR
None, otherwise.
"""
        # If this line is a part of a return/yield statement and the first leaf
# contains either the "return" or "yield" keywords...
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
0
].value in ["return", "yield"]:
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _else_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the ternary expression
            requirements listed in the 'Requirements' section of this class's
docstring.
OR
None, otherwise.
"""
        # If this line is a part of a ternary expression and the first leaf
# contains the "else" keyword...
if (
parent_type(LL[0]) == syms.test
and LL[0].type == token.NAME
and LL[0].value == "else"
):
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _assert_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the assert statement
            requirements listed in the 'Requirements' section of this class's
docstring.
OR
None, otherwise.
"""
        # If this line is a part of an assert statement and the first leaf
# contains the "assert" keyword...
if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find a comma...
if leaf.type == token.COMMA:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That comma MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
@staticmethod
def _assign_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the assignment statement
            requirements listed in the 'Requirements' section of this class's
docstring.
OR
None, otherwise.
"""
        # If this line is a part of an expression statement or is a function
# argument AND the first leaf contains a variable name...
if (
parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
and LL[0].type == token.NAME
):
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find either an '=' or '+=' symbol...
if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That symbol MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
                        # The next leaf MAY be a comma iff this line is a part
# of a function argument...
if (
parent_type(LL[0]) == syms.argument
and is_valid_index(idx)
and LL[idx].type == token.COMMA
):
idx += 1
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
@staticmethod
def _dict_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the dictionary key assignment
statement requirements listed in the 'Requirements' section of this
            class's docstring.
OR
None, otherwise.
"""
        # If this line is a part of a dictionary key assignment...
if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find a colon...
if leaf.type == token.COLON:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That colon MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# That string MAY be followed by a comma...
if is_valid_index(idx) and LL[idx].type == token.COMMA:
idx += 1
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
insert_str_child = insert_str_child_factory(LL[string_idx])
comma_idx = -1
ends_with_comma = False
if LL[comma_idx].type == token.COMMA:
ends_with_comma = True
leaves_to_steal_comments_from = [LL[string_idx]]
if ends_with_comma:
leaves_to_steal_comments_from.append(LL[comma_idx])
# --- First Line
first_line = line.clone()
left_leaves = LL[:string_idx]
# We have to remember to account for (possibly invisible) LPAR and RPAR
# leaves that already wrapped the target string. If these leaves do
# exist, we will replace them with our own LPAR and RPAR leaves.
old_parens_exist = False
if left_leaves and left_leaves[-1].type == token.LPAR:
old_parens_exist = True
leaves_to_steal_comments_from.append(left_leaves[-1])
left_leaves.pop()
append_leaves(first_line, line, left_leaves)
lpar_leaf = Leaf(token.LPAR, "(")
if old_parens_exist:
replace_child(LL[string_idx - 1], lpar_leaf)
else:
insert_str_child(lpar_leaf)
first_line.append(lpar_leaf)
# We throw inline comments that were originally to the right of the
# target string to the top line. They will now be shown to the right of
# the LPAR.
for leaf in leaves_to_steal_comments_from:
for comment_leaf in line.comments_after(leaf):
first_line.append(comment_leaf, preformatted=True)
yield Ok(first_line)
# --- Middle (String) Line
# We only need to yield one (possibly too long) string line, since the
# `StringSplitter` will break it down further if necessary.
string_value = LL[string_idx].value
string_line = Line(
mode=line.mode,
depth=line.depth + 1,
inside_brackets=True,
should_split_rhs=line.should_split_rhs,
magic_trailing_comma=line.magic_trailing_comma,
)
string_leaf = Leaf(token.STRING, string_value)
insert_str_child(string_leaf)
string_line.append(string_leaf)
old_rpar_leaf = None
if is_valid_index(string_idx + 1):
right_leaves = LL[string_idx + 1 :]
if ends_with_comma:
right_leaves.pop()
if old_parens_exist:
assert right_leaves and right_leaves[-1].type == token.RPAR, (
"Apparently, old parentheses do NOT exist?!"
f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
)
old_rpar_leaf = right_leaves.pop()
append_leaves(string_line, line, right_leaves)
yield Ok(string_line)
# --- Last Line
last_line = line.clone()
last_line.bracket_tracker = first_line.bracket_tracker
new_rpar_leaf = Leaf(token.RPAR, ")")
if old_rpar_leaf is not None:
replace_child(old_rpar_leaf, new_rpar_leaf)
else:
insert_str_child(new_rpar_leaf)
last_line.append(new_rpar_leaf)
# If the target string ended with a comma, we place this comma to the
# right of the RPAR on the last line.
if ends_with_comma:
comma_leaf = Leaf(token.COMMA, ",")
replace_child(LL[comma_idx], comma_leaf)
last_line.append(comma_leaf)
yield Ok(last_line)
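# Illustrative before/after sketch of what StringParenWrapper does (not taken
# from the module itself): the chosen string is wrapped in parentheses and
# split at the LPAR, leaving the RPAR on its own line; StringSplitter may then
# split the wrapped string further.
#
#     # before
#     return "some string that is too long to stay on this line"
#     # after
#     return (
#         "some string that is too long to stay on this line"
#     )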
class StringParser:
"""
A state machine that aids in parsing a string's "trailer", which can be
either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
(varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
varY)`).
NOTE: A new StringParser object MUST be instantiated for each string
trailer we need to parse.
Examples:
We shall assume that `line` equals the `Line` object that corresponds
to the following line of python code:
```
x = "Some {}.".format("String") + some_other_string
```
Furthermore, we will assume that `string_idx` is some index such that:
```
assert line.leaves[string_idx].value == "Some {}."
```
The following code snippet then holds:
```
string_parser = StringParser()
idx = string_parser.parse(line.leaves, string_idx)
assert line.leaves[idx].type == token.PLUS
```
"""
DEFAULT_TOKEN: Final = 20210605
# String Parser States
START: Final = 1
DOT: Final = 2
NAME: Final = 3
PERCENT: Final = 4
SINGLE_FMT_ARG: Final = 5
LPAR: Final = 6
RPAR: Final = 7
DONE: Final = 8
# Lookup Table for Next State
_goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
# A string trailer may start with '.' OR '%'.
(START, token.DOT): DOT,
(START, token.PERCENT): PERCENT,
(START, DEFAULT_TOKEN): DONE,
# A '.' MUST be followed by an attribute or method name.
(DOT, token.NAME): NAME,
# A method name MUST be followed by an '(', whereas an attribute name
# is the last symbol in the string trailer.
(NAME, token.LPAR): LPAR,
(NAME, DEFAULT_TOKEN): DONE,
# A '%' symbol can be followed by an '(' or a single argument (e.g. a
# string or variable name).
(PERCENT, token.LPAR): LPAR,
(PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
# If a '%' symbol is followed by a single argument, that argument is
# the last leaf in the string trailer.
(SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
# If present, a ')' symbol is the last symbol in a string trailer.
# (NOTE: LPARS and nested RPARS are not included in this lookup table,
# since they are treated as a special case by the parsing logic in this
        # class's implementation.)
(RPAR, DEFAULT_TOKEN): DONE,
}
def __init__(self) -> None:
self._state = self.START
self._unmatched_lpars = 0
def parse(self, leaves: List[Leaf], string_idx: int) -> int:
"""
Pre-conditions:
* @leaves[@string_idx].type == token.STRING
Returns:
            The index directly after the last leaf which is a part of the string
trailer, if a "trailer" exists.
OR
@string_idx + 1, if no string "trailer" exists.
"""
assert leaves[string_idx].type == token.STRING
idx = string_idx + 1
while idx < len(leaves) and self._next_state(leaves[idx]):
idx += 1
return idx
def _next_state(self, leaf: Leaf) -> bool:
"""
Pre-conditions:
* On the first call to this function, @leaf MUST be the leaf that
was directly after the string leaf in question (e.g. if our target
string is `line.leaves[i]` then the first call to this method must
be `line.leaves[i + 1]`).
* On the next call to this function, the leaf parameter passed in
MUST be the leaf directly following @leaf.
Returns:
            True iff @leaf is a part of the string's trailer.
"""
# We ignore empty LPAR or RPAR leaves.
if is_empty_par(leaf):
return True
next_token = leaf.type
if next_token == token.LPAR:
self._unmatched_lpars += 1
current_state = self._state
# The LPAR parser state is a special case. We will return True until we
# find the matching RPAR token.
if current_state == self.LPAR:
if next_token == token.RPAR:
self._unmatched_lpars -= 1
if self._unmatched_lpars == 0:
self._state = self.RPAR
# Otherwise, we use a lookup table to determine the next state.
else:
# If the lookup table matches the current state to the next
# token, we use the lookup table.
if (current_state, next_token) in self._goto:
self._state = self._goto[current_state, next_token]
else:
                # Otherwise, we check whether the current state was assigned a
# default.
if (current_state, self.DEFAULT_TOKEN) in self._goto:
self._state = self._goto[current_state, self.DEFAULT_TOKEN]
# If no default has been assigned, then this parser has a logic
# error.
else:
raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
if self._state == self.DONE:
return False
return True
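# A second trailer example for StringParser (a sketch complementing the class
# docstring), this time using old-style %-formatting: for a line equivalent to
# `x = "%s: %s" % (foo, bar) + tail`, calling `parse(line.leaves, 2)` walks
# PERCENT -> LPAR -> ... -> RPAR and stops at the `+` leaf, so the returned
# index satisfies `line.leaves[idx].type == token.PLUS`.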
def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
"""
Factory for a convenience function that is used to orphan @string_leaf
and then insert multiple new leaves into the same part of the node
structure that @string_leaf had originally occupied.
Examples:
Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
string_leaf.parent`. Assume the node `N` has the following
original structure:
Node(
expr_stmt, [
Leaf(NAME, 'x'),
Leaf(EQUAL, '='),
Leaf(STRING, '"foo"'),
]
)
We then run the code snippet shown below.
```
insert_str_child = insert_str_child_factory(string_leaf)
lpar = Leaf(token.LPAR, '(')
insert_str_child(lpar)
bar = Leaf(token.STRING, '"bar"')
insert_str_child(bar)
rpar = Leaf(token.RPAR, ')')
insert_str_child(rpar)
```
After which point, it follows that `string_leaf.parent is None` and
the node `N` now has the following structure:
Node(
expr_stmt, [
Leaf(NAME, 'x'),
Leaf(EQUAL, '='),
Leaf(LPAR, '('),
Leaf(STRING, '"bar"'),
Leaf(RPAR, ')'),
]
)
"""
string_parent = string_leaf.parent
string_child_idx = string_leaf.remove()
def insert_str_child(child: LN) -> None:
nonlocal string_child_idx
assert string_parent is not None
assert string_child_idx is not None
string_parent.insert_child(string_child_idx, child)
string_child_idx += 1
return insert_str_child
def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
"""
Examples:
```
my_list = [1, 2, 3]
is_valid_index = is_valid_index_factory(my_list)
assert is_valid_index(0)
assert is_valid_index(2)
assert not is_valid_index(3)
assert not is_valid_index(-1)
```
"""
def is_valid_index(idx: int) -> bool:
"""
Returns:
True iff @idx is positive AND seq[@idx] does NOT raise an
IndexError.
"""
return 0 <= idx < len(seq)
return is_valid_index
| mit |
zhreshold/mxnet | example/image-classification/symbols/resnet.py | 55 | 9630 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
import numpy as np
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
        Bottleneck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
        True means the number of channels of the input and output is the same; False means they differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, slightly different from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv2 + shortcut
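# Illustrative usage sketch: the 'data' symbol and the parameter values below
# are assumptions for demonstration only, kept as comments.
# data = mx.sym.Variable('data')
# unit = residual_unit(data, num_filter=256, stride=(2, 2), dim_match=False,
#                      name='stage2_unit1', bottle_neck=True, workspace=256)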
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
    """Return ResNet symbol of the given configuration
Parameters
----------
units : list
Number of units in each stage
num_stages : int
        Number of stages
filter_list : list
Channel size of each stage
num_classes : int
        Output size of symbol
dataset : str
        Dataset type; only cifar10 and imagenet are supported
workspace : int
Workspace used in convolution operator
dtype : str
Precision (float32 or float16)
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # The kernel size is ignored when global_pool=True, but one must still be provided
pool1 = mx.sym.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 28:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
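        # Worked example (illustrative): num_layers=110 satisfies
        # (110 - 2) % 6 == 0 and 110 < 164, giving per_unit=[18],
        # filter_list=[16, 16, 32, 64], bottle_neck=False and
        # units=[18, 18, 18].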
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnet(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype)
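# Example (illustrative; the argument values are assumptions): a ResNet-50 for
# 224x224 ImageNet-style input would be built with
#   sym = get_symbol(num_classes=1000, num_layers=50, image_shape='3,224,224')
# which selects units=[3, 4, 6, 3] with bottleneck residual blocks.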
| apache-2.0 |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/models/wav2vec.py | 1 | 24313 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import BaseFairseqModel, register_model, register_model_architecture
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GumbelVectorQuantizer,
KmeansVectorQuantizer,
)
from fairseq.utils import buffered_arange
logger = logging.getLogger(__name__)
@register_model("wav2vec")
class Wav2VecModel(BaseFairseqModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--prediction-steps",
type=int,
metavar="N",
help="number of steps ahead to predict",
)
parser.add_argument(
"--sample-distance",
type=int,
metavar="N",
help="sample distance from target. does not work properly with cross-sampling",
)
parser.add_argument(
"--cross-sample-negatives",
type=int,
metavar="N",
help="num of cross sampled negatives",
)
parser.add_argument(
"--num-negatives", type=int, metavar="N", help="number of negative examples"
)
parser.add_argument(
"--conv-feature-layers",
type=str,
metavar="EXPR",
help="convolutional feature extraction layers [(dim, kernel_size, stride), ...]",
)
parser.add_argument(
"--conv-aggregator-layers",
type=str,
metavar="EXPR",
help="convolutional feature extraction layers [(dim, kernel_size, stride), ...]",
)
parser.add_argument(
"--dropout",
type=float,
metavar="D",
help="dropout to apply within the model",
)
parser.add_argument(
"--dropout-features",
type=float,
metavar="D",
help="dropout to apply to the features",
)
parser.add_argument(
"--dropout-agg",
type=float,
metavar="D",
help="dropout to apply after aggregation step",
)
parser.add_argument(
"--encoder", type=str, choices=["cnn"], help="type of encoder to use"
)
parser.add_argument(
"--aggregator",
type=str,
choices=["cnn", "gru"],
help="type of aggregator to use",
)
parser.add_argument(
"--gru-dim", type=int, metavar="N", help="GRU dimensionality"
)
parser.add_argument(
"--no-conv-bias",
action="store_true",
help="if set, does not learn bias for conv layers",
)
parser.add_argument(
"--agg-zero-pad",
action="store_true",
help="if set, zero pads in aggregator instead of repl pad",
)
parser.add_argument(
"--skip-connections-feat",
action="store_true",
help="if set, adds skip connections to the feature extractor",
)
parser.add_argument(
"--skip-connections-agg",
action="store_true",
help="if set, adds skip connections to the aggregator",
)
parser.add_argument(
"--residual-scale",
type=float,
metavar="D",
help="scales residual by sqrt(value)",
)
parser.add_argument(
"--log-compression",
action="store_true",
help="if set, adds a log compression to feature extractor",
)
parser.add_argument(
"--balanced-classes",
action="store_true",
help="if set, loss is scaled to balance for number of negatives",
)
parser.add_argument(
"--project-features",
choices=["none", "same", "new"],
help="if not none, features are projected using the (same or new) aggregator",
)
parser.add_argument(
"--non-affine-group-norm",
action="store_true",
help="if set, group norm is not affine",
)
parser.add_argument(
"--offset",
help="if set, introduces an offset from target to predictions. "
'if set to "auto", it is computed automatically from the receptive field',
)
parser.add_argument(
"--activation",
type=str,
choices=["relu", "gelu"],
help="which activation function to use",
)
parser.add_argument(
"--vq-type",
type=str,
choices=["none", "gumbel", "kmeans"],
help="which type of quantizer to use",
)
parser.add_argument(
"--vq-vars",
type=int,
metavar="N",
help="if set, project to this many vector quantized variables per group",
)
parser.add_argument(
"--vq-groups",
type=int,
metavar="N",
help="number of groups of latent variables",
)
parser.add_argument(
"--vq-dim",
type=int,
metavar="N",
help="uses this dimensionality for quantized vectors",
)
parser.add_argument(
"--vq-depth",
type=int,
metavar="N",
help="number of layers for vq weight projection",
)
parser.add_argument(
"--combine-groups",
action="store_true",
help="if set, variables are shared among groups",
)
parser.add_argument(
"--vq-temp",
type=str,
metavar="TEMP",
help="temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)",
)
parser.add_argument(
"--vq-gamma",
type=float,
metavar="D",
help="gamma parameter for kmeans style vector quantization",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_wav2vec_architecture(args)
model = Wav2VecModel(args)
logger.info(model)
return model
def __init__(self, args):
super().__init__()
self.prediction_steps = args.prediction_steps
offset = args.offset
if args.activation == "relu":
activation = nn.ReLU()
elif args.activation == "gelu":
activation = nn.GELU()
else:
raise Exception("unknown activation " + args.activation)
if args.encoder == "cnn":
feature_enc_layers = eval(args.conv_feature_layers)
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
log_compression=args.log_compression,
skip_connections=args.skip_connections_feat,
residual_scale=args.residual_scale,
non_affine_group_norm=args.non_affine_group_norm,
activation=activation,
)
embed = feature_enc_layers[-1][0]
else:
raise Exception("unknown encoder type " + args.encoder)
self.vector_quantizer = None
if args.vq_type == "gumbel":
self.vector_quantizer = GumbelVectorQuantizer(
dim=embed,
num_vars=args.vq_vars,
temp=eval(args.vq_temp),
groups=args.vq_groups,
combine_groups=args.combine_groups,
vq_dim=args.vq_dim if args.vq_dim > 0 else embed,
time_first=False,
activation=activation,
weight_proj_depth=args.vq_depth,
weight_proj_factor=2,
)
elif args.vq_type == "kmeans":
self.vector_quantizer = KmeansVectorQuantizer(
dim=embed,
num_vars=args.vq_vars,
groups=args.vq_groups,
combine_groups=args.combine_groups,
vq_dim=args.vq_dim if args.vq_dim > 0 else embed,
time_first=False,
gamma=args.vq_gamma,
)
else:
assert (
args.vq_type == "none" or args.vq_type is None
), "Unknown quantizer type"
if args.offset == "auto":
assert args.encoder == "cnn"
jin = 0
rin = 0
for _, k, stride in feature_enc_layers:
if rin == 0:
rin = k
rin = rin + (k - 1) * jin
if jin == 0:
jin = stride
else:
jin *= stride
offset = math.ceil(rin / jin)
offset = int(offset)
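        # Worked example (illustrative), assuming the default conv_feature_layers
        # "[(512, 10, 5)] + [(512, 8, 4)] + [(512, 4, 2)] * 3": the receptive
        # field grows as rin = 10 -> 10 + 7*5 = 45 -> 45 + 3*20 = 105 ->
        # 105 + 3*40 = 225 -> 225 + 3*80 = 465, while the total stride is
        # jin = 5*4*2*2*2 = 160, so offset = ceil(465 / 160) = 3.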
def make_aggregator():
if args.aggregator == "cnn":
agg_layers = eval(args.conv_aggregator_layers)
agg_dim = agg_layers[-1][0]
feature_aggregator = ConvAggegator(
conv_layers=agg_layers,
embed=embed,
dropout=args.dropout,
skip_connections=args.skip_connections_agg,
residual_scale=args.residual_scale,
non_affine_group_norm=args.non_affine_group_norm,
conv_bias=not args.no_conv_bias,
zero_pad=args.agg_zero_pad,
activation=activation,
)
elif args.aggregator == "gru":
agg_dim = args.gru_dim
feature_aggregator = nn.Sequential(
TransposeLast(),
nn.GRU(
input_size=embed,
hidden_size=agg_dim,
num_layers=1,
dropout=args.dropout,
),
TransposeLast(deconstruct_idx=0),
)
else:
raise Exception("unknown aggregator type " + args.aggregator)
return feature_aggregator, agg_dim
self.feature_aggregator, agg_dim = make_aggregator()
self.wav2vec_predictions = Wav2VecPredictionsModel(
in_dim=agg_dim,
out_dim=embed,
prediction_steps=args.prediction_steps,
n_negatives=args.num_negatives,
cross_sample_negatives=args.cross_sample_negatives,
sample_distance=args.sample_distance,
dropout=args.dropout,
offset=offset,
balanced_classes=args.balanced_classes,
infonce=args.infonce,
)
self.dropout_feats = nn.Dropout(p=args.dropout_features)
self.dropout_agg = nn.Dropout(p=args.dropout_agg)
if args.project_features == "none":
self.project_features = None
elif args.project_features == "same":
self.project_features = self.feature_aggregator
elif args.project_features == "new":
self.project_features, _ = make_aggregator()
def forward(self, source):
result = {}
features = self.feature_extractor(source)
if self.vector_quantizer:
q_res = self.vector_quantizer(features)
features = q_res["x"]
for k in q_res.keys():
if k != "x":
result[k] = q_res[k]
x = self.dropout_feats(features)
x = self.feature_aggregator(x)
x = self.dropout_agg(x)
if self.project_features is not None:
features = self.project_features(features)
x, targets = self.wav2vec_predictions(x, features)
result["cpc_logits"] = x
result["cpc_targets"] = targets
return result
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
def max_positions(self):
"""Maximum length supported by the model."""
return sys.maxsize
def get_logits(self, net_output):
logits = net_output["cpc_logits"]
return logits
def get_targets(self, sample, net_output):
t = net_output["cpc_targets"]
if isinstance(t, tuple):
t = t[0]
return t.contiguous()
def get_target_weights(self, targets, net_output):
targets = net_output["cpc_targets"]
if isinstance(targets, tuple) and targets[-1] is not None:
return targets[-1]
return None
def get_extra_losses(self, net_output):
loss = None
if "prob_perplexity" in net_output:
loss = net_output["num_vars"] - net_output["prob_perplexity"]
elif "kmeans_loss" in net_output:
loss = net_output["kmeans_loss"]
return loss
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
def norm_block(is_layer_norm, dim, affine=True):
if is_layer_norm:
mod = nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=affine),
TransposeLast(),
)
else:
mod = Fp32GroupNorm(1, dim, affine=affine)
return mod
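# Descriptive note: with is_layer_norm=True the Fp32LayerNorm is wrapped between
# two TransposeLast modules so normalization runs over the channel dimension of
# BxCxT inputs; otherwise a single-group Fp32GroupNorm is applied directly.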
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers,
dropout,
log_compression,
skip_connections,
residual_scale,
non_affine_group_norm,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
return nn.Sequential(
nn.Conv1d(n_in, n_out, k, stride=stride, bias=False),
nn.Dropout(p=dropout),
norm_block(
is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm
),
activation,
)
in_d = 1
self.conv_layers = nn.ModuleList()
for dim, k, stride in conv_layers:
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.log_compression = log_compression
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
residual = x
x = conv(x)
if self.skip_connections and x.size(1) == residual.size(1):
tsz = x.size(2)
r_tsz = residual.size(2)
residual = residual[..., :: r_tsz // tsz][..., :tsz]
x = (x + residual) * self.residual_scale
if self.log_compression:
x = x.abs()
x = x + 1
x = x.log()
return x
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
class ConvAggegator(nn.Module):
def __init__(
self,
conv_layers,
embed,
dropout,
skip_connections,
residual_scale,
non_affine_group_norm,
conv_bias,
zero_pad,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
# padding dims only really make sense for stride = 1
ka = k // 2
kb = ka - 1 if k % 2 == 0 else ka
pad = (
ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0))
)
return nn.Sequential(
pad,
nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias),
nn.Dropout(p=dropout),
norm_block(False, n_out, affine=not non_affine_group_norm),
activation,
)
in_d = embed
self.conv_layers = nn.ModuleList()
self.residual_proj = nn.ModuleList()
for dim, k, stride in conv_layers:
if in_d != dim and skip_connections:
self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False))
else:
self.residual_proj.append(None)
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.conv_layers = nn.Sequential(*self.conv_layers)
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
for rproj, conv in zip(self.residual_proj, self.conv_layers):
residual = x
x = conv(x)
if self.skip_connections:
if rproj is not None:
residual = rproj(residual)
x = (x + residual) * self.residual_scale
return x
class Wav2VecPredictionsModel(nn.Module):
def __init__(
self,
in_dim,
out_dim,
prediction_steps,
n_negatives,
cross_sample_negatives,
sample_distance,
dropout,
offset,
balanced_classes,
infonce,
):
super().__init__()
self.n_negatives = n_negatives
self.cross_sample_negatives = cross_sample_negatives
self.sample_distance = sample_distance
self.project_to_steps = nn.ConvTranspose2d(
in_dim, out_dim, (1, prediction_steps)
)
self.dropout = nn.Dropout(p=dropout)
self.offset = offset
self.balanced_classes = balanced_classes
self.infonce = infonce
def sample_negatives(self, y):
bsz, fsz, tsz = y.shape
y = y.transpose(0, 1) # BCT -> CBT
y = y.contiguous().view(fsz, -1) # CBT => C(BxT)
cross_high = tsz * bsz
high = tsz if self.sample_distance is None else min(tsz, self.sample_distance)
assert high > 1
neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz))
with torch.no_grad():
if self.n_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * tsz)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * tsz),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[..., neg_idxs.view(-1)]
negs = negs.view(
fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz
).permute(
2, 1, 0, 3
) # to NxBxCxT
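        # Shape sketch (illustrative numbers): with bsz=2, tsz=100, fsz=512,
        # n_negatives=10 and cross_sample_negatives=0, negs has shape
        # (10, 2, 512, 100), i.e. N x B x C x T.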
return negs
def forward(self, x, y):
x = x.unsqueeze(-1)
x = self.project_to_steps(x) # BxCxTxS
x = self.dropout(x)
negatives = self.sample_negatives(y)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T
copies = targets.size(0)
bsz, dim, tsz, steps = x.shape
steps = min(steps, tsz - self.offset)
predictions = x.new(
bsz * copies * (tsz - self.offset + 1) * steps
- ((steps + 1) * steps // 2) * copies * bsz
)
if self.infonce:
labels = predictions.new_full(
(predictions.shape[0] // copies,), 0, dtype=torch.long
)
else:
labels = torch.zeros_like(predictions)
weights = (
torch.full_like(labels, 1 / self.n_negatives)
if self.balanced_classes and not self.infonce
else None
)
start = end = 0
for i in range(steps):
offset = i + self.offset
end = start + (tsz - offset) * bsz * copies
if self.infonce:
predictions[start:end] = torch.einsum(
"bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:]
).flatten()
else:
pos_num = (end - start) // copies
predictions[start:end] = torch.einsum(
"bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:]
).flatten()
labels[start : start + pos_num] = 1.0
if weights is not None:
weights[start : start + pos_num] = 1.0
start = end
assert end == predictions.numel(), "{} != {}".format(end, predictions.numel())
if self.infonce:
predictions = predictions.view(-1, copies)
else:
if weights is not None:
labels = (labels, weights)
return predictions, labels
@register_model_architecture("wav2vec", "wav2vec")
def base_wav2vec_architecture(args):
conv_feature_layers = "[(512, 10, 5)]"
conv_feature_layers += " + [(512, 8, 4)]"
conv_feature_layers += " + [(512, 4, 2)] * 3"
args.conv_feature_layers = getattr(args, "conv_feature_layers", conv_feature_layers)
args.conv_aggregator_layers = getattr(
args, "conv_aggregator_layers", "[(512, 3, 1)] * 9"
)
args.prediction_steps = getattr(args, "prediction_steps", 12)
args.num_negatives = getattr(args, "num_negatives", 1)
args.sample_distance = getattr(args, "sample_distance", None)
args.cross_sample_negatives = getattr(args, "cross_sample_negatives", 0)
args.dropout = getattr(args, "dropout", 0.0)
args.dropout_features = getattr(args, "dropout_features", 0.0)
args.dropout_agg = getattr(args, "dropout_agg", 0.0)
args.encoder = getattr(args, "encoder", "cnn")
args.aggregator = getattr(args, "aggregator", "cnn")
args.skip_connections_feat = getattr(args, "skip_connections_feat", False)
args.skip_connections_agg = getattr(args, "skip_connections_agg", False)
args.residual_scale = getattr(args, "residual_scale", 0.5)
args.gru_dim = getattr(args, "gru_dim", 512)
args.no_conv_bias = getattr(args, "no_conv_bias", False)
args.agg_zero_pad = getattr(args, "agg_zero_pad", False)
args.log_compression = getattr(args, "log_compression", False)
args.balanced_classes = getattr(args, "balanced_classes", False)
args.infonce = getattr(args, "infonce", False)
args.project_features = getattr(args, "project_features", "none")
args.non_affine_group_norm = getattr(args, "non_affine_group_norm", False)
args.offset = getattr(args, "offset", "auto")
args.activation = getattr(args, "activation", "relu")
args.vq_type = getattr(args, "vq_type", "none")
args.vq_vars = getattr(args, "vq_vars", 320)
args.vq_groups = getattr(args, "vq_groups", 2)
args.vq_dim = getattr(args, "vq_dim", 0)
args.vq_depth = getattr(args, "vq_depth", 1)
args.combine_groups = getattr(args, "combine_groups", False)
args.vq_temp = getattr(args, "vq_temp", "(2.0, 0.5, 0.999995)")
args.vq_gamma = getattr(args, "vq_gamma", 0.25)
| bsd-3-clause |
chattahippie/Hearthstone-Card-Generator-AI- | Score_Generated_Cards.py | 1 | 3033 | import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
# Not sure if I need these yet
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
def nn_model():
#create model
model = Sequential()
model.add(Dense(output_dim=128, input_dim=160, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dense(output_dim=128, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
#compile model
model.compile(loss='binary_crossentropy', optimizer='adam')
return model
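# Descriptive note: the network maps 160 integer-encoded characters through two
# Dense(128, relu) layers to a single sigmoid output, trained with binary
# cross-entropy via the Adam optimizer.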
def main(input_filename, chars_file):
raw_text = ''
with open(input_filename) as fp:
raw_text = fp.read()
chars = ''
with open(chars_file) as fp:
chars = fp.read()
raw_text = raw_text.lower()
input_data = raw_text.splitlines()
input_data = [(x + (' ' * 160))[:160] for x in input_data]
# create mapping of unique chars to integers
chars = sorted(list(set(chars)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
# prepare the dataset of input to output pairs encoded as integers
dataX = []
for i in range(0, len(input_data), 1):
# output is formatted 'name\tnumber' and we just want the number
dataX.append([char_to_int[char] for char in input_data[i]])
n_patterns = len(dataX)
#X = numpy.reshape(dataX, (n_patterns, 160, 1))
X = numpy.reshape(dataX, (n_patterns, 160))
# normalize
seed = 7
numpy.random.seed(seed)
#scale = StandardScaler()
#dataX = scale.fit_transform(dataX)
# filepath = 'checkpoints/weights-improvement-Adam-{epoch:02d}-{loss:.4f}.hdf5'
# checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
# callbacks_list = [checkpoint]
model = nn_model()
model.load_weights("checkpoints/weights-improvement-Adam-49-0.6549.hdf5")
model.compile(loss='categorical_crossentropy', optimizer='adam')
res = model.predict(X)
for i in range(len(input_data)):
print(input_data[i] + " -> {}".format(res[i][0] * 100))
#kfold = KFold(n_splits=10, random_state=seed)
# --The program breaks here--
#results = cross_val_score(estimator, dataX, dataY, cv=kfold)
#print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
if __name__=='__main__':
import sys
# I had to do this because of windows
sys.exit(main(sys.argv[1], "data/cards.collectible.json_formatted.txt"))
#sys.exit(main(sys.argv[1], sys.argv[2]))
| apache-2.0 |
PyThaiNLP/pythainlp | pythainlp/summarize/mt5.py | 1 | 2416 | # -*- coding: utf-8 -*-
"""
Summarization by mT5 model
"""
from transformers import T5Tokenizer, MT5ForConditionalGeneration
from typing import List
from pythainlp.summarize import CPE_KMUTT_THAI_SENTENCE_SUM
class mT5Summarizer:
def __init__(
self,
model_size: str = "small",
num_beams: int = 4,
no_repeat_ngram_size: int = 2,
min_length: int = 30,
max_length: int = 100,
skip_special_tokens: bool = True,
pretrained_mt5_model_name: str = None,
):
model_name = ""
if pretrained_mt5_model_name is None:
if model_size not in ["small", "base", "large", "xl", "xxl"]:
raise ValueError(
f"""model_size \"{model_size}\" not found.
It might be a typo; if not, please consult our document."""
)
model_name = f"google/mt5-{model_size}"
else:
if pretrained_mt5_model_name == CPE_KMUTT_THAI_SENTENCE_SUM:
model_name = f"thanathorn/{CPE_KMUTT_THAI_SENTENCE_SUM}"
else:
model_name = pretrained_mt5_model_name
self.model_name = model_name
self.model = MT5ForConditionalGeneration.from_pretrained(model_name)
self.tokenizer = T5Tokenizer.from_pretrained(model_name)
self.num_beams = num_beams
self.no_repeat_ngram_size = no_repeat_ngram_size
self.min_length = min_length
self.max_length = max_length
self.skip_special_tokens = skip_special_tokens
def summarize(self, text: str) -> List[str]:
preprocess_text = text.strip().replace("\n", "")
if self.model_name == f"thanathorn/{CPE_KMUTT_THAI_SENTENCE_SUM}":
t5_prepared_Text = "simplify: " + preprocess_text
else:
t5_prepared_Text = "summarize: " + preprocess_text
tokenized_text = self.tokenizer.encode(
t5_prepared_Text, return_tensors="pt"
)
summary_ids = self.model.generate(
tokenized_text,
num_beams=self.num_beams,
no_repeat_ngram_size=self.no_repeat_ngram_size,
min_length=self.min_length,
max_length=self.max_length,
early_stopping=True,
)
output = self.tokenizer.decode(
summary_ids[0], skip_special_tokens=self.skip_special_tokens
)
return [output]
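# Illustrative usage (the text below is a placeholder, not from this module):
# summarizer = mT5Summarizer(model_size="small")
# summary = summarizer.summarize("ข้อความภาษาไทยที่ต้องการสรุป ...")[0]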
| apache-2.0 |
ipashchenko/ml4vs | ml4vs/nnpr_hyperopt.py | 1 | 15077 | import hyperopt
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import os
import numpy as np
from hyperopt import Trials, STATUS_OK, tpe
from keras import callbacks
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD
from sklearn.cross_validation import (StratifiedShuffleSplit, StratifiedKFold,
cross_val_score)
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
from sklearn.metrics import roc_auc_score, average_precision_score
from utils import print_cm_summary
from data_load import load_data, load_data_tgt
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
# names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
# 'Npts', 'CSSD']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X, y, df, feature_names, delta = load_data([file_0, file_1], names, names_to_delete)
target = 'variable'
predictors = list(df)
predictors.remove(target)
dtrain = df
kfold = StratifiedKFold(dtrain[target], n_folds=4, shuffle=True, random_state=1)
sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.25, random_state=123)
for train_index, test_index in sss:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
transforms = list()
transforms.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
transforms.append(('scaler', StandardScaler()))
pipeline = Pipeline(transforms)
for name, transform in pipeline.steps:
transform.fit(X_train)
X_test = transform.transform(X_test)
X_train = transform.transform(X_train)
def auc(y_true, y_pred):
return roc_auc_score(y_true, y_pred)
def keras_fmin_fnct(space):
print "Using hyperparameters ================================="
if not space['use_3_layers']:
print "2 layers, 18 - {} neurons".format(space['Dense'])
print "Dropouts: {} - {}".format(space['Dropout'], space['Dropout_1'])
print "W_constraints: {} - {}".format(space['w1'], space['w2'])
# print "LR = {}, DR = {}, Momentum = {}".format(0.1, space['dr'],
# space['momentum'])
print "Batch size = {}".format(space['batch_size'])
print "Class weight = {}".format(space['cw'])
elif space['use_3_layers']:
print "3 layers, 18 - {} - {} neurons".format(space['Dense'], space['use_3_layers']['Dense_2'])
print "Dropouts: {} - {} - {}".format(space['Dropout'], space['Dropout_1'],
space['use_3_layers']['Dropout_2'])
print "W_constraints: {} - {} - {}".format(space['w1'], space['w2'],
space['use_3_layers']['w3'])
# print "LR = {}, DR = {}, Momentum = {}".format(0.1, space['dr'],
# space['momentum'])
print "Batch size = {}".format(space['batch_size'])
print "Class weight = {}".format(space['cw'])
print "==================================================="
# Create and compile model
model = Sequential()
model.add(Dense(18, input_dim=18, init='normal', activation='relu',
W_constraint=maxnorm(space['w1'])))
model.add(Dropout(space['Dropout']))
model.add(Dense(space['Dense'], init='normal', activation='relu',
W_constraint=maxnorm(space['w2'])))
# model.add(Activation(space['Activation']))
model.add(Dropout(space['Dropout_1']))
# if conditional(space['conditional']) == 'three':
# model.add(Dense(space['Dense_2'], activation='relu',
# W_constraint=maxnorm(space['w3']),
# init='normal'))
# model.add(Dropout(space['Dropout_2']))
if space['use_3_layers']:
model.add(Dense(space['use_3_layers']['Dense_2'], activation='relu',
W_constraint=maxnorm(space['use_3_layers']['w3']),
init='normal'))
model.add(Dropout(space['use_3_layers']['Dropout_2']))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
learning_rate = 0.2
decay_rate = 0.001
momentum = 0.9
sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics=['accuracy'])
# Save model to HDF5
model.save('model.h5')
del model
# earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=10,
# verbose=1, mode='auto')
earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=25,
verbose=1, mode='auto')
laps = list()
for train_index, test_index in kfold:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
for name, transform in pipeline.steps:
transform.fit(X_train)
X_test = transform.transform(X_test)
X_train = transform.transform(X_train)
model = load_model('model.h5')
model.fit(X_train, y_train,
batch_size=space['batch_size'],
nb_epoch=1000,
verbose=2,
validation_data=(X_test, y_test),
callbacks=[earlyStopping],
class_weight={0: 1, 1: space['cw']})
# TODO: Use CV and cross_val_score
# score, acc = model.evaluate(X_test, y_test, verbose=1)
y_pred = model.predict(X_test, batch_size=space['batch_size'])
del model
aps = average_precision_score(y[test_index], y_pred)
print "== Fold AUPRC: {} ==".format(aps)
laps.append(aps)
aps = np.mean(laps)
print "== Area Under PR-Curve: {} ==".format(aps)
return {'loss': 1-aps, 'status': STATUS_OK}
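# hyperopt minimizes the returned 'loss', so 1 - mean AUPRC turns maximizing the
# area under the precision-recall curve into a minimization objective.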
space = {
'Dropout': hp.quniform('Dropout', 0., 0.5, 0.05),
'Dense': hp.choice('Dense', (9, 13, 18, 22, 27)),
'Dropout_1': hp.quniform('Dropout_1', 0., 0.5, 0.05),
# 'conditional': hp.choice('conditional', [{'n_layers': 'two'},
# {'n_layes': 'three',
# 'Dense_2': hp.choice('Dense_2', (9, 18, 36)),
# 'Dropout_2': hp.uniform('Dropout_2', 0., 1.),
# 'w3': hp.choice('w3', (1, 2, 3, 5, 7))}]),
'use_3_layers': hp.choice('use_3_layers', [False,
{'Dense_2': hp.choice('Dense_2', (9, 13, 18, 22, 27)),
'Dropout_2': hp.quniform('Dropout_2', 0., 0.5, 0.05),
'w3': hp.choice('w3', (2, 3, 4, 5))}]),
# 'lr': hp.loguniform('lr', -4.6, -0.7),
# 'dr': hp.loguniform('dr', -10.6, -2.5),
'w1': hp.choice('w1', (2, 3, 4, 5)),
'w2': hp.choice('w2', (2, 3, 4, 5)),
# 'momentum': hp.quniform('momentum', 0.5, 0.95, 0.05),
# 'cw': hp.qloguniform('cw', 0, 6, 1),
'cw': hp.quniform('cw', 1, 20, 1),
'batch_size': hp.choice('batch_size', (256, 512, 1024))
}
trials = Trials()
best = fmin(fn=keras_fmin_fnct,
space=space,
algo=tpe.suggest,
max_evals=100,
trials=trials)
print hyperopt.space_eval(space, best)
best_pars = hyperopt.space_eval(space, best)
# Now show plots
sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.25, random_state=1)
for train_index, test_index in sss:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
transforms = list()
transforms.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
transforms.append(('scaler', StandardScaler()))
pipeline = Pipeline(transforms)
for name, transform in pipeline.steps:
transform.fit(X_train)
X_test = transform.transform(X_test)
X_train = transform.transform(X_train)
history = callbacks.History()
earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=50,
verbose=1, mode='auto')
# Build model with best parameters
model = Sequential()
model.add(Dense(18, input_dim=18, init='normal', activation='relu',
W_constraint=maxnorm(best_pars['w1'])))
model.add(Dropout(best_pars['Dropout']))
model.add(Dense(best_pars['Dense'], init='normal', activation='relu',
W_constraint=maxnorm(best_pars['w2'])))
model.add(Dropout(best_pars['Dropout_1']))
if best_pars['use_3_layers']:
model.add(Dense(best_pars['use_3_layers']['Dense_2'], activation='relu',
W_constraint=maxnorm(best_pars['use_3_layers']['w3']),
init='normal'))
model.add(Dropout(best_pars['use_3_layers']['Dropout_2']))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
learning_rate = 0.2
decay_rate = 0.001
momentum = 0.9
sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics=['accuracy'])
model.save('model.h5')
model.fit(X_train, y_train,
batch_size=best_pars['batch_size'],
nb_epoch=1000,
show_accuracy=True,
verbose=2,
validation_data=(X_test, y_test),
callbacks=[earlyStopping, history],
class_weight={0: 1, 1: best_pars['cw']})
n_epoch = history.epoch[-1]
y_pred = model.predict(X_test)
y_pred[y_pred < 0.5] = 0.
y_pred[y_pred >= 0.5] = 1.
y_probs = model.predict_proba(X_test)
cm = confusion_matrix(y_test, y_pred)
print_cm_summary(cm)
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.savefig("nn_accuracy_weights.png")
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.savefig("nn_loss_weights.png")
plt.close()
################################################################################
# Now fit all train data
transforms = list()
transforms.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
transforms.append(('scaler', StandardScaler()))
pipeline = Pipeline(transforms)
for name, transform in pipeline.steps:
transform.fit(X)
X = transform.transform(X)
# Build model with best parameters
model = Sequential()
model.add(Dense(18, input_dim=18, init='normal', activation='relu',
W_constraint=maxnorm(best_pars['w1'])))
model.add(Dropout(best_pars['Dropout']))
model.add(Dense(best_pars['Dense'], init='normal', activation='relu',
W_constraint=maxnorm(best_pars['w2'])))
model.add(Dropout(best_pars['Dropout_1']))
if best_pars['use_3_layers']:
model.add(Dense(best_pars['use_3_layers']['Dense_2'], activation='relu',
W_constraint=maxnorm(best_pars['use_3_layers']['w3']),
init='normal'))
model.add(Dropout(best_pars['use_3_layers']['Dropout_2']))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
learning_rate = 0.2
decay_rate = 0.001
momentum = 0.9
sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics=['accuracy'])
model.fit(X, y, batch_size=best_pars['batch_size'], nb_epoch=int(1.25*n_epoch),
show_accuracy=True, verbose=2, class_weight={0: 1, 1: best_pars['cw']})
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names, names_to_delete,
delta)
# Use the same transform
for name, transform in pipeline.steps:
X_tgt = transform.transform(X_tgt)
y_probs = model.predict(X_tgt)[:, 0]
idx = y_probs > 0.5
idx_ = y_probs < 0.5
nn_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('nn_results.txt', 'w') as fo:
for line in list(df_orig['star_ID'][idx]):
fo.write(line + '\n')
# Analyze results
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
news = set(news)
with open('nn_results.txt', 'r') as fo:
nn = fo.readlines()
nn = [line.strip().split('_')[4].split('.')[0] for line in nn]
nn = set(nn)
print "Among new vars found {}".format(len(news.intersection(nn)))
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
nn_no = set([line.strip().split('_')[4].split('.')[0] for line in nn_no])
found_bad = '181193' in nn
print "Found known variable : ", found_bad
FN = len(nn_no.intersection(all_vars))
TP = len(all_vars.intersection(nn))
TN = len(nn_no) - FN
FP = len(nn) - TP
recall = float(TP) / (TP + FN)
precision = float(TP) / (TP + FP)
F1 = 2 * precision * recall / (precision + recall)
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "F1: {}".format(F1)
print "TN={}, FP={}".format(TN, FP)
print "FN={}, TP={}".format(FN, TP)
| mit |
PyThaiNLP/pythainlp | tests/test_augment.py | 1 | 1771 | # -*- coding: utf-8 -*-
import unittest
from pythainlp.augment import WordNetAug
from pythainlp.augment.wordnet import postype2wordnet
from pythainlp.augment.lm import Thai2transformersAug
from pythainlp.augment.word2vec.bpemb_wv import BPEmbAug
from pythainlp.augment.word2vec import (
Thai2fitAug,
LTW2VAug
)
import nltk
class TestTextaugmentPackage(unittest.TestCase):
def setUp(self):
self.text = "เรารักคุณมากที่สุดในโลก"
self.text2 = "เราอยู่ที่มหาวิทยาลัยขอนแก่น"
def test_WordNetAug(self):
nltk.download('omw-1.4', force=True) # load wordnet
wordnetaug = WordNetAug()
self.assertIsNotNone(wordnetaug.augment(self.text))
self.assertIsNotNone(wordnetaug.find_synonyms("ผม", pos=None))
self.assertIsNotNone(wordnetaug.augment(self.text, postag=False))
self.assertIsNone(postype2wordnet('n', 'abc'))
self.assertIsNotNone(postype2wordnet('NOUN', 'orchid'))
def test_Thai2fitAug(self):
_aug = Thai2fitAug()
self.assertIsNotNone(_aug.tokenizer(self.text))
self.assertIsNotNone(_aug.augment(self.text, n_sent=3, p=0.5))
def test_BPEmbAug(self):
_aug = BPEmbAug()
self.assertIsNotNone(_aug.tokenizer(self.text))
self.assertIsNotNone(_aug.augment(self.text, n_sent=3, p=0.5))
def test_LTW2VAug(self):
_aug = LTW2VAug()
self.assertIsNotNone(_aug.tokenizer(self.text))
self.assertIsNotNone(_aug.augment(self.text, n_sent=3, p=0.5))
def test_Thai2transformersAug(self):
_aug = Thai2transformersAug()
self.assertIsNotNone(_aug.augment(self.text2, num_replace_tokens=1))
| apache-2.0 |
freezmeinster/avagata-site | djangoappengine/db/compiler.py | 5 | 20122 | from .db_settings import get_model_indexes
import datetime
import sys
from django.db.models.sql import aggregates as sqlaggregates
from django.db.models.sql.constants import LOOKUP_SEP, MULTI, SINGLE
from django.db.models.sql.where import AND, OR
from django.db.utils import DatabaseError, IntegrityError
from django.utils.tree import Node
from functools import wraps
from google.appengine.api.datastore import Entity, Query, MultiQuery, \
Put, Get, Delete, Key
from google.appengine.api.datastore_errors import Error as GAEError
from google.appengine.api.datastore_types import Text, Category, Email, Link, \
PhoneNumber, PostalAddress, Text, Blob, ByteString, GeoPt, IM, Key, \
Rating, BlobKey
from djangotoolbox.db.basecompiler import NonrelQuery, NonrelCompiler, \
NonrelInsertCompiler, NonrelUpdateCompiler, NonrelDeleteCompiler
import cPickle as pickle
import decimal
# Valid query types (a dictionary is used for speedy lookups).
OPERATORS_MAP = {
'exact': '=',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<=',
# The following operators are supported with special code below:
'isnull': None,
'in': None,
'startswith': None,
'range': None,
'year': None,
}
NEGATION_MAP = {
'gt': '<=',
'gte': '<',
'lt': '>=',
'lte': '>',
# TODO: support these filters
#'exact': '!=', # this might actually become individual '<' and '>' queries
}
def safe_call(func):
@wraps(func)
def _func(*args, **kwargs):
try:
return func(*args, **kwargs)
except GAEError, e:
raise DatabaseError, DatabaseError(str(e)), sys.exc_info()[2]
return _func
class GAEQuery(NonrelQuery):
# ----------------------------------------------
# Public API
# ----------------------------------------------
def __init__(self, compiler, fields):
super(GAEQuery, self).__init__(compiler, fields)
self.inequality_field = None
self.pk_filters = None
self.excluded_pks = ()
self.has_negated_exact_filter = False
self.ordering = ()
self.gae_ordering = []
pks_only = False
if len(fields) == 1 and fields[0].primary_key:
pks_only = True
self.db_table = self.query.get_meta().db_table
self.pks_only = pks_only
start_cursor = getattr(self.query, '_gae_start_cursor', None)
end_cursor = getattr(self.query, '_gae_end_cursor', None)
self.gae_query = [Query(self.db_table, keys_only=self.pks_only,
cursor=start_cursor, end_cursor=end_cursor)]
# This is needed for debugging
def __repr__(self):
return '<GAEQuery: %r ORDER %r>' % (self.gae_query, self.ordering)
@safe_call
def fetch(self, low_mark, high_mark):
query = self._build_query()
executed = False
if self.excluded_pks and high_mark is not None:
high_mark += len(self.excluded_pks)
if self.pk_filters is not None:
results = self.get_matching_pk(low_mark, high_mark)
else:
if high_mark is None:
kw = {}
if low_mark:
kw['offset'] = low_mark
results = query.Run(**kw)
executed = True
elif high_mark > low_mark:
results = query.Get(high_mark - low_mark, low_mark)
executed = True
else:
results = ()
for entity in results:
if isinstance(entity, Key):
key = entity
else:
key = entity.key()
if key in self.excluded_pks:
continue
yield self._make_entity(entity)
if executed and not isinstance(query, MultiQuery):
self.query._gae_cursor = query.GetCompiledCursor()
@safe_call
def count(self, limit=None):
if self.pk_filters is not None:
return len(self.get_matching_pk(0, limit))
if self.excluded_pks:
return len(list(self.fetch(0, 2000)))
kw = {}
if limit is not None:
kw['limit'] = limit
return self._build_query().Count(**kw)
@safe_call
def delete(self):
if self.pk_filters is not None:
keys = [key for key in self.pk_filters if key is not None]
else:
keys = self.fetch()
if keys:
Delete(keys)
@safe_call
def order_by(self, ordering):
self.ordering = ordering
for order in self.ordering:
if order.startswith('-'):
order, direction = order[1:], Query.DESCENDING
else:
direction = Query.ASCENDING
if order == self.query.get_meta().pk.column:
order = '__key__'
self.gae_ordering.append((order, direction))
# This function is used by the default add_filters() implementation
@safe_call
def add_filter(self, column, lookup_type, negated, db_type, value):
if value in ([], ()):
self.pk_filters = []
return
# Emulated/converted lookups
if column == self.query.get_meta().pk.column:
column = '__key__'
db_table = self.query.get_meta().db_table
if lookup_type in ('exact', 'in'):
# Optimization: batch-get by key
if self.pk_filters is not None:
raise DatabaseError("You can't apply multiple AND filters "
"on the primary key. "
"Did you mean __in=[...]?")
if not isinstance(value, (tuple, list)):
value = [value]
pks = [create_key(db_table, pk) for pk in value if pk]
if negated:
self.excluded_pks = pks
else:
self.pk_filters = pks
return
else:
# XXX: set db_type to 'gae_key' in order to allow
# convert_value_for_db to recognize the value to be a Key and
# not a str. Otherwise the key would be converted back to a
# unicode (see convert_value_for_db)
db_type = 'gae_key'
key_type_error = 'Lookup values on primary keys have to be' \
'a string or an integer.'
if lookup_type == 'range':
if isinstance(value,(list, tuple)) and not(isinstance(
value[0], (basestring, int, long)) and \
isinstance(value[1], (basestring, int, long))):
raise DatabaseError(key_type_error)
elif not isinstance(value,(basestring, int, long)):
raise DatabaseError(key_type_error)
# for lookup type range we have to deal with a list
if lookup_type == 'range':
value[0] = create_key(db_table, value[0])
value[1] = create_key(db_table, value[1])
else:
value = create_key(db_table, value)
if lookup_type not in OPERATORS_MAP:
raise DatabaseError("Lookup type %r isn't supported" % lookup_type)
# We check for negation after lookup_type isnull because it
# simplifies the code. All following lookup_type checks assume
# that they're not negated.
if lookup_type == 'isnull':
if (negated and value) or not value:
# TODO/XXX: is everything greater than None?
op = '>'
else:
op = '='
value = None
elif negated and lookup_type == 'exact':
if self.has_negated_exact_filter:
raise DatabaseError("You can't exclude more than one __exact "
"filter")
self.has_negated_exact_filter = True
self._combine_filters(column, db_type,
(('<', value), ('>', value)))
return
elif negated:
try:
op = NEGATION_MAP[lookup_type]
except KeyError:
raise DatabaseError("Lookup type %r can't be negated" % lookup_type)
if self.inequality_field and column != self.inequality_field:
raise DatabaseError("Can't have inequality filters on multiple "
"columns (here: %r and %r)" % (self.inequality_field, column))
self.inequality_field = column
elif lookup_type == 'in':
# Create sub-query combinations, one for each value
if len(self.gae_query) * len(value) > 30:
raise DatabaseError("You can't query against more than "
"30 __in filter value combinations")
op_values = [('=', v) for v in value]
self._combine_filters(column, db_type, op_values)
return
elif lookup_type == 'startswith':
self._add_filter(column, '>=', db_type, value)
if isinstance(value, str):
value = value.decode('utf8')
if isinstance(value, Key):
value = list(value.to_path())
if isinstance(value[-1], str):
value[-1] = value[-1].decode('utf8')
value[-1] += u'\ufffd'
value = Key.from_path(*value)
else:
value += u'\ufffd'
self._add_filter(column, '<=', db_type, value)
return
elif lookup_type in ('range', 'year'):
self._add_filter(column, '>=', db_type, value[0])
op = '<=' if lookup_type == 'range' else '<'
self._add_filter(column, op, db_type, value[1])
return
else:
op = OPERATORS_MAP[lookup_type]
self._add_filter(column, op, db_type, value)
# ----------------------------------------------
# Internal API
# ----------------------------------------------
def _add_filter(self, column, op, db_type, value):
for query in self.gae_query:
key = '%s %s' % (column, op)
value = self.convert_value_for_db(db_type, value)
if isinstance(value, Text):
raise DatabaseError('TextField is not indexed, by default, '
"so you can't filter on it. Please add "
'an index definition for the column %s '
'on the model %s.%s as described here:\n'
'http://www.allbuttonspressed.com/blog/django/2010/07/Managing-per-field-indexes-on-App-Engine'
% (column, self.query.model.__module__, self.query.model.__name__))
if key in query:
existing_value = query[key]
if isinstance(existing_value, list):
existing_value.append(value)
else:
query[key] = [existing_value, value]
else:
query[key] = value
def _combine_filters(self, column, db_type, op_values):
gae_query = self.gae_query
combined = []
for query in gae_query:
for op, value in op_values:
self.gae_query = [Query(self.db_table,
keys_only=self.pks_only)]
self.gae_query[0].update(query)
self._add_filter(column, op, db_type, value)
combined.append(self.gae_query[0])
self.gae_query = combined
def _make_entity(self, entity):
if isinstance(entity, Key):
key = entity
entity = {}
else:
key = entity.key()
entity[self.query.get_meta().pk.column] = key
return entity
@safe_call
def _build_query(self):
for query in self.gae_query:
query.Order(*self.gae_ordering)
if len(self.gae_query) > 1:
return MultiQuery(self.gae_query, self.gae_ordering)
return self.gae_query[0]
def get_matching_pk(self, low_mark=0, high_mark=None):
if not self.pk_filters:
return []
results = [result for result in Get(self.pk_filters)
if result is not None and
self.matches_filters(result)]
if self.ordering:
results.sort(cmp=self.order_pk_filtered)
if high_mark is not None and high_mark < len(results) - 1:
results = results[:high_mark]
if low_mark:
results = results[low_mark:]
return results
def order_pk_filtered(self, lhs, rhs):
left = dict(lhs)
left[self.query.get_meta().pk.column] = lhs.key().to_path()
right = dict(rhs)
right[self.query.get_meta().pk.column] = rhs.key().to_path()
return self._order_in_memory(left, right)
def matches_filters(self, entity):
item = dict(entity)
pk = self.query.get_meta().pk
value = self.convert_value_from_db(pk.db_type(connection=self.connection),
entity.key())
item[pk.column] = value
result = self._matches_filters(item, self.query.where)
return result
class SQLCompiler(NonrelCompiler):
"""
A simple App Engine query: no joins, no distinct, etc.
"""
query_class = GAEQuery
def convert_value_from_db(self, db_type, value):
if isinstance(value, (list, tuple, set)) and \
db_type.startswith(('ListField:', 'SetField:')):
db_sub_type = db_type.split(':', 1)[1]
value = [self.convert_value_from_db(db_sub_type, subvalue)
for subvalue in value]
if db_type.startswith('SetField:') and value is not None:
value = set(value)
if db_type.startswith('DictField:') and value is not None:
value = pickle.loads(value)
if ':' in db_type:
db_sub_type = db_type.split(':', 1)[1]
value = dict((key, self.convert_value_from_db(db_sub_type, value[key]))
for key in value)
# the following GAE database types are all unicode subclasses, cast them
# to unicode so they appear like pure unicode instances for django
if isinstance(value, basestring) and value and db_type.startswith('decimal'):
value = decimal.Decimal(value)
elif isinstance(value, (Category, Email, Link, PhoneNumber, PostalAddress,
Text, unicode)):
value = unicode(value)
elif isinstance(value, Blob):
value = str(value)
elif isinstance(value, str):
            # always retrieve strings as unicode (old datasets may contain
            # non-unicode strings, but we always work with unicode ones here)
value = value.decode('utf-8')
elif isinstance(value, Key):
            # for now we do not support KeyFields, so a Key has to be the model's
            # own primary key
# TODO: GAE: support parents via GAEKeyField
assert value.parent() is None, "Parents are not yet supported!"
if db_type == 'integer':
if value.id() is None:
                    raise DatabaseError('Wrong type for Key. Expected integer, found '
'None')
else:
value = value.id()
elif db_type == 'text':
if value.name() is None:
                    raise DatabaseError('Wrong type for Key. Expected string, found '
'None')
else:
value = value.name()
else:
raise DatabaseError("%s fields cannot be keys on GAE" % db_type)
elif db_type == 'date' and isinstance(value, datetime.datetime):
value = value.date()
elif db_type == 'time' and isinstance(value, datetime.datetime):
value = value.time()
return value
def convert_value_for_db(self, db_type, value):
if isinstance(value, unicode):
value = unicode(value)
elif isinstance(value, str):
value = str(value)
elif isinstance(value, (list, tuple, set)) and \
db_type.startswith(('ListField:', 'SetField:')):
db_sub_type = db_type.split(':', 1)[1]
value = [self.convert_value_for_db(db_sub_type, subvalue)
for subvalue in value]
elif isinstance(value, decimal.Decimal) and db_type.startswith("decimal:"):
value = self.connection.ops.value_to_db_decimal(value, *eval(db_type[8:]))
elif isinstance(value, dict) and db_type.startswith('DictField:'):
if ':' in db_type:
db_sub_type = db_type.split(':', 1)[1]
value = dict([(key, self.convert_value_for_db(db_sub_type, value[key]))
for key in value])
value = Blob(pickle.dumps(value))
if db_type == 'gae_key':
return value
elif db_type == 'longtext':
# long text fields cannot be indexed on GAE so use GAE's database
# type Text
value = Text((isinstance(value, str) and value.decode('utf-8')) or value)
elif db_type == 'text':
value = (isinstance(value, str) and value.decode('utf-8')) or value
elif db_type == 'blob':
value = Blob(value)
elif type(value) is str:
# always store unicode strings
value = value.decode('utf-8')
elif db_type == 'date' or db_type == 'time' or db_type == 'datetime':
# here we have to check the db_type because GAE always stores datetimes
value = to_datetime(value)
return value
class SQLInsertCompiler(NonrelInsertCompiler, SQLCompiler):
@safe_call
def insert(self, data, return_id=False):
gae_data = {}
opts = self.query.get_meta()
unindexed_fields = get_model_indexes(self.query.model)['unindexed']
unindexed_cols = [opts.get_field(name).column
for name in unindexed_fields]
kwds = {'unindexed_properties': unindexed_cols}
for column, value in data.items():
if column == opts.pk.column:
if isinstance(value, basestring):
kwds['name'] = value
else:
kwds['id'] = value
elif isinstance(value, (tuple, list)) and not len(value):
# gae does not store emty lists (and even does not allow passing empty
# lists to Entity.update) so skip them
continue
else:
gae_data[column] = value
entity = Entity(self.query.get_meta().db_table, **kwds)
entity.update(gae_data)
key = Put(entity)
return key.id_or_name()
class SQLUpdateCompiler(NonrelUpdateCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(NonrelDeleteCompiler, SQLCompiler):
pass
def to_datetime(value):
"""Convert a time or date to a datetime for datastore storage.
Args:
value: A datetime.time, datetime.date or string object.
Returns:
A datetime object with date set to 1970-01-01 if value is a datetime.time
A datetime object with date set to value.year - value.month - value.day and
time set to 0:00 if value is a datetime.date
"""
if value is None:
return value
elif isinstance(value, datetime.datetime):
return value
elif isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
elif isinstance(value, datetime.time):
return datetime.datetime(1970, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
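# A minimal illustration (added for clarity, not from the original module):
#   to_datetime(datetime.date(2010, 5, 1))  returns datetime.datetime(2010, 5, 1, 0, 0)
#   to_datetime(datetime.time(8, 30))       returns datetime.datetime(1970, 1, 1, 8, 30)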
def create_key(db_table, value):
if isinstance(value, (int, long)) and value < 1:
return None
return Key.from_path(db_table, value)
| bsd-3-clause |
Clyde-fare/scikit-learn | benchmarks/bench_plot_lasso_path.py | 299 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
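# Hypothetical standalone use of compute_bench (e.g. for a quick local run
# without the 3D plots below); the ranges here are illustrative only:
#   results = compute_bench(samples_range=[100, 500], features_range=[100, 500])
#   for label, timings in sorted(results.items()):
#       print(label, timings)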
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to, since surface plots do not
        # support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
jorge2703/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non-Negative Matrix Factorization
and Latent Dirichlet Allocation to a corpus of documents to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable within a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF, while in LDA it is proportional
to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies; common English words, words occurring in
# only one document, and words occurring in at least 95% of the documents
# are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
jorge2703/scikit-learn | examples/datasets/plot_iris_dataset.py | 281 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
neurodata/ndstore | django/nduser/urls.py | 2 | 1606 | # Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import *
from . import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^profile/$', views.getProjects),
url(r'^projects/$', views.getProjects),
url(r'^datasets/$', views.getDatasets),
url(r'^channels/$', views.getChannels),
url(r'^token/$', views.getTokens),
url(r'^alltokens/$', views.getAllTokens),
url(r'^createproject/$', views.createProject),
url(r'^createdataset/$', views.createDataset),
url(r'^createtoken/$', views.createToken),
url(r'^updateproject/$', views.updateProject),
url(r'^updatetoken/$', views.updateToken),
url(r'^updatechannel/$', views.updateChannel),
url(r'^updatedataset/$', views.updateDataset),
url(r'^backupproject/$', views.backupProject),
url(r'^restoreproject/$', views.restoreProject),
url(r'^download/$', views.downloadData),
url(r'^usertoken/$', views.getUserToken),
url(r'^$', views.default)
]
| apache-2.0 |
mikewiebe-ansible/ansible | lib/ansible/modules/storage/zfs/zfs_facts.py | 19 | 7995 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs_facts
short_description: Gather facts about ZFS datasets.
description:
- Gather facts from ZFS dataset properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS dataset name.
required: yes
aliases: [ "ds", "dataset" ]
recurse:
description:
- Specifies if properties for any children should be recursively
displayed.
type: bool
default: 'no'
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: 'no'
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zfs(1M) man page.
default: all
aliases: [ "props" ]
type:
description:
        - Specifies which dataset types to display. Multiple values have to be
provided in comma-separated form.
choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
default: all
depth:
description:
- Specifies recursion depth.
'''
EXAMPLES = '''
- name: Gather facts about ZFS dataset rpool/export/home
zfs_facts:
dataset: rpool/export/home
- name: Report space usage on ZFS filesystems under data/home
zfs_facts:
name: data/home
recurse: yes
type: filesystem
- debug:
msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
with_items: '{{ ansible_zfs_datasets }}'
'''
RETURN = '''
name:
description: ZFS dataset name
returned: always
type: str
sample: rpool/var/spool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: bool
sample: True
recurse:
description: if we should recurse over ZFS dataset
returned: if 'recurse' is set to True
type: bool
sample: True
zfs_datasets:
description: ZFS dataset facts
returned: always
type: str
sample:
{
"aclinherit": "restricted",
"aclmode": "discard",
"atime": "on",
"available": "43.8G",
"canmount": "on",
"casesensitivity": "sensitive",
"checksum": "on",
"compression": "off",
"compressratio": "1.00x",
"copies": "1",
"creation": "Thu Jun 16 11:37 2016",
"dedup": "off",
"devices": "on",
"exec": "on",
"filesystem_count": "none",
"filesystem_limit": "none",
"logbias": "latency",
"logicalreferenced": "18.5K",
"logicalused": "3.45G",
"mlslabel": "none",
"mounted": "yes",
"mountpoint": "/rpool",
"name": "rpool",
"nbmand": "off",
"normalization": "none",
"org.openindiana.caiman:install": "ready",
"primarycache": "all",
"quota": "none",
"readonly": "off",
"recordsize": "128K",
"redundant_metadata": "all",
"refcompressratio": "1.00x",
"referenced": "29.5K",
"refquota": "none",
"refreservation": "none",
"reservation": "none",
"secondarycache": "all",
"setuid": "on",
"sharenfs": "off",
"sharesmb": "off",
"snapdir": "hidden",
"snapshot_count": "none",
"snapshot_limit": "none",
"sync": "standard",
"type": "filesystem",
"used": "4.41G",
"usedbychildren": "4.41G",
"usedbydataset": "29.5K",
"usedbyrefreservation": "0",
"usedbysnapshots": "0",
"utf8only": "off",
"version": "5",
"vscan": "off",
"written": "29.5K",
"xattr": "on",
"zoned": "off"
}
'''
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
class ZFSFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.recurse = module.params['recurse']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self.type = module.params['type']
self.depth = module.params['depth']
self._datasets = defaultdict(dict)
self.facts = []
def dataset_exists(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
if self.recurse:
cmd.append('-r')
if int(self.depth) != 0:
cmd.append('-d')
cmd.append('%s' % self.depth)
if self.type:
cmd.append('-t')
cmd.append(self.type)
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
dataset, property, value = line.split('\t')
self._datasets[dataset].update({property: value})
for k, v in iteritems(self._datasets):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_datasets': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
stderr=err,
rc=rc)
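    # For reference, the command assembled by get_facts above resembles (an
    # illustrative sketch, not output produced by the module):
    #   zfs get -H [-p] [-r] [-d <depth>] [-t <type>] -o name,property,value <properties> <name>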
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
recurse=dict(required=False, default=False, type='bool'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
depth=dict(required=False, default=0, type='int')
),
supports_check_mode=True
)
zfs_facts = ZFSFacts(module)
result = {}
result['changed'] = False
result['name'] = zfs_facts.name
if zfs_facts.parsable:
result['parsable'] = zfs_facts.parsable
if zfs_facts.recurse:
result['recurse'] = zfs_facts.recurse
if zfs_facts.dataset_exists():
result['ansible_facts'] = zfs_facts.get_facts()
else:
module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jorge2703/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 104 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature of ones, i.e. a column vector of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined fold, since folds generated from a different y would differ
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. However, it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
rsepassi/tensor2tensor | tensor2tensor/data_generators/imagenet.py | 1 | 12198 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.data_generators import image_utils
from tensor2tensor.utils import registry
import tensorflow as tf
# Derived from ImageNet data
MEAN_RGB = [0.485, 0.456, 0.406]
STDDEV_RGB = [0.229, 0.224, 0.225]
def imagenet_preprocess_example(example, mode, resize_size=None):
"""Preprocessing used for Imagenet and similar problems."""
resize_size = resize_size or [299, 299]
assert resize_size[0] == resize_size[1]
image = example["inputs"]
if mode == tf.estimator.ModeKeys.TRAIN:
image = preprocess_for_train(image, image_size=resize_size[0])
else:
image = preprocess_for_eval(image, image_size=resize_size[0])
example["inputs"] = image
return example
@registry.register_problem
class ImageImagenet(image_utils.Image2ClassProblem):
"""Imagenet."""
@property
def is_small(self):
return False
@property
def num_classes(self):
return 1000
def generate_data(self, data_dir, tmp_dir, task_id=-1):
# TODO(lukaszkaiser): find a better way than printing this.
print("To generate the ImageNet dataset in the proper format, follow "
"instructions at https://github.com/tensorflow/models/blob/master"
"/inception/README.md#getting-started")
def preprocess_example(self, example, mode, _):
return imagenet_preprocess_example(example, mode)
class ImageImagenetRescaled(ImageImagenet):
"""Imagenet rescaled to rescale_size."""
@property
def rescale_size(self):
# return [224, 224]
raise NotImplementedError()
def dataset_filename(self):
return "image_imagenet" # Reuse Imagenet data.
def generate_data(self, data_dir, tmp_dir, task_id=-1):
tf.logging.warning(
"Generate data for rescaled ImageNet problems with image_imagenet")
def preprocess_example(self, example, mode, _):
return imagenet_preprocess_example(
example, mode, resize_size=self.rescale_size)
@registry.register_problem
class ImageImagenet224(ImageImagenetRescaled):
"""Imagenet rescaled to 224x224."""
@property
def rescale_size(self):
return [224, 224]
@registry.register_problem
class ImageImagenet32(ImageImagenetRescaled):
"""Imagenet rescaled to 32x32."""
@property
def rescale_size(self):
return [32, 32]
@property
def is_small(self):
return True # Modalities like for CIFAR.
def preprocess_example(self, example, mode, _):
# Just resize with area.
if self._was_reversed:
example["inputs"] = tf.to_int64(
tf.image.resize_images(example["inputs"], self.rescale_size,
tf.image.ResizeMethod.AREA))
else:
example = imagenet_preprocess_example(example, mode)
example["inputs"] = tf.to_int64(
tf.image.resize_images(example["inputs"], self.rescale_size))
return example
@registry.register_problem
class ImageImagenet64(ImageImagenet32):
"""Imagenet rescaled to 64x64."""
@property
def rescale_size(self):
return [64, 64]
@registry.register_problem
class Img2imgImagenet(image_utils.ImageProblem):
"""Imagenet rescaled to 8x8 for input and 32x32 for output."""
def dataset_filename(self):
return "image_imagenet" # Reuse Imagenet data.
def preprocess_example(self, example, unused_mode, unused_hparams):
inputs = example["inputs"]
# For Img2Img resize input and output images as desired.
example["inputs"] = image_utils.resize_by_area(inputs, 8)
example["targets"] = image_utils.resize_by_area(inputs, 32)
return example
def generate_data(self, data_dir, tmp_dir, task_id=-1):
tf.logging.warning("Generate data for img2img_imagenet with image_imagenet")
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.input_modality = {"inputs": ("image:identity", 256)}
p.target_modality = ("image:identity", 256)
p.batch_size_multiplier = 256
p.input_space_id = 1
p.target_space_id = 1
# The following preprocessing functions were taken from
# cloud_tpu/models/resnet/resnet_preprocessing.py
# ==============================================================================
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: `Tensor` image of shape [height, width, channels].
offset_height: `Tensor` indicating the height offset.
offset_width: `Tensor` indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
the cropped (and resized) image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
["Crop size greater than the image size."])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
  # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
return tf.reshape(image, cropped_shape)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: `Tensor` of image (it will be converted to floats in [0, 1]).
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
    must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
(cropped image `Tensor`, distorted bbox `Tensor`).
"""
with tf.name_scope(scope, "distorted_bounding_box_crop", [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def _random_crop(image, size):
"""Make a random crop of (`size` x `size`)."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
random_image, bbox = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=1,
scope=None)
bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3)
image = tf.cond(
bad, lambda: _center_crop(_do_scale(image, size), size),
lambda: tf.image.resize_bicubic([random_image], [size, size])[0])
return image
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def _at_least_x_are_true(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are true."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _do_scale(image, size):
"""Rescale the image by scaling the smaller spatial dimension to `size`."""
shape = tf.cast(tf.shape(image), tf.float32)
w_greater = tf.greater(shape[0], shape[1])
shape = tf.cond(w_greater,
lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32),
lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32))
return tf.image.resize_bicubic([image], shape)[0]
def _center_crop(image, size):
"""Crops to center of image with specified `size`."""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = ((image_height - size) + 1) / 2
offset_width = ((image_width - size) + 1) / 2
image = _crop(image, offset_height, offset_width, size, size)
return image
def _normalize(image):
"""Normalize the image to zero mean and unit variance."""
offset = tf.constant(MEAN_RGB, shape=[1, 1, 3])
image -= offset
scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3])
image /= scale
return image
def preprocess_for_train(image, image_size=224):
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
image_size: int, how large the output image should be.
Returns:
A preprocessed image `Tensor`.
"""
image = _random_crop(image, image_size)
image = _normalize(image)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
return image
def preprocess_for_eval(image, image_size=224):
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
image_size: int, how large the output image should be.
Returns:
A preprocessed image `Tensor`.
"""
image = _do_scale(image, image_size + 32)
image = _normalize(image)
image = _center_crop(image, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
return image
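# A minimal, hypothetical usage sketch (not part of the original module): both
# entry points are typically fed a float image scaled to [0, 1] (the MEAN_RGB /
# STDDEV_RGB constants above assume that range) and return a
# [image_size, image_size, 3] tensor, e.g.
#   raw = tf.image.decode_jpeg(jpeg_bytes, channels=3)         # assumed input
#   image = tf.image.convert_image_dtype(raw, tf.float32)      # scale to [0, 1]
#   train_image = preprocess_for_train(image, image_size=224)
#   eval_image = preprocess_for_eval(image, image_size=224)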
# ==============================================================================
| apache-2.0 |
microsoft/onnxruntime | orttraining/orttraining/test/python/orttraining_test_onnxblock.py | 1 | 27610 | import copy
import io
import os
import random
import tempfile
import numpy as np
import onnx
import pytest
import torch
import onnxruntime
import onnxruntime.training.onnxblock as onnxblock
from onnxruntime.capi import _pybind_state as C
# PyTorch Module definitions
class SimpleNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(SimpleNet, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, model_input):
out = self.fc1(model_input)
out = self.relu(out)
out = self.fc2(out)
return out
# onnxblock Model definitions
class SimpleModelWithMSELoss(onnxblock.Model):
def __init__(self):
super(SimpleModelWithMSELoss, self).__init__()
self.loss = onnxblock.loss.MSELoss()
def build(self, output_name):
return self.loss(output_name)
class SimpleModelWithCrossEntropyLoss(onnxblock.Model):
def __init__(self):
super(SimpleModelWithCrossEntropyLoss, self).__init__()
self.loss = onnxblock.loss.CrossEntropyLoss()
def build(self, output_name):
return self.loss(output_name)
class SimpleTrainingModelWithMSELoss(onnxblock.TrainingModel):
def __init__(self):
super(SimpleTrainingModelWithMSELoss, self).__init__()
self.loss = onnxblock.loss.MSELoss()
def build(self, output_name):
return self.loss(output_name)
class SimpleTrainingModelWithCrossEntropyLoss(onnxblock.TrainingModel):
def __init__(self):
super(SimpleTrainingModelWithCrossEntropyLoss, self).__init__()
self.loss = onnxblock.loss.CrossEntropyLoss()
def build(self, output_name):
return self.loss(output_name)
class SimpleModelWithBCEWithLogitsLoss(onnxblock.Model):
def __init__(self):
super(SimpleModelWithBCEWithLogitsLoss, self).__init__()
self.loss = onnxblock.loss.BCEWithLogitsLoss()
def build(self, output_name):
return self.loss(output_name)
class SimpleTrainingModelWithBCEWithLogitsLoss(onnxblock.TrainingModel):
def __init__(self):
super(SimpleTrainingModelWithBCEWithLogitsLoss, self).__init__()
self.loss = onnxblock.loss.BCEWithLogitsLoss()
def build(self, output_name):
return self.loss(output_name)
# Test utility methods
def _get_onnx_model(torch_model, model_inputs):
model_outputs = torch_model(*model_inputs)
if isinstance(model_outputs, torch.Tensor):
model_outputs = [model_outputs]
dynamic_axes = {}
input_names = []
output_names = []
for i, model_input in enumerate(model_inputs):
input_name = f"input-{i}"
input_names.append(input_name)
dynamic_axes[input_name] = {}
for dim_idx in range(len(model_input.shape)):
dynamic_axes[input_name].update({dim_idx: f"{input_name}_dim{dim_idx}"})
for i, model_output in enumerate(model_outputs):
output_name = f"output-{i}"
output_names.append(output_name)
dynamic_axes[output_name] = {}
for dim_idx in range(len(model_output.shape)):
dynamic_axes[output_name].update({dim_idx: f"{output_name}_dim{dim_idx}"})
f = io.BytesIO()
torch.onnx.export(
torch_model,
model_inputs,
f,
input_names=input_names,
output_names=output_names,
opset_version=14,
do_constant_folding=False,
training=torch.onnx.TrainingMode.TRAINING,
dynamic_axes=dynamic_axes,
export_params=True,
keep_initializers_as_inputs=False,
)
return onnx.load_model_from_string(f.getvalue())
def _to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
def _get_models(device, batch_size, input_size, hidden_size, output_size, zero_flag=False):
"""Returns the pt and onnx models for SimpleNet"""
pt_model = SimpleNet(input_size, hidden_size, output_size).to(device)
# setting all initial weights to zero
if zero_flag:
with torch.no_grad():
for param in pt_model.parameters():
param.zero_()
x = torch.randn(batch_size, input_size, device=device)
onnx_model = _get_onnx_model(pt_model, (x,))
return pt_model, onnx_model
def _get_training_ort_output_names(pt_model, onnx_model):
"""Returns the ort output names"""
ort_output_names = [onnx_model.graph.output[0].name]
for name, _ in pt_model.named_parameters():
ort_output_names.append(f"{name}_grad.accumulation.out")
return ort_output_names
def _get_training_ort_inputs(x, target, pt_model, onnx_model, target_type=None):
"""Returns the ort inputs"""
ort_inputs = {
onnx_model.graph.input[0].name: _to_numpy(copy.deepcopy(x)),
onnx_model.graph.input[1].name: _to_numpy(copy.deepcopy(target))
if target_type is None
else _to_numpy(copy.deepcopy(target).type(target_type)),
}
if target_type is not None:
ort_inputs[onnx_model.graph.input[1].name]
for name, param in pt_model.named_parameters():
ort_inputs[name] = _to_numpy(copy.deepcopy(param))
ort_inputs[f"{name}_grad.accumulation.buffer"] = _to_numpy(torch.zeros_like(param))
ort_inputs["lazy_reset_grad"] = np.full(1, True)
return ort_inputs
# All unit tests
@pytest.mark.parametrize(
"graph",
[
SimpleModelWithMSELoss,
SimpleModelWithCrossEntropyLoss,
SimpleTrainingModelWithMSELoss,
SimpleTrainingModelWithCrossEntropyLoss,
SimpleModelWithBCEWithLogitsLoss,
SimpleTrainingModelWithBCEWithLogitsLoss,
],
)
def test_loss_composition(graph):
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
# When / Then no error occurs
simple_model = graph()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
def test_mse_loss_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randn(batch_size, output_size, device=device)
# Build the onnx model with loss
simple_model = SimpleModelWithMSELoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
ort_output_names = [onnx_model.graph.output[0].name]
ort_inputs = {
onnx_model.graph.input[0].name: _to_numpy(copy.deepcopy(x)),
onnx_model.graph.input[1].name: _to_numpy(copy.deepcopy(target)),
}
def mse_loss(prediction, target):
loss = torch.nn.MSELoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run(ort_output_names, ort_inputs)
torch_outs = mse_loss(pt_model(x), target)
# Then
assert np.allclose(ort_outs[0], _to_numpy(torch_outs))
def test_crossentropy_loss_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randint(high=output_size, size=(batch_size,), dtype=torch.int64, device=device)
# Build the onnx model with loss
simple_model = SimpleModelWithCrossEntropyLoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
ort_output_names = [onnx_model.graph.output[0].name]
ort_inputs = {
onnx_model.graph.input[0].name: _to_numpy(copy.deepcopy(x)),
onnx_model.graph.input[1].name: _to_numpy(copy.deepcopy(target).type(torch.int32)),
}
def crossentropy_loss(prediction, target):
loss = torch.nn.CrossEntropyLoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run(ort_output_names, ort_inputs)
torch_outs = crossentropy_loss(pt_model(x), target)
# Then
assert np.allclose(ort_outs[0], _to_numpy(torch_outs))
def test_bcewithlogits_loss_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randn(batch_size, output_size, device=device)
# Build the onnx model with loss
simple_model = SimpleModelWithBCEWithLogitsLoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
ort_output_names = [onnx_model.graph.output[0].name]
ort_inputs = {
onnx_model.graph.input[0].name: _to_numpy(copy.deepcopy(x)),
onnx_model.graph.input[1].name: _to_numpy(copy.deepcopy(target)),
}
def bcewithlogits_loss(prediction, target):
loss = torch.nn.BCEWithLogitsLoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run(ort_output_names, ort_inputs)
torch_outs = bcewithlogits_loss(pt_model(x), target)
# Then
assert np.allclose(ort_outs[0], _to_numpy(torch_outs))
def test_mse_loss_training_graph_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randn(batch_size, output_size, device=device)
# Build the onnx trainingmodel with loss
simple_model = SimpleTrainingModelWithMSELoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
ort_output_names = _get_training_ort_output_names(pt_model, onnx_model)
ort_inputs = _get_training_ort_inputs(x, target, pt_model, onnx_model)
def mse_loss(prediction, target):
loss = torch.nn.MSELoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run(ort_output_names, ort_inputs)
torch_outs = mse_loss(pt_model(x), target)
torch_outs.backward()
# Then
# assert loss is close
assert np.allclose(ort_outs[0], _to_numpy(torch_outs))
def test_crossentropy_loss_training_graph_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randint(high=output_size, size=(batch_size,), dtype=torch.int64, device=device)
# Build the onnx trainingmodel with loss
simple_model = SimpleTrainingModelWithCrossEntropyLoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
ort_output_names = _get_training_ort_output_names(pt_model, onnx_model)
ort_inputs = _get_training_ort_inputs(x, target, pt_model, onnx_model, target_type=torch.int32)
def crossentropy_loss(prediction, target):
loss = torch.nn.CrossEntropyLoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run(ort_output_names, ort_inputs)
torch_outs = crossentropy_loss(pt_model(x), target)
torch_outs.backward()
# Then
# assert loss is close
assert np.allclose(ort_outs[0], _to_numpy(torch_outs))
def test_bcewithlogits_loss_training_graph_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randn(batch_size, output_size, device=device)
# Build the onnx model with loss
simple_model = SimpleTrainingModelWithBCEWithLogitsLoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
ort_output_names = _get_training_ort_output_names(pt_model, onnx_model)
ort_inputs = _get_training_ort_inputs(x, target, pt_model, onnx_model)
def bcewithlogits_loss(prediction, target):
loss = torch.nn.BCEWithLogitsLoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run(ort_output_names, ort_inputs)
torch_outs = bcewithlogits_loss(pt_model(x), target)
torch_outs.backward()
# Then
# assert loss is close
assert np.allclose(ort_outs[0], _to_numpy(torch_outs))
@pytest.mark.parametrize(
"graph",
[SimpleTrainingModelWithMSELoss, SimpleTrainingModelWithCrossEntropyLoss, SimpleTrainingModelWithBCEWithLogitsLoss],
)
@pytest.mark.parametrize("grad_clipping", [None, onnxblock.optim.ClipGradNorm(2.5)])
def test_adamw_optimizer_composition(graph, grad_clipping):
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
# When / Then no error occurs
simple_model = graph()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
optimizer = onnxblock.optim.AdamW(clip_grad=grad_clipping)
with onnxblock.onnx_model() as accessor:
_ = optimizer(simple_model.parameters())
optimizer_model = accessor.model
assert optimizer_model
# TODO: Add a test for correctness when creation of ortvalues of
# tensor seq is possible on cuda
def test_adamw_optimizer_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randn(batch_size, output_size, device=device)
simple_model = SimpleTrainingModelWithMSELoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
optimizer = onnxblock.optim.AdamW()
with onnxblock.onnx_model() as accessor:
output_name = optimizer(simple_model.parameters())
optimizer_model = accessor.model
learning_rate = 0.001
step = 1
ort_output_names = [output_name]
def mse_loss(prediction, target):
loss = torch.nn.MSELoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(optimizer_model, onnx_fo.name)
loss = mse_loss(pt_model(x), target)
loss.backward()
ort_inputs = {
"learning_rate": np.full(1, learning_rate, dtype=np.float32),
"step": np.full(1, step, dtype=np.int64),
"params": [],
"gradients": [],
"first_order_moments": [],
"second_order_moments": [],
}
for _, param in pt_model.named_parameters():
ort_inputs["params"].append(_to_numpy(copy.deepcopy(param)))
ort_inputs["gradients"].append(_to_numpy(copy.deepcopy(param.grad)))
ort_inputs["first_order_moments"].append(_to_numpy(torch.zeros_like(param)))
ort_inputs["second_order_moments"].append(_to_numpy(torch.zeros_like(param)))
# Then no error occurs when executing the model
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
_ = ort_session.run(ort_output_names, ort_inputs)
def test_retrieve_parameters():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
simple_model = SimpleTrainingModelWithMSELoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
# When
trainable_params, non_trainable_params = simple_model.parameters()
# Then
assert not non_trainable_params
for ort_param, (pt_param_name, pt_param) in zip(trainable_params, pt_model.named_parameters()):
assert ort_param.name == pt_param_name
assert np.allclose(
np.frombuffer(ort_param.raw_data, dtype=np.float32).reshape(pt_param.shape),
_to_numpy(pt_param),
)
def test_retrieve_parameters_before_building_gradient_graph():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
simple_model = SimpleTrainingModelWithMSELoss()
# When / Then
with pytest.raises(Exception) as ex_info:
_, _ = simple_model.parameters()
assert "Please build the training model first before trying to retrieve the parameters." in str(ex_info.value)
def test_save_checkpoint():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
simple_model = SimpleTrainingModelWithMSELoss()
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
trainable_params, non_trainable_params = simple_model.parameters()
# When
with tempfile.TemporaryDirectory() as checkpoint_dir_name:
checkpoint_file_path = os.path.join(checkpoint_dir_name, "checkpoint")
onnxblock.save_checkpoint((trainable_params, non_trainable_params), checkpoint_file_path)
# Then
assert os.path.exists(checkpoint_file_path)
def test_load_checkpoint():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, zero_onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size, zero_flag=True)
for i in range(len(zero_onnx_model.graph.initializer)):
zero_np = onnx.numpy_helper.to_array(zero_onnx_model.graph.initializer[i])
assert np.allclose(zero_np, np.zeros(zero_np.shape))
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
# Copy of onnx_model for comparison
onnx_model_copy = copy.deepcopy(onnx_model)
simple_model = SimpleTrainingModelWithMSELoss()
# When
simple_model.requires_grad("fc2.weight", False)
simple_model.requires_grad("fc1.bias", False)
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
trainable_params, non_trainable_params = simple_model.parameters()
with tempfile.TemporaryDirectory() as checkpoint_dir_name:
checkpoint_file_path = os.path.join(checkpoint_dir_name, "checkpoint")
onnxblock.save_checkpoint((trainable_params, non_trainable_params), checkpoint_file_path)
# Load checkpoint parameters to the new simple model
onnxblock.load_checkpoint_to_model(checkpoint_file_path, zero_onnx_model)
# Then
onnx_model_copy.graph.initializer.sort(key=lambda x: x.name)
zero_onnx_model.graph.initializer.sort(key=lambda x: x.name)
for i, _ in enumerate(onnx_model_copy.graph.initializer):
onnx_np = onnx.numpy_helper.to_array(onnx_model_copy.graph.initializer[i])
zero_np = onnx.numpy_helper.to_array(zero_onnx_model.graph.initializer[i])
assert np.allclose(onnx_np, zero_np)
def test_set_requires_grad_on_parameters():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
simple_model = SimpleTrainingModelWithMSELoss()
# When
simple_model.requires_grad("fc2.weight", False)
simple_model.requires_grad("fc1.bias", False)
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
trainable_params, non_trainable_params = simple_model.parameters()
# Then
expected_trainable_parameters = {"fc1.weight", "fc2.bias"}
expected_non_trainable_parameters = {"fc2.weight", "fc1.bias"}
for param in trainable_params:
assert param.name in expected_trainable_parameters
for param in non_trainable_params:
assert param.name in expected_non_trainable_parameters
def test_set_requires_grad_on_inputs():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
_, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
# When
simple_model = SimpleTrainingModelWithMSELoss()
simple_model.requires_grad("input-0")
with onnxblock.onnx_model(onnx_model):
_ = simple_model(onnx_model.graph.output[0].name)
# Then
    expected_input_gradient_buffer_name = "input-0_grad.accumulation.buffer"
    expected_input_gradient_output_name = "input-0_grad.accumulation.out"
graph_input_names = {graph_input.name for graph_input in onnx_model.graph.input}
graph_output_names = {graph_output.name for graph_output in onnx_model.graph.output}
    assert expected_input_gradient_buffer_name in graph_input_names
    assert expected_input_gradient_output_name in graph_output_names
@pytest.mark.parametrize("model_type", [onnxblock.Model, onnxblock.TrainingModel])
def test_weighted_average_model_composition(model_type):
# Given
class TwoOutputNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(TwoOutputNet, self).__init__()
self.fc1_1 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.fc1_2 = torch.nn.Linear(hidden_size, num_classes)
self.fc2_1 = torch.nn.Linear(input_size, hidden_size)
self.relu2 = torch.nn.ReLU()
self.fc2_2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, model_input1, model_input2):
out1 = self.fc1_2(self.relu1(self.fc1_1(model_input1)))
out2 = self.fc2_2(self.relu2(self.fc2_1(model_input2)))
return out1, out2
class WeightedAvg(model_type):
def __init__(self, w1, w2):
super(WeightedAvg, self).__init__()
self.loss1 = onnxblock.loss.CrossEntropyLoss()
self.loss2 = onnxblock.loss.CrossEntropyLoss()
self.w1 = onnxblock.building_blocks.Constant(w1)
self.w2 = onnxblock.building_blocks.Constant(w2)
self.mul = onnxblock.building_blocks.Mul()
self.add = onnxblock.building_blocks.Add()
def build(self, loss_input_name1, loss_input_name2):
return self.add(
self.mul(self.w1(), self.loss1(loss_input_name1, labels_name="labels1")),
self.mul(self.w2(), self.loss2(loss_input_name2, labels_name="labels2")),
)
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model = TwoOutputNet(input_size, hidden_size, output_size).to(device)
x1 = torch.randn(batch_size, input_size, device=device)
x2 = torch.randn(batch_size, input_size, device=device)
onnx_model = _get_onnx_model(pt_model, (x1, x2))
# When / Then no error occurs
weighted_model = WeightedAvg(random.random(), random.random())
with onnxblock.onnx_model(onnx_model):
_ = weighted_model(onnx_model.graph.output[0].name, onnx_model.graph.output[1].name)
def test_grad_clipping_execution():
# Given
device = "cuda"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, _ = _get_models(device, batch_size, input_size, hidden_size, output_size)
x = torch.randn(batch_size, input_size, device=device)
target = torch.randn(batch_size, output_size, device=device)
# Prepare the onnx model with only grad clipping
onnx_model = onnx.ModelProto()
onnx_model.graph.name = "ClipGradNorm Model"
onnx_model.producer_name = "grad clipping test"
onnx_model.opset_import.extend(onnxblock.optim.optim._OPSET_IMPORTS)
onnx_model.ir_version = onnx.IR_VERSION
class GradClippingModel(onnxblock.Model):
def __init__(self, max_norm):
super().__init__()
self._grad_clip = onnxblock.optim.ClipGradNorm(max_norm)
def build(self, grads_name):
return self._grad_clip(grads_name)
onnx_model.graph.input.append(
onnx.helper.make_tensor_sequence_value_info("gradients", onnx.TensorProto.FLOAT, None)
)
grad_clip = GradClippingModel(2.5)
with onnxblock.onnx_model(onnx_model):
ort_output_names = grad_clip("gradients")
onnx_model.graph.output.append(
onnx.helper.make_tensor_sequence_value_info(ort_output_names, onnx.TensorProto.FLOAT, None)
)
def mse_loss(prediction, target):
loss = torch.nn.MSELoss()
return loss(prediction, target)
# When
with tempfile.NamedTemporaryFile(suffix=".onnx") as onnx_fo:
onnx.save(onnx_model, onnx_fo.name)
loss = mse_loss(pt_model(x), target)
loss.backward()
ort_inputs = {"gradients": []}
for _, param in pt_model.named_parameters():
ort_inputs["gradients"].append(_to_numpy(copy.deepcopy(param.grad)))
torch.nn.utils.clip_grad_norm_(pt_model.parameters(), 2.5)
# Then no error occurs when executing the model
ort_session = onnxruntime.InferenceSession(onnx_fo.name, providers=C.get_available_providers())
ort_outs = ort_session.run([ort_output_names], ort_inputs)
# assert all the gradients are close
for ort_grad, pt_param in zip(ort_outs[0], pt_model.parameters()):
assert np.allclose(ort_grad, _to_numpy(pt_param.grad))
| mit |
ericmckean/namebench | nb_third_party/dns/node.py | 215 | 5914 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
"""A DNS node.
A node is a set of rdatasets
@ivar rdatasets: the node's rdatasets
@type rdatasets: list of dns.rdataset.Rdataset objects"""
__slots__ = ['rdatasets']
def __init__(self):
"""Initialize a DNS node.
"""
        self.rdatasets = []
def to_text(self, name, **kw):
"""Convert a node to text format.
Each rdataset at the node is printed. Any keyword arguments
to this method are passed on to the rdataset's to_text() method.
@param name: the owner name of the rdatasets
@type name: dns.name.Name object
@rtype: string
"""
s = StringIO.StringIO()
for rds in self.rdatasets:
print >> s, rds.to_text(name, **kw)
return s.getvalue()[:-1]
def __repr__(self):
return '<DNS node ' + str(id(self)) + '>'
def __eq__(self, other):
"""Two nodes are equal if they have the same rdatasets.
@rtype: bool
"""
#
# This is inefficient. Good thing we don't need to do it much.
#
for rd in self.rdatasets:
if rd not in other.rdatasets:
return False
for rd in other.rdatasets:
if rd not in self.rdatasets:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.rdatasets)
def __iter__(self):
return iter(self.rdatasets)
def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Find an rdataset matching the specified properties in the
current node.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@raises KeyError: An rdataset of the desired type and class does
not exist and I{create} is not True.
@rtype: dns.rdataset.Rdataset object
"""
for rds in self.rdatasets:
if rds.match(rdclass, rdtype, covers):
return rds
if not create:
raise KeyError
rds = dns.rdataset.Rdataset(rdclass, rdtype)
self.rdatasets.append(rds)
return rds
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and I{create} is not True.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@rtype: dns.rdataset.Rdataset object or None
"""
try:
rds = self.find_rdataset(rdclass, rdtype, covers, create)
except KeyError:
rds = None
return rds
def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching the specified properties in the
current node.
If a matching rdataset does not exist, it is not an error.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
"""
rds = self.get_rdataset(rdclass, rdtype, covers)
        if rds is not None:
self.rdatasets.remove(rds)
def replace_rdataset(self, replacement):
"""Replace an rdataset.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the node;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
"""
self.delete_rdataset(replacement.rdclass, replacement.rdtype,
replacement.covers)
self.rdatasets.append(replacement)
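# --- Illustrative usage sketch (added for clarity; not part of the original
# dnspython module). It relies only on the public Node API defined above and
# on dns.rdataclass / dns.rdatatype, which ship with dnspython.
def _example_node_usage():
    import dns.rdataclass
    node = Node()
    # create an A rdataset on demand, then look it up again
    rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.A, create=True)
    same = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
    assert same is rds
    # deleting a missing rdataset is not an error; deleting this one empties the node
    node.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
    return len(node)  # 0 after deletion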
| apache-2.0 |
analysiscenter/dataset | batchflow/opensets/base.py | 1 | 1668 | """ Contains the base class for open datasets """
from .. import Dataset, DatasetIndex
from .. import ImagesBatch
class Openset(Dataset):
""" The base class for open datasets """
def __init__(self, index=None, batch_class=None, path=None, preloaded=None, **kwargs):
self._train_index, self._test_index = None, None
if index is None:
preloaded, index, self._train_index, self._test_index = self.download(path=path)
super().__init__(index, batch_class=batch_class, preloaded=preloaded, **kwargs)
if self._train_index and self._test_index:
self.train = type(self).from_dataset(self, self._train_index, batch_class=batch_class, **kwargs)
self.test = type(self).from_dataset(self, self._test_index, batch_class=batch_class, **kwargs)
@staticmethod
    def build_index(index):
""" Create an index """
if index is not None:
return super().build_index(index)
return None
def download(self, path):
""" Download a dataset from the source web-site """
_ = path
return None
def _infer_train_test_index(self, train_len, test_len):
total_len = train_len + test_len
index = DatasetIndex(list(range(total_len)))
train_index = DatasetIndex(list(range(train_len)))
test_index = DatasetIndex(list(range(train_len, total_len)))
return index, train_index, test_index
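# --- Illustrative subclass sketch (added; not part of the original module).
# A concrete openset is expected to override download() and return a 4-tuple
# (preloaded, index, train_index, test_index), which __init__ unpacks above.
# The class name and sizes below are hypothetical demo values.
class _ToyOpenset(Openset):
    """ A tiny example openset with 80 train and 20 test items """
    def download(self, path=None):
        _ = path
        index, train_index, test_index = self._infer_train_test_index(80, 20)
        preloaded = None  # real datasets would load their arrays here
        return preloaded, index, train_index, test_index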
class ImagesOpenset(Openset):
""" The base class for open datasets with images """
def __init__(self, index=None, batch_class=ImagesBatch, *args, **kwargs):
super().__init__(index, batch_class, *args, **kwargs)
| apache-2.0 |
CompPhysics/MachineLearning | doc/src/SupportVMachines/Programs/xgcancer.py | 2 | 1413 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_validate
import scikitplot as skplt
import xgboost as xgb
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
xg_clf = xgb.XGBClassifier(max_depth = 4, n_estimators = 200)
xg_clf.fit(X_train_scaled,y_train)
y_pred = xg_clf.predict(X_test_scaled)
print("Test set accuracy with XGBoost and scaled data: {:.2f}".format(xg_clf.score(X_test_scaled,y_test)))
import scikitplot as skplt
y_pred = xg_clf.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
plt.show()
y_probas = xg_clf.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
plt.show()
xgb.plot_tree(xg_clf,num_trees=0)
plt.rcParams['figure.figsize'] = [50, 10]
plt.show()
xgb.plot_importance(xg_clf)
plt.rcParams['figure.figsize'] = [5, 5]
plt.show()
| cc0-1.0 |
garibaldu/multicauseRBM | Marcus/revrbm.py | 1 | 18072 | import math, time, sys
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rng
from scipy.special import expit as sigmoid
np.set_printoptions(precision=2)
def inverse_sigmoid(prob1):
return np.log(prob1/(1-prob1))
class RBM(object):
"""An RBM has weights, visible biases, and hidden biases.
You can either make a new one, or read an old one in from a .npz file.
An RBM can be told to train itself, given some data set of visible
patterns. eg: rbm.train(indata, learning_params). The training can
be via Contrastive Divergence, or as an auto-encoder.
It can save itself in pickled form.
It can make pretty pics of itself.
"""
def __init__(self, filename, num_hid=0, num_vis=0, DROPOUT=True, hid_type='logistic'):
f = open(filename+'.txt', 'a')
if (num_hid>0 and num_vis>0):
self.name = filename
self.num_hid = num_hid
self.num_vis = num_vis
rng.seed(99)
self.W = np.asarray( 0.1*rng.normal(size = (num_hid, num_vis)),order= 'fortran' )
self.hid_bias = 0.001 * rng.normal(size = (1, num_hid))
self.vis_bias = 0.001 * rng.normal(size = (1, num_vis))
else:
print('Reading in pickled RBM from %s' % (filename))
f.write('Reading in pickled RBM from %s' % (filename))
# read in from a saved npz file
if not(filename.endswith('.npz')):
filename = filename + '.npz'
with np.load('./saved_nets/' + filename) as data:
self.name = filename[:-4]
self.W = data['W']
self.hid_bias = data['hid_bias']
self.vis_bias = data['vis_bias']
self.num_hid = self.W.shape[0]
self.num_vis = self.W.shape[1]
print('NAME: %s, is an RBM with %d hids and %d vis' % (self.name, self.num_hid, self.num_vis))
f.write('NAME: %s, is an RBM with %d hids and %d vis' % (self.name, self.num_hid, self.num_vis))
self.DROPOUT = DROPOUT
self.hid_type = hid_type
self.vis_type = 'linear'
print ('dropout is %s, hidden units of type %s' %(self.DROPOUT, self.hid_type))
f.write('dropout is %s, hidden units of type %s' %(self.DROPOUT, self.hid_type))
f.close()
def rename(self, newname):
""" give the RBM a new name """
self.name = newname
def pushup(self, vis_pats, noise=True):
""" push visible pats into hidden layer"""
psi = np.dot(vis_pats, self.W.T) + self.hid_bias
if self.hid_type == 'logistic': # BERNOULLI units
hid_prob1 = sigmoid(psi)
if noise == False:
return hid_prob1
elif noise == True:
return 1*(hid_prob1 > rng.random(size=hid_prob1.shape))
elif self.hid_type == 'relu': # ReLU units
if noise == True:
psi = psi + rng.normal(0.,0.001+np.sqrt(sigmoid(psi)), size=psi.shape)
return np.maximum(0.0, psi)
def pushdown(self, hid_pats, noise=True):
""" push hidden pats into visible """
if self.DROPOUT:
dropout = rng.randint(2, size=(hid_pats.shape[0], self.num_hid))
vis = np.dot(hid_pats*dropout, self.W) + self.vis_bias
else:
FACTOR = 0.5
vis = np.dot(hid_pats, FACTOR*self.W) + self.vis_bias
if noise == True:
vis += 0.5*rng.normal(size = (self.num_vis))
return vis
# OR....
#return 1*(vis_prob1 > rng.random(size=vis_prob1.shape))
def explainaway(self, other_phi, my_h, v):
psi = np.dot(v - other_phi, self.W.T) + self.hid_bias
#psi += (my_h - 0.5)*np.sum(self.W**2,axis=1)
return psi
def train(self, indata, num_iterations, Loss, rate, momentum, L1_penalty, minibatch_size):
"""
Train the RBM's weights on the supplied data, and (optionally) use dropout.
Loss can be CD or AE (contrastive divergence or auto-encoder).
"""
f = open(self.name+'.txt', 'a')
print('training with Loss %s and L1 penalty %.6f' % (Loss, L1_penalty))
print('rate %.5f, momentum %.2f, minibatches of %d' % (rate, momentum, minibatch_size))
f.write('training with Loss %s and L1 penalty %.6f' % (Loss, L1_penalty))
f.write('rate %.5f, momentum %.2f, minibatches of %d' % (rate, momentum, minibatch_size))
announce_every = num_iterations / 5
start = time.time()
num_pats = indata.shape[0]
W_change = 0.0
hid_bias_change = 0.0
vis_bias_change = 0.0
for t in range(num_iterations+1):
outputs = self.pushdown(self.pushup(indata,noise=False),noise=False)
C = 0.5*np.sum((outputs - indata)**2)
if (t % announce_every == 0):
print ('Iteration %5d \t TIME (secs): %.1f, RMSreconstruction: %.1f' % (t, time.time() - start, C/num_pats))
f.write('Iteration %5d \t TIME (secs): %.1f, RMSreconstruction: %.1f' % (t, time.time() - start, C/num_pats))
start_index = 0
C = 0.0
######## training loop starts
while start_index < num_pats-1:
next_index = min(start_index + minibatch_size, num_pats)
vis_minibatch = indata[start_index : next_index]
ndata = np.shape(vis_minibatch)[0] # how many in this minibatch
start_index = next_index # ready for next time
if Loss == 'CD':
W_grad, hid_bias_grad = self.CD_gradient(vis_minibatch, CD_steps=2)
elif Loss == 'AE':
W_grad, hid_bias_grad = self.autoencoder_gradient(vis_minibatch)
W_change = rate * W_grad + momentum * W_change
self.W += W_change - L1_penalty * np.sign(self.W)
# Now we have to do the visible and hidden bias weights as well.
hid_bias_change = rate * hid_bias_grad + momentum * hid_bias_change
self.hid_bias += hid_bias_change
# IGNORING VISIBLE BIASES STILL???????????????
# self.vis_bias_change = rate * (vis_minibatch.mean(0) - vis_reconstruction.mean(0)) + momentum * self.vis_bias_change
# self.vis_bias += self.vis_bias_change
######## training loop ends
f.close()
return
def CD_gradient(self, inputs, CD_steps=1):
"""This RBM, with this data, can calculate the gradient for its
weights and biases under the CD loss. So do it...
"""
ndata = np.shape(inputs)[0]
assert (CD_steps > 0)
# WAKE PHASE followed by HEBB
# push visible pats into hidden
hid_first = self.pushup(inputs)
# (Einstein alternative suggested by Paul Mathews)
Hebb = np.einsum('ij,ik->jk', hid_first, inputs)
# SLEEP PHASE followed by HEBB
hiddens = hid_first
for step in range(CD_steps):
# push hidden pats into visible
vis_reconstruction = self.pushdown(hiddens, noise=True)
# push reconstructed visible pats back into hidden
hiddens = self.pushup(vis_reconstruction, noise=True)
# the final step is noiseless.
vis_reconstruction = self.pushdown(hiddens, noise=False)
# push reconstructed visible pats back into hidden
hiddens = self.pushup(vis_reconstruction, noise=False)
hid_second = hiddens
# push hidden pats into visible
vis_reconstruction = self.pushdown(hid_first, noise=False)
# push reconstructed visible pats back into hidden
hid_second = self.pushup(vis_reconstruction)
AntiHebb = np.einsum('ij,ik->jk', hid_second, vis_reconstruction)
weights_gradient = (Hebb - AntiHebb)/ndata
hid_bias_gradient = hid_first.mean(0) - hid_second.mean(0)
return weights_gradient, hid_bias_gradient
def autoencoder_gradient(self,inputs):
"""This RBM, with this data, can calculate the gradient for its
weights and biases under the auto-encoder loss. So do it...
"""
ndata = np.shape(inputs)[0]
targets = inputs # it's an autoencoder...
hiddens = self.pushup(inputs, noise=False)
outputs = self.pushdown(hiddens, noise=False)
out_type = 'linear'
# Different types of output neurons
if self.vis_type == 'linear':
deltao = (outputs-targets)
elif self.vis_type == 'logistic':
deltao = (outputs-targets)*outputs*(1.0-outputs)
elif self.vis_type == 'softmax':
            deltao = (outputs-targets)*(outputs*(-outputs)+outputs)  # diagonal softmax Jacobian term, i.e. (outputs-targets)*outputs*(1-outputs)
else:
print ("bogus vis_type")
if self.hid_type == 'linear':
deltah = np.dot(deltao,np.transpose(self.W))
elif self.hid_type == 'relu':
deltah = np.maximum(0,np.sign(hiddens)) * (np.dot(deltao,np.transpose(self.W)))
elif self.hid_type == 'logistic':
deltah = hiddens*(1.0-hiddens)*(np.dot(deltao,np.transpose(self.W)))
else:
print ("bogus hid_type")
w1_gradient = (np.dot(np.transpose(inputs),deltah))
w2_gradient = (np.dot(np.transpose(hiddens),deltao))
weights_gradient = 0.5*(w1_gradient.T + w2_gradient) / ndata
hid_bias_gradient = np.sum(deltah,0) / ndata
#error = 0.5*np.sum((outputs-inputs)**2) / ndata
#print ("\t\t error: %.1f" % error)
return -1.0 * weights_gradient, -1.* hid_bias_gradient
def get_num_vis(self):
return self.num_vis
def get_num_hid(self):
return self.num_hid
def make_weights_figure(self):
"""
reality-check by looking at the weights, and their updates, for some particular hidden units.
"""
plt.clf()
rows, cols = 7, 8
i=0
maxw = np.max(np.abs(self.W))
for r in range(rows):
for c in range(cols):
i += 1
plt.subplot(rows,cols,i)
if (i == 1): # the very first one will be the bias weights
img = self.vis_bias.reshape(28,28)
plt.text(0,-2,'bias', fontsize=8, color='red')
else:
j = i % self.num_hid #rng.randint(self.num_hid) # random choice of hidden node
img = self.W[j].reshape(28,28)
plt.text(0,-2,'h%d %.1f, %.1f' %(j, np.min(self.W[j]), np.max(self.W[j])), fontsize=8)
plt.imshow(img, interpolation='nearest',cmap='RdBu', vmin=-maxw, vmax=maxw)
# setting vmin and vmax there ensures zero weights aren't coloured.
plt.axis('off')
filename = '%s_weights.png' % (self.name)
plt.savefig(filename)
print('Saved figure named %s' % (filename))
def show_patterns(self, vis_pats):
num_pats = vis_pats.shape[0]
num_rows, num_cols = 7, 10
num_examples = num_rows*num_cols + 1
Vis_test = np.copy(vis_pats[rng.randint(0, num_pats, size=(num_examples)), :])
i = 0
plt.clf()
for r in range(num_rows):
for c in range(num_cols):
i += 1
plt.subplot(num_rows,num_cols,i)
plt.imshow(Vis_test[i].reshape(28,28), cmap='Greys', vmin=-1., vmax=1., interpolation='nearest')
plt.axis('off')
filename = '%s_visibles.png' % (self.name)
plt.savefig(filename)
print('Saved %s' % (filename))
def make_dynamics_figure(self, indata, SCRAMBLE=False):
if SCRAMBLE: # initialise with completely scrambled training pics.
safe = np.copy(indata)
num_pixels = indata.shape[1]
for i in range(indata.shape[0]):
img = safe[i]
rand_order = rng.permutation(np.arange(num_pixels))
indata[i] = img[rand_order]
## WATCH OUT FOR CONSEQUENCES!!!
# self.DROPOUT = True
### WATCH OUT FOR CONSEQUENCES!!!
num_pats = indata.shape[0]
num_examples = 5
eg_indices = rng.randint(0, num_pats, size=(num_examples))
Vis_test = np.copy(indata[eg_indices, :])
i = 0
next_stop = 0
num_rows = 6
plt.clf()
total_time = 0
for s in range(num_rows):
#print('doing alternating Gibbs Sampling until t=%d' % (next_stop))
while total_time < next_stop:
hid = self.pushup(Vis_test, noise=True)
Vis_test = self.pushdown(hid, noise=False)
total_time += 1
for n in range(num_examples):
i += 1
plt.subplot(num_rows,num_examples,i)
plt.imshow(Vis_test[n].reshape(28,28), cmap='Greys',interpolation='nearest') #, vmin=-1., vmax=1.,
plt.axis('off')
plt.text(0,-2,'t=%d %.1f, %.1f' %(total_time, np.min(Vis_test[n]), np.max(Vis_test[n])), fontsize=8)
next_stop = 2**s #max(1, next_stop) * 2 # wait X times longer each time before showing the next sample.
filename = '%s_gibbs_chains.png' % (self.name)
plt.savefig(filename)
print('Saved %s' % (filename))
def save_as_pickle(self, annotation=''):
"""
Save this RBM's weights and biases.
"""
name = self.name
if len(annotation)>0:
name = name + annotation
filename = './saved_nets/%s.npz' % (name)
np.savez(filename, W=self.W, hid_bias=self.hid_bias, vis_bias=self.vis_bias)
print('Saved the pickle of %s' % (filename))
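# --- Illustrative usage sketch (added; not part of the original script).
# Assumes ./datasets/<digit>.npy files exist (see load_mnist_digit below) and
# that a ./saved_nets/ directory is present for pickling. Hyper-parameters are
# arbitrary demo values.
def _example_train_rbm():
    data = load_mnist_digits(digits=[0, 1], dataset_size=500)
    rbm = RBM('demo_rbm', num_hid=64, num_vis=data.shape[1])
    rbm.train(data, num_iterations=50, Loss='CD', rate=0.01,
              momentum=0.5, L1_penalty=1e-5, minibatch_size=20)
    rbm.make_weights_figure()
    rbm.save_as_pickle()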
def random_visibles_for_rbm(rbm, num_items):
return np.random.randint(0,2,(num_items, rbm.get_num_vis()))
def random_hiddens_for_rbm(rbm, num_items):
return np.random.randint(0,2,(num_items, rbm.get_num_hid()))
def random_hidden_for_rbm(rbm):
return np.random.randint(0,2,rbm.get_num_hid())
def weights_into_hiddens(weights):
num_vis = math.sqrt(weights.shape[1])
return weights.reshape(weights.shape[0],num_vis, num_vis)
def load_mnist_digits(digits, dataset_size):
vis_train_pats = flatten_dataset(load_mnist_digit(digits[0],dataset_size))
for i in digits[1:]:
vis_train_pats = np.vstack((vis_train_pats, flatten_dataset(load_mnist_digit(i,dataset_size))))
# Now scramble the order.
num_pats = vis_train_pats.shape[0]
rand_order = rng.permutation(np.arange(num_pats))
vis_train_pats = vis_train_pats[rand_order]
# THE FOLLOWING WRITES LIST OF DIGIT IMAGES AS A CSV TO A PLAIN TXT FILE
# np.savetxt(fname='mnist_digits.txt', X=vis_train_pats, fmt='%.2f', delimiter=',')
    vis_train_pats = vis_train_pats*2.0 - 1.0 # rescale pixel values (e.g. [0,1] -> [-1,1])
vis_train_pats *= 2.0 # 0.5
print('visibles range from %.2f to %.2f' % (vis_train_pats.min(), vis_train_pats.max()))
return vis_train_pats
def generate_smooth_bkgd(dataset_size):
bkgd_imgs = np.ones((dataset_size, 28, 28), dtype=float)
x = np.linspace(-1.0,1.0,28)
y = np.linspace(-1.0,1.0,28)
X, Y = np.meshgrid(x,y)
print('dataset size: ', dataset_size)
for i in range(dataset_size):
xslope, yslope = .5*(2.*rng.rand()-1.), .5*(2*rng.rand()-1.)
intercept = 0.5*(2.*rng.rand()-1.)
img = xslope*X + yslope*Y + intercept
img = img - np.min(np.ravel(img))
img = img / np.max(np.ravel(img))
bkgd_imgs[i] = 2*img - 1.0
vis_train_pats = flatten_dataset(bkgd_imgs)
#print('vis_train_pats shape is ', vis_train_pats.shape)
print('gradient visibles range from %.2f to %.2f' % (vis_train_pats.min(), vis_train_pats.max()))
return vis_train_pats
def load_mnist_digit(digit, dataset_size):
assert(digit >= 0 and digit < 10)
with open("datasets/{}.npy".format(digit),'rb') as f:
return np.load(f)[:dataset_size]
def flatten_dataset(images):
smushed = images.copy()
return smushed.reshape((smushed.shape[0], -1))
def show_example_images(pats, filename='examples.png'):
rows = 6
cols = 6
i=0
plt.clf()
for r in range(rows):
for c in range(cols):
plt.subplot(rows,cols,i+1)
j = r+rows*c # rng.randint(0,len(pats))
plt.imshow(pats[j].reshape(28,28), cmap='Greys', interpolation='nearest', vmin=-1.0, vmax=1.0)
maxval = pats[j].max()
minval = pats[j].min()
plt.text(0,0,"%.1f, %.1f" %(minval, maxval), fontsize=7, color='b')
plt.axis('off')
i += 1
plt.savefig(filename)
print('Saved figure named %s' % (filename))
def make_2layer_dynamics_figure(L1, L2):
## WATCH OUT FOR CONSEQUENCES!!!
L1.DROPOUT = False
L2.DROPOUT = False
### WATCH OUT FOR CONSEQUENCES!!!
num_examples = 5
mid_pats = random_visibles_for_rbm(L2, num_examples)
i = 0
next_stop = 0
num_rows = 6
plt.clf()
total_time = 0
for s in range(num_rows):
print ('alternating Gibbs to iter %d' % (next_stop))
while total_time < next_stop:
top_pats = L2.pushup(mid_pats)
mid_pats = L2.pushdown(top_pats)
total_time += 1
vis_pats = L1.pushdown(mid_pats)
for n in range(num_examples):
i += 1
plt.subplot(num_rows,num_examples,i)
plt.imshow(vis_pats[n].reshape(28,28), cmap='Greys', vmin=-1., vmax=1., interpolation='nearest')
plt.axis('off')
plt.text(0,-2,'iter %d' %(total_time), fontsize=8)
next_stop = max(1, next_stop) * 5 # wait X times longer each time before showing the next sample.
filename = '%s_2layer_chains.png' % (L2.name)
plt.savefig(filename)
print('Saved %s' % (filename))
| mit |
Clyde-fare/scikit-learn | sklearn/feature_selection/tests/test_base.py | 141 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 233 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
google-research/smore | smore/cpp_sampler/online_sampler.py | 1 | 11310 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import ctypes
import numpy as np
import torch
from smore.cpp_sampler import libsampler
from smore.cpp_sampler import sampler_clib
from smore.common.util import name_query_dict
from collections import defaultdict
from tqdm import tqdm
def is_all_relation(query_structure):
for ele in query_structure[-1]:
if ele not in ['r', 'n']:
return False
return True
def has_negation(query_structure):
for ele in query_structure[-1]:
if ele == 'n':
return True
return False
def build_query_tree(query_structure, fn_qt_create):
if is_all_relation(query_structure):
assert len(query_structure) == 2
if query_structure[0] == 'e':
prev_node = fn_qt_create(libsampler.entity)
else:
prev_node = build_query_tree(query_structure[0], fn_qt_create)
for i, c in enumerate(query_structure[-1]):
if c == 'r':
cur_op = libsampler.relation
else:
assert c == 'n'
cur_op = libsampler.negation
cur_root = fn_qt_create(libsampler.entity_set)
cur_root.add_child(cur_op, prev_node)
prev_node = cur_root
return cur_root
else:
last_qt = query_structure[-1]
node_type = libsampler.intersect
if len(last_qt) == 1 and last_qt[0] == 'u':
node_type = libsampler.union
query_structure = query_structure[:-1]
sub_root = fn_qt_create(node_type)
for c in query_structure:
ch_node = build_query_tree(c, fn_qt_create)
sub_root.add_child(libsampler.no_op, ch_node)
return sub_root
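# --- Illustrative sketch (added; not part of the original file). Query
# structures follow the nested-tuple convention handled above, e.g. a two-hop
# path query can be written ('e', ('r', 'r')) and a negated step appears as
# 'n' in the relation list; the named structures used elsewhere come from
# smore.common.util.name_query_dict.
def _example_build_qt(kg_dtype='uint32'):
    fn_qt_create = libsampler.create_qt32 if kg_dtype == 'uint32' else libsampler.create_qt64
    qt = build_query_tree(('e', ('r', 'r')), fn_qt_create)
    return qt.get_num_args()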
class OnlineSampler(object):
def __init__(self, kg, query_names, negative_sample_size,
sample_mode, normalized_structure_prob, sampler_type='naive',
share_negative=False, same_in_batch=False,
weighted_answer_sampling=False, weighted_negative_sampling=False,
nprefetch=10, num_threads=8):
self.kg = kg
kg_dtype = kg.dtype
fn_qt_create = libsampler.create_qt32 if kg_dtype == 'uint32' else libsampler.create_qt64
query_structures = [name_query_dict[task] for task in query_names]
self.query_structures = query_structures
self.normalized_structure_prob = normalized_structure_prob
assert len(normalized_structure_prob) == len(query_structures)
self.negative_sample_size = negative_sample_size
self.share_negative = share_negative
self.same_in_batch = same_in_batch
self.nprefetch = nprefetch
if len(sample_mode) == 5:
self.rel_bandwidth, self.max_to_keep, self.weighted_style, self.structure_weighted_style, self.max_n_partial_answers = sample_mode
self.weighted_ans_sample = False
self.weighted_neg_sample = False
else:
self.rel_bandwidth, self.max_to_keep, self.weighted_style, self.structure_weighted_style, self.max_n_partial_answers, self.weighted_ans_sample, self.weighted_neg_sample = sample_mode
if self.rel_bandwidth <= 0:
self.rel_bandwidth = kg.num_ent
if self.max_to_keep <= 0:
self.max_to_keep = kg.num_ent
if self.max_n_partial_answers <= 0:
self.max_n_partial_answers = kg.num_ent
if self.structure_weighted_style == 'wstruct':
assert self.normalized_structure_prob is not None
list_qt = []
list_qt_nargs = []
for qs in query_structures:
if qs[0] == '<': # inverse query
assert is_all_relation(qs[1]) and not has_negation(qs[1])
qt = build_query_tree(qs[1], fn_qt_create)
qt.is_inverse = True
else:
qt = build_query_tree(qs, fn_qt_create)
list_qt.append(qt)
list_qt_nargs.append(qt.get_num_args())
self.list_qt = list_qt
self.list_qt_nargs = list_qt_nargs
self.max_num_args = max(list_qt_nargs)
no_search_list = []
if sampler_type == 'naive':
sampler_cls = sampler_clib.naive_sampler(kg_dtype)
elif sampler_type.startswith('sqrt'):
sampler_cls = sampler_clib.rejection_sampler(kg_dtype)
if '-' in sampler_type:
no_search_list = [int(x) for x in sampler_type.split('-')[1].split('.')]
elif sampler_type == 'nosearch':
sampler_cls = sampler_clib.no_search_sampler(kg_dtype)
elif sampler_type == 'edge':
sampler_cls = sampler_clib.edge_sampler(kg_dtype)
list_qt = query_names
else:
raise ValueError("Unknown sampler %s" % sampler_type)
self.sampler_type = sampler_type
self.sampler = sampler_cls(kg, list_qt, normalized_structure_prob, self.share_negative, self.same_in_batch,
self.weighted_ans_sample, self.weighted_neg_sample,
negative_sample_size, self.rel_bandwidth, self.max_to_keep, self.max_n_partial_answers, num_threads, no_search_list)
def print_queries(self):
self.sampler.print_queries()
def sample_entities(self, weighted, num):
entities = torch.LongTensor(num)
self.sampler.sample_batch_entities(weighted, num, entities.numpy())
return entities
def set_seed(self, seed):
self.sampler.set_seed(seed)
def batch_generator(self, batch_size):
self.sampler.prefetch(batch_size, self.nprefetch)
        uniform_weights = torch.ones(batch_size)
list_buffer = []
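        # Double-buffering: two sets of result tensors are pre-allocated below so
        # that while the batch held in one buffer is being yielded and consumed,
        # the C++ sampler can fill the other one via next_batch(); buf_idx flips
        # between 0 and 1 on every loop iteration.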
for i in range(2):
t_pos_ans = torch.LongTensor(batch_size)
if self.share_negative:
t_neg_ans = torch.LongTensor(1, self.negative_sample_size)
t_is_neg_mat = torch.FloatTensor(batch_size, self.negative_sample_size)
else:
t_neg_ans = torch.LongTensor(batch_size, self.negative_sample_size)
t_is_neg_mat = torch.FloatTensor(1, 2)
t_weights = torch.FloatTensor(batch_size)
t_arg_buffer = torch.LongTensor(batch_size, self.max_num_args)
list_buffer.append((t_pos_ans, t_neg_ans, t_is_neg_mat, t_weights, t_arg_buffer))
buf_idx = 0
pos_ans, neg_ans, is_neg_mat, weights, arg_buffer = list_buffer[buf_idx]
q_type = self.sampler.next_batch(pos_ans.numpy(), neg_ans.numpy(), weights.numpy(), is_neg_mat.numpy(),
arg_buffer.numpy())
while True:
next_buf_idx = 1 - buf_idx
next_pos_ans, next_neg_ans, next_is_neg_mat, next_weights, next_arg_buffer = list_buffer[next_buf_idx]
next_q_type = self.sampler.next_batch(next_pos_ans.numpy(), next_neg_ans.numpy(),
next_weights.numpy(), next_is_neg_mat.numpy(),
next_arg_buffer.numpy())
            pos_ans, neg_ans, is_neg_mat, weights, arg_buffer = list_buffer[buf_idx]
            if self.weighted_style == 'u':
                weights = uniform_weights
q_args = arg_buffer[:, :self.list_qt_nargs[q_type]]
q_structs = [self.query_structures[q_type]] * batch_size
if self.sampler_type == 'edge':
is_neg_mat = None
yield pos_ans, neg_ans, is_neg_mat if self.share_negative else None, weights, q_args, q_structs
q_type = next_q_type
buf_idx = 1 - buf_idx
def has_negation(st):
if isinstance(st, tuple):
for c in st:
if has_negation(c):
return True
else:
assert isinstance(st, str)
return st == 'n'
return False
def print_qt(qt, g, idx):
import graphviz
node_type = str(qt.node_type).split('.')[-1]
root_idx = str(idx)
color = '#CCCCFF' if qt.sqrt_middle else '#FFFFFF'
g.node(root_idx, node_type, fillcolor=color)
idx += 1
ch_list = []
qt.get_children(ch_list)
for ch in ch_list:
ch_idx = idx
idx = print_qt(ch, g, ch_idx)
l = str(ch.parent_edge).split('.')[-1]
if l == 'no_op':
l = ''
s = 'solid'
if l == 'negation':
s = 'dashed'
g.edge(root_idx, str(ch_idx), label=l, style=s)
return idx
if __name__ == '__main__':
import time
db_name = 'FB15k'
data_folder = os.path.join(os.path.expanduser('~'), 'data/knowledge_graphs/%s' % db_name)
with open(os.path.join(data_folder, 'stats.txt'), 'r') as f:
num_ent = f.readline().strip().split()[-1]
num_rel = f.readline().strip().split()[-1]
num_ent, num_rel = int(num_ent), int(num_rel)
kg = libsampler.KG32(num_ent, num_rel)
kg.load(data_folder + '/train_bidir.bin')
print('num ent', kg.num_ent)
print('num rel', kg.num_rel)
print('num edges', kg.num_edges)
sampler_type = 'naive'
query_structures = ["1p"]
negative_sample_size = 256
sample_mode = (0, 0, 'u', 'u', 0, True, False)
sampler = OnlineSampler(kg, query_structures, negative_sample_size, sample_mode, [1.0 / len(query_structures)] * len(query_structures),
sampler_type=sampler_type,
share_negative=True,
same_in_batch=True,
num_threads=1)
batch_gen = sampler.batch_generator(10)
idx = 0
for pos_ans, neg_ans, is_neg_mat, weights, q_args, q_structs in tqdm(batch_gen):
idx += 1
# if idx > 10:
# break
# for i, qt in enumerate(sampler.list_qt):
# g = graphviz.Digraph()
# g.node_attr['style']='filled'
# print_qt(qt, g, 0)
# g.render('graph-%d' % i)
# log_file = open('%s/%s-%d.txt' % (db_name, sampler_type, bd), 'w')
# samplers = [None] * len(all_query_structures)
# for i in range(len(all_query_structures)):
# query_structures = all_query_structures[i:i+1]
# samplers[i] = OnlineSampler(kg, query_structures, negative_sample_size, sample_mode, [1.0 / len(query_structures)] * len(query_structures),
# sampler_type=sampler_type,
# num_threads=8)
# sampler = samplers[i]
# batch_gen = sampler.batch_generator(1024)
# t = time.time()
# idx = 0
# for pos_ans, weights, q_args, q_structs, neg_ans in batch_gen:
# idx += 1
# if idx > 10:
# break
# log_file.write('%d %.4f\n' % (i, (time.time() - t) / 10))
# log_file.close()
| apache-2.0 |
analysiscenter/dataset | batchflow/models/torch/encoder_decoder.py | 1 | 19679 | """ Encoder, decoder, encoder-decoder architectures. """
from collections import OrderedDict
import torch
import torch.nn as nn
from .base import TorchModel
from .utils import get_shape
from .layers import ConvBlock, Upsample, Combine, Crop
from .blocks import DefaultBlock
from ..utils import unpack_args
class EncoderModule(nn.ModuleDict):
""" Encoder: create compressed representation of an input by reducing its spatial dimensions. """
def __init__(self, inputs=None, return_all=True, **kwargs):
super().__init__()
self.return_all = return_all
self._make_modules(inputs, **kwargs)
def forward(self, x):
outputs = []
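        # self.layout (built in _make_modules) has one letter per stored module:
        # 'b' (block) and 'd'/'p' (downsample) are applied to x, while 's' (skip)
        # records the current tensor so a decoder can reuse it later.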
for letter, layer in zip(self.layout, self.values()):
if letter in ['b', 'd', 'p']:
x = layer(x)
elif letter in ['s']:
outputs.append(x)
outputs.append(x)
if self.return_all:
return outputs
return outputs[-1]
def _make_modules(self, inputs, **kwargs):
num_stages = kwargs.pop('num_stages')
encoder_layout = ''.join([item[0] for item in kwargs.pop('order')])
block_args = kwargs.pop('blocks')
downsample_args = kwargs.pop('downsample')
self.layout = ''
for i in range(num_stages):
for letter in encoder_layout:
if letter in ['b']:
args = {**kwargs, **block_args, **unpack_args(block_args, i, num_stages)}
layer = ConvBlock(inputs=inputs, **args)
inputs = layer(inputs)
layer_desc = 'block-{}'.format(i)
elif letter in ['d', 'p']:
args = {**kwargs, **downsample_args, **unpack_args(downsample_args, i, num_stages)}
layer = ConvBlock(inputs=inputs, **args)
inputs = layer(inputs)
layer_desc = 'downsample-{}'.format(i)
elif letter in ['s']:
layer = nn.Identity()
layer_desc = 'skip-{}'.format(i)
else:
raise ValueError('Unknown letter in order {}, use one of "b", "d", "p", "s"'
.format(letter))
self.update([(layer_desc, layer)])
self.layout += letter
class EmbeddingModule(nn.Module):
""" Embedding: thorough processing of an input tensor. """
def __init__(self, inputs=None, **kwargs):
super().__init__()
inputs = inputs[-1] if isinstance(inputs, list) else inputs
self.embedding = ConvBlock(inputs=inputs, **kwargs)
def forward(self, x):
inputs = x if isinstance(x, list) else [x]
x = inputs[-1]
inputs.append(self.embedding(x))
return inputs
class DecoderModule(nn.ModuleDict):
""" Decoder: increasing spatial dimensions. """
def __init__(self, inputs=None, **kwargs):
super().__init__()
self._make_modules(inputs, **kwargs)
def forward(self, x):
inputs = x if isinstance(x, list) else [x]
x = inputs[-1]
i = 0
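        # `inputs` holds the tensors saved at the encoder's 's' (skip) steps, then
        # the final encoder output, then the embedding (appended last). Combine
        # layers consume the saved skips in reverse order via inputs[-i - 3].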
for letter, layer in zip(self.layout, self.values()):
if letter in ['b', 'u']:
x = layer(x)
elif letter in ['c'] and self.skip and (i < len(inputs) - 2):
x = layer([x, inputs[-i - 3]])
i += 1
return x
def _make_modules(self, inputs, **kwargs):
inputs = inputs if isinstance(inputs, list) else [inputs]
x = inputs[-1]
num_stages = kwargs.pop('num_stages') or len(inputs) - 2
decoder_layout = ''.join([item[0] for item in kwargs.pop('order')])
self.skip = kwargs.pop('skip')
factor = kwargs.pop('factor') or [2]*num_stages
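        # A single int is treated as the *total* upsampling factor over all stages,
        # e.g. factor=32 with num_stages=5 becomes int(32 ** (1 / 5)) == 2 per
        # stage, i.e. [2, 2, 2, 2, 2].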
if isinstance(factor, int):
factor = int(factor ** (1/num_stages))
factor = [factor] * num_stages
elif not isinstance(factor, list):
raise TypeError('factor should be int or list of int, but %s was given' % type(factor))
block_args = kwargs.pop('blocks')
upsample_args = kwargs.pop('upsample')
combine_args = kwargs.pop('combine')
self.layout = ''
for i in range(num_stages):
for letter in decoder_layout:
if letter in ['b']:
args = {**kwargs, **block_args, **unpack_args(block_args, i, num_stages)}
layer = ConvBlock(inputs=x, **args)
x = layer(x)
layer_desc = 'block-{}'.format(i)
elif letter in ['u']:
args = {'factor': factor[i],
**kwargs, **upsample_args, **unpack_args(upsample_args, i, num_stages)}
layer = Upsample(inputs=x, **args)
x = layer(x)
layer_desc = 'upsample-{}'.format(i)
elif letter in ['c']:
if self.skip and (i < len(inputs) - 2):
args = {'factor': factor[i],
**kwargs, **combine_args, **unpack_args(combine_args, i, num_stages)}
layer = Combine(inputs=[x, inputs[-i - 3]], **args)
x = layer([x, inputs[-i - 3]])
layer_desc = 'combine-{}'.format(i)
else:
raise ValueError('Unknown letter in order {}, use one of ("b", "u", "c")'.format(letter))
self.update([(layer_desc, layer)])
self.layout += letter
class Encoder(TorchModel):
""" Encoder architecture. Allows to combine blocks from different models,
e.g. ResNet and DenseNet, in order to create new ones with just a few lines of code.
Intended to be used for classification tasks.
Parameters
----------
body : dict
encoder : dict, optional
num_stages : int
Number of downsampling stages.
order : str, sequence of str
Determines order of applying layers.
If str, then each letter stands for operation:
'b' for 'block', 'd'/'p' for 'downsampling', 's' for 'skip'.
        If sequence, then the first letter of each item stands for operation:
            For example, `'sbd'` stands for skip connection -> block -> downsampling.
downsample : dict, optional
Parameters for downsampling (see :class:`~.layers.ConvBlock`)
blocks : dict, optional
Parameters for pre-processing blocks.
base : callable
Tensor processing function. Default is :class:`~.layers.ConvBlock`.
other args : dict
Parameters for the base block.
"""
@classmethod
def default_config(cls):
config = super().default_config()
config['body/encoder'] = dict(num_stages=None,
order=['skip', 'block', 'downsampling'])
config['body/encoder/downsample'] = dict(layout='p', pool_size=2, pool_strides=2)
config['body/encoder/blocks'] = dict(base=DefaultBlock)
return config
@classmethod
def body(cls, inputs, return_all=False, **kwargs):
kwargs = cls.get_defaults('body', kwargs)
encoder = kwargs.pop('encoder')
layers = [('encoder', EncoderModule(inputs=inputs, return_all=return_all, **{**kwargs, **encoder}))]
return nn.Sequential(OrderedDict(layers))
class Decoder(TorchModel):
""" Decoder architecture. Allows to combine blocks from different models,
e.g. ResNet and DenseNet, in order to create new ones with just a few lines of code.
Intended to be used for increasing spatial dimensionality of inputs.
Parameters
----------
body : dict
decoder : dict, optional
num_stages : int
Number of upsampling blocks.
factor : int or list of int
If int, the total upsampling factor for all stages combined.
If list, upsampling factors for each stage.
skip : bool, dict
            If bool, then whether to combine the upsampled tensor with the stored pre-downsample
            encoding, using `combine` parameters that can be specified for each block separately.
order : str, sequence of str
Determines order of applying layers.
If str, then each letter stands for operation:
'b' for 'block', 'u' for 'upsampling', 'c' for 'combine'
            If sequence, then the first letter of each item stands for operation.
            For example, `'ucb'` stands for upsampling -> combine -> block.
upsample : dict
Parameters for upsampling (see :class:`~.layers.Upsample`).
blocks : dict
Parameters for post-processing blocks:
base : callable
Tensor processing function. Default is :class:`~.layers.ConvBlock`.
other args : dict
Parameters for the base block.
combine : dict
If dict, then parameters for combining tensors, see :class:`~.layers.Combine`.
head : dict, optional
Parameters for the head layers, usually :class:`~.layers.ConvBlock` parameters. Note that an extra 1x1
convolution may be applied in order to make predictions compatible with the shape of the targets.
"""
@classmethod
def default_config(cls):
config = super().default_config()
config['body/decoder'] = dict(skip=True, num_stages=None, factor=None,
order=['upsampling', 'block', 'combine'])
config['body/decoder/upsample'] = dict(layout='tna')
config['body/decoder/blocks'] = dict(base=DefaultBlock)
config['body/decoder/combine'] = dict(op='concat', leading_index=1)
return config
@classmethod
def body(cls, inputs, **kwargs):
kwargs = cls.get_defaults('body', kwargs)
decoder = kwargs.pop('decoder')
layers = [('decoder', DecoderModule(inputs=inputs, **{**kwargs, **decoder}))]
return nn.Sequential(OrderedDict(layers))
@classmethod
def head(cls, inputs, target_shape, classes, **kwargs):
kwargs = cls.get_defaults('head', kwargs)
layers = []
layer = super().head(inputs, target_shape, classes, **kwargs)
if layer is not None:
inputs = layer(inputs)
layers.append(layer)
if classes:
if get_shape(inputs)[1] != classes:
layer = ConvBlock(inputs=inputs, layout='c', filters=classes, kernel_size=1)
layers.append(layer)
return nn.Sequential(*layers)
class EncoderDecoder(Decoder):
""" Encoder-decoder architecture. Allows to combine blocks from different models,
e.g. ResNet and DenseNet, in order to create new ones with just a few lines of code.
Intended to be used for segmentation tasks.
Parameters
----------
body : dict
encoder : dict, optional
num_stages : int
Number of downsampling stages.
order : str, sequence of str
Determines order of applying layers.
If str, then each letter stands for operation:
'b' for 'block', 'd'/'p' for 'downsampling', 's' for 'skip'.
                If sequence, then the first letter of each item stands for operation:
                For example, `'sbd'` stands for skip connection -> block -> downsampling.
downsample : dict, optional
Parameters for downsampling (see :class:`~.layers.ConvBlock`)
blocks : dict, optional
Parameters for pre-processing blocks.
base : callable
Tensor processing function. Default is :class:`~.layers.ConvBlock`.
other args : dict
Parameters for the base block.
embedding : dict or None, optional
            If None, no embedding block is created.
If dict, then parameters for tensor processing function.
base : callable
Tensor processing function. Default is :class:`~.layers.ConvBlock`.
other args
Parameters for the base block.
decoder : dict, optional
num_stages : int
Number of upsampling blocks.
factor : int or list of int
If int, the total upsampling factor for all stages combined.
If list, upsampling factors for each stage.
skip : bool, dict
                If bool, then whether to combine the upsampled tensor with the stored pre-downsample
                encoding, using `combine` parameters that can be specified for each block separately.
order : str, sequence of str
Determines order of applying layers.
If str, then each letter stands for operation:
'b' for 'block', 'u' for 'upsampling', 'c' for 'combine'
                If sequence, then the first letter of each item stands for operation.
                For example, `'ucb'` stands for upsampling -> combine -> block.
upsample : dict
Parameters for upsampling (see :class:`~.layers.Upsample`).
blocks : dict
Parameters for post-processing blocks:
base : callable
Tensor processing function. Default is :class:`~.layers.ConvBlock`.
other args : dict
Parameters for the base block.
combine : dict
If dict, then parameters for combining tensors, see :class:`~.layers.Combine`.
head : dict, optional
Parameters for the head layers, usually :class:`~.layers.ConvBlock` parameters. Note that an extra 1x1
convolution may be applied in order to make predictions compatible with the shape of the targets.
Examples
--------
Use ResNet as an encoder with desired number of blocks and filters in them (total downsampling factor is 4),
    create an embedding that contains 256 channels, then upsample it to get 8 times the size of the initial image.
>>> config = {
'inputs': dict(images={'shape': B('image_shape')},
masks={'name': 'targets', 'shape': B('mask_shape')}),
'initial_block/inputs': 'images',
'body/encoder': {'base': ResNet,
                         'num_blocks': [2, 3, 4],
'filters': [16, 32, 128]},
'body/embedding': {'layout': 'cna', 'filters': 256},
'body/decoder': {'num_stages': 5, 'factor': 32},
}
Preprocess input image with 7x7 convolutions, downsample it 5 times with DenseNet blocks in between,
use MobileNet block in the bottom, then restore original image size with subpixel convolutions and
ResNeXt blocks in between:
>>> config = {
'inputs': dict(images={'shape': B('image_shape')},
masks={'name': 'targets', 'shape': B('mask_shape')}),
'initial_block': {'inputs': 'images',
'layout': 'cna', 'filters': 4, 'kernel_size': 7},
'body/encoder': {'num_stages': 5,
'blocks': {'base': DenseNet.block,
'num_layers': [2, 2, 3, 4, 5],
'growth_rate': 6, 'skip': True}},
'body/embedding': {'base': MobileNet.block,
'width_factor': 2},
'body/decoder': {'upsample': {'layout': 'X'},
'blocks': {'base': ResNet.block,
'filters': [256, 128, 64, 32, 16],
'resnext': True}},
}
"""
@classmethod
def default_config(cls):
config = super().default_config()
config['body/encoder'] = dict(num_stages=None,
order=['skip', 'block', 'downsampling'])
config['body/encoder/downsample'] = dict(layout='p', pool_size=2, pool_strides=2)
config['body/encoder/blocks'] = dict(base=DefaultBlock)
config['body/embedding'] = dict(base=DefaultBlock)
config['body/decoder'] = dict(skip=True, num_stages=None, factor=None,
order=['upsampling', 'block', 'combine'])
config['body/decoder/upsample'] = dict(layout='tna')
config['body/decoder/blocks'] = dict(base=DefaultBlock)
config['body/decoder/combine'] = dict(op='concat', leading_index=1)
return config
@classmethod
def body(cls, inputs, **kwargs):
kwargs = cls.get_defaults('body', kwargs)
encoder = kwargs.pop('encoder')
embedding = kwargs.pop('embedding')
decoder = kwargs.pop('decoder')
layers = []
encoder = cls.encoder(inputs=inputs, **{**kwargs, **encoder})
encoder_outputs = encoder(inputs)
layers.append(('encoder', encoder))
if embedding is not None:
embedding = cls.embedding(inputs=encoder_outputs, **{**kwargs, **embedding})
else:
embedding = nn.Identity()
encoder_outputs = embedding(encoder_outputs)
layers.append(('embedding', embedding))
decoder = cls.decoder(inputs=encoder_outputs, **{**kwargs, **decoder})
layers.append(('decoder', decoder))
return nn.Sequential(OrderedDict(layers))
@classmethod
def encoder(cls, inputs, **kwargs):
""" Create encoder either from base model or block args. """
if 'base_model' in kwargs:
base_model = kwargs['base_model']
base_model_kwargs = kwargs.get('base_model_kwargs', {})
return base_model.body(inputs=inputs, return_all=True, encoder=base_model_kwargs).encoder
return EncoderModule(inputs=inputs, **kwargs)
@classmethod
def embedding(cls, inputs, **kwargs):
return EmbeddingModule(inputs=inputs, **kwargs)
@classmethod
def decoder(cls, inputs, **kwargs):
return DecoderModule(inputs=inputs, **kwargs)
class AutoEncoder(EncoderDecoder):
""" Model without skip-connections between corresponding stages of encoder and decoder. """
@classmethod
def default_config(cls):
config = super().default_config()
config['body/decoder'] += dict(skip=False)
return config
class VariationalBlock(nn.Module):
""" Reparametrization trick block. """
def __init__(self, inputs=None, base_mu=None, base_std=None, **kwargs):
super().__init__()
self.mean = base_mu(inputs=inputs, **kwargs)
self.std = base_std(inputs=inputs, **kwargs)
def forward(self, x):
mean = self.mean(x)
std = self.std(x)
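        # Reparametrization trick: sample eps ~ N(0, I) and return mean + std * eps,
        # keeping the sampling step differentiable w.r.t. the mean and std branches.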
return mean + std * torch.randn_like(std)
class VariationalAutoEncoder(AutoEncoder):
""" Autoencoder that maps input into distribution. Based on
Kingma, Diederik P; Welling, Max "`Auto-Encoding Variational Bayes
<https://arxiv.org/abs/1312.6114>`_"
Notes
-----
Distribution that is learned is always normal.
"""
@classmethod
def default_config(cls):
config = super().default_config()
config['body/embedding'] += dict(base=VariationalBlock,
base_mu=DefaultBlock, base_std=DefaultBlock)
return config
| apache-2.0 |
vigilv/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 259 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # checks that the verbose output is the same
    # for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
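    # log_normalize exponentiates and renormalizes, so the constant factor 2
    # (a shift of log(2) in log-space) cancels and the original v is recovered.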
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
eviljeff/olympia | src/olympia/addons/models.py | 1 | 84031 | # -*- coding: utf-8 -*-
import hashlib
import itertools
import os
import re
import time
import uuid
from datetime import datetime
from urllib.parse import urlsplit
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, transaction
from django.db.models import F, Max, Q, signals as dbsignals
from django.dispatch import receiver
from django.utils import translation
from django.utils.functional import cached_property
from django.utils.translation import trans_real, ugettext_lazy as _
from django_jsonfield_backport.models import JSONField
from django_statsd.clients import statsd
from jinja2.filters import do_dictsort
import olympia.core.logger
from olympia import activity, amo, core
from olympia.access import acl
from olympia.addons.utils import generate_addon_guid
from olympia.amo.decorators import use_primary_db
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import (
BasePreview, BaseQuerySet, FilterableManyToManyField, LongNameIndex,
ManagerBase, ModelBase, OnChangeMixin, SaveUpdateMixin, SlugField)
from olympia.amo.templatetags import jinja_helpers
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import (
StopWatch, attach_trans_dict,
find_language, send_mail, slugify, sorted_groupby, timer, to_language)
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.constants.promoted import NOT_PROMOTED, RECOMMENDED
from olympia.constants.reviewers import REPUTATION_CHOICES
from olympia.files.models import File
from olympia.files.utils import extract_translations, resolve_i18n_message
from olympia.ratings.models import Rating
from olympia.tags.models import Tag
from olympia.translations.fields import (
LinkifiedField, PurifiedField, TranslatedField, save_signal)
from olympia.translations.hold import translation_saved
from olympia.translations.models import Translation
from olympia.users.models import UserProfile
from olympia.versions.compare import version_int
from olympia.versions.models import (
Version, VersionPreview, VersionReviewerFlags, inherit_nomination)
from . import signals
log = olympia.core.logger.getLogger('z.addons')
MAX_SLUG_INCREMENT = 999
SLUG_INCREMENT_SUFFIXES = set(range(1, MAX_SLUG_INCREMENT + 1))
GUID_REUSE_FORMAT = 'guid-reused-by-pk-{}'
def get_random_slug():
"""Return a 20 character long random string"""
return ''.join(str(uuid.uuid4()).split('-')[:-1])
def clean_slug(instance, slug_field='slug'):
"""Cleans a model instance slug.
This strives to be as generic as possible but is only used
by Add-ons at the moment.
:param instance: The instance to clean the slug for.
:param slug_field: The field where to get the currently set slug from.
"""
slug = getattr(instance, slug_field, None) or instance.name
if not slug:
# Initialize the slug with what we have available: a name translation
# or in last resort a random slug.
translations = Translation.objects.filter(id=instance.name_id)
if translations.exists():
slug = translations[0]
max_length = instance._meta.get_field(slug_field).max_length
# We have to account for slug being reduced to '' by slugify
slug = slugify(slug or '')[:max_length] or get_random_slug()
if DeniedSlug.blocked(slug):
slug = slug[:max_length - 1] + '~'
# The following trick makes sure we are using a manager that returns
# all the objects, as otherwise we could have a slug clash on our hands.
# Eg with the "Addon.objects" manager, which doesn't list deleted addons,
# we could have a "clean" slug which is in fact already assigned to an
# already existing (deleted) addon. Also, make sure we use the base class.
manager = models.Manager()
manager.model = instance._meta.proxy_for_model or instance.__class__
qs = manager.values_list(slug_field, flat=True) # Get list of all slugs.
if instance.id:
qs = qs.exclude(pk=instance.id) # Can't clash with itself.
# We first need to make sure there's a clash, before trying to find a
# suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
# available.
clash = qs.filter(**{slug_field: slug})
if clash.exists():
max_postfix_length = len(str(MAX_SLUG_INCREMENT))
slug = slugify(slug)[:max_length - max_postfix_length]
# There is a clash, so find a suffix that will make this slug unique.
lookup = {'%s__startswith' % slug_field: slug}
clashes = qs.filter(**lookup)
prefix_len = len(slug)
used_slug_numbers = [value[prefix_len:] for value in clashes]
# find the next free slug number
slug_numbers = {int(i) for i in used_slug_numbers if i.isdigit()}
unused_numbers = SLUG_INCREMENT_SUFFIXES - slug_numbers
if unused_numbers:
num = min(unused_numbers)
elif max_length is None:
num = max(slug_numbers) + 1
else:
# This could happen. The current implementation (using
# ``[:max_length -2]``) only works for the first 100 clashes in the
        # worst case (if the slug is equal to or longer than
# ``max_length - 2`` chars).
# After that, {verylongslug}-100 will be trimmed down to
# {verylongslug}-10, which is already assigned, but it's the last
# solution tested.
raise RuntimeError(
'No suitable slug increment for {} found'.format(slug))
slug = u'{slug}{postfix}'.format(slug=slug, postfix=num)
setattr(instance, slug_field, slug)
return instance
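# For illustration (hypothetical slugs): if add-ons 'foo', 'foo1' and 'foo2' already
# exist and a new add-on would also get the slug 'foo', the clash branch above
# collects the used numeric suffixes {1, 2}, picks the smallest unused entry of
# SLUG_INCREMENT_SUFFIXES and assigns 'foo3'.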
class AddonQuerySet(BaseQuerySet):
def id_or_slug(self, val):
"""Get add-ons by id or slug."""
if isinstance(val, str) and not val.isdigit():
return self.filter(slug=val)
return self.filter(id=val)
def public(self):
"""Get reviewed add-ons only"""
return self.filter(self.valid_q(amo.REVIEWED_STATUSES))
def valid(self):
"""Get valid, enabled add-ons only"""
return self.filter(self.valid_q(amo.VALID_ADDON_STATUSES))
def not_disabled_by_mozilla(self):
"""Get all add-ons not disabled by Mozilla."""
return self.exclude(status=amo.STATUS_DISABLED)
def valid_q(self, statuses):
"""
Return a Q object that selects a valid Addon with the given statuses.
An add-on is valid if not disabled and has a current version.
"""
return Q(
_current_version__isnull=False,
disabled_by_user=False,
status__in=statuses)
class AddonManager(ManagerBase):
_queryset_class = AddonQuerySet
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
# declaration/instantiation and understand the consequences.
ManagerBase.__init__(self)
self.include_deleted = include_deleted
def get_queryset(self):
qs = super(AddonManager, self).get_queryset()
if not self.include_deleted:
qs = qs.exclude(status=amo.STATUS_DELETED)
return qs.transform(Addon.transformer)
def id_or_slug(self, val):
"""Get add-ons by id or slug."""
return self.get_queryset().id_or_slug(val)
def public(self):
"""Get public add-ons only"""
return self.get_queryset().public()
def valid(self):
"""Get valid, enabled add-ons only"""
return self.get_queryset().valid()
def not_disabled_by_mozilla(self):
"""Get all add-ons not disabled by Mozilla."""
return self.get_queryset().not_disabled_by_mozilla()
def get_base_queryset_for_queue(self, admin_reviewer=False,
admin_content_review=False,
show_pending_rejection=False):
qs = (
self.get_queryset()
# We don't want the default transformer, it does too much, and
# crucially, it prevents the
# select_related('_current_version__autoapprovalsummary') from
# working, because it overrides the _current_version with the one
# it fetches. We want translations though.
.only_translations()
# We need those joins for the queue to work without making extra
# queries.
.select_related(
'addonapprovalscounter',
'reviewerflags',
'_current_version__autoapprovalsummary',
'promotedaddon',
)
.prefetch_related(
'_current_version__files'
)
)
if not show_pending_rejection:
qs = qs.filter(
_current_version__reviewerflags__pending_rejection__isnull=True
)
if not admin_reviewer:
if admin_content_review:
qs = qs.exclude(
reviewerflags__needs_admin_content_review=True
)
else:
qs = qs.exclude(
reviewerflags__needs_admin_code_review=True
)
return qs
def get_auto_approved_queue(self, admin_reviewer=False):
"""Return a queryset of Addon objects that have been auto-approved but
not confirmed by a human yet."""
success_verdict = amo.AUTO_APPROVED
qs = (
self.get_base_queryset_for_queue(
admin_reviewer=admin_reviewer
)
.public()
.filter(
_current_version__autoapprovalsummary__verdict=success_verdict
)
.exclude(
_current_version__autoapprovalsummary__confirmed=True
)
.order_by(
'-_current_version__autoapprovalsummary__weight',
'addonapprovalscounter__last_human_review',
'created',
)
)
return qs
def get_content_review_queue(self, admin_reviewer=False):
"""Return a queryset of Addon objects that need content review."""
qs = (
self.get_base_queryset_for_queue(
admin_reviewer=admin_reviewer,
admin_content_review=True
)
.valid()
.filter(
addonapprovalscounter__last_content_review=None,
# Only content review extensions and dictionaries. See
# https://github.com/mozilla/addons-server/issues/11796 &
# https://github.com/mozilla/addons-server/issues/12065
type__in=(amo.ADDON_EXTENSION, amo.ADDON_DICT),
)
.order_by('created')
)
return qs
def get_scanners_queue(self, admin_reviewer=False):
"""Return a queryset of Addon objects that have been approved but
contain versions that were automatically flagged as needing human
review (regardless of channel)."""
return (
self.get_base_queryset_for_queue(
admin_reviewer=admin_reviewer
)
# All valid statuses, plus incomplete as well because the add-on
# could be purely unlisted (so we can't use valid_q(), which
# filters out current_version=None). We know the add-ons are likely
# to have a version since they got the needs_human_review flag, so
# returning incomplete ones is acceptable.
.filter(
status__in=[
amo.STATUS_APPROVED, amo.STATUS_NOMINATED, amo.STATUS_NULL
],
versions__files__status__in=[
amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW,
],
versions__needs_human_review=True
)
.order_by('created')
# There could be several versions matching for a single add-on so
# we need a distinct.
.distinct()
)
def get_mad_queue(self, admin_reviewer=False):
return (
self.get_base_queryset_for_queue(
admin_reviewer=admin_reviewer
)
# All valid statuses, plus incomplete as well because the add-on
# could be purely unlisted (so we can't use valid_q(), which
# filters out current_version=None). We know the add-ons are likely
# to have a version since they got the needs_human_review_by_mad
# flag, so returning incomplete ones is acceptable.
.filter(
Q(status__in=[
amo.STATUS_APPROVED, amo.STATUS_NOMINATED, amo.STATUS_NULL
]),
Q(versions__files__status__in=[
amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW,
]),
(Q(versions__reviewerflags__needs_human_review_by_mad=True,
versions__channel=amo.RELEASE_CHANNEL_UNLISTED) |
Q(_current_version__reviewerflags__needs_human_review_by_mad=( # noqa
True)))
)
.order_by('created')
# There could be several versions matching for a single add-on so
# we need a distinct.
.distinct()
)
def get_pending_rejection_queue(self, admin_reviewer=False):
filter_kwargs = {
'_current_version__reviewerflags__pending_rejection__isnull': False
}
return self.get_base_queryset_for_queue(
admin_reviewer=admin_reviewer,
show_pending_rejection=True,
).filter(**filter_kwargs).order_by(
'_current_version__reviewerflags__pending_rejection'
)
class Addon(OnChangeMixin, ModelBase):
id = PositiveAutoField(primary_key=True)
STATUS_CHOICES = amo.STATUS_CHOICES_ADDON
guid = models.CharField(max_length=255, unique=True, null=True)
slug = models.CharField(max_length=30, unique=True, null=True)
name = TranslatedField()
default_locale = models.CharField(max_length=10,
default=settings.LANGUAGE_CODE,
db_column='defaultlocale')
type = models.PositiveIntegerField(
choices=amo.ADDON_TYPE.items(), db_column='addontype_id',
default=amo.ADDON_EXTENSION)
status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), default=amo.STATUS_NULL)
icon_type = models.CharField(
max_length=25, blank=True, db_column='icontype')
icon_hash = models.CharField(max_length=8, blank=True, null=True)
homepage = TranslatedField()
support_email = TranslatedField(db_column='supportemail')
support_url = TranslatedField(db_column='supporturl')
description = PurifiedField(short=False)
summary = LinkifiedField()
developer_comments = PurifiedField(db_column='developercomments')
eula = PurifiedField()
privacy_policy = PurifiedField(db_column='privacypolicy')
average_rating = models.FloatField(
max_length=255, default=0, null=True, db_column='averagerating')
bayesian_rating = models.FloatField(
default=0, db_column='bayesianrating')
total_ratings = models.PositiveIntegerField(
default=0, db_column='totalreviews')
text_ratings_count = models.PositiveIntegerField(
default=0, db_column='textreviewscount')
weekly_downloads = models.PositiveIntegerField(
default=0, db_column='weeklydownloads')
hotness = models.FloatField(default=0)
average_daily_users = models.PositiveIntegerField(default=0)
last_updated = models.DateTimeField(
null=True, help_text='Last time this add-on had a file/version update')
disabled_by_user = models.BooleanField(default=False, db_column='inactive')
target_locale = models.CharField(
max_length=255, blank=True, null=True,
help_text='For dictionaries and language packs. Identifies the '
'language and, optionally, region that this add-on is '
'written for. Examples: en-US, fr, and de-AT')
contributions = models.URLField(max_length=255, blank=True)
authors = FilterableManyToManyField(
'users.UserProfile', through='AddonUser', related_name='addons',
q_filter=~Q(addonuser__role=amo.AUTHOR_ROLE_DELETED))
categories = models.ManyToManyField('Category', through='AddonCategory')
_current_version = models.ForeignKey(Version, db_column='current_version',
related_name='+', null=True,
on_delete=models.SET_NULL)
is_experimental = models.BooleanField(default=False,
db_column='experimental')
reputation = models.SmallIntegerField(
default=0, null=True, choices=REPUTATION_CHOICES.items(),
help_text='The higher the reputation value, the further down the '
'add-on will be in the auto-approved review queue. '
'A value of 0 has no impact')
requires_payment = models.BooleanField(default=False)
unfiltered = AddonManager(include_deleted=True)
objects = AddonManager()
class Meta:
db_table = 'addons'
# This is very important:
# The default base manager will be used for relations like
# `version.addon`. We thus want one that is NOT filtered in any case,
# we don't want a 500 if the addon is not found (because it has the
# status amo.STATUS_DELETED for example).
# The CLASS of the one configured here will also be used for "many to
# many relations" like `collection.addons`. In that case, we do want
# the filtered version by default, to make sure we're not displaying
# stuff by mistake. You thus want the filtered one configured
# as `base_manager_name`.
# We don't control the instantiation, but AddonManager sets
# include_deleted to False by default, so filtering is enabled by
# default.
base_manager_name = 'unfiltered'
indexes = [
models.Index(fields=('bayesian_rating',), name='bayesianrating'),
models.Index(fields=('created',), name='created_idx'),
models.Index(fields=('_current_version',), name='current_version'),
models.Index(fields=('disabled_by_user',), name='inactive'),
models.Index(fields=('hotness',), name='hotness_idx'),
models.Index(fields=('last_updated',), name='last_updated'),
models.Index(fields=('modified',), name='modified_idx'),
models.Index(fields=('status',), name='status'),
models.Index(fields=('target_locale',), name='target_locale'),
models.Index(fields=('type',), name='addontype_id'),
models.Index(fields=('weekly_downloads',),
name='weeklydownloads_idx'),
models.Index(fields=('average_daily_users', 'type'),
name='adus_type_idx'),
models.Index(fields=('bayesian_rating', 'type'),
name='rating_type_idx'),
models.Index(fields=('created', 'type'),
name='created_type_idx'),
models.Index(fields=('last_updated', 'type'),
name='last_updated_type_idx'),
models.Index(fields=('modified', 'type'),
name='modified_type_idx'),
models.Index(fields=('type', 'status', 'disabled_by_user'),
name='type_status_inactive_idx'),
models.Index(fields=('weekly_downloads', 'type'),
name='downloads_type_idx'),
models.Index(fields=('type', 'status', 'disabled_by_user',
'_current_version'),
name='visible_idx'),
models.Index(fields=('name', 'status', 'type'),
name='name_2'),
]
def __str__(self):
return u'%s: %s' % (self.id, self.name)
def __init__(self, *args, **kw):
super(Addon, self).__init__(*args, **kw)
def save(self, **kw):
self.clean_slug()
super(Addon, self).save(**kw)
@use_primary_db
def clean_slug(self, slug_field='slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
def force_disable(self):
activity.log_create(amo.LOG.CHANGE_STATUS, self, amo.STATUS_DISABLED)
log.info('Addon "%s" status changed to: %s',
self.slug, amo.STATUS_DISABLED)
self.update(status=amo.STATUS_DISABLED)
self.update_version()
# See: https://github.com/mozilla/addons-server/issues/13194
self.disable_all_files()
def force_enable(self):
activity.log_create(amo.LOG.CHANGE_STATUS, self, amo.STATUS_APPROVED)
log.info('Addon "%s" status changed to: %s',
self.slug, amo.STATUS_APPROVED)
self.update(status=amo.STATUS_APPROVED)
# Call update_status() to fix the status if the add-on is not actually
# in a state that allows it to be public.
self.update_status()
def deny_resubmission(self):
if self.is_guid_denied:
raise RuntimeError("GUID already denied")
activity.log_create(amo.LOG.DENIED_GUID_ADDED, self)
log.info('Deny resubmission for addon "%s"', self.slug)
DeniedGuid.objects.create(guid=self.guid)
def allow_resubmission(self):
if not self.is_guid_denied:
raise RuntimeError("GUID already denied")
activity.log_create(amo.LOG.DENIED_GUID_DELETED, self)
log.info('Allow resubmission for addon "%s"', self.slug)
DeniedGuid.objects.filter(guid=self.guid).delete()
def disable_all_files(self):
File.objects.filter(version__addon=self).update(
status=amo.STATUS_DISABLED)
@property
def is_guid_denied(self):
return DeniedGuid.objects.filter(guid=self.guid).exists()
def is_soft_deleteable(self):
return self.status or Version.unfiltered.filter(addon=self).exists()
def _prepare_deletion_email(self, msg, reason):
user = core.get_user()
# Don't localize email to admins, use 'en-US' always.
with translation.override(settings.LANGUAGE_CODE):
# The types are lazy translated in apps/constants/base.py.
atype = amo.ADDON_TYPE.get(self.type, 'unknown').upper()
context = {
'atype': atype,
'authors': [u.email for u in self.authors.all()],
'adu': self.average_daily_users,
'guid': self.guid,
'id': self.id,
'msg': msg,
'reason': reason,
'name': self.name,
'slug': self.slug,
'weekly_downloads': self.weekly_downloads,
'url': jinja_helpers.absolutify(self.get_url_path()),
'user_str': (
"%s, %s (%s)" % (user.name, user.email, user.id) if user
else "Unknown"),
}
email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
WEEKLY DOWNLOADS: %(weekly_downloads)s
AVERAGE DAILY USERS: %(adu)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
log.info('Sending delete email for %(atype)s %(id)s' % context)
subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
return subject, email_msg
@transaction.atomic
def delete(self, msg='', reason='', send_delete_email=True):
# To avoid a circular import
from . import tasks
from olympia.versions import tasks as version_tasks
from olympia.files import tasks as file_tasks
# Check for soft deletion path. Happens only if the addon status isn't
# 0 (STATUS_INCOMPLETE) with no versions.
soft_deletion = self.is_soft_deleteable()
if soft_deletion and self.status == amo.STATUS_DELETED:
# We're already done.
return
id = self.id
# Fetch previews before deleting the addon instance, so that we can
# pass the list of files to delete to the delete_preview_files task
# after the addon is deleted.
previews = list(Preview.objects.filter(addon__id=id)
.values_list('id', flat=True))
version_previews = list(
VersionPreview.objects.filter(version__addon__id=id)
.values_list('id', flat=True))
if soft_deletion:
# /!\ If we ever stop using soft deletion, and remove this code, we
# need to make sure that the logs created below aren't cascade
# deleted!
log.info('Deleting add-on: %s' % self.id)
if send_delete_email:
email_to = [settings.DELETION_EMAIL]
subject, email_msg = self._prepare_deletion_email(msg, reason)
else:
email_to, subject, email_msg = [], '', ''
# If the add-on was disabled by Mozilla, add the guid to
# DeniedGuids to prevent resubmission after deletion.
if self.status == amo.STATUS_DISABLED:
try:
with transaction.atomic():
self.deny_resubmission()
except RuntimeError:
# If the guid is already in DeniedGuids, we are good.
pass
# Update or NULL out various fields.
models.signals.pre_delete.send(sender=Addon, instance=self)
self._ratings.all().delete()
# We avoid triggering signals for Version & File on purpose to
# avoid extra work. Files will be moved to the correct storage
# location with hide_disabled_files task or hide_disabled_files
# cron as a fallback.
self.disable_all_files()
file_tasks.hide_disabled_files.delay(addon_id=self.id)
self.versions.all().update(deleted=True)
VersionReviewerFlags.objects.filter(version__addon=self).update(
pending_rejection=None)
# The last parameter is needed to automagically create an AddonLog.
activity.log_create(amo.LOG.DELETE_ADDON, self.pk,
str(self.guid), self)
self.update(status=amo.STATUS_DELETED, slug=None,
_current_version=None, modified=datetime.now())
models.signals.post_delete.send(sender=Addon, instance=self)
if send_delete_email:
send_mail(subject, email_msg, recipient_list=email_to)
else:
# Real deletion path.
super(Addon, self).delete()
for preview in previews:
tasks.delete_preview_files.delay(preview)
for preview in version_previews:
version_tasks.delete_preview_files.delay(preview)
return True
@classmethod
def initialize_addon_from_upload(cls, data, upload, channel, user):
timer = StopWatch('addons.models.initialize_addon_from_upload.')
timer.start()
fields = [field.name for field in cls._meta.get_fields()]
guid = data.get('guid')
old_guid_addon = None
if guid: # It's an extension.
# Reclaim GUID from deleted add-on.
try:
old_guid_addon = Addon.unfiltered.get(guid=guid)
old_guid_addon.update(guid=None)
except ObjectDoesNotExist:
pass
generate_guid = (
not data.get('guid', None) and
data.get('is_webextension', False)
)
if generate_guid:
data['guid'] = guid = generate_addon_guid()
timer.log_interval('1.guids')
data = cls.resolve_webext_translations(data, upload)
timer.log_interval('2.resolve_translations')
if channel == amo.RELEASE_CHANNEL_UNLISTED:
data['slug'] = get_random_slug()
timer.log_interval('3.get_random_slug')
addon = Addon(**{k: v for k, v in data.items() if k in fields})
timer.log_interval('4.instance_init')
addon.status = amo.STATUS_NULL
locale_is_set = (addon.default_locale and
addon.default_locale in settings.AMO_LANGUAGES and
data.get('default_locale') == addon.default_locale)
if not locale_is_set:
addon.default_locale = to_language(trans_real.get_language())
timer.log_interval('5.default_locale')
addon.save()
timer.log_interval('6.addon_save')
if guid:
AddonGUID.objects.create(addon=addon, guid=guid)
if old_guid_addon:
old_guid_addon.update(guid=GUID_REUSE_FORMAT.format(addon.pk))
log.info(f'GUID {guid} from addon [{old_guid_addon.pk}] reused '
f'by addon [{addon.pk}].')
if user:
AddonUser(addon=addon, user=user).save()
timer.log_interval('7.end')
return addon
@classmethod
def from_upload(cls, upload, selected_apps,
channel=amo.RELEASE_CHANNEL_LISTED, parsed_data=None,
user=None):
"""
Create an Addon instance, a Version and corresponding File(s) from a
FileUpload, a list of compatible app ids, a channel id and the
parsed_data generated by parse_addon().
        Note that it's the caller's responsibility to ensure the file is valid.
We can't check for that here because an admin may have overridden the
validation results.
"""
assert parsed_data is not None
addon = cls.initialize_addon_from_upload(
parsed_data, upload, channel, user)
Version.from_upload(
upload=upload, addon=addon, selected_apps=selected_apps,
channel=channel, parsed_data=parsed_data)
activity.log_create(amo.LOG.CREATE_ADDON, addon)
log.info('New addon %r from %r' % (addon, upload))
return addon
@classmethod
def resolve_webext_translations(cls, data, upload):
"""Resolve all possible translations from an add-on.
        This returns a modified `data` dictionary with proper
        translations filled in.
"""
default_locale = find_language(data.get('default_locale'))
if not data.get('is_webextension') or not default_locale:
# Don't change anything if we don't meet the requirements
return data
# find_language might have expanded short to full locale, so update it.
data['default_locale'] = default_locale
fields = ('name', 'homepage', 'summary')
messages = extract_translations(upload)
for field in fields:
data[field] = {
locale: resolve_i18n_message(
data[field],
locale=locale,
default_locale=default_locale,
messages=messages)
for locale in messages
}
return data
def get_url_path(self, add_prefix=True):
if not self._current_version_id:
return ''
return reverse(
'addons.detail', args=[self.slug], add_prefix=add_prefix)
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
if not prefix_only:
prefix += '.addons'
view_name = '{prefix}.{action}'.format(prefix=prefix,
action=action)
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=None):
if args is None:
args = []
return reverse('addons.%s' % action, args=[self.slug] + args)
@property
def ratings_url(self):
return reverse('addons.ratings.list', args=[self.slug])
@cached_property
def listed_authors(self):
return self.authors.filter(
addons=self,
addonuser__listed=True).order_by('addonuser__position')
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def ratings(self):
return Rating.objects.filter(addon=self, reply_to=None)
def language_ascii(self):
lang = trans_real.to_language(self.default_locale)
return settings.LANGUAGES.get(lang)
@property
def valid_file_statuses(self):
if self.status == amo.STATUS_APPROVED:
return [amo.STATUS_APPROVED]
return amo.VALID_FILE_STATUSES
def find_latest_public_listed_version(self):
"""Retrieve the latest public listed version of an addon.
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions)."""
try:
statuses = self.valid_file_statuses
status_list = ','.join(map(str, statuses))
fltr = {
'channel': amo.RELEASE_CHANNEL_LISTED,
'files__status__in': statuses
}
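            # The raw NOT EXISTS clause below additionally requires that *every*
            # file of the version has a valid status: files__status__in alone only
            # guarantees that at least one file matches.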
return self.versions.filter(**fltr).extra(
where=["""
NOT EXISTS (
SELECT 1 FROM files AS f2
WHERE f2.version_id = versions.id AND
f2.status NOT IN (%s))
""" % status_list])[0]
except (IndexError, Version.DoesNotExist):
return None
def find_latest_version(self, channel, exclude=((amo.STATUS_DISABLED,))):
"""Retrieve the latest version of an add-on for the specified channel.
        If channel is None, a version from either channel may be returned.
Keyword arguments:
exclude -- exclude versions for which all files have one
of those statuses (default STATUS_DISABLED)."""
# If the add-on is deleted or hasn't been saved yet, it should not
# have a latest version.
if not self.id or self.status == amo.STATUS_DELETED:
return None
# We can't use .exclude(files__status=excluded_statuses) because that
# would exclude a version if *any* of its files match but if there is
# only one file that doesn't have one of the excluded statuses it
# should be enough for that version to be considered.
params = {
'files__status__in': (
set(amo.STATUS_CHOICES_FILE.keys()) - set(exclude)
)
}
if channel is not None:
params['channel'] = channel
try:
# Avoid most transformers - keep translations because they don't
# get automatically fetched if you just access the field without
# having made the query beforehand, and we don't know what callers
# will want ; but for the rest of them, since it's a single
# instance there is no reason to call the default transformers.
latest_qs = self.versions.filter(**params).only_translations()
latest = latest_qs.latest()
except Version.DoesNotExist:
latest = None
return latest
@use_primary_db
def update_version(self, ignore=None, _signal=True):
"""
Update the current_version field on this add-on if necessary.
Returns True if we updated the current_version field.
        The optional ``ignore`` parameter, if present, is a version
        to not consider as part of the update, since it may be in the
        process of being deleted.
        Pass ``_signal=False`` if you don't want any signals fired at all.
"""
new_current_version = self.find_latest_public_listed_version()
updated = {}
send_signal = False
if self._current_version != new_current_version:
updated['_current_version'] = new_current_version
send_signal = True
# update_version can be called by a post_delete signal (such
# as File's) when deleting a version. If so, we should avoid putting
# that version-being-deleted in any fields.
if ignore is not None:
updated = {k: v for k, v in updated.items() if v != ignore}
if updated:
diff = [self._current_version, new_current_version]
# Pass along _signal to the .update() to prevent it from firing
# signals if we don't want them.
updated['_signal'] = _signal
try:
self.update(**updated)
if send_signal and _signal:
signals.version_changed.send(sender=self)
log.info(u'Version changed from current: %s to %s '
u'for addon %s'
% tuple(diff + [self]))
except Exception as e:
log.error(u'Could not save version changes current: %s to %s '
u'for addon %s (%s)' %
tuple(diff + [self, e]))
return bool(updated)
def increment_theme_version_number(self):
"""Increment theme version number by 1."""
latest_version = self.find_latest_version(None)
version = latest_version or self.current_version
version.version = str(float(version.version) + 1)
# Set the current version.
self.update(_current_version=version.save())
@property
def current_version(self):
"""Return the latest public listed version of an addon.
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions).
If the add-on has not been created yet or is deleted, it returns None.
"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
@cached_property
def latest_unlisted_version(self):
"""Shortcut property for Addon.find_latest_version(
channel=RELEASE_CHANNEL_UNLISTED)."""
return self.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
@cached_property
def binary(self):
"""Returns if the current version has binary files."""
version = self.current_version
if version:
return version.files.filter(binary=True).exists()
return False
@cached_property
def binary_components(self):
"""Returns if the current version has files with binary_components."""
version = self.current_version
if version:
return version.files.filter(binary_components=True).exists()
return False
def get_icon_dir(self):
return os.path.join(jinja_helpers.user_media_path('addon_icons'),
'%s' % (self.id // 1000))
def get_icon_url(self, size, use_default=True):
"""
Returns the addon's icon url according to icon_type.
If it's a theme and there is no icon set, it will return the default
theme icon.
If it's something else, it will return the default add-on icon, unless
use_default is False, in which case it will return None.
"""
icon_type_split = []
if self.icon_type:
icon_type_split = self.icon_type.split('/')
# Get the closest allowed size without going over
if (size not in amo.ADDON_ICON_SIZES and
size >= amo.ADDON_ICON_SIZES[0]):
size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
elif size < amo.ADDON_ICON_SIZES[0]:
size = amo.ADDON_ICON_SIZES[0]
# Figure out what to return for an image URL
if not self.icon_type:
return self.get_default_icon_url(size) if use_default else None
elif icon_type_split[0] == 'icon':
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
icon_type_split[1],
size
)
else:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
# Use the icon hash if we have one as the cachebusting suffix,
# otherwise fall back to the add-on modification date.
suffix = self.icon_hash or str(
int(time.mktime(self.modified.timetuple())))
path = '/'.join([
split_id.group(2) or '0',
'{0}-{1}.png?modified={2}'.format(self.id, size, suffix),
])
return jinja_helpers.user_media_url('addon_icons') + path
def get_default_icon_url(self, size):
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL, 'default', size
)
@use_primary_db
def update_status(self, ignore_version=None):
self.reload()
if (self.status in [amo.STATUS_NULL, amo.STATUS_DELETED] or
self.is_disabled):
self.update_version(ignore=ignore_version)
return
versions = self.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
status = None
reason = ''
if not versions.exists():
status = amo.STATUS_NULL
reason = 'no listed versions'
elif not versions.filter(
files__status__in=amo.VALID_FILE_STATUSES).exists():
status = amo.STATUS_NULL
reason = 'no listed version with valid file'
elif (self.status == amo.STATUS_APPROVED and
not versions.filter(files__status=amo.STATUS_APPROVED).exists()):
if versions.filter(
files__status=amo.STATUS_AWAITING_REVIEW).exists():
status = amo.STATUS_NOMINATED
reason = 'only an unreviewed file'
else:
status = amo.STATUS_NULL
reason = 'no reviewed files'
elif self.status == amo.STATUS_APPROVED:
latest_version = self.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if (latest_version and latest_version.has_files and
(latest_version.all_files[0].status ==
amo.STATUS_AWAITING_REVIEW)):
# Addon is public, but its latest file is not (it's the case on
# a new file upload). So, call update, to trigger watch_status,
# which takes care of setting nomination time when needed.
status = self.status
reason = 'triggering watch_status'
if status is not None:
log.info('Changing add-on status [%s]: %s => %s (%s).'
% (self.id, self.status, status, reason))
self.update(status=status)
activity.log_create(amo.LOG.CHANGE_STATUS, self, self.status)
self.update_version(ignore=ignore_version)
@staticmethod
def attach_related_versions(addons, addon_dict=None):
if addon_dict is None:
addon_dict = {addon.id: addon for addon in addons}
all_ids = set(
filter(None, (addon._current_version_id for addon in addons)))
versions = list(Version.objects.filter(id__in=all_ids).order_by())
for version in versions:
try:
addon = addon_dict[version.addon_id]
except KeyError:
log.info('Version %s has an invalid add-on id.' % version.id)
continue
if addon._current_version_id == version.id:
addon._current_version = version
version.addon = addon
@staticmethod
def attach_listed_authors(addons, addon_dict=None):
if addon_dict is None:
addon_dict = {addon.id: addon for addon in addons}
addonuser_qs = (
AddonUser.objects.filter(addon__in=addons, listed=True)
.order_by('addon_id', 'position')
.select_related('user'))
seen = set()
groupby = itertools.groupby(addonuser_qs, key=lambda u: u.addon_id)
for addon_id, addonusers in groupby:
addon_dict[addon_id].listed_authors = [
au.user for au in addonusers]
seen.add(addon_id)
# set listed_authors to empty list on addons without listed authors.
[setattr(addon, 'listed_authors', []) for addon in addon_dict.values()
if addon.id not in seen]
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = {a.id: a for a in addons}
qs = Preview.objects.filter(addon__in=addons,
position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
seen = set()
for addon_id, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon_id]._all_previews = list(previews)
seen.add(addon_id)
# set _all_previews to empty list on addons without previews.
[setattr(addon, '_all_previews', []) for addon in addon_dict.values()
if addon.id not in seen]
@staticmethod
def attach_static_categories(addons, addon_dict=None):
if addon_dict is None:
addon_dict = {addon.id: addon for addon in addons}
qs = (
AddonCategory.objects
.filter(addon__in=addon_dict.values())
.values_list('addon_id', 'category_id'))
for addon_id, cats_iter in itertools.groupby(qs, key=lambda x: x[0]):
# The second value of each tuple in cats_iter are the category ids
# we want.
addon_dict[addon_id].category_ids = sorted(
[c[1] for c in cats_iter])
addon_dict[addon_id].all_categories = [
CATEGORIES_BY_ID[cat_id] for cat_id
in addon_dict[addon_id].category_ids
if cat_id in CATEGORIES_BY_ID]
@staticmethod
@timer
def transformer(addons):
if not addons:
return
addon_dict = {a.id: a for a in addons}
# Attach categories.
Addon.attach_static_categories(addons, addon_dict=addon_dict)
# Set _current_version and attach listed authors.
Addon.attach_related_versions(addons, addon_dict=addon_dict)
Addon.attach_listed_authors(addons, addon_dict=addon_dict)
# Attach previews.
Addon.attach_previews(addons, addon_dict=addon_dict)
return addon_dict
def show_adu(self):
return self.type != amo.ADDON_SEARCH
@property
def contribution_url(self, lang=settings.LANGUAGE_CODE,
app=settings.DEFAULT_APP):
return reverse('addons.contribute', args=[self.slug])
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self._all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.STATIC_URL + '/img/icons/no-preview.png'
def can_request_review(self):
"""Return whether an add-on can request a review or not."""
if (self.is_disabled or
self.status in (amo.STATUS_APPROVED,
amo.STATUS_NOMINATED,
amo.STATUS_DELETED)):
return False
latest_version = self.find_latest_version(amo.RELEASE_CHANNEL_LISTED,
exclude=())
return (latest_version is not None and
latest_version.files.exists() and
not any(file.reviewed for file in latest_version.all_files))
@property
def is_disabled(self):
"""True if this Addon is disabled.
It could be disabled by an admin or disabled by the developer
"""
return self.status == amo.STATUS_DISABLED or self.disabled_by_user
@property
def is_deleted(self):
return self.status == amo.STATUS_DELETED
def is_unreviewed(self):
return self.status in amo.UNREVIEWED_ADDON_STATUSES
def is_public(self):
return self.status == amo.STATUS_APPROVED and not self.disabled_by_user
def has_complete_metadata(self, has_listed_versions=None):
"""See get_required_metadata for has_listed_versions details."""
return all(self.get_required_metadata(
has_listed_versions=has_listed_versions))
def get_required_metadata(self, has_listed_versions=None):
"""If has_listed_versions is not specified this method will return the
current (required) metadata (truthy values if present) for this Addon.
If has_listed_versions is specified then the method will act as if
Addon.has_listed_versions() returns that value. Used to predict if the
addon will require extra metadata before a version is created."""
if has_listed_versions is None:
has_listed_versions = self.has_listed_versions()
if not has_listed_versions:
# Add-ons with only unlisted versions have no required metadata.
return []
# We need to find out if the add-on has a license set. We prefer to
# check the current_version first because that's what would be used for
# public pages, but if there isn't any listed version will do.
version = self.current_version or self.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED, exclude=())
return [
self.all_categories,
self.name,
self.summary,
(version and version.license),
]
def should_redirect_to_submit_flow(self):
return (
self.status == amo.STATUS_NULL and
not self.has_complete_metadata() and
self.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED))
def can_be_deleted(self):
return not self.is_deleted
def has_listed_versions(self, include_deleted=False):
if include_deleted:
manager = self.versions(manager='unfiltered_for_relations')
else:
manager = self.versions
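        # A set _current_version_id implies the add-on already has a listed
        # version, so a truthy value lets us skip the EXISTS query below.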
return self._current_version_id or manager.filter(
channel=amo.RELEASE_CHANNEL_LISTED).exists()
def has_unlisted_versions(self, include_deleted=False):
if include_deleted:
manager = self.versions(manager='unfiltered_for_relations')
else:
manager = self.versions
return manager.filter(channel=amo.RELEASE_CHANNEL_UNLISTED).exists()
@property
def is_restart_required(self):
"""Whether the add-on current version requires a browser restart to
work."""
return (
self.current_version and self.current_version.is_restart_required)
def _is_recommended_theme(self):
from olympia.bandwagon.models import CollectionAddon
return (self.type == amo.ADDON_STATICTHEME and
CollectionAddon.objects.filter(
collection_id=settings.COLLECTION_FEATURED_THEMES_ID,
addon=self).exists())
def promoted_group(self, *, currently_approved=True):
"""Is the addon currently promoted for the current applications?
Returns the group constant, or NOT_PROMOTED (which is falsey)
otherwise.
`currently_approved=True` means only returns True if
self.current_version is approved for the current promotion & apps.
If currently_approved=False then promotions where there isn't approval
are returned too.
"""
from olympia.promoted.models import PromotedAddon
try:
promoted = self.promotedaddon
except PromotedAddon.DoesNotExist:
return NOT_PROMOTED
is_promoted = (
not currently_approved or promoted.approved_applications)
return promoted.group if is_promoted else NOT_PROMOTED
@cached_property
def promoted(self):
promoted_group = self.promoted_group()
if promoted_group:
return self.promotedaddon
else:
from olympia.promoted.models import PromotedTheme
if self._is_recommended_theme():
return PromotedTheme(addon=self, group_id=RECOMMENDED.id)
return None
@cached_property
def tags_partitioned_by_developer(self):
"""Returns a tuple of developer tags and user tags for this addon."""
tags = self.tags.not_denied()
user_tags = tags.exclude(addon_tags__user__in=self.listed_authors)
dev_tags = tags.exclude(id__in=[t.id for t in user_tags])
return dev_tags, user_tags
@cached_property
def compatible_apps(self):
"""Shortcut to get compatible apps for the current version."""
if self.current_version:
return self.current_version.compatible_apps
else:
return {}
def accepts_compatible_apps(self):
"""True if this add-on lists compatible apps."""
return self.type not in amo.NO_COMPAT
def incompatible_latest_apps(self):
"""Returns a list of applications with which this add-on is
incompatible (based on the latest version of each app).
"""
apps = []
for application, version in self.compatible_apps.items():
if not version:
continue
latest_version = version.get_latest_application_version()
if version_int(version.max.version) < version_int(latest_version):
apps.append((application, latest_version))
return apps
def has_author(self, user):
"""True if ``user`` is an author of the add-on."""
if user is None or user.is_anonymous:
return False
return AddonUser.objects.filter(addon=self, user=user).exists()
@classmethod
def _last_updated_queries(cls):
"""
Get the queries used to calculate addon.last_updated.
"""
status_change = Max('versions__files__datestatuschanged')
public = (
Addon.objects.filter(
status=amo.STATUS_APPROVED,
versions__files__status=amo.STATUS_APPROVED)
.values('id').annotate(last_updated=status_change))
stati = amo.VALID_ADDON_STATUSES
exp = (Addon.objects.exclude(status__in=stati)
.filter(versions__files__status__in=amo.VALID_FILE_STATUSES)
.values('id')
.annotate(last_updated=Max('versions__files__created')))
return {'public': public, 'exp': exp}
@cached_property
def all_categories(self):
return list(filter(
None, [cat.to_static_category() for cat in self.categories.all()]))
@cached_property
def current_previews(self):
"""Previews for the current version, or all of them if not a
static theme."""
if self.has_per_version_previews:
if self.current_version:
return self.current_version.previews.all()
return VersionPreview.objects.none()
else:
return self._all_previews
@cached_property
def _all_previews(self):
"""Exclude promo graphics."""
return list(self.previews.exclude(position=-1))
@property
def has_per_version_previews(self):
return self.type == amo.ADDON_STATICTHEME
@property
def app_categories(self):
app_cats = {}
categories = sorted_groupby(
sorted(self.all_categories),
key=lambda x: getattr(amo.APP_IDS.get(x.application), 'short', ''))
for app, cats in categories:
app_cats[app] = list(cats)
return app_cats
def remove_locale(self, locale):
"""NULLify strings in this locale for the add-on and versions."""
for o in itertools.chain([self], self.versions.all()):
Translation.objects.remove_for(o, locale)
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the addon.
"""
if require_author:
require_owner = False
ignore_disabled = True
admin = False
return acl.check_addon_ownership(request, self, admin=admin,
dev=(not require_owner),
ignore_disabled=ignore_disabled)
def should_show_permissions(self, version=None):
version = version or self.current_version
return (self.type == amo.ADDON_EXTENSION and
version and version.all_files[0] and
(not version.all_files[0].is_webextension or
version.all_files[0].permissions or
version.all_files[0].optional_permissions
))
# Aliases for reviewerflags below are not just useful in case
# AddonReviewerFlags does not exist for this add-on: they are also used
# by reviewer tools get_flags() function to return flags shown to reviewers
# in both the review queues and the review page.
@property
def needs_admin_code_review(self):
try:
return self.reviewerflags.needs_admin_code_review
except AddonReviewerFlags.DoesNotExist:
return None
@property
def needs_admin_content_review(self):
try:
return self.reviewerflags.needs_admin_content_review
except AddonReviewerFlags.DoesNotExist:
return None
@property
def needs_admin_theme_review(self):
try:
return self.reviewerflags.needs_admin_theme_review
except AddonReviewerFlags.DoesNotExist:
return None
@property
def auto_approval_disabled(self):
try:
return self.reviewerflags.auto_approval_disabled
except AddonReviewerFlags.DoesNotExist:
return None
@property
def auto_approval_disabled_until_next_approval(self):
try:
return (
self.reviewerflags.auto_approval_disabled_until_next_approval
)
except AddonReviewerFlags.DoesNotExist:
return None
@property
def auto_approval_delayed_until(self):
try:
return self.reviewerflags.auto_approval_delayed_until
except AddonReviewerFlags.DoesNotExist:
return None
@property
def auto_approval_delayed_indefinitely(self):
return self.auto_approval_delayed_until == datetime.max
@property
def auto_approval_delayed_temporarily(self):
return (
bool(self.auto_approval_delayed_until) and
self.auto_approval_delayed_until != datetime.max and
self.auto_approval_delayed_until > datetime.now()
)
def reset_notified_about_auto_approval_delay(self):
"""
Reset notified_about_auto_approval_delay reviewer flag for this addon.
This doesn't create an AddonReviewerFlags if there wasn't one, just
resets notified_about_auto_approval_delay to False if there were flags
for this add-on.
"""
AddonReviewerFlags.objects.filter(addon=self).update(
notified_about_auto_approval_delay=False)
@classmethod
def get_lookup_field(cls, identifier):
lookup_field = 'pk'
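        # Illustrative, hypothetical identifiers: '12345' is looked up by pk,
        # 'my-addon-slug' by slug, and '{uuid}' or 'addon@example.com' by guid.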
if identifier and not identifier.isdigit():
# If the identifier contains anything other than a digit, it's
# either a slug or a guid. guids need to contain either {} or @,
# which are invalid in a slug.
if amo.ADDON_GUID_PATTERN.match(identifier):
lookup_field = 'guid'
else:
lookup_field = 'slug'
return lookup_field
@property
def addonguid_guid(self):
""" Use this function to avoid having to wrap `addon.addonguid.guid` in
a try...except.
There *should* be a matching AddonGUID record for every Addon with a
guid, but the foreign key is from AddonGUID to Addon so there's a
        possibility of bad data leading to the AddonGUID not existing. Plus we
don't want this to fail if an upload with guid=None somehow ended up
getting through.
"""
return getattr(self, 'addonguid', self).guid
@cached_property
def block(self):
from olympia.blocklist.models import Block
# Block.guid is unique so it's either on the list or not.
return Block.objects.filter(guid=self.addonguid_guid).last()
@cached_property
def blocklistsubmission(self):
from olympia.blocklist.models import BlocklistSubmission
# GUIDs should only exist in one (active) submission at once.
return BlocklistSubmission.get_submissions_from_guid(
self.addonguid_guid).last()
@property
def git_extraction_is_in_progress(self):
from olympia.git.models import GitExtractionEntry
return GitExtractionEntry.objects.filter(
addon=self, in_progress=True
).exists()
dbsignals.pre_save.connect(save_signal, sender=Addon,
dispatch_uid='addon_translations')
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
from . import tasks
tasks.version_changed.delay(sender.id)
@receiver(dbsignals.post_save, sender=Addon,
dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
from . import tasks
if not kw.get('raw'):
tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr=None, new_attr=None, instance=None,
sender=None, **kwargs):
"""
Set nomination date if the addon is new in queue or updating.
The nomination date cannot be reset, say, when a developer cancels
their request for review and re-requests review.
If a version is rejected after nomination, the developer has
to upload a new version.
"""
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
latest_version = instance.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
# Update the author's account profile visibility
if new_status != old_status:
[author.update_is_public() for author in instance.authors.all()]
if (new_status not in amo.VALID_ADDON_STATUSES or
not new_status or not latest_version):
return
if old_status not in amo.UNREVIEWED_ADDON_STATUSES:
# New: will (re)set nomination only if it's None.
latest_version.reset_nomination_time()
elif latest_version.has_files:
# Updating: inherit nomination from last nominated version.
# Calls `inherit_nomination` manually given that signals are
# deactivated to avoid circular calls.
inherit_nomination(None, latest_version)
@Addon.on_change
def watch_disabled(old_attr=None, new_attr=None, instance=None, sender=None,
**kwargs):
"""
Move files when an add-on is disabled/enabled.
There is a similar watcher in olympia.files.models that tracks File
status, but this one is useful for when the Files do not change their
status.
"""
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
attrs = {key: value for key, value in old_attr.items()
if key in ('disabled_by_user', 'status')}
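    # Build a transient Addon from the old attribute values so that
    # is_disabled can be evaluated as it was before this change.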
was_disabled = Addon(**attrs).is_disabled
is_disabled = instance.is_disabled
if was_disabled and not is_disabled:
for file_ in File.objects.filter(version__addon=instance.id):
file_.unhide_disabled_file()
elif is_disabled and not was_disabled:
for file_ in File.objects.filter(version__addon=instance.id):
file_.hide_disabled_file()
@Addon.on_change
def watch_changes(old_attr=None, new_attr=None, instance=None, sender=None,
**kwargs):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
changes = {
x for x in new_attr
if not x.startswith('_') and new_attr[x] != old_attr.get(x)
}
basket_relevant_changes = (
# Some changes are not tracked here:
# - Any authors changes (separate model)
# - Creation/Deletion of unlisted version (separate model)
# - Name change (separate model, not implemented yet)
# - Categories changes (separate model, ignored for now)
# - average_rating changes (ignored for now, happens too often)
# - average_daily_users changes (ignored for now, happens too often)
'_current_version', 'default_locale', 'slug', 'status',
'disabled_by_user',
)
if any(field in changes for field in basket_relevant_changes):
from olympia.amo.tasks import sync_object_to_basket
log.info(
'Triggering a sync of %s %s with basket because of %s change',
'addon', instance.pk, 'attribute')
sync_object_to_basket.delay('addon', instance.pk)
@receiver(translation_saved, sender=Addon,
dispatch_uid='watch_addon_name_changes')
def watch_addon_name_changes(sender=None, instance=None, **kw):
field_name = kw.get('field_name')
if instance and field_name == 'name':
from olympia.amo.tasks import sync_object_to_basket
log.info(
'Triggering a sync of %s %s with basket because of %s change',
'addon', instance.pk, 'name')
sync_object_to_basket.delay('addon', instance.pk)
def attach_translations(addons):
"""Put all translations into a translations dict."""
attach_trans_dict(Addon, addons)
def attach_tags(addons):
addon_dict = {addon.id: addon for addon in addons}
qs = (Tag.objects.not_denied().filter(addons__in=addon_dict)
.values_list('addons__id', 'tag_text'))
for addon, tags in sorted_groupby(qs, lambda x: x[0]):
addon_dict[addon].tag_list = [t[1] for t in tags]
class AddonReviewerFlags(ModelBase):
addon = models.OneToOneField(
Addon, primary_key=True, on_delete=models.CASCADE,
related_name='reviewerflags')
needs_admin_code_review = models.BooleanField(default=False)
needs_admin_content_review = models.BooleanField(default=False)
needs_admin_theme_review = models.BooleanField(default=False)
auto_approval_disabled = models.BooleanField(default=False)
auto_approval_disabled_until_next_approval = models.NullBooleanField(
default=None)
auto_approval_delayed_until = models.DateTimeField(
default=None, null=True)
notified_about_auto_approval_delay = models.NullBooleanField(default=None)
notified_about_expiring_delayed_rejections = models.NullBooleanField(
default=None)
class MigratedLWT(OnChangeMixin, ModelBase):
lightweight_theme_id = models.PositiveIntegerField()
getpersonas_id = models.PositiveIntegerField()
static_theme = models.ForeignKey(
Addon, unique=True, related_name='migrated_from_lwt',
on_delete=models.CASCADE)
class Meta:
db_table = 'migrated_personas'
indexes = [
LongNameIndex(
fields=('static_theme',),
name='migrated_personas_static_theme_id_fk_addons_id'),
LongNameIndex(
fields=('getpersonas_id',),
name='migrated_personas_getpersonas_id'),
]
class AddonCategory(models.Model):
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
category = models.ForeignKey('Category', on_delete=models.CASCADE)
class Meta:
db_table = 'addons_categories'
indexes = [
models.Index(fields=('category', 'addon'),
name='category_addon_idx'),
]
constraints = [
models.UniqueConstraint(fields=('addon', 'category'),
name='addon_id'),
]
class AddonUserManager(ManagerBase):
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
# declaration/instantiation and understand the consequences.
super().__init__()
self.include_deleted = include_deleted
def get_queryset(self):
qs = super().get_queryset()
if not self.include_deleted:
qs = qs.exclude(role=amo.AUTHOR_ROLE_DELETED)
return qs
class AddonUser(OnChangeMixin, SaveUpdateMixin, models.Model):
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES_UNFILTERED)
listed = models.BooleanField(_(u'Listed'), default=True)
position = models.IntegerField(default=0)
unfiltered = AddonUserManager(include_deleted=True)
objects = AddonUserManager()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._original_role = self.role
class Meta:
# see Addon.Meta for details of why this base_manager_name is important
base_manager_name = 'unfiltered'
db_table = 'addons_users'
indexes = [
models.Index(fields=('listed',),
name='listed'),
models.Index(fields=('addon', 'user', 'listed'),
name='addon_user_listed_idx'),
models.Index(fields=('addon', 'listed'),
name='addon_listed_idx'),
]
constraints = [
models.UniqueConstraint(fields=('addon', 'user'),
name='addon_id'),
]
def delete(self):
# soft-delete
self.update(role=amo.AUTHOR_ROLE_DELETED)
@property
def is_deleted(self):
return self.role == amo.AUTHOR_ROLE_DELETED
@AddonUser.on_change
def watch_addon_user(old_attr=None, new_attr=None, instance=None, sender=None,
**kwargs):
instance.user.update_is_public()
# Update ES because authors is included.
update_search_index(sender=sender, instance=instance.addon, **kwargs)
def addon_user_sync(sender=None, instance=None, **kwargs):
# Basket doesn't care what role authors have or whether they are listed
# or not, it just needs to be updated whenever an author is added/removed.
created_or_deleted = kwargs.get('created', True) or instance.is_deleted
if created_or_deleted and instance.addon.status != amo.STATUS_DELETED:
from olympia.amo.tasks import sync_object_to_basket
log.info(
'Triggering a sync of %s %s with basket because of %s change',
'addon', instance.addon.pk, 'addonuser')
sync_object_to_basket.delay('addon', instance.addon.pk)
models.signals.post_delete.connect(watch_addon_user,
sender=AddonUser,
dispatch_uid='delete_addon_user')
models.signals.post_delete.connect(addon_user_sync,
sender=AddonUser,
dispatch_uid='delete_addon_user_sync')
models.signals.post_save.connect(addon_user_sync,
sender=AddonUser,
dispatch_uid='save_addon_user_sync')
class AddonUserPendingConfirmation(SaveUpdateMixin, models.Model):
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES)
listed = models.BooleanField(_(u'Listed'), default=True)
# Note: we don't bother with position for authors waiting confirmation,
# because it's impossible to properly reconcile it with the confirmed
# authors. Instead, authors waiting confirmation are displayed in the order
# they have been added, and when they are confirmed they end up in the
# last position by default.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._original_role = self.role
class Meta:
db_table = 'addons_users_pending_confirmation'
constraints = [
models.UniqueConstraint(fields=('addon', 'user'),
name='addons_users_pending_confirmation_'
'addon_id_user_id_38e3bb32_uniq'),
]
class AddonApprovalsCounter(ModelBase):
"""Model holding a counter of the number of times a listed version
    belonging to an add-on has been approved by a human. Reset every time a
listed version is auto-approved for this add-on.
Holds 2 additional date fields:
- last_human_review, the date of the last time a human fully reviewed the
add-on
- last_content_review, the date of the last time a human fully reviewed the
add-on content (not code).
"""
addon = models.OneToOneField(
Addon, primary_key=True, on_delete=models.CASCADE)
counter = models.PositiveIntegerField(default=0)
last_human_review = models.DateTimeField(null=True)
last_content_review = models.DateTimeField(null=True, db_index=True)
def __str__(self):
return u'%s: %d' % (str(self.pk), self.counter) if self.pk else u''
@classmethod
def increment_for_addon(cls, addon):
"""
Increment approval counter for the specified addon, setting the last
human review date and last content review date to now.
If an AddonApprovalsCounter already exists, it updates it, otherwise it
creates and saves a new instance.
"""
now = datetime.now()
data = {
'counter': 1,
'last_human_review': now,
'last_content_review': now,
}
obj, created = cls.objects.get_or_create(
addon=addon, defaults=data)
if not created:
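            # Using an F() expression keeps the increment atomic at the
            # database level instead of a read-modify-write in Python.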
data['counter'] = F('counter') + 1
obj.update(**data)
return obj
@classmethod
def reset_for_addon(cls, addon):
"""
Reset the approval counter (but not the dates) for the specified addon.
"""
obj, created = cls.objects.update_or_create(
addon=addon, defaults={'counter': 0})
return obj
@classmethod
def approve_content_for_addon(cls, addon, now=None):
"""
Set last_content_review for this addon.
"""
if now is None:
now = datetime.now()
return cls.reset_content_for_addon(addon, reset_to=now)
@classmethod
def reset_content_for_addon(cls, addon, reset_to=None):
"""
Reset the last_content_review date for this addon so it triggers
another review.
"""
obj, created = cls.objects.update_or_create(
addon=addon, defaults={'last_content_review': reset_to})
return obj
class DeniedGuid(ModelBase):
id = PositiveAutoField(primary_key=True)
guid = models.CharField(max_length=255, unique=True)
comments = models.TextField(default='', blank=True)
class Meta:
db_table = 'denied_guids'
def __str__(self):
return self.guid
class Category(OnChangeMixin, ModelBase):
id = PositiveAutoField(primary_key=True)
slug = SlugField(
max_length=50, help_text='Used in Category URLs.', db_index=False)
type = models.PositiveIntegerField(db_column='addontype_id',
choices=do_dictsort(amo.ADDON_TYPE))
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
null=True, blank=True,
db_column='application_id')
count = models.IntegerField('Addon count', default=0)
weight = models.IntegerField(
default=0, help_text='Category weight used in sort ordering')
misc = models.BooleanField(default=False)
addons = models.ManyToManyField(Addon, through='AddonCategory')
class Meta:
db_table = 'categories'
verbose_name_plural = 'Categories'
indexes = [
models.Index(fields=('type',), name='addontype_id'),
models.Index(fields=('application',), name='application_id'),
models.Index(fields=('slug',), name='categories_slug'),
]
@property
def name(self):
try:
value = CATEGORIES[self.application][self.type][self.slug].name
except KeyError:
# We can't find the category in the constants dict. This shouldn't
# happen, but just in case handle it by returning an empty string.
value = ''
return str(value)
def __str__(self):
return str(self.name)
def get_url_path(self):
try:
type = amo.ADDON_SLUGS[self.type]
except KeyError:
type = amo.ADDON_SLUGS[amo.ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
def to_static_category(self):
"""Return the corresponding StaticCategory instance from a Category."""
try:
staticcategory = CATEGORIES[self.application][self.type][self.slug]
except KeyError:
staticcategory = None
return staticcategory
@classmethod
def from_static_category(cls, static_category, save=False):
"""Return a Category instance created from a StaticCategory.
Does not save it into the database by default. Useful in tests."""
# We need to drop description and name - they are StaticCategory
# properties not present in the database.
data = dict(static_category.__dict__)
del data['name']
del data['description']
if save:
category, _ = Category.objects.get_or_create(
id=static_category.id, defaults=data)
return category
else:
return cls(**data)
class Preview(BasePreview, ModelBase):
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(
Addon, related_name='previews', on_delete=models.CASCADE)
caption = TranslatedField()
position = models.IntegerField(default=0)
sizes = JSONField(default=dict)
class Meta:
db_table = 'previews'
ordering = ('position', 'created')
indexes = [
models.Index(fields=('addon',), name='addon_id'),
models.Index(fields=('addon', 'position', 'created'),
name='addon_position_created_idx'),
]
dbsignals.pre_save.connect(save_signal, sender=Preview,
dispatch_uid='preview_translations')
models.signals.post_delete.connect(Preview.delete_preview_files,
sender=Preview,
dispatch_uid='delete_preview_files')
class AppSupport(ModelBase):
"""Cache to tell us if an add-on's current version supports an app."""
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min = models.BigIntegerField("Minimum app version", null=True)
max = models.BigIntegerField("Maximum app version", null=True)
class Meta:
db_table = 'appsupport'
indexes = [
models.Index(fields=('addon', 'app', 'min', 'max'),
name='minmax_idx'),
models.Index(fields=('app',), name='app_id_refs_id_481ce338'),
]
constraints = [
models.UniqueConstraint(fields=('addon', 'app'),
name='addon_id'),
]
class DeniedSlug(ModelBase):
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'addons_denied_slug'
def __str__(self):
return self.name
@classmethod
def blocked(cls, slug):
return slug.isdigit() or cls.objects.filter(name=slug).exists()
class FrozenAddon(models.Model):
"""Add-ons in this table never get a hotness score."""
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
class Meta:
db_table = 'frozen_addons'
def __str__(self):
return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
# Adjust the hotness of the FrozenAddon.
if instance.addon_id:
Addon.objects.get(id=instance.addon_id).update(hotness=0)
class ReplacementAddon(ModelBase):
guid = models.CharField(max_length=255, unique=True, null=True)
path = models.CharField(max_length=255, null=True,
help_text=_('Addon and collection paths need to '
'end with "/"'))
class Meta:
db_table = 'replacement_addons'
@staticmethod
def path_is_external(path):
return urlsplit(path).scheme in ['http', 'https']
def has_external_url(self):
return self.path_is_external(self.path)
def track_new_status(sender, instance, *args, **kw):
if kw.get('raw'):
        # The addon is being loaded from a fixture.
return
if kw.get('created'):
track_addon_status_change(instance)
models.signals.post_save.connect(track_new_status,
sender=Addon,
dispatch_uid='track_new_addon_status')
@Addon.on_change
def track_status_change(old_attr=None, new_attr=None, **kw):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if new_status != old_status:
track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
statsd.incr('addon_status_change.all.status_{}'
.format(addon.status))
class AddonGUID(ModelBase):
"""
Addons + guids will be added to this table whenever an addon is created.
For deleted addons it will contain an fk to the Addon instance even after
Addon.guid has been set to null (i.e. when it's been reuploaded).
"""
guid = models.CharField(max_length=255, null=False, db_index=True)
addon = models.OneToOneField(
Addon, null=False, on_delete=models.CASCADE, unique=True)
hashed_guid = models.CharField(max_length=64, null=False)
class Meta:
db_table = 'addons_reusedguid'
def save(self, *args, **kwargs):
self.hashed_guid = hashlib.sha256(self.guid.encode()).hexdigest()
super().save(*args, **kwargs)
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it's wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
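    Examples
    --------
    A minimal, illustrative call on synthetic data (the exact value returned
    depends on the sample):
    >>> import numpy as np
    >>> X = np.array([[1., 1.], [1.5, 1.5], [5., 5.], [5.5, 5.5]])
    >>> bandwidth = estimate_bandwidth(X, quantile=0.5)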
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
        Maximum number of iterations per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
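    Examples
    --------
    Illustrative call (which seeds come back depends on the binning):
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0.1, 0.1], [5., 5.]])
    >>> seeds = get_bin_seeds(X, bin_size=1.0)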
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity tends
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
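    Examples
    --------
    A small, illustrative fit: two well-separated blobs, so two cluster
    centers are expected.
    >>> import numpy as np
    >>> from sklearn.cluster import MeanShift
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ... [4, 7], [3, 5], [3, 6]])
    >>> clustering = MeanShift(bandwidth=2).fit(X)
    >>> clustering.cluster_centers_.shape
    (2, 2)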
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
vigilv/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
That this function takes time at least quadratic in n_samples. For large
datasets, it's wise to set that parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
        Bandwidth used in the flat kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
        The number of jobs to use for the computation. This works by running
        the mean-shift iterations for each seed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
    labels_ : array, shape=[n_samples]
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will
    tend towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
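    Examples
    --------
    A minimal illustrative fit; the import path below assumes the
    scikit-learn package layout, and only shapes are shown because the
    exact label ordering depends on the seeding:
    >>> import numpy as np
    >>> from sklearn.cluster import MeanShift
    >>> X = np.array([[1., 1.], [2., 1.], [1., 0.],
    ...               [4., 7.], [3., 5.], [3., 6.]])
    >>> ms = MeanShift(bandwidth=2).fit(X)
    >>> ms.cluster_centers_.shape
    (2, 2)
    >>> ms.labels_.shape
    (6,)
    >>> ms.predict([[0., 0.], [5., 5.]]).shape
    (2,)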
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
        ----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
hojonathanho/cgt | examples/broken/mnist_torchstyle.py | 22 | 3157 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original', data_home='~/cgt/data') # XXX
print(mnist.data.shape)
print(mnist.target.shape)
np.unique(mnist.target)
#plt.imshow(mnist.data[1, :].reshape(28, 28))
#plt.show()
# do some preprocessing
X = mnist.data
y = mnist.target
X = X.astype('float64')
X = X / 255
# train-test split (as [Joachims, 2006])
# TODO can define own validation split...
n_train = 60000
X_train = X[:n_train, :]
X_test = X[n_train:, :]
y_train = y[:n_train]
y_test = y[n_train:]
# construct the network
import nn
import cgt
from opt import sgd_update
N_LAYERS = 2
hid_size = X.shape[1] # 28 * 28
out_size = 10
inps = [cgt.matrix(dtype=cgt.floatX)]
param_list = []
for k in xrange(N_LAYERS):
tmp = nn.Affine(hid_size, hid_size)#(inps[k])
param_list.extend([tmp.weight, tmp.bias])
inps.append(cgt.tanh(tmp(inps[k])))
tmp = nn.Affine(hid_size, out_size)
param_list.extend([tmp.weight, tmp.bias])
logprobs = nn.logsoftmax(tmp(inps[-1]))
#dnn = nn.Module(inps[0:1], [logprobs])
#params = dnn.get_parameters()
# XXX think should just make this part of get_parameters
theta = nn.setup_contiguous_storage(param_list)
# XXX initialize
theta[:] = np.random.uniform(-0.08, 0.08, theta.shape)
# XXX taken from other demo, move
def ind2onehot(inds, n_cls):
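    # Expand a vector of integer class indices into a one-hot array with
    # n_cls columns; used below to feed the cross-entropy loss.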
out = np.zeros(list(inds.shape)+[n_cls,], cgt.floatX)
for k in xrange(inds.shape[0]):
out[k, inds[k].astype('int32')] = 1
#out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
b_size = 25
def make_loss_and_grad(net):
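    # Build a compiled CGT function mapping (X_batch, y_onehot) to the
    # cross-entropy loss (scaled by 1/b_size), the flattened gradient with
    # respect to all parameters, and the per-class log-probabilities.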
X_b = inps[0] #cgt.matrix(dtype=cgt.floatX)
y_onehot = cgt.matrix(dtype='i4')
outputs = [logprobs]
loss = nn.crossent(outputs[0], y_onehot) / b_size
#gradloss = cgt.grad(loss, params)
gradloss = cgt.grad(loss, param_list)
# XXX use flatcat function
grad = cgt.concatenate([x.flatten() for x in gradloss])
#grad = gradloss
return cgt.make_function([X_b, y_onehot], [loss, grad, logprobs])
f_loss_and_grad = make_loss_and_grad(None)
# train loop
# shuffle data
perm = np.random.permutation(np.arange(X_train.shape[0]))
X_train = X_train[perm, :]
y_train = y_train[perm]
class Table(object):
pass
state = Table()
state.theta = theta
state.step_size = 0.1
exploss = None
for k in xrange(X_train.shape[0] / b_size):
X_batch, y_batch = X_train[k*b_size:(k+1)*b_size, :], y_train[k*b_size:(k+1)*b_size]
loss, grad, logprobs = f_loss_and_grad(X_batch, ind2onehot(y_batch, 10))
exploss = loss if k == 0 else 0.99*exploss + 0.01*loss
print('iter %d, loss %f, exploss %f' % (k + 1, loss, exploss))
sgd_update(state, grad)
# test code
correct = 0
total = 0
print(X_test.shape)
print(y_test.shape)
for k in xrange(X_test.shape[0] / b_size):
X_batch, y_batch = X_test[k*b_size:(k+1)*b_size, :], y_test[k*b_size:(k+1)*b_size]
loss, grad, logprobs = f_loss_and_grad(X_batch, ind2onehot(y_batch, 10))
preds = logprobs.argmax(axis=1).flatten()
correct = correct + (preds == y_batch).sum()
total = total + b_size
print('%d/%d correct' % (correct, total))
| mit |
ericmckean/namebench | nb_third_party/dns/rdtypes/ANY/CNAME.py | 248 | 1092 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class CNAME(dns.rdtypes.nsbase.NSBase):
"""CNAME record
Note: although CNAME is officially a singleton type, dnspython allows
non-singleton CNAME rdatasets because such sets have been commonly
used by BIND and other nameservers for load balancing."""
pass
| apache-2.0 |
ibenes/speaker-oblivious-bottlenecks | plotting.py | 1 | 1980 | import numpy as np
import torch
from torch.autograd import Variable
import atexit
import matplotlib
import matplotlib.pyplot as plt
class Plotter():
def __init__(self, no_plot):
self._no_plot = no_plot
if self._no_plot:
return
atexit.register(plt.show)
self._cmap = matplotlib.colors.ListedColormap([
(1, 0, 0),
(0, 1, 0),
(0, 0, 1)
])
def plot(self, X, phn, spk, name="fig", transform=lambda x: x):
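        # Scatter the (optionally transformed) points: marker shape encodes
        # the speaker id, color encodes the phoneme class.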
if self._no_plot:
return self.last_axes_boundaries()
plt.figure(name)
for i, m in enumerate(['o', '+', 'x']):
mask = (spk.numpy() == i)
spk_set = X.numpy()[mask]
spk_set = Variable(torch.from_numpy(spk_set).float())
spk_set = transform(spk_set).data.numpy()
plt.scatter(spk_set[:, 0], spk_set[:, 1],
c=phn.numpy()[mask], cmap=self._cmap, marker=m)
self._show_plot()
return self.last_axes_boundaries()
def plot_preds(self, name, X, y, colors):
if self._no_plot:
return self.last_axes_boundaries()
plt.figure(name)
plt.scatter(X.numpy()[:, 0], X.numpy()[:, 1], c=colors)
self._show_plot()
return self.last_axes_boundaries()
def last_axes_boundaries(self):
axes = plt.gca()
ymin, ymax = axes.get_ylim()
xmin, xmax = axes.get_xlim()
return (xmin, ymin), (xmax, ymax)
def _show_plot(self):
plt.show(block=False)
plt.pause(0.05)
def plot_preds(plotter, name, b_l, u_r,
classifier, nb_steps=100):
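    # Build an evenly spaced nb_steps x nb_steps grid covering the rectangle
    # spanned by the bottom-left corner b_l and the upper-right corner u_r.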
X = np.mgrid[b_l[0]:u_r[0]:(u_r[0]-b_l[0])/nb_steps,
b_l[1]:u_r[1]:(u_r[1]-b_l[1])/nb_steps]
X = X.reshape(2, -1).T
X = torch.from_numpy(X).float()
print(X.size())
y = classifier(Variable(X))
colors = torch.exp(y).data.numpy()
plotter.plot_preds(name, X, y, colors)
| apache-2.0 |
vigilv/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise an error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
jmikko/EasyMKL | Python/toytest_EasyMKL.py | 1 | 3068 | """
@author: Michele Donini
@email: mdonini@math.unipd.it
Toy test of the algorithm EasyMKL.py.
"""
# Test:
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import roc_auc_score
from sklearn.datasets import make_classification
from EasyMKL import EasyMKL
from komd import KOMD
from cvxopt import matrix
import numpy as np
import matplotlib.pyplot as plt
# Binary classification problem
random_state = np.random.RandomState(0)
X, Y = make_classification(n_samples=1000,
n_features=50,
n_informative=10,
n_redundant=10,
n_repeated=10,
n_classes=2,
n_clusters_per_class=5,
weights=None,
flip_y=0.0,
class_sep=0.5,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=random_state)
X = matrix(X)
Y = matrix([1.0 if y>0 else -1.0 for y in Y])
# Train & Test:
pertr = 90
idtrain = range(0,len(Y) * pertr / 100)
idtest = range(len(Y) * pertr / 100,len(Y))
Ytr = Y[idtrain]
Yte = Y[idtest]
# Selected features for each weak kernel:
featlist = [[random_state.randint(0,X.size[1]) for i in range(5)] for j in range(50)]
# Generation of the weak Kernels:
klist = [rbf_kernel(X[:,f], gamma = 0.1) for f in featlist]
klisttr = [matrix(k)[idtrain,idtrain] for k in klist]
klistte = [matrix(k)[idtest,idtrain] for k in klist]
# EasyMKL initialization:
l = 0.5 # lambda
easy = EasyMKL(lam=l, tracenorm = True)
easy.train(klisttr,Ytr)
# Evaluation:
rtr = roc_auc_score(np.array(Ytr),np.array(easy.rank(klisttr)))
print 'AUC EasyMKL train:',rtr
ranktest = np.array(easy.rank(klistte))
rte = roc_auc_score(np.array(Yte),ranktest)
print 'AUC EasyMKL test:',rte
print 'weights of kernels:', easy.weights
# Comparison with respect the single kernels:
print '\n\n\n\n\nSingle kernel analysis using KOMD:'
YYtr = matrix(np.diag(list(Ytr)))
for idx,f in enumerate(featlist):
classifier = KOMD(lam=l, Kf = 'rbf', rbf_gamma = 0.1)
y_score = classifier.fit(X[idtrain,f], Ytr).decision_function(X[idtest,f])
print 'K with features:',f,'AUC test:',roc_auc_score(np.array(Yte), np.array(y_score))
print '\t\t margin train: \t\t',(easy.gamma.T * YYtr * matrix(klist[idx])[idtrain,idtrain] * YYtr * easy.gamma)[0]
print '\t\t weight assigned: \t',easy.weights[idx]
# Some (not especially useful) plots, drawn only if X.size[1] == 2 (2-dimensional datasets):
PLOT_THE_CLASS = True
ranktestnorm = [ (2 * (r - np.min(ranktest))) / (np.max(ranktest) - np.min(ranktest)) - 1.0 for r in ranktest]
if PLOT_THE_CLASS and X.size[1] == 2:
plt.figure(1)
plt.scatter(X[idtrain, 0], X[idtrain, 1], marker='*', s = 140, c=Ytr, cmap='spring')
plt.scatter(X[idtest, 0], X[idtest, 1], marker='o', s = 180, c=ranktestnorm, cmap='spring')
plt.colorbar()
| gpl-3.0 |
ewulczyn/talk_page_abuse | src/modeling/baselines.py | 1 | 7830 | import numpy as np
import pandas as pd
from scipy.stats import pearsonr,spearmanr
from scipy.stats import entropy as kl
from sklearn.metrics import roc_auc_score, f1_score, mean_squared_error
from math import sqrt
import os
import multiprocessing as mp
def get_annotator_ensemble_baseline(annotations, k, agg_function, eval_function, n_t, n_p):
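    """
    For every item with at least k annotations, draw two disjoint random
    groups of n_t and n_p annotations, aggregate each group with agg_function,
    and score the agreement of the two aggregates with eval_function.
    """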
assert(n_t + n_p <=k)
np.random.seed()
annotations = annotations.dropna()
groups = annotations.groupby(annotations.index)
groups = [e[1] for e in groups if e[1].shape[0]>=k]
d_ts = []
d_ps = []
for g in groups:
g = g.iloc[np.random.permutation(len(g))]
d_ts.append(g[0:n_t])
d_ps.append(g[n_t:(n_t+n_p)])
d_t = pd.concat(d_ts)
d_p = pd.concat(d_ps)
scores_t = agg_function(d_t).values
scores_p = agg_function(d_p).values
return {'score' : eval_function(scores_t, scores_p), 'n_t' : n_t, 'n_p': n_p }
def get_annotator_ensemble_baseline_helper(args):
return get_annotator_ensemble_baseline(*args)
def get_annotator_ensemble_baselines_parallel(args_list, n_jobs = 8):
"""
Run function in parallel with args in args_list, function must return dict of results.
"""
p = mp.Pool(min(n_jobs, len(args_list)))
res = p.map(get_annotator_ensemble_baseline_helper, args_list)
p.close()
p.join()
#res = [f(args) for args in args_list]
return pd.DataFrame(res)
def get_model_baseline(model_predictions, annotations, k, agg_function, eval_function, n_t):
"""
"""
assert(n_t <= k)
np.random.seed()
annotations = annotations.dropna()
groups = annotations.groupby(annotations.index)
groups = [e[1] for e in groups if e[1].shape[0]>=k]
d_ts = []
for g in groups:
g = g.iloc[np.random.permutation(len(g))]
d_ts.append(g[0:n_t])
d_t = pd.concat(d_ts)
scores_t = agg_function(d_t)
model_predictions = model_predictions.loc[scores_t.index]
return {'score' : eval_function(scores_t.values, model_predictions.values), 'n_t' : n_t }
def get_model_baseline_helper(args):
return get_model_baseline(*args)
def get_model_baselines_parallel(args_list, n_jobs = 8):
"""
Run function in parallel with args in args_list, function must return dict of results.
"""
p = mp.Pool(min(n_jobs, len(args_list)))
res = p.map(get_model_baseline_helper, args_list)
p.close()
p.join()
#res = [f(args) for args in args_list]
return pd.DataFrame(res)
# Aggregation Functions
def average(l):
"""
Average all labels with the same rev_id
"""
s = l.groupby(l.index).mean()
s.name = 'y'
return s
def remove_na(l):
l['na'] = l['na'].fillna(value = False)
s = l.groupby(l.index).filter(lambda x: np.mean(x['na']) < 0.5)
return s
def plurality(l):
"""
Take the most common label from all labels with the same rev_id.
"""
s = l.groupby(l.index).apply(lambda x:x.value_counts().index[0])
s.name = 'y'
return s
def empirical_dist(l, w = 0.0, index = None):
"""
Compute empirical distribution over all classes
using all labels with the same rev_id
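    Illustrative sketch (assumes a pandas Series of labels indexed by rev_id):
        l = pd.Series([0, 1, 1, 0, 0], index=[10, 10, 10, 20, 20])
        empirical_dist(l)  # row 10 -> [1/3, 2/3], row 20 -> [1.0, 0.0]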
"""
if not index:
index = sorted(list(set(l.dropna().values)))
data = {}
for k, g in l.groupby(l.index):
data[k] = g.value_counts().reindex(index).fillna(0) + w
labels = pd.DataFrame(data).T
labels = labels.fillna(0)
labels = labels.div(labels.sum(axis=1), axis=0)
return labels
# Regression Evaluation Metrics
def pearson(x,y):
return pearsonr(x,y)[0]
def spearman(x,y):
return spearmanr(x,y)[0]
def rmse(x,y):
return sqrt(mean_squared_error(x, y))
# Binary Classification Evaluation Metrics
def binary_roc_auc(true, pred):
true = (true > 0.5).astype(float)
return roc_auc_score(true, pred)
def binary_optimal_f1(true, pred, step = 1):
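    # Sweep candidate thresholds taken at every `step`-th percentile of the
    # predicted scores and return the best F1 obtained against the binarized
    # true labels.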
binary_true = (true > 0.5).astype(float)
ts = [np.percentile(pred, p) for p in np.arange(0, 101, step)]
f1s = []
for t in ts:
y_pred_t = pred >= t
f1 = f1_score(binary_true, y_pred_t)
        # Note: F1 as a function of the threshold is typically unimodal, so we can stop once the score starts falling
if len(f1s) > 0 and f1 < f1s[-1] :
return f1s[-1]
else:
f1s.append(f1)
return f1s[-1]
# Multi-Class Classification Evaluation Metrics
def one_hot(y):
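    # Accept either a vector of integer class ids or a matrix of class scores
    # (argmax taken per row) and return the corresponding one-hot matrix.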
m = y.shape[0]
if len(y.shape) == 1:
n = len(set(y.ravel()))
idxs = y.astype(int)
else:
idxs = y.argmax(axis = 1)
n = y.shape[1]
y_oh = np.zeros((m, n))
y_oh[list(range(m)), idxs] = 1
return y_oh
def expectation(y):
classes = np.arange(y.shape[1])
return y.dot(classes)
def multi_class_roc_auc(true, pred, average = 'macro'):
true = one_hot(true)
#print(true)
return roc_auc_score(true, pred, average = average)
def multi_class_spearman(true, pred):
return spearman(expectation(true), expectation(pred))
def multi_class_pearson(true, pred):
return pearson(expectation(true), expectation(pred))
def cross_entropy(x, y):
logy = np.log(y)
logy[np.isinf(logy)] = 0
return - np.multiply(x,logy).sum(axis=1).mean()
def kl_divergence(x, y):
return kl(x.T, y.T).mean()
def tidy_labels(d):
classes = ['not_attack', 'other', 'quoting', 'recipient', 'third_party']
for e in classes:
d[e] = d.is_harassment_or_attack.str.contains(e).astype(float)
d['attack'] = d.is_harassment_or_attack.str.contains('|'.join(classes[1:])).astype(float)
return d
def map_aggression_score_to_2class(l):
if l<0.0:
return 1
if l >= 0.0:
return 0
def load_comments_and_labels(task):
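    # Load the annotation TSVs for every split and, for each namespace/sample
    # pair, precompute several per-rev_id aggregations of the task labels:
    # empirical distribution, one-hot, weighted average and plurality.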
base_path = '../../data/annotations/split'
splits = ['train', 'dev', 'test', 'baseline']
nss = ['user', 'article']
samples = ['blocked', 'random']
dfs = {}
for split in splits:
path = os.path.join(base_path, split, 'annotations.tsv')
df = pd.read_csv(path, sep = '\t')
#print(df.shape)
#print(len(df['rev_id'].unique()))
df.index = df.rev_id
dfs[split] = df
data = {}
for ns in nss:
data[ns] = {}
for sample in samples:
data[ns][sample] = {}
for split in splits:
data[ns][sample][split] = {'x':{}, 'y':{}}
df = dfs[split].query("ns=='%s' and sample=='%s'" % (ns, sample))
comments = df.drop_duplicates(subset='rev_id')['clean_diff']
#print(comments.shape)
labels = df[task]
data[ns][sample][split]['x']['comments'] = comments
ed = empirical_dist(labels)
data[ns][sample][split]['y']['empirical_dist'] = ed
data[ns][sample][split]['y']['one_hot'] = ed.apply(lambda x: (x > (1.0 / ed.shape[1])).astype(int))
weights = pd.Series(ed.columns, index=ed.columns)
data[ns][sample][split]['y']['average'] = (ed * weights).sum(1)
data[ns][sample][split]['y']['plurality'] = ed.idxmax(axis = 1)
return data
def assemble_data(data, xtype, ytype, nss = ['user', 'article'], samples = ['random', 'blocked'], splits = ['train', 'dev', 'test']):
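    # Concatenate the requested namespace/sample/split combinations into a
    # single (comments, labels) pair of arrays, aligning comments to the
    # label index.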
xs = []
ys = []
for ns in nss:
for sample in samples:
for split in splits:
x = data[ns][sample][split]['x'][xtype]
#print(x.shape)
y = data[ns][sample][split]['y'][ytype]
#print(y.shape)
x = x.loc[y.index]
#print(x.shape)
xs.append(x)
ys.append(y)
x = pd.concat(xs).values
#print(x.shape)
y = pd.concat(ys).values
#print(y.shape)
return x, y
| apache-2.0 |
neurodata/ndstore | django/nduser/migrations/0001_initial.py | 2 | 7527 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Backup',
fields=[
('backup_id', models.AutoField(serialize=False, primary_key=True)),
('protocol', models.CharField(max_length=255, choices=[(b'local', b'file system'), (b's3', b'Amazon S3')])),
('filename', models.CharField(max_length=4096)),
('jsonfile', models.CharField(max_length=4096)),
('description', models.CharField(default=b'', max_length=4096)),
('datetimestamp', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(default=0, choices=[(0, b'Done'), (1, b'Processing'), (2, b'Failed')])),
],
options={
'db_table': 'backups',
'managed': True,
},
),
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('channel_name', models.CharField(max_length=255)),
('channel_description', models.CharField(max_length=4096, blank=True)),
('channel_type', models.CharField(max_length=255, choices=[(b'image', b'IMAGES'), (b'annotation', b'ANNOTATIONS'), (b'timeseries', b'TIMESERIES')])),
('resolution', models.IntegerField(default=0)),
('propagate', models.IntegerField(default=0, choices=[(0, b'NOT PROPAGATED'), (2, b'PROPAGATED')])),
('channel_datatype', models.CharField(max_length=255, choices=[(b'uint8', b'uint8'), (b'uint16', b'uint16'), (b'uint32', b'uint32'), (b'uint64', b'uint64'), (b'float32', b'float32')])),
('readonly', models.IntegerField(default=0, choices=[(1, b'Yes'), (0, b'No')])),
('exceptions', models.IntegerField(default=0, choices=[(1, b'Yes'), (0, b'No')])),
('startwindow', models.IntegerField(default=0)),
('endwindow', models.IntegerField(default=0)),
('default', models.BooleanField(default=False)),
('header', models.CharField(default=b'', max_length=8192, blank=True)),
],
options={
'db_table': 'channels',
'managed': True,
},
),
migrations.CreateModel(
name='Dataset',
fields=[
('dataset_name', models.CharField(max_length=255, serialize=False, verbose_name=b'Name of the Image dataset', primary_key=True)),
('dataset_description', models.CharField(max_length=4096, blank=True)),
('public', models.IntegerField(default=0, choices=[(0, b'Private'), (1, b'Public')])),
('ximagesize', models.IntegerField()),
('yimagesize', models.IntegerField()),
('zimagesize', models.IntegerField()),
('xoffset', models.IntegerField(default=0)),
('yoffset', models.IntegerField(default=0)),
('zoffset', models.IntegerField(default=0)),
('xvoxelres', models.FloatField(default=1.0)),
('yvoxelres', models.FloatField(default=1.0)),
('zvoxelres', models.FloatField(default=1.0)),
('scalingoption', models.IntegerField(default=0, choices=[(0, b'Z Slices'), (1, b'Isotropic')])),
('scalinglevels', models.IntegerField(default=0)),
('starttime', models.IntegerField(default=0)),
('endtime', models.IntegerField(default=0)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'datasets',
'managed': True,
},
),
migrations.CreateModel(
name='Project',
fields=[
('project_name', models.CharField(max_length=255, serialize=False, primary_key=True)),
('project_description', models.CharField(max_length=4096, blank=True)),
('public', models.IntegerField(default=0, choices=[(0, b'Private'), (1, b'Public')])),
('host', models.CharField(default=b'localhost', max_length=255, choices=[(b'dsp061.pha.jhu.edu', b'default'), (b'dsp061.pha.jhu.edu', b'dsp061'), (b'dsp062.pha.jhu.edu', b'dsp062'), (b'dsp063.pha.jhu.edu', b'dsp063'), (b'localhost', b'Debug')])),
('kvengine', models.CharField(default=b'MySQL', max_length=255, choices=[(b'MySQL', b'MySQL'), (b'Cassandra', b'Cassandra'), (b'Riak', b'Riak')])),
('kvserver', models.CharField(default=b'localhost', max_length=255, choices=[(b'dsp061.pha.jhu.edu', b'default'), (b'dsp061.pha.jhu.edu', b'dsp061'), (b'dsp062.pha.jhu.edu', b'dsp062'), (b'dsp063.pha.jhu.edu', b'dsp063'), (b'localhost', b'Debug')])),
('nd_version', models.CharField(default=b'0.6', max_length=255)),
('schema_version', models.CharField(default=b'0.6', max_length=255)),
('dataset', models.ForeignKey(to='nduser.Dataset')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'projects',
'managed': True,
},
),
migrations.CreateModel(
name='Token',
fields=[
('token_name', models.CharField(max_length=255, serialize=False, primary_key=True)),
('token_description', models.CharField(max_length=4096, blank=True)),
('public', models.IntegerField(default=0, choices=[(0, b'Private'), (1, b'Public')])),
('project', models.ForeignKey(to='nduser.Project')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'tokens',
'managed': True,
},
),
migrations.CreateModel(
name='NIFTIHeader',
fields=[
('channel', models.OneToOneField(primary_key=True, serialize=False, to='nduser.Channel')),
('header', models.BinaryField(max_length=1024)),
('affine', models.BinaryField(max_length=1024)),
],
options={
'db_table': 'nifti_header',
'managed': True,
},
),
migrations.AddField(
model_name='channel',
name='project',
field=models.ForeignKey(to='nduser.Project'),
),
migrations.AddField(
model_name='backup',
name='channel',
field=models.ForeignKey(blank=True, to='nduser.Channel', null=True),
),
migrations.AddField(
model_name='backup',
name='project',
field=models.ForeignKey(to='nduser.Project'),
),
migrations.AlterUniqueTogether(
name='channel',
unique_together=set([('project', 'channel_name')]),
),
migrations.AlterUniqueTogether(
name='backup',
unique_together=set([('project', 'datetimestamp')]),
),
]
| apache-2.0 |
wummel/linkchecker | third_party/miniboa-r42/miniboa/async.py | 9 | 6849 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# miniboa/async.py
# Copyright 2009 Jim Storch
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#------------------------------------------------------------------------------
"""
Handle Asynchronous Telnet Connections.
"""
import socket
import select
import sys
from miniboa.telnet import TelnetClient
from miniboa.error import BogConnectionLost
## Cap sockets to 512 on Windows because winsock can only process 512 at a time
if sys.platform == 'win32':
MAX_CONNECTIONS = 512
## Cap sockets to 1000 on Linux because you can only have 1024 file descriptors
else:
MAX_CONNECTIONS = 1000
#-----------------------------------------------------Dummy Connection Handlers
def _on_connect(client):
"""
Placeholder new connection handler.
"""
print "++ Opened connection to %s, sending greeting..." % client.addrport()
client.send("Greetings from Miniboa! "
" Now it's time to add your code.\n")
def _on_disconnect(client):
"""
Placeholder lost connection handler.
"""
print "-- Lost connection to %s" % client.addrport()
#-----------------------------------------------------------------Telnet Server
class TelnetServer(object):
"""
Poll sockets for new connections and sending/receiving data from clients.
"""
def __init__(self, port=7777, host='', on_connect=_on_connect,
on_disconnect=_on_disconnect, timeout=0.005):
"""
Create a new Telnet Server.
        port -- Port to listen for new connections on. On UNIX-like platforms,
            you may need root access to use ports under 1025.
host -- Address of the LOCAL network interface to listen on. You
can usually leave this blank unless you want to restrict traffic
to a specific network device. This will usually NOT be the same
as the Internet address of your server.
on_connect -- function to call with new telnet connections
on_disconnect -- function to call when a client's connection dies,
either through a terminated session or client.active being set
to False.
        timeout -- amount of time that poll() will wait for user input
before returning. Also frees a slice of CPU time.
"""
self.port = port
self.host = host
self.on_connect = on_connect
self.on_disconnect = on_disconnect
self.timeout = timeout
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((host, port))
self.address = server_socket.getsockname()
server_socket.listen(5)
self.server_socket = server_socket
self.server_fileno = server_socket.fileno()
## Dictionary of active clients,
## key = file descriptor, value = TelnetClient (see miniboa.telnet)
self.clients = {}
def client_count(self):
"""
Returns the number of active connections.
"""
return len(self.clients)
def client_list(self):
"""
Returns a list of connected clients.
"""
return self.clients.values()
def poll(self):
"""
Perform a non-blocking scan of recv and send states on the server
and client connection sockets. Process new connection requests,
        read incoming data, and send outgoing data. Sends and receives may
be partial.
"""
#print len(self.connections)
## Build a list of connections to test for receive data pending
recv_list = [self.server_fileno] # always add the server
for client in self.clients.values():
if client.active:
recv_list.append(client.fileno)
## Delete inactive connections from the dictionary
else:
#print "-- Lost connection to %s" % client.addrport()
#client.sock.close()
self.on_disconnect(client)
del self.clients[client.fileno]
## Build a list of connections that need to send data
send_list = []
for client in self.clients.values():
if client.send_pending:
send_list.append(client.fileno)
## Get active socket file descriptors from select.select()
try:
rlist, slist, elist = select.select(recv_list, send_list, [],
self.timeout)
except select.error, err:
## If we can't even use select(), game over man, game over
print >> sys.stderr, ("!! FATAL SELECT error '%d:%s'!"
% (err[0], err[1]))
sys.exit(1)
        ## Process socket file descriptors with data to receive
for sock_fileno in rlist:
## If it's coming from the server's socket then this is a new
## connection request.
if sock_fileno == self.server_fileno:
try:
sock, addr_tup = self.server_socket.accept()
except socket.error, err:
print >> sys.stderr, ("!! ACCEPT error '%d:%s'." %
(err[0], err[1]))
continue
## Check for maximum connections
if self.client_count() >= MAX_CONNECTIONS:
print '?? Refusing new connection; maximum in use.'
sock.close()
continue
new_client = TelnetClient(sock, addr_tup)
#print "++ Opened connection to %s" % new_client.addrport()
## Add the connection to our dictionary and call handler
self.clients[new_client.fileno] = new_client
self.on_connect(new_client)
else:
                ## Call the connection's receive method
try:
self.clients[sock_fileno].socket_recv()
except BogConnectionLost:
self.clients[sock_fileno].deactivate()
## Process sockets with data to send
for sock_fileno in slist:
## Call the connection's send method
self.clients[sock_fileno].socket_send()
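#------------------------------------------------------------------Usage sketch
# Hedged usage sketch, not part of the original miniboa distribution: it only
# demonstrates the call pattern implied by the docstrings above -- create a
# TelnetServer and drive it by calling poll() in a loop. The port number and
# greeting text are arbitrary placeholder choices.
if __name__ == '__main__':
    def demo_on_connect(client):
        client.send("Connected to the demo server.\n")
    demo_server = TelnetServer(port=7777, on_connect=demo_on_connect)
    while True:
        demo_server.poll()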
| gpl-2.0 |
google-research/evoflow | benchmark/benchmark/problems/tsp.py | 1 | 3737 | import json
import tensorflow as tf
from collections import defaultdict
import numpy as np
import evoflow.backend as B
from evoflow.engine import EvoFlow
from evoflow.engine import FitnessFunction
from evoflow.selection import SelectFittest
from evoflow.population import uniform_population
from evoflow.ops import Input, Shuffle, Reverse1D
class TSPFitness(FitnessFunction):
def __init__(self,
distances,
num_cities,
baseline_distance=0,
penality=100000,
**kwargs):
"""
"""
self.num_cities = num_cities
self.distances = B.flatten(distances)
self.penality = penality
self.baseline_distance = int(baseline_distance)
super(TSPFitness, self).__init__(**kwargs)
def call(self, population, normalize=True):
"""
        Parallel lookup and distance computation:
        - multiply the population by num_cities and add the population rolled
          by one position, which gives the flat indices to look up in the
          flattened distance array
        - reduce_sum over each row gives the total tour distance, which the
          selection strategy minimizes (mode='min')
"""
shifted_population = B.roll(population, 1, axis=1)
idxs = (population * self.num_cities) + shifted_population
distances = B.take(self.distances, idxs)
# total distance
total_distance = B.sum(distances, axis=1)
return total_distance
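    # Worked example (illustrative values only): with num_cities = 4, a tour
    # row [2, 0, 3, 1] rolled by one position becomes [1, 2, 0, 3], so the
    # flat lookup indices are [2*4+1, 0*4+2, 3*4+0, 1*4+3] = [9, 2, 12, 7],
    # i.e. the distances between each city and the previous one on the cyclic
    # tour: d(2,1), d(0,2), d(3,0), d(1,3). Their sum is the tour length that
    # SelectFittest(mode='min') minimizes in solve_tsp below.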
def tsp_setup(num_cities):
# get files
zip_fname = "tsp_%s.zip" % num_cities
origin = "https://storage.googleapis.com/evoflow/datasets/tsp/cities_%s.zip" % num_cities # noqa
download_path = tf.keras.utils.get_file(zip_fname, origin, extract=True)
# process city info
json_fname = "%s/cities_%s.json" % (download_path.replace(zip_fname,
''), num_cities)
cities = json.loads(open(json_fname).read())
idx2city = {}
for city in cities:
idx2city[city['idx']] = city
chart_data = defaultdict(list)
for city in cities:
chart_data['lat'].append(city['lat'])
chart_data['lon'].append(city['lon'])
chart_data['name'].append(city['name'])
chart_data['population'].append(city['population'])
distance_fname = "%sdistances_%s.npz" % (download_path.replace(
zip_fname, ''), num_cities)
distances = np.load(distance_fname)['distances']
distances = distances.astype(B.intx())
return cities, chart_data, distances, idx2city
def solve_tsp(args):
population_shape, generations, cities, chart_data, distances, idx2city = args # noqa
NUM_CITIES = len(cities)
NUM_REVERSE_OPERATIONS = 4
MAX_REVERSE_PROBABILITY = 0.3
REVERSE_POPULATION_FRACTION = 0.3
MIN_REVERSE_PROBABILITY = 0.1
SHUFFLE_POPULATION_FRACTION = 0.2
population = uniform_population(population_shape)
rpi = MAX_REVERSE_PROBABILITY / NUM_REVERSE_OPERATIONS
reverse_probabilty = 1 - rpi
# Evolution model
inputs = Input(shape=population.shape)
x = inputs
for idx in range(NUM_REVERSE_OPERATIONS):
x = Reverse1D(population_fraction=REVERSE_POPULATION_FRACTION,
max_reverse_probability=reverse_probabilty)(x)
reverse_probabilty = max(reverse_probabilty - rpi,
MIN_REVERSE_PROBABILITY)
x = Shuffle(population_fraction=SHUFFLE_POPULATION_FRACTION)(x)
outputs = x
ef = EvoFlow(inputs, outputs, debug=False)
evolution_strategy = SelectFittest(mode='min')
fitness_fn = TSPFitness(distances, NUM_CITIES)
ef.compile(evolution_strategy, fitness_fn)
ef.evolve(population, generations=generations, verbose=0)
| apache-2.0 |
Clyde-fare/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
vigilv/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 346 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features actually impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/cluster/plot_affinity_propagation.py | 346 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
pazaan/decoding-contour-next-link | javaobj.py | 1 | 31574 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides functions for reading (and, as a work in progress, writing) Java
objects serialized by ObjectOutputStream. This form of object
representation is a standard data interchange format in the Java world.
The javaobj module exposes an API familiar to users of the standard modules
such as marshal, pickle and json.
See: http://download.oracle.com/javase/6/docs/platform/serialization/spec/protocol.html
"""
import io
import struct
try:
import logging
except ImportError:
def log_debug(message, ident=0):
pass
def log_error(message, ident=0):
pass
else:
_log = logging.getLogger(__name__)
def log_debug(message, ident=0):
_log.debug(" " * (ident * 2) + str(message))
def log_error(message, ident=0):
_log.error(" " * (ident * 2) + str(message))
__version__ = "$Revision: 20 $"
def load(file_object, *args):
"""
Deserializes Java primitive data and objects serialized by ObjectOutputStream
from a file-like object.
"""
marshaller = JavaObjectUnmarshaller(file_object)
for t in args:
marshaller.add_transformer(t)
marshaller.add_transformer(DefaultObjectTransformer())
return marshaller.readObject()
def load_all(file_object):
marshaller = JavaObjectUnmarshaller(file_object)
marshaller.add_transformer(DefaultObjectTransformer())
res = []
while marshaller.data_left:
res.append(marshaller.readObject())
return res
def loads(string, *args):
"""
Deserializes Java objects and primitive data serialized by ObjectOutputStream
from a string.
"""
    f = io.BytesIO(string)  # serialized Java data is binary, so use a bytes buffer
marshaller = JavaObjectUnmarshaller(f)
for t in args:
marshaller.add_transformer(t)
marshaller.add_transformer(DefaultObjectTransformer())
return marshaller.readObject()
def dumps(object, *args):
"""
    Serializes Java primitive data and objects previously unmarshalled by load(s) into a string.
"""
marshaller = JavaObjectMarshaller()
for t in args:
marshaller.add_transformer(t)
return marshaller.dump(object)
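# Hedged usage sketch (illustration only; "obj.ser" is a hypothetical file
# written by Java's ObjectOutputStream): the module-level API mirrors
# marshal/pickle/json, so deserialization is a single load()/loads() call.
#
#     with open("obj.ser", "rb") as fd:
#         java_obj = load(fd)
#     # fields of the Java object are exposed as Python attributes,
#     # e.g. java_obj.someField, plus java_obj.classdesc and
#     # java_obj.annotations for stream metadata.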
class JavaClass(object):
def __init__(self):
self.name = None
self.serialVersionUID = None
self.flags = None
self.handle = None
self.fields_names = []
self.fields_types = []
self.superclass = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return "[%s:0x%X]" % (self.name, self.serialVersionUID)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (self.name == other.name and
self.serialVersionUID == other.serialVersionUID and
self.flags == other.flags and
self.fields_names == other.fields_names and
self.fields_types == other.fields_types and
self.superclass == other.superclass)
class JavaObject(object):
def __init__(self):
self.classdesc = None
self.annotations = []
def get_class(self):
return self.classdesc
def __str__(self):
return self.__repr__()
def __repr__(self):
name = "UNKNOWN"
if self.classdesc:
name = self.classdesc.name
return "<javaobj:%s>" % name
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
res = (self.classdesc == other.classdesc and
self.annotations == other.annotations)
for name in self.classdesc.fields_names:
res = (res and
getattr(self, name) == getattr(other, name))
return res
def copy(self, new_object):
new_object.classdesc = self.classdesc
new_object.annotations = self.annotations
for name in self.classdesc.fields_names:
new_object.__setattr__(name, getattr(self, name))
class JavaString(str):
def __init__(self, *args, **kwargs):
str.__init__(self, *args, **kwargs)
def __eq__(self, other):
if not isinstance(other, str):
return False
return str.__eq__(self, other)
class JavaEnum(JavaObject):
def __init__(self, constant=None):
super(JavaEnum, self).__init__()
self.constant = constant
class JavaArray(list, JavaObject):
def __init__(self, classdesc=None):
list.__init__(self)
JavaObject.__init__(self)
self.classdesc = classdesc
class JavaObjectConstants:
STREAM_MAGIC = 0xaced
STREAM_VERSION = 0x05
TC_NULL = 0x70
TC_REFERENCE = 0x71
TC_CLASSDESC = 0x72
TC_OBJECT = 0x73
TC_STRING = 0x74
TC_ARRAY = 0x75
TC_CLASS = 0x76
TC_BLOCKDATA = 0x77
TC_ENDBLOCKDATA = 0x78
TC_RESET = 0x79
TC_BLOCKDATALONG = 0x7A
TC_EXCEPTION = 0x7B
TC_LONGSTRING = 0x7C
TC_PROXYCLASSDESC = 0x7D
TC_ENUM = 0x7E
TC_MAX = 0x7E
# classDescFlags
SC_WRITE_METHOD = 0x01 # if SC_SERIALIZABLE
SC_BLOCK_DATA = 0x08 # if SC_EXTERNALIZABLE
SC_SERIALIZABLE = 0x02
SC_EXTERNALIZABLE = 0x04
SC_ENUM = 0x10
# type definition chars (typecode)
TYPE_BYTE = 'B' # 0x42
TYPE_CHAR = 'C'
TYPE_DOUBLE = 'D' # 0x44
TYPE_FLOAT = 'F' # 0x46
TYPE_INTEGER = 'I' # 0x49
TYPE_LONG = 'J' # 0x4A
TYPE_SHORT = 'S' # 0x53
TYPE_BOOLEAN = 'Z' # 0x5A
TYPE_OBJECT = 'L' # 0x4C
TYPE_ARRAY = '[' # 0x5B
# list of supported typecodes listed above
TYPECODES_LIST = [
# primitive types
TYPE_BYTE,
TYPE_CHAR,
TYPE_DOUBLE,
TYPE_FLOAT,
TYPE_INTEGER,
TYPE_LONG,
TYPE_SHORT,
TYPE_BOOLEAN,
# object types
TYPE_OBJECT,
TYPE_ARRAY ]
BASE_REFERENCE_IDX = 0x7E0000
class JavaObjectUnmarshaller(JavaObjectConstants):
def __init__(self, stream=None):
self.opmap = {
self.TC_NULL: self.do_null,
self.TC_CLASSDESC: self.do_classdesc,
self.TC_OBJECT: self.do_object,
self.TC_STRING: self.do_string,
self.TC_LONGSTRING: self.do_string_long,
self.TC_ARRAY: self.do_array,
self.TC_CLASS: self.do_class,
self.TC_BLOCKDATA: self.do_blockdata,
self.TC_BLOCKDATALONG: self.do_blockdata_long,
self.TC_REFERENCE: self.do_reference,
self.TC_ENUM: self.do_enum,
            self.TC_ENDBLOCKDATA: self.do_null,  # note that we are reusing do_null
}
self.current_object = None
self.reference_counter = 0
self.references = []
self.object_stream = stream
self._readStreamHeader()
self.object_transformers = []
self.data_left = True
def readObject(self):
try:
opcode, res = self._read_and_exec_opcode(ident=0) # TODO: add expects
position_bak = self.object_stream.tell()
the_rest = self.object_stream.read()
if len(the_rest):
log_error("Warning!!!!: Stream still has %s bytes left. Enable debug mode of logging to see the hexdump." % len(the_rest))
log_debug(self._create_hexdump(the_rest, position_bak))
self.data_left = True
else:
log_debug("Java Object unmarshalled succesfully!")
self.data_left = False
self.object_stream.seek(position_bak)
return res
except Exception as e:
self._oops_dump_state()
raise
def add_transformer(self, transformer):
self.object_transformers.append(transformer)
def _readStreamHeader(self):
(magic, version) = self._readStruct(">HH")
if magic != self.STREAM_MAGIC or version != self.STREAM_VERSION:
raise IOError("The stream is not java serialized object. Invalid stream header: %04X%04X" % (magic, version))
def _read_and_exec_opcode(self, ident=0, expect=None):
position = self.object_stream.tell()
(opid, ) = self._readStruct(">B")
log_debug("OpCode: 0x%X (at offset: 0x%X)" % (opid, position), ident)
if expect and opid not in expect:
raise IOError("Unexpected opcode 0x%X" % opid)
handler = self.opmap.get(opid)
if not handler:
raise RuntimeError("Unknown OpCode in the stream: 0x%x" % opid)
return (opid, handler(ident=ident))
def _readStruct(self, unpack):
length = struct.calcsize(unpack)
ba = self.object_stream.read(length)
if len(ba) != length:
raise RuntimeError("Stream has been ended unexpectedly while unmarshaling. (%d vs %d)" % (len(ba), length))
return struct.unpack(unpack, ba)
def _readString(self, mod="H"):
(length, ) = self._readStruct(">" + mod)
ba = self.object_stream.read(length)
return ba
def do_classdesc(self, parent=None, ident=0):
# TC_CLASSDESC className serialVersionUID newHandle classDescInfo
# classDescInfo:
# classDescFlags fields classAnnotation superClassDesc
# classDescFlags:
# (byte) // Defined in Terminal Symbols and Constants
# fields:
# (short)<count> fieldDesc[count]
# fieldDesc:
# primitiveDesc
# objectDesc
# primitiveDesc:
# prim_typecode fieldName
# objectDesc:
# obj_typecode fieldName className1
clazz = JavaClass()
log_debug("[classdesc]", ident)
ba = self._readString()
clazz.name = ba
log_debug("Class name: %s" % ba, ident)
(serialVersionUID, newHandle, classDescFlags) = self._readStruct(">LLB")
clazz.serialVersionUID = serialVersionUID
clazz.flags = classDescFlags
clazz.handle = newHandle
self._add_reference(clazz, ident)
log_debug("Serial: 0x%X newHandle: 0x%X. classDescFlags: 0x%X" % (serialVersionUID, newHandle, classDescFlags), ident)
(length, ) = self._readStruct(">H")
log_debug("Fields num: 0x%X" % length, ident)
clazz.fields_names = []
clazz.fields_types = []
for fieldId in range(length):
(typecode, ) = self._readStruct(">B")
field_name = self._readString()
field_type = None
field_type = self._convert_char_to_type(typecode)
if field_type == self.TYPE_ARRAY:
opcode, field_type = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE])
assert type(field_type) is JavaString
# if field_type is not None:
# field_type = "array of " + field_type
# else:
# field_type = "array of None"
elif field_type == self.TYPE_OBJECT:
opcode, field_type = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE])
assert type(field_type) is JavaString
log_debug("FieldName: 0x%X" % typecode + " " + str(field_name) + " " + str(field_type), ident)
assert field_name is not None
assert field_type is not None
clazz.fields_names.append(field_name)
clazz.fields_types.append(field_type)
if parent:
parent.__fields = clazz.fields_names
parent.__types = clazz.fields_types
# classAnnotation
(opid, ) = self._readStruct(">B")
log_debug("OpCode: 0x%X" % opid, ident)
if opid != self.TC_ENDBLOCKDATA:
raise NotImplementedError("classAnnotation isn't implemented yet")
# superClassDesc
opcode, superclassdesc = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE])
log_debug(str(superclassdesc), ident)
clazz.superclass = superclassdesc
return clazz
def do_blockdata(self, parent=None, ident=0):
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
log_debug("[blockdata]", ident)
(length, ) = self._readStruct(">B")
ba = self.object_stream.read(length)
return ba
def do_blockdata_long(self, parent=None, ident=0):
# TC_BLOCKDATALONG (int)<size> (byte)[size]
log_debug("[blockdata]", ident)
(length, ) = self._readStruct(">I")
ba = self.object_stream.read(length)
return ba
def do_class(self, parent=None, ident=0):
# TC_CLASS classDesc newHandle
log_debug("[class]", ident)
# TODO: what to do with "(ClassDesc)prevObject". (see 3rd line for classDesc:)
opcode, classdesc = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE])
log_debug("Classdesc: %s" % classdesc, ident)
self._add_reference(classdesc, ident)
return classdesc
def do_object(self, parent=None, ident=0):
# TC_OBJECT classDesc newHandle classdata[] // data for each class
java_object = JavaObject()
log_debug("[object]", ident)
log_debug("java_object.annotations just after instantination: " + str(java_object.annotations), ident)
# TODO: what to do with "(ClassDesc)prevObject". (see 3rd line for classDesc:)
opcode, classdesc = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE])
        # self.TC_REFERENCE isn't shown in the spec, but actually appears here
self._add_reference(java_object, ident)
# classdata[]
# Store classdesc of this object
java_object.classdesc = classdesc
if classdesc.flags & self.SC_EXTERNALIZABLE and not classdesc.flags & self.SC_BLOCK_DATA:
raise NotImplementedError("externalContents isn't implemented yet") # TODO:
if classdesc.flags & self.SC_SERIALIZABLE:
# create megalist
tempclass = classdesc
megalist = []
megatypes = []
while tempclass:
log_debug(">>> " + str(tempclass.fields_names) + " " + str(tempclass), ident)
log_debug(">>> " + str(tempclass.fields_types), ident)
fieldscopy = tempclass.fields_names[:]
fieldscopy.extend(megalist)
megalist = fieldscopy
fieldscopy = tempclass.fields_types[:]
fieldscopy.extend(megatypes)
megatypes = fieldscopy
tempclass = tempclass.superclass
log_debug("Values count: %s" % str(len(megalist)), ident)
log_debug("Prepared list of values: %s" % str(megalist), ident)
log_debug("Prepared list of types: %s" % str(megatypes), ident)
for field_name, field_type in zip(megalist, megatypes):
res = self._read_value(field_type, ident, name=field_name)
java_object.__setattr__(field_name, res)
if classdesc.flags & self.SC_SERIALIZABLE and classdesc.flags & self.SC_WRITE_METHOD or classdesc.flags & self.SC_EXTERNALIZABLE and classdesc.flags & self.SC_BLOCK_DATA:
# objectAnnotation
log_debug("java_object.annotations before: " + str(java_object.annotations), ident)
while opcode != self.TC_ENDBLOCKDATA:
opcode, obj = self._read_and_exec_opcode(ident=ident+1) # , expect=[self.TC_ENDBLOCKDATA, self.TC_BLOCKDATA, self.TC_OBJECT, self.TC_NULL, self.TC_REFERENCE])
if opcode != self.TC_ENDBLOCKDATA:
java_object.annotations.append(obj)
log_debug("objectAnnotation value: " + str(obj), ident)
log_debug("java_object.annotations after: " + str(java_object.annotations), ident)
# Transform object
for transformer in self.object_transformers:
tmp_object = transformer.transform(java_object)
if tmp_object is not java_object:
java_object = tmp_object
break
log_debug(">>> java_object: " + str(java_object), ident)
return java_object
def do_string(self, parent=None, ident=0):
log_debug("[string]", ident)
ba = JavaString(self._readString())
self._add_reference(ba, ident)
return ba
def do_string_long(self, parent=None, ident=0):
log_debug("[long string]", ident)
ba = JavaString(self._readString("Q"))
self._add_reference(ba, ident)
return ba
def do_array(self, parent=None, ident=0):
# TC_ARRAY classDesc newHandle (int)<size> values[size]
log_debug("[array]", ident)
opcode, classdesc = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE])
array = JavaArray(classdesc)
self._add_reference(array, ident)
(size, ) = self._readStruct(">i")
log_debug("size: " + str(size), ident)
type_char = classdesc.name[0]
assert type_char == self.TYPE_ARRAY
type_char = classdesc.name[1]
if type_char == self.TYPE_OBJECT or type_char == self.TYPE_ARRAY:
for i in range(size):
opcode, res = self._read_and_exec_opcode(ident=ident+1)
log_debug("Object value: %s" % str(res), ident)
array.append(res)
else:
for i in range(size):
res = self._read_value(type_char, ident)
log_debug("Native value: %s" % str(res), ident)
array.append(res)
return array
def do_reference(self, parent=None, ident=0):
(handle, ) = self._readStruct(">L")
log_debug("## Reference handle: 0x%x" % (handle), ident)
return self.references[handle - self.BASE_REFERENCE_IDX]
def do_null(self, parent=None, ident=0):
return None
def do_enum(self, parent=None, ident=0):
# TC_ENUM classDesc newHandle enumConstantName
enum = JavaEnum()
opcode, classdesc = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE])
enum.classdesc = classdesc
self._add_reference(enum, ident)
opcode, enumConstantName = self._read_and_exec_opcode(ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE])
enum.constant = enumConstantName
return enum
def _create_hexdump(self, src, start_offset=0, length=16):
FILTER = ''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
result = []
        for i in range(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X" % ord(x) for x in s])
printable = s.translate(FILTER)
result.append("%04X %-*s %s\n" % (i+start_offset, length*3, hexa, printable))
return ''.join(result)
def _read_value(self, field_type, ident, name = ""):
if len(field_type) > 1:
cls = field_type[1:]
field_type = field_type[0] # We don't need details for arrays and objects
if field_type == self.TYPE_BOOLEAN:
(val, ) = self._readStruct(">B")
res = bool(val)
elif field_type == self.TYPE_BYTE:
(res, ) = self._readStruct(">b")
elif field_type == self.TYPE_SHORT:
(res, ) = self._readStruct(">h")
elif field_type == self.TYPE_INTEGER:
(res, ) = self._readStruct(">i")
elif field_type == self.TYPE_LONG:
(res, ) = self._readStruct(">q")
elif field_type == self.TYPE_FLOAT:
(res, ) = self._readStruct(">f")
elif field_type == self.TYPE_DOUBLE:
(res, ) = self._readStruct(">d")
elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY:
try:
opcode, res = self._read_and_exec_opcode(ident=ident+1)
except RuntimeError:
if cls == 'java/lang/String;':
res = JavaString(self._readString())
else:
raise
else:
raise RuntimeError("Unknown typecode: %s" % field_type)
log_debug("* %s %s: " % (field_type, name) + str(res), ident)
return res
def _convert_char_to_type(self, type_char):
typecode = type_char
if type(type_char) is int:
typecode = chr(type_char)
if typecode in self.TYPECODES_LIST:
return typecode
else:
raise RuntimeError("Typecode %s (%s) isn't supported." % (type_char, typecode))
def _add_reference(self, obj, ident=0):
log_debug('## New reference handle 0x%X' % (len(self.references) + self.BASE_REFERENCE_IDX,), ident)
self.references.append(obj)
def _oops_dump_state(self):
log_error("==Oops state dump" + "=" * (30 - 17))
log_error("References: %s" % str(self.references))
log_error("Stream seeking back at -16 byte (2nd line is an actual position!):")
self.object_stream.seek(-16, 1)
position = self.object_stream.tell()
the_rest = self.object_stream.read()
if len(the_rest):
log_error(self._create_hexdump(the_rest, position))
log_error("=" * 30)
class JavaObjectMarshaller(JavaObjectConstants):
def __init__(self, stream=None):
self.object_stream = stream
self.object_transformers = []
def add_transformer(self, transformer):
self.object_transformers.append(transformer)
def dump(self, obj):
self.object_obj = obj
        self.object_stream = io.BytesIO()  # struct.pack() output is binary data
self._writeStreamHeader()
self.writeObject(obj)
return self.object_stream.getvalue()
def _writeStreamHeader(self):
self._writeStruct(">HH", 4, (self.STREAM_MAGIC, self.STREAM_VERSION))
def writeObject(self, obj):
log_debug("Writing object of type " + str(type(obj)) + " " + str(obj))
if isinstance(obj, JavaArray):
self.write_array(obj)
elif isinstance(obj, JavaEnum):
self.write_enum(obj)
elif isinstance(obj, JavaObject):
self.write_object(obj)
elif isinstance(obj, JavaString):
self.write_string(obj)
elif isinstance(obj, JavaClass):
self.write_class(obj)
elif obj is None:
self.write_null()
elif type(obj) is str:
self.write_blockdata(obj)
else:
raise RuntimeError("Object serialization of type %s is not supported." % str(type(obj)))
def _writeStruct(self, unpack, length, args):
ba = struct.pack(unpack, *args)
self.object_stream.write(ba)
def _writeString(self, string):
length = len(string)
self._writeStruct(">H", 2, (length, ))
self.object_stream.write(string)
def write_string(self, obj):
self._writeStruct(">B", 1, (self.TC_STRING,))
self._writeString(obj)
def write_enum(self, obj):
self._writeStruct(">B", 1, (self.TC_ENUM, ))
self.write_classdesc(obj.get_class())
self.write_string(obj.constant)
def write_blockdata(self, obj, parent=None):
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
length = len(obj)
        if length < 256:  # TC_BLOCKDATA length is a single unsigned byte (max 255)
self._writeStruct(">B", 1, (self.TC_BLOCKDATA, ))
self._writeStruct(">B", 1, (length, ))
else:
self._writeStruct(">B", 1, (self.TC_BLOCKDATALONG, ))
self._writeStruct(">I", 1, (length, ))
self.object_stream.write(obj)
def write_null(self):
self._writeStruct(">B", 1, (self.TC_NULL, ))
def write_object(self, obj, parent=None):
# Transform object
for transformer in self.object_transformers:
tmp_object = transformer.transform(obj)
if tmp_object is not obj:
obj = tmp_object
break
self._writeStruct(">B", 1, (self.TC_OBJECT, ))
cls = obj.get_class()
self.write_classdesc(cls)
all_names = []
all_types = []
tmpcls = cls
while tmpcls:
all_names += tmpcls.fields_names
all_types += tmpcls.fields_types
tmpcls = tmpcls.superclass
del tmpcls
for name, type in zip(all_names, all_types):
try:
self._write_value(type, getattr(obj, name))
except AttributeError as e:
log_error("%s e, %s %s" % (str(e), repr(obj), repr(dir(obj))))
raise
del all_names, all_types
if (cls.flags & self.SC_SERIALIZABLE and cls.flags & self.SC_WRITE_METHOD or
cls.flags & self.SC_EXTERNALIZABLE and cls.flags & self.SC_BLOCK_DATA):
for annot in obj.annotations:
log_debug("Write annotation %s for %s" % (repr(annot), repr(obj),))
if annot == None:
self.write_null()
else:
self.writeObject(annot)
self._writeStruct('>B', 1, (self.TC_ENDBLOCKDATA,))
def write_class(self, obj, parent=None):
self._writeStruct(">B", 1, (self.TC_CLASS,))
self.write_classdesc(obj)
def write_classdesc(self, obj, parent=None):
self._writeStruct(">B", 1, (self.TC_CLASSDESC, ))
self._writeString(obj.name)
self._writeStruct(">LLB", 1, (obj.serialVersionUID, obj.handle, obj.flags))
self._writeStruct(">H", 1, (len(obj.fields_names), ))
for name,type in zip(obj.fields_names, obj.fields_types):
self._writeStruct(">B", 1,
(self._convert_type_to_char(type),))
self._writeString(name)
if type[0] in (self.TYPE_OBJECT, self.TYPE_ARRAY):
self.write_string(type)
self._writeStruct(">B", 1, (self.TC_ENDBLOCKDATA,))
if obj.superclass:
self.write_classdesc(obj.superclass)
else:
self.write_null()
def write_array(self, obj):
self._writeStruct(">B", 1, (self.TC_ARRAY,))
self.write_classdesc(obj.get_class())
self._writeStruct(">i", 1, (len(obj),))
classdesc = obj.get_class()
type_char = classdesc.name[0]
assert type_char == self.TYPE_ARRAY
type_char = classdesc.name[1]
if type_char == self.TYPE_OBJECT:
for o in obj:
self.write_object(o)
elif type_char == self.TYPE_ARRAY:
for a in obj:
self.write_array(a)
else:
log_debug("Write array of type %s" % type_char)
for v in obj:
self._write_value(type_char, v)
def _write_value(self, field_type, value):
if len(field_type) > 1:
field_type = field_type[0] # We don't need details for arrays and objects
if field_type == self.TYPE_BOOLEAN:
self._writeStruct(">B", 1, (1 if value else 0,))
elif field_type == self.TYPE_BYTE:
if value > 127:
self._writeStruct(">B", 1, (value,))
else:
self._writeStruct(">b", 1, (value,))
elif field_type == self.TYPE_SHORT:
self._writeStruct(">h", 1, (value,))
elif field_type == self.TYPE_INTEGER:
self._writeStruct(">i", 1, (value,))
elif field_type == self.TYPE_LONG:
self._writeStruct(">q", 1, (value,))
elif field_type == self.TYPE_FLOAT:
self._writeStruct(">f", 1, (value,))
elif field_type == self.TYPE_DOUBLE:
self._writeStruct(">d", 1, (value,))
elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY:
if value == None:
self.write_null()
elif isinstance(value, JavaEnum):
self.write_enum(value)
elif isinstance(value, JavaObject):
self.write_object(value)
elif isinstance(value, JavaString):
self.write_string(value)
elif isinstance(value, str):
self.write_blockdata(value)
else:
raise RuntimeError("Unknown typecode: %s" % field_type)
else:
raise RuntimeError("Unknown typecode: %s" % field_type)
def _convert_type_to_char(self, type_char):
typecode = type_char
if type(type_char) is int:
typecode = chr(type_char)
if typecode in self.TYPECODES_LIST:
return ord(typecode)
elif len(typecode) > 1:
if typecode[0] == 'L':
return ord(self.TYPE_OBJECT)
elif typecode[0] == '[':
return ord(self.TYPE_ARRAY)
raise RuntimeError("Typecode %s (%s) isn't supported." % (type_char, typecode))
class DefaultObjectTransformer(object):
class JavaList(list, JavaObject):
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
JavaObject.__init__(self)
class JavaMap(dict, JavaObject):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
JavaObject.__init__(self)
def transform(self, object):
if object.get_class().name == "java.util.ArrayList":
# * @serialData The length of the array backing the <tt>ArrayList</tt>
# * instance is emitted (int), followed by all of its elements
# * (each an <tt>Object</tt>) in the proper order.
#print "---"
#print "java.util.ArrayList"
#print object.annotations
#print "---"
new_object = self.JavaList()
object.copy(new_object)
new_object.extend(object.annotations[1:])
#print ">>> object:", new_object
return new_object
if object.get_class().name == "java.util.LinkedList":
#print "---"
#print
#print "java.util.LinkedList"
#print object.annotations
#print "---"
new_object = self.JavaList()
object.copy(new_object)
new_object.extend(object.annotations[1:])
#print ">>> object:", new_object
return new_object
if object.get_class().name == "java.util.HashMap":
#print "---"
#print
#print "java.util.HashMap"
#print object.annotations
#print "---"
new_object = self.JavaMap()
object.copy(new_object)
for i in range(1, len(object.annotations),2):
new_object[object.annotations[i]] = object.annotations[i+1]
#print ">>> object:", new_object
return new_object
return object
| bsd-3-clause |
vigilv/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
hfp/libxsmm | samples/deeplearning/sparse_training/dlrm/dlrm_data_pytorch.py | 19 | 39934 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the dlrm benchmark
# The inputs and outputs are generated according to the following three options:
# 1) random distribution
# 2) synthetic distribution, based on unique accesses and distances between them
# i) R. Hassan, A. Harris, N. Topham and A. Efthymiou "Synthetic Trace-Driven
# Simulation of Cache Memory", IEEE AINAM'07
# 3) public data set
# i) Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# ii) Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
from __future__ import absolute_import, division, print_function, unicode_literals
# others
from os import path
import bisect
import collections
import sys
import data_utils
# numpy
import numpy as np
from numpy import random as ra
# pytorch
import torch
from torch.utils.data import Dataset, RandomSampler
import data_loader_terabyte
# Kaggle Display Advertising Challenge Dataset
# dataset (str): name of dataset (Kaggle or Terabyte)
# randomize (str): determines randomization scheme
# "none": no randomization
# "day": randomizes each day"s data (only works if split = True)
# "total": randomizes total dataset
# split (bool) : to split into train, test, validation data-sets
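# Hedged usage sketch (paths and option values below are placeholders, not
# taken from this file): the dataset is constructed once per split and then
# consumed through a regular torch.utils.data.DataLoader.
#
#   train_data = CriteoDataset(
#       dataset="kaggle",
#       max_ind_range=-1,
#       sub_sample_rate=0.0,
#       randomize="total",
#       split="train",
#       raw_path="./input/train.txt",
#       pro_data="./input/kaggleAdDisplayChallenge_processed.npz",
#       memory_map=False,
#   )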
class CriteoDataset(Dataset):
def __init__(
self,
dataset,
max_ind_range,
sub_sample_rate,
randomize,
split="train",
raw_path="",
pro_data="",
memory_map=False
):
# dataset
# tar_fea = 1 # single target
den_fea = 13 # 13 dense features
# spa_fea = 26 # 26 sparse features
# tad_fea = tar_fea + den_fea
# tot_fea = tad_fea + spa_fea
if dataset == "kaggle":
days = 7
out_file = "kaggleAdDisplayChallenge_processed"
elif dataset == "terabyte":
days = 24
out_file = "terabyte_processed"
else:
raise(ValueError("Data set option is not supported"))
self.max_ind_range = max_ind_range
self.memory_map = memory_map
# split the datafile into path and filename
lstr = raw_path.split("/")
self.d_path = "/".join(lstr[0:-1]) + "/"
self.d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
self.npzfile = self.d_path + (
(self.d_file + "_day") if dataset == "kaggle" else self.d_file
)
self.trafile = self.d_path + (
(self.d_file + "_fea") if dataset == "kaggle" else "fea"
)
# check if pre-processed data is available
data_ready = True
if memory_map:
for i in range(days):
reo_data = self.npzfile + "_{0}_reordered.npz".format(i)
if not path.exists(str(reo_data)):
data_ready = False
else:
if not path.exists(str(pro_data)):
data_ready = False
# pre-process data if needed
        # WARNING: when memory mapping is used we get a collection of files
if data_ready:
print("Reading pre-processed data=%s" % (str(pro_data)))
file = str(pro_data)
else:
print("Reading raw data=%s" % (str(raw_path)))
file = data_utils.getCriteoAdData(
raw_path,
out_file,
max_ind_range,
sub_sample_rate,
days,
split,
randomize,
dataset == "kaggle",
memory_map
)
# get a number of samples per day
total_file = self.d_path + self.d_file + "_day_count.npz"
with np.load(total_file) as data:
total_per_file = data["total_per_file"]
# compute offsets per file
self.offset_per_file = np.array([0] + [x for x in total_per_file])
for i in range(days):
self.offset_per_file[i + 1] += self.offset_per_file[i]
# print(self.offset_per_file)
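        # Worked example (illustrative numbers): if total_per_file were
        # [3, 2, 4], the prefix-sum loop above would turn offset_per_file
        # into [0, 3, 5, 9], i.e. the start offset of each day plus the
        # grand total at the end.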
# setup data
if memory_map:
# setup the training/testing split
self.split = split
if split == 'none' or split == 'train':
self.day = 0
self.max_day_range = days if split == 'none' else days - 1
elif split == 'test' or split == 'val':
self.day = days - 1
num_samples = self.offset_per_file[days] - \
self.offset_per_file[days - 1]
self.test_size = int(np.ceil(num_samples / 2.))
self.val_size = num_samples - self.test_size
else:
sys.exit("ERROR: dataset split is neither none, nor train or test.")
'''
# text
print("text")
for i in range(days):
fi = self.npzfile + "_{0}".format(i)
with open(fi) as data:
ttt = 0; nnn = 0
for _j, line in enumerate(data):
ttt +=1
if np.int32(line[0]) > 0:
nnn +=1
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
# processed
print("processed")
for i in range(days):
fi = self.npzfile + "_{0}_processed.npz".format(i)
with np.load(fi) as data:
yyy = data["y"]
ttt = len(yyy)
nnn = np.count_nonzero(yyy)
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
# reordered
print("reordered")
for i in range(days):
fi = self.npzfile + "_{0}_reordered.npz".format(i)
with np.load(fi) as data:
yyy = data["y"]
ttt = len(yyy)
nnn = np.count_nonzero(yyy)
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
'''
# load unique counts
with np.load(self.d_path + self.d_file + "_fea_count.npz") as data:
self.counts = data["counts"]
self.m_den = den_fea # X_int.shape[1]
self.n_emb = len(self.counts)
print("Sparse features= %d, Dense features= %d" % (self.n_emb, self.m_den))
# Load the test data
# Only a single day is used for testing
if self.split == 'test' or self.split == 'val':
# only a single day is used for testing
fi = self.npzfile + "_{0}_reordered.npz".format(
self.day
)
with np.load(fi) as data:
self.X_int = data["X_int"] # continuous feature
self.X_cat = data["X_cat"] # categorical feature
self.y = data["y"] # target
else:
# load and preprocess data
with np.load(file) as data:
X_int = data["X_int"] # continuous feature
X_cat = data["X_cat"] # categorical feature
y = data["y"] # target
self.counts = data["counts"]
self.m_den = X_int.shape[1] # den_fea
self.n_emb = len(self.counts)
print("Sparse fea = %d, Dense fea = %d" % (self.n_emb, self.m_den))
# create reordering
indices = np.arange(len(y))
if split == "none":
# randomize all data
if randomize == "total":
indices = np.random.permutation(indices)
print("Randomized indices...")
X_int[indices] = X_int
X_cat[indices] = X_cat
y[indices] = y
else:
indices = np.array_split(indices, self.offset_per_file[1:-1])
# randomize train data (per day)
if randomize == "day": # or randomize == "total":
for i in range(len(indices) - 1):
indices[i] = np.random.permutation(indices[i])
print("Randomized indices per day ...")
train_indices = np.concatenate(indices[:-1])
test_indices = indices[-1]
test_indices, val_indices = np.array_split(test_indices, 2)
print("Defined %s indices..." % (split))
# randomize train data (across days)
if randomize == "total":
train_indices = np.random.permutation(train_indices)
print("Randomized indices across days ...")
# create training, validation, and test sets
if split == 'train':
self.X_int = [X_int[i] for i in train_indices]
self.X_cat = [X_cat[i] for i in train_indices]
self.y = [y[i] for i in train_indices]
elif split == 'val':
self.X_int = [X_int[i] for i in val_indices]
self.X_cat = [X_cat[i] for i in val_indices]
self.y = [y[i] for i in val_indices]
elif split == 'test':
self.X_int = [X_int[i] for i in test_indices]
self.X_cat = [X_cat[i] for i in test_indices]
self.y = [y[i] for i in test_indices]
print("Split data according to indices...")
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
if self.memory_map:
if self.split == 'none' or self.split == 'train':
# check if we need to switch to the next day and load its data
if index == self.offset_per_file[self.day]:
# print("day_boundary switch", index)
self.day_boundary = self.offset_per_file[self.day]
fi = self.npzfile + "_{0}_reordered.npz".format(
self.day
)
# print('Loading file: ', fi)
with np.load(fi) as data:
self.X_int = data["X_int"] # continuous feature
self.X_cat = data["X_cat"] # categorical feature
self.y = data["y"] # target
self.day = (self.day + 1) % self.max_day_range
i = index - self.day_boundary
elif self.split == 'test' or self.split == 'val':
# only a single day is used for testing
i = index + (0 if self.split == 'test' else self.test_size)
else:
sys.exit("ERROR: dataset split must be one of 'none', 'train', 'test' or 'val'.")
else:
i = index
if self.max_ind_range > 0:
return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i]
else:
return self.X_int[i], self.X_cat[i], self.y[i]
def _default_preprocess(self, X_int, X_cat, y):
X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1)
if self.max_ind_range > 0:
X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long)
else:
X_cat = torch.tensor(X_cat, dtype=torch.long)
y = torch.tensor(y.astype(np.float32))
return X_int, X_cat, y
def __len__(self):
if self.memory_map:
if self.split == 'none':
return self.offset_per_file[-1]
elif self.split == 'train':
return self.offset_per_file[-2]
elif self.split == 'test':
return self.test_size
elif self.split == 'val':
return self.val_size
else:
sys.exit("ERROR: dataset split must be one of 'none', 'train', 'test' or 'val'.")
else:
return len(self.y)
def collate_wrapper_criteo(list_of_tuples):
# where each tuple is (X_int, X_cat, y)
transposed_data = list(zip(*list_of_tuples))
X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1)
X_cat = torch.tensor(transposed_data[1], dtype=torch.long)
T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1)
batchSize = X_cat.shape[0]
featureCnt = X_cat.shape[1]
lS_i = [X_cat[:, i] for i in range(featureCnt)]
lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)]
return X_int, torch.stack(lS_o), torch.stack(lS_i), T
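# Illustrative usage of CriteoDataset together with collate_wrapper_criteo
# (hypothetical paths and batch size; shown for reference only, not executed):
#
#   train_data = CriteoDataset(
#       dataset="kaggle",
#       max_ind_range=-1,
#       sub_sample_rate=0.0,
#       randomize="total",
#       split="train",
#       raw_path="./input/train.txt",
#       pro_data="./input/kaggleAdDisplayChallenge_processed.npz",
#       memory_map=False,
#   )
#   train_loader = torch.utils.data.DataLoader(
#       train_data,
#       batch_size=128,
#       shuffle=False,
#       collate_fn=collate_wrapper_criteo,
#   )
#   # each batch is a tuple (X_int, lS_o, lS_i, T) as built by collate_wrapper_criteo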
def ensure_dataset_preprocessed(args, d_path):
_ = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
_ = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
for split in ['train', 'val', 'test']:
print('Running preprocessing for split =', split)
train_files = ['{}_{}_reordered.npz'.format(args.raw_data_file, day)
for day in range(0, 23)]
test_valid_file = args.raw_data_file + '_23_reordered.npz'
output_file = d_path + '_{}.bin'.format(split)
input_files = train_files if split == 'train' else [test_valid_file]
data_loader_terabyte.numpy_to_binary(input_files=input_files,
output_file_path=output_file,
split=split)
def make_criteo_data_and_loaders(args):
if args.mlperf_logging and args.memory_map and args.data_set == "terabyte":
# more efficient for larger batches
data_directory = path.dirname(args.raw_data_file)
if args.mlperf_bin_loader:
lstr = args.processed_data_file.split("/")
d_path = "/".join(lstr[0:-1]) + "/" + lstr[-1].split(".")[0]
train_file = d_path + "_train.bin"
test_file = d_path + "_test.bin"
# val_file = d_path + "_val.bin"
counts_file = args.raw_data_file + '_fea_count.npz'
if any(not path.exists(p) for p in [train_file,
test_file,
counts_file]):
ensure_dataset_preprocessed(args, d_path)
train_data = data_loader_terabyte.CriteoBinDataset(
data_file=train_file,
counts_file=counts_file,
batch_size=args.mini_batch_size,
max_ind_range=args.max_ind_range
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
sampler=RandomSampler(train_data) if args.mlperf_bin_shuffle else None
)
test_data = data_loader_terabyte.CriteoBinDataset(
data_file=test_file,
counts_file=counts_file,
batch_size=args.test_mini_batch_size,
max_ind_range=args.max_ind_range
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
else:
data_filename = args.raw_data_file.split("/")[-1]
train_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
test_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
train_loader = data_loader_terabyte.DataLoader(
data_directory=data_directory,
data_filename=data_filename,
days=list(range(23)),
batch_size=args.mini_batch_size,
max_ind_range=args.max_ind_range,
split="train"
)
test_loader = data_loader_terabyte.DataLoader(
data_directory=data_directory,
data_filename=data_filename,
days=[23],
batch_size=args.test_mini_batch_size,
max_ind_range=args.max_ind_range,
split="test"
)
else:
train_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
test_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.mini_batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False, # True
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.test_mini_batch_size,
shuffle=False,
num_workers=args.test_num_workers,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader, test_data, test_loader
# uniform distribution (input data)
class RandomDataset(Dataset):
def __init__(
self,
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
reset_seed_on_access=False,
rand_seed=0
):
# compute the number of batches
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# save args (recompute data_size if needed)
self.m_den = m_den
self.ln_emb = ln_emb
self.data_size = data_size
self.num_batches = nbatches
self.mini_batch_size = mini_batch_size
self.num_indices_per_lookup = num_indices_per_lookup
self.num_indices_per_lookup_fixed = num_indices_per_lookup_fixed
self.num_targets = num_targets
self.round_targets = round_targets
self.data_generation = data_generation
self.trace_file = trace_file
self.enable_padding = enable_padding
self.reset_seed_on_access = reset_seed_on_access
self.rand_seed = rand_seed
def reset_numpy_seed(self, numpy_rand_seed):
np.random.seed(numpy_rand_seed)
# torch.manual_seed(numpy_rand_seed)
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
# WARNING: reset seed on access to first element
# (e.g. if same random samples needed across epochs)
if self.reset_seed_on_access and index == 0:
self.reset_numpy_seed(self.rand_seed)
# number of data points in a batch
n = min(self.mini_batch_size, self.data_size - (index * self.mini_batch_size))
# generate a batch of dense and sparse features
if self.data_generation == "random":
(X, lS_o, lS_i) = generate_uniform_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed
)
elif self.data_generation == "synthetic":
(X, lS_o, lS_i) = generate_synthetic_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed,
self.trace_file,
self.enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + self.data_generation + " is not supported"
)
# generate a batch of target (probability of a click)
T = generate_random_output_batch(n, self.num_targets, self.round_targets)
return (X, lS_o, lS_i, T)
def __len__(self):
# WARNING: note that we produce batches of outputs in __getitem__
# therefore we should use num_batches rather than data_size below
return self.num_batches
def collate_wrapper_random(list_of_tuples):
# where each tuple is (X, lS_o, lS_i, T)
(X, lS_o, lS_i, T) = list_of_tuples[0]
return (X,
torch.stack(lS_o),
lS_i,
T)
def make_random_data_and_loader(args, ln_emb, m_den):
train_data = RandomDataset(
m_den,
ln_emb,
args.data_size,
args.num_batches,
args.mini_batch_size,
args.num_indices_per_lookup,
args.num_indices_per_lookup_fixed,
1, # num_targets
args.round_targets,
args.data_generation,
args.data_trace_file,
args.data_trace_enable_padding,
reset_seed_on_access=True,
rand_seed=args.numpy_rand_seed
) # WARNING: generates a batch of lookups at once
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=1,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_random,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader
def generate_random_data(
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
):
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# inputs
lT = []
lX = []
lS_offsets = []
lS_indices = []
for j in range(0, nbatches):
# number of data points in a batch
n = min(mini_batch_size, data_size - (j * mini_batch_size))
# generate a batch of dense and sparse features
if data_generation == "random":
(Xt, lS_emb_offsets, lS_emb_indices) = generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed
)
elif data_generation == "synthetic":
(Xt, lS_emb_offsets, lS_emb_indices) = generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + data_generation + " is not supported"
)
# dense feature
lX.append(Xt)
# sparse feature (sparse indices)
lS_offsets.append(lS_emb_offsets)
lS_indices.append(lS_emb_indices)
# generate a batch of target (probability of a click)
P = generate_random_output_batch(n, num_targets, round_targets)
lT.append(P)
return (nbatches, lX, lS_offsets, lS_indices, lT)
def generate_random_output_batch(n, num_targets, round_targets=False):
# target (probability of a click)
if round_targets:
P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.float32)
else:
P = ra.rand(n, num_targets).astype(np.float32)
return torch.tensor(P)
# uniform distribution (input data)
def generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
):
# dense feature
Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for size in ln_emb:
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
# num of sparse indices to be used per embedding (between 1 and num_indices_per_lookup)
if num_indices_per_lookup_fixed:
sparse_group_size = np.int64(num_indices_per_lookup)
else:
# random in [1, num_indices_per_lookup]
r = ra.random(1)
sparse_group_size = np.int64(
np.round(max([1.0], r * min(size, num_indices_per_lookup)))
)
# sparse indices to be used per embedding
r = ra.random(sparse_group_size)
sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int64(sparse_group.size)
# store lengths and indices
lS_batch_offsets += [offset]
lS_batch_indices += sparse_group.tolist()
# update offset for next iteration
offset += sparse_group_size
lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
lS_emb_indices.append(torch.tensor(lS_batch_indices))
return (Xt, lS_emb_offsets, lS_emb_indices)
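# Worked example of the (offsets, indices) layout built above, matching the
# format expected by nn.EmbeddingBag (hypothetical numbers): for one embedding
# table and n = 3 lookups with sparse groups [2, 5], [7] and [1, 3, 8],
#   lS_batch_offsets = [0, 2, 3]            # start of each lookup
#   lS_batch_indices = [2, 5, 7, 1, 3, 8]   # concatenated indices
# i.e. lookup k reads indices[offsets[k]:offsets[k + 1]], and the last lookup
# runs to the end of the indices tensor.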
# synthetic distribution (input data)
def generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding=False,
):
# dense feature
Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for i, size in enumerate(ln_emb):
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
# num of sparse indices to be used per embedding (between 1 and num_indices_per_lookup)
if num_indices_per_lookup_fixed:
sparse_group_size = np.int64(num_indices_per_lookup)
else:
# random in [1, num_indices_per_lookup]
r = ra.random(1)
sparse_group_size = np.int64(
max(1, np.round(r * min(size, num_indices_per_lookup))[0])
)
# sparse indices to be used per embedding
file_path = trace_file
line_accesses, list_sd, cumm_sd = read_dist_from_file(
file_path.replace("j", str(i))
)
# debug prints
# print("input")
# print(line_accesses); print(list_sd); print(cumm_sd);
# print(sparse_group_size)
# approach 1: rand
# r = trace_generate_rand(
# line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding
# )
# approach 2: lru
r = trace_generate_lru(
line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding
)
# WARNING: if the distribution in the file is not consistent
# with embedding table dimensions, below mod guards against out
# of range access
sparse_group = np.unique(r).astype(np.int64)
minsg = np.min(sparse_group)
maxsg = np.max(sparse_group)
if (minsg < 0) or (size <= maxsg):
print(
"WARNING: distribution is inconsistent with embedding "
+ "table size (using mod to recover and continue)"
)
sparse_group = np.mod(sparse_group, size).astype(np.int64)
# sparse_group = np.unique(np.array(np.mod(r, size-1)).astype(np.int64))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int64(sparse_group.size)
# store lengths and indices
lS_batch_offsets += [offset]
lS_batch_indices += sparse_group.tolist()
# update offset for next iteration
offset += sparse_group_size
lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
lS_emb_indices.append(torch.tensor(lS_batch_indices))
return (Xt, lS_emb_offsets, lS_emb_indices)
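# Note on trace_file naming: the per-table distribution file is resolved by
# replacing every literal "j" in trace_file with the embedding table index,
# e.g. a (hypothetical) trace_file of "./input/dist_j.log" is read as
# "./input/dist_0.log", "./input/dist_1.log", ... for tables 0, 1, ...
# (avoid an extra "j" elsewhere in the path). Each file must contain the three
# lines produced by write_dist_to_file below: unique line accesses, stack
# distances and their cumulative distribution.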
def generate_stack_distance(cumm_val, cumm_dist, max_i, i, enable_padding=False):
u = ra.rand(1)
if i < max_i:
# only generate stack distances up to the number of new references seen so far
j = bisect.bisect(cumm_val, i) - 1
fi = cumm_dist[j]
u *= fi # shrink distribution support to exclude last values
elif enable_padding:
# WARNING: disable generation of new references (once all have been seen)
fi = cumm_dist[0]
u = (1.0 - fi) * u + fi # remap distribution support to exclude first value
for (j, f) in enumerate(cumm_dist):
if u <= f:
return cumm_val[j]
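# Worked example (hypothetical values, assuming i >= max_i and no padding):
# with cumm_val = [0, 1, 3] and cumm_dist = [0.5, 0.8, 1.0], a uniform draw
# u = 0.65 first satisfies u <= 0.8, so the function returns stack distance 1;
# u <= 0.5 returns 0 (a new reference) and u > 0.8 returns 3. When i < max_i,
# u is first scaled by cumm_dist[bisect(cumm_val, i) - 1] so that only stack
# distances observable after i new references can be drawn.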
# WARNING: global define, must be consistent across all synthetic functions
cache_line_size = 1
def trace_generate_lru(
line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
max_sd = list_sd[-1]
l = len(line_accesses)
i = 0
ztrace = []
for _ in range(out_trace_len):
sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0
# generate memory reference
if sd == 0: # new reference #
line_ref = line_accesses.pop(0)
line_accesses.append(line_ref)
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
i += 1
else: # existing reference #
line_ref = line_accesses[l - sd]
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
line_accesses.pop(l - sd)
line_accesses.append(line_ref)
# save generated memory reference
ztrace.append(mem_ref)
return ztrace
def trace_generate_rand(
line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
max_sd = list_sd[-1]
l = len(line_accesses) # !!!Unique,
i = 0
ztrace = []
for _ in range(out_trace_len):
sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0
# generate memory reference
if sd == 0: # new reference #
line_ref = line_accesses.pop(0)
line_accesses.append(line_ref)
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
i += 1
else: # existing reference #
line_ref = line_accesses[l - sd]
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
ztrace.append(mem_ref)
return ztrace
def trace_profile(trace, enable_padding=False):
# number of elements in the array (assuming 1D)
# n = trace.size
rstack = [] # S
stack_distances = [] # SDS
line_accesses = [] # L
for x in trace:
r = np.uint64(x / cache_line_size)
l = len(rstack)
try: # found #
i = rstack.index(r)
# WARNING: I believe below is the correct depth in terms of meaning of the
# algorithm, but that is not what seems to be in the paper alg.
# -1 can be subtracted if we defined the distance between
# consecutive accesses (e.g. r, r) as 0 rather than 1.
sd = l - i # - 1
# push r to the end of stack_distances
stack_distances.insert(0, sd)
# remove r from its position and insert to the top of stack
rstack.pop(i) # rstack.remove(r)
rstack.insert(l - 1, r)
except ValueError: # not found #
sd = 0 # -1
# push r to the end of stack_distances/line_accesses
stack_distances.insert(0, sd)
line_accesses.insert(0, r)
# push r to the top of stack
rstack.insert(l, r)
if enable_padding:
# WARNING: notice that as the ratio between the number of samples (l)
# and cardinality (c) of a sample increases the probability of
# generating a sample gets smaller and smaller because there are
# few new samples compared to repeated samples. This means that for a
# long trace with relatively small cardinality it will take longer to
# generate all new samples and therefore obtain full distribution support
# and hence it takes longer for distribution to resemble the original.
# Therefore, we may pad the number of new samples to be on par with
# average number of samples l/c artificially.
l = len(stack_distances)
c = max(stack_distances)
padding = int(np.ceil(l / c))
stack_distances = stack_distances + [0] * padding
return (rstack, stack_distances, line_accesses)
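# Worked example (hypothetical trace, cache_line_size = 1): profiling the trace
# [1, 2, 1, 3, 2] proceeds as follows. The first accesses to 1, 2 and 3 are new
# references (stack distance 0). The second access to 1 finds it at depth 2 of
# the LRU stack [1, 2] (most recent on top), and the second access to 2 finds
# it at depth 3 of [2, 1, 3], so the function returns
#   stack_distances (newest first) = [3, 0, 2, 0, 0]
#   line_accesses (newest first) = [3, 2, 1]
# The __main__ driver below reverses both lists before building the cumulative
# distribution.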
# auxiliary read/write routines
def read_trace_from_file(file_path):
try:
with open(file_path) as f:
if args.trace_file_binary_type:
array = np.fromfile(f, dtype=np.uint64)
trace = array.astype(np.uint64).tolist()
else:
line = f.readline()
trace = list(map(lambda x: np.uint64(x), line.split(", ")))
return trace
except Exception:
print("ERROR: no input trace file has been provided")
def write_trace_to_file(file_path, trace):
try:
if args.trace_file_binary_type:
with open(file_path, "wb+") as f:
np.array(trace).astype(np.uint64).tofile(f)
else:
with open(file_path, "w+") as f:
s = str(trace)
f.write(s[1 : len(s) - 1])
except Exception:
print("ERROR: no output trace file has been provided")
def read_dist_from_file(file_path):
try:
with open(file_path, "r") as f:
lines = f.read().splitlines()
except Exception:
print("Wrong file or file path")
# read unique accesses
unique_accesses = [int(el) for el in lines[0].split(", ")]
# read cumulative distribution (elements are passed as two separate lists)
list_sd = [int(el) for el in lines[1].split(", ")]
cumm_sd = [float(el) for el in lines[2].split(", ")]
return unique_accesses, list_sd, cumm_sd
def write_dist_to_file(file_path, unique_accesses, list_sd, cumm_sd):
try:
with open(file_path, "w") as f:
# unique_accesses
s = str(unique_accesses)
f.write(s[1 : len(s) - 1] + "\n")
# list_sd
s = str(list_sd)
f.write(s[1 : len(s) - 1] + "\n")
# cumm_sd
s = str(cumm_sd)
f.write(s[1 : len(s) - 1] + "\n")
except Exception:
print("Wrong file or file path")
if __name__ == "__main__":
import sys
import operator
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(description="Generate Synthetic Distributions")
parser.add_argument("--trace-file", type=str, default="./input/trace.log")
parser.add_argument("--trace-file-binary-type", type=bool, default=False)
parser.add_argument("--trace-enable-padding", type=bool, default=False)
parser.add_argument("--dist-file", type=str, default="./input/dist.log")
parser.add_argument(
"--synthetic-file", type=str, default="./input/trace_synthetic.log"
)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--print-precision", type=int, default=5)
args = parser.parse_args()
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
### read trace ###
trace = read_trace_from_file(args.trace_file)
# print(trace)
### profile trace ###
(_, stack_distances, line_accesses) = trace_profile(
trace, args.trace_enable_padding
)
stack_distances.reverse()
line_accesses.reverse()
# print(line_accesses)
# print(stack_distances)
### compute probability distribution ###
# count items
l = len(stack_distances)
dc = sorted(
collections.Counter(stack_distances).items(), key=operator.itemgetter(0)
)
# create a distribution
list_sd = list(map(lambda tuple_x_k: tuple_x_k[0], dc)) # x = tuple_x_k[0]
dist_sd = list(
map(lambda tuple_x_k: tuple_x_k[1] / float(l), dc)
) # k = tuple_x_k[1]
cumm_sd = [] # np.cumsum(dc).tolist() #prefixsum
for i, (_, k) in enumerate(dc):
if i == 0:
cumm_sd.append(k / float(l))
else:
# add the 2nd element of the i-th tuple in the dist_sd list
cumm_sd.append(cumm_sd[i - 1] + (k / float(l)))
### write stack_distance and line_accesses to a file ###
write_dist_to_file(args.dist_file, line_accesses, list_sd, cumm_sd)
### generate corresponding synthetic trace ###
# line_accesses, list_sd, cumm_sd = read_dist_from_file(args.dist_file)
synthetic_trace = trace_generate_lru(
line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding
)
# synthetic_trace = trace_generate_rand(
# line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding
# )
write_trace_to_file(args.synthetic_file, synthetic_trace)
| bsd-3-clause |
microsoft/onnxruntime | onnxruntime/test/python/transformers/test_parity_gelu.py | 1 | 7531 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Below are test results for Gelu or FastGelu FP32 kernels using CUDA:
Formula Input(BeforeCast) MaxDiff MaxDiff(Optimized)
0(gelu_python) FP32 2.38E-07 4.77E-07
0(gelu_python) FP16 0 6.10E-05
1(gelu) FP32 4.77E-07 0
1(gelu) FP16 6.10E-05 0
2(erf_gelu) FP32 2.38E-07 9.54E-07
2(erf_gelu) FP16 1.22E-04 1.95E-03
3(gelu_new) FP32 2.38E-07 2.38E-07
3(gelu_new) FP16 0 0
4(gelu_fast) FP32 0 2.38E-07
4(gelu_fast) FP16 0 3.05E-05
5(openai_gelu) FP32 0 2.38E-07
5(openai_gelu) FP16 0 3.05E-05
For comparison, CPU has MaxDiff=4.77E-07 for each formula.
"""
import math
import os
import unittest
import torch
from parity_utilities import *
from torch import nn
class Gelu(nn.Module):
def __init__(self, formula=4, fp32_gelu_op=False):
super().__init__()
self.formula = formula
self.fp32_gelu_op = fp32_gelu_op
def gelu(self, x):
if self.formula == 0:
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
elif self.formula == 1:
return nn.functional.gelu(x)
elif self.formula == 2:
# erf_gelu in Megatron: x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype))
return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype) + 1.0)
elif self.formula == 3:
# gelu_new in huggingface transformers
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
elif self.formula == 4:
# gelu_fast in huggingface transformers with lower precision in a constant (0.7978845608)
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
else:
# openai_gelu in Megatron
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x)))
@staticmethod
def get_fused_op(formula):
return "Gelu" if formula in [0, 1, 2] else "FastGelu"
def forward(self, x):
if self.fp32_gelu_op and x.dtype == torch.float16:
# This test only evaluates FP32 kernels so add data type cast for input and output.
casted_output = self.gelu(x.to(torch.float32)).to(torch.float16)
return (casted_output,)
else:
output = self.gelu(x)
return (output,)
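# Illustrative numerical comparison of the formulas above (hypothetical snippet,
# not part of the test run): the tanh-based approximations (formulas 3-5) agree
# with the exact erf definition (formula 0) to within roughly 1e-3 in FP32;
# formulas 0-2 map to the fused Gelu op and 3-5 to FastGelu (see get_fused_op).
#
#   x = torch.linspace(-5.0, 5.0, steps=101)
#   exact = Gelu(formula=0)(x)[0]
#   approx = Gelu(formula=4)(x)[0]
#   print((exact - approx).abs().max())  # expected to be on the order of 1e-4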
def get_output_names():
outputs = ["output"]
return outputs
def run(
batch_size,
float16,
optimized,
hidden_size,
device,
test_cases,
formula=0,
sequence_length=2,
fp32_gelu_op=True,
):
test_name = f"device={device}, float16={float16}, optimized={optimized}, batch_size={batch_size}, sequence_length={sequence_length}, hidden_size={hidden_size}, formula={formula}, fp32_gelu_op={fp32_gelu_op}"
print(f"\nTesting: {test_name}")
model = Gelu(formula=formula, fp32_gelu_op=fp32_gelu_op)
model.eval()
model.to(device)
if float16:
model.half()
# Do not re-use onnx file from previous test since weights of model are random.
onnx_model_path = "./temp/gelu_{}_{}.onnx".format(formula, "fp16" if float16 else "fp32")
export_onnx(model, onnx_model_path, float16, hidden_size, device)
if optimized:
optimized_onnx_path = "./temp/gelu_{}_opt_{}.onnx".format(formula, "fp16" if float16 else "fp32")
use_gpu = float16 and not fp32_gelu_op
optimize_onnx(
onnx_model_path,
optimized_onnx_path,
Gelu.get_fused_op(formula),
use_gpu=use_gpu,
opt_level=2 if use_gpu else None,
)
onnx_path = optimized_onnx_path
else:
onnx_path = onnx_model_path
num_failure = run_parity(
model,
onnx_path,
batch_size,
hidden_size,
sequence_length,
float16,
device,
optimized,
test_cases,
verbose=False,
)
# clean up onnx file
os.remove(onnx_model_path)
if optimized:
os.remove(onnx_path)
return num_failure, test_name
class TestGeluParity(unittest.TestCase):
def setUp(self):
self.optimized = True # Change it to False if you want to test parity of non optimized ONNX
self.test_cases = 100 # Number of test cases per test run
self.sequence_length = 2
self.hidden_size = 768
self.formula_to_test = [0, 1, 2, 3, 4, 5]
self.formula_must_pass = [
0,
1,
3,
4,
5,
] # formula 2 cannot pass precision test.
def run_test(
self,
batch_size,
float16,
optimized,
hidden_size,
device,
formula,
enable_assert=True,
fp32_gelu_op=True,
):
if float16 and device.type == "cpu": # CPU does not support FP16
return
num_failure, test_name = run(
batch_size,
float16,
optimized,
hidden_size,
device,
self.test_cases,
formula,
self.sequence_length,
fp32_gelu_op,
)
if enable_assert:
self.assertTrue(num_failure == 0, "Failed: " + test_name)
def run_one(self, optimized, device, hidden_size=768, formula=0):
for batch_size in [4]:
self.run_test(
batch_size,
float16=False,
optimized=optimized,
hidden_size=hidden_size,
device=device,
formula=formula,
enable_assert=formula in self.formula_must_pass,
)
self.run_test(
batch_size,
float16=True,
optimized=optimized,
hidden_size=hidden_size,
device=device,
formula=formula,
enable_assert=formula in self.formula_must_pass,
fp32_gelu_op=True,
)
self.run_test(
batch_size,
float16=True,
optimized=optimized,
hidden_size=hidden_size,
device=device,
formula=formula,
enable_assert=formula in self.formula_must_pass,
fp32_gelu_op=False,
)
def test_cpu(self):
cpu = torch.device("cpu")
for i in self.formula_to_test:
self.run_one(self.optimized, cpu, hidden_size=self.hidden_size, formula=i)
def test_cuda(self):
if not torch.cuda.is_available():
import pytest
pytest.skip("test requires GPU and torch+cuda")
else:
gpu = torch.device("cuda")
for i in self.formula_to_test:
self.run_one(self.optimized, gpu, hidden_size=self.hidden_size, formula=i)
if __name__ == "__main__":
unittest.main()
| mit |
vigilv/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 380 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
mobilegenome/teddypi | teddypi.py | 1 | 8155 | #!/usr/bin/env python2.7
"""
teddypi.py
(c) Fritjof Lammers
"""
import os
import yaml
import argparse
import tpi_filter
import tpi_helpers
from tpi_svintegration import cluster_calls, nonredundant_2_sets
from pybedtools import BedTool
from collections import defaultdict
from sys import argv
def parse_args(args):
parser = argparse.ArgumentParser(description='Arguments passed to the program')
parser.add_argument('-c', '--config', required=True, help='config file path')
return parser.parse_args(args)
def main():
"""
Start the TeddyPi pipeline, loads main configuration file, collects input files and parses TE/SV caller specific configuration.
This module returns filtered and integrated datasets for tpi_ortho.py.
"""
options = parse_args(argv[1:])
modulename = "TeddyPi"
print u"TeddyPi - Transposable Element detection and discovery for Phylogenetic Inference"
print u"---------------------------------------------------------------------------------\n"
print u"[ {} ] Initialize configuration from {}...".format(modulename, options.config),
# Load main configuration
with open(options.config) as fin:
config = yaml.load(fin)
programs = config['programs']
print u"done."
tpi_helpers.create_out_path(config['out_dir']) # Create output directory
transposons = config['refte'] # Load reference TE file
# 1. Filter operations for each program and species
filtered_files = defaultdict(dict)
for samplename in config['samples']:
print u"[ {} ] Loading data for sample {}; ".format(modulename, samplename)
print u"[ {} ] Config has info on these TE/SV callers: {}".format(modulename,
",".join([elem['name'] for elem in programs]))
per_sample_files = (fname for fname in os.listdir(config['data_dir']) if
fname.startswith(samplename) and fname.endswith(
".vcf")) # TODO avoid reloading processed files
for sample_file in per_sample_files:
# print "%s, " % sample_file
per_sample_vcf = tpi_filter.LoadVCF(data_dir=config['data_dir'],
out_dir=config['out_dir'],
fname=sample_file,
sname=samplename)
simple_source = per_sample_vcf.vcf_source.split(" ")[0].lower()
if config['programs'] == "auto" or simple_source in [elem['name'] for elem in programs]:
per_sample_vcf.skip = False # flag to skip filtering
per_sample_vcf.filter_variants()
print u"[ {} ] Filtered variants written to: {}\n".format(modulename, per_sample_vcf.out_fname)
filtered_files[samplename][simple_source] = per_sample_vcf.out_fname
else:
print u"[ {} ] Error: Auto-detection of TE/SV callers disabled and VCF-source {} not mentioned in " \
u"config.\nskipping...".format(modulename, simple_source)
# 2. Integrate SV-deletions and convert to Ref+ TE calls
# tpi_svintegration.py
if 'call_operations' in config.keys():
print u"[ {} ] Call operations found in configfile".format(modulename)
for op, sources in config['call_operations'].iteritems():
try:
assert (set([elem['name'] for elem in programs]) >= (set(sources)))
except AssertionError:
print u"VCF sources for operations have not been parsed."
print u"[ {} ] For operation {}, sources {} were not parsed. Check \' programs \' parameter in {}" \
.format(modulename, op, ",".join(sources), options.config)
continue
if op == "non_redundant":
print u"[ {} ] Starting operation {} on sources {} over all samples,".format(modulename, op,
",".join(sources))
for sample in filtered_files.keys():
print u"[ %s ] %s " % (op, sample)
sets = (BedTool(os.path.join(config['out_dir'], filtered_files[sample][src])) for src in sources)
nr = nonredundant_2_sets(sets)
nr_set_outfile = "{s}.{t}.nr.bed".format(s=sample, t="DEL")
nr_set_outfile = os.path.join(config['out_dir'], nr_set_outfile)
nr.saveas(nr_set_outfile)
print u"[ {} ] non_redundant set saved to {}".format(op, nr_set_outfile)
te_isect_outfile = "{s}.{t}.bed".format(s=sample, t="TE")
sv_set = nr.window(transposons, w=config['ortho_merge_distance']).saveas(
os.path.join(config['out_dir'], te_isect_outfile))
print u"[ {} ] TE intersected set saved to {}".format(op, os.path.join(config['out_dir'],
te_isect_outfile))
te_cls_outfile = "{s}.{t}.cls.bed".format(s=sample, t="TE")
sv_set = BedTool(cluster_calls(sv_set)).saveas(os.path.join(config['out_dir'], te_cls_outfile))
print u"[ {} ] clustered set saved to {}".format(op,
os.path.join(config['out_dir'], te_cls_outfile))
elif op == "intersection":
print u"[ {} ] Starting operation {} on sources {} over all samples,".format(modulename, op,
",".join(sources))
for sample in filtered_files.keys():
print u"[ %s ] %s " % (op, sample)
sets = (BedTool(os.path.join(config['out_dir'], filtered_files[sample][src])) for src in sources)
isect = sets.next().window(sets.next(), w=100, u=True).sort()
isect_set_outfile = "{s}.{t}.is.vcf".format(s=sample, t="NONREF_ISEC")
isect.saveas(os.path.join(config['out_dir'], isect_set_outfile))
print u"[ {} ] intersected set saved to {}".format(op, os.path.join(config['out_dir'],
isect_set_outfile))
elif op == "te_intersect":
print u"[ {} ] Starting operation {} on sources {} over all samples,".format(modulename, op,
",".join(sources))
for sample in filtered_files.keys():
print u"[ %s ] %s " % (op, sample)
assert len(sources) == 1
src = sources[0]
bt_set = tpi_helpers.make_BED_fromVCF(os.path.join(config['out_dir'], filtered_files[sample][src]))
te_isect_outfile = "{s}.{t}.bed".format(s=sample, t="TE")
sv_set = bt_set.window(transposons, w=50).saveas(
os.path.join(config['out_dir'], te_isect_outfile))
print u"[ {} ] TE intersected set saved to {}".format(op, os.path.join(config['out_dir'],
te_isect_outfile))
te_cls_outfile = "{s}.{t}.cls.bed".format(s=sample, t="TE")
sv_set = BedTool(cluster_calls(sv_set)).saveas(os.path.join(config['out_dir'], te_cls_outfile))
print u"[ {} ] clustered set saved to {}".format(op,
os.path.join(config['out_dir'], te_cls_outfile))
else:
print u"[ {} ] Operation '{}' not known. Nothing will be done. Check the configuration file.".format(
modulename, op)
return 1
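# A minimal (hypothetical) YAML configuration consumed by main(); the key names
# are those read above, while all paths, sample names and caller names are
# placeholders (caller names must match the lowercased first token of the VCF
# source reported by tpi_filter):
#
#   data_dir: /path/to/vcf_input
#   out_dir: /path/to/teddypi_output
#   refte: /path/to/reference_te_annotation.bed
#   ortho_merge_distance: 50
#   samples:
#     - sample1
#     - sample2
#   programs:
#     - name: delly
#     - name: pindel
#   call_operations:
#     non_redundant: [delly, pindel]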
if __name__ == '__main__':
main()
| mit |
mhugo/QGIS | python/plugins/processing/algs/grass7/ext/v_net.py | 47 | 4994 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_net.py
--------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
This Python module handles pre-treatment operations for v.net.* GRASS7 modules.
Before using a v.net module you often have to incorporate a points layer into
the network vector map.
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
import os
from qgis.core import QgsProcessingException
from processing.tools.system import getTempFilename
def incorporatePoints(alg, parameters, context, feedback, pointLayerName='points', networkLayerName='input'):
"""
incorporate points with lines to form a GRASS network
"""
# Grab the point layer and delete this parameter
pointLayer = alg.parameterAsVectorLayer(parameters, pointLayerName, context)
if pointLayer:
# Create an intermediate GRASS layer which is the combination of network + centers
intLayer = 'net' + os.path.basename(getTempFilename())
pointLayer = alg.exportedLayers[pointLayerName]
# Grab the network layer
lineLayer = alg.parameterAsVectorLayer(parameters, networkLayerName, context)
if lineLayer:
lineLayer = alg.exportedLayers[networkLayerName]
else:
raise QgsProcessingException(
alg.tr('GRASS GIS 7 v.net requires a lines layer!'))
threshold = alg.parameterAsDouble(parameters, 'threshold', context)
# Create the v.net connect command for point layer integration
command = 'v.net -s input={} points={} output={} operation=connect threshold={}'.format(
lineLayer, pointLayer, intLayer, threshold)
alg.commands.append(command)
# Connect the point layer database to the layer 2 of the network
command = 'v.db.connect -o map={} table={} layer=2'.format(intLayer, pointLayer)
alg.commands.append(command)
# remove undesired parameters
alg.removeParameter(pointLayerName)
# Use temp layer for input
alg.exportedLayers[networkLayerName] = intLayer
# Process the command
if 'threshold' in parameters:
alg.removeParameter('threshold')
alg.processCommand(parameters, context, feedback)
def variableOutput(alg, layers, parameters, context, nocats=True):
""" Handle variable data output for v.net modules:
:param layers:
layers is a dict of outputs:
{ 'outputName': ['srcLayer', 'output_type', output_layer_number, nocats],
...
}
where:
- outputName is the name of the output in the description file.
- srcLayer is the grass name of the layer to export.
- output_type is the GRASS datatype (point/line/area/etc.).
- output_layer_number is the GRASS layer number for multiple layers datasets.
- nocats indicates weither we need to also export without categories items.
:param parameters:
:param context:
:param nocats: do not add categories.
"""
for outputName, typeList in layers.items():
if not isinstance(typeList, list):
continue
file_name = alg.parameterAsOutputLayer(parameters, outputName, context)
src_layer = typeList[0]
output_type = typeList[1]
output_layer_number = typeList[2]
no_cats = typeList[3]
grass_name = '{}{}'.format(src_layer, alg.uniqueSuffix)
alg.exportVectorLayer(grassName=grass_name,
fileName=file_name,
layer=output_layer_number,
exportnocat=no_cats,
dataType=output_type)
def processOutputs(alg, parameters, context, feedback):
idx = alg.parameterAsInt(parameters, 'operation', context)
operations = alg.parameterDefinition('operation').options()
operation = operations[idx]
if operation == 'nodes':
outputParameter = {'output': ['output', 'point', 2, True]}
elif operation == 'connect':
outputParameter = {'output': ['output', 'line', 1, False]}
elif operation == 'arcs':
outputParameter = {'output': ['output', 'line', 1, True]}
variableOutput(alg, outputParameter, parameters, context)
| gpl-2.0 |