max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py | BadDevCode/lumberyard | 1,738 | 1455 |
"""
Implement transformation on Numba IR
"""
from __future__ import absolute_import, print_function
from collections import namedtuple, defaultdict
import logging
from numba.analysis import compute_cfg_from_blocks, find_top_level_loops
from numba import ir, errors, ir_utils
from numba.analysis import compute_use_defs
_logger = logging.getLogger(__name__)
def _extract_loop_lifting_candidates(cfg, blocks):
"""
Returns a list of loops that are candidates for loop lifting
"""
# check well-formed-ness of the loop
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
succs = set(x for x, _ in cfg.successors(k))
if not succs:
# If the exit point has no successor, it contains a return
# statement, which is not handled by the looplifting code.
# Thus, this loop is not a candidate.
_logger.debug("return-statement in loop.")
return False
outedges |= succs
ok = len(outedges) == 1
_logger.debug("same_exit_point=%s (%s)", ok, outedges)
return ok
def one_entry(loop):
"there is one entry"
ok = len(loop.entries) == 1
_logger.debug("one_entry=%s", ok)
return ok
def cannot_yield(loop):
"cannot have yield inside the loop"
insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
for blk in map(blocks.__getitem__, insiders):
for inst in blk.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Yield):
_logger.debug("has yield")
return False
_logger.debug("no yield")
return True
_logger.info('finding looplift candidates')
# the check for cfg.entry_point in the loop.entries is to prevent a bad
# rewrite where a prelude for a lifted loop would get written into block -1
# if a loop entry were in block 0
candidates = []
for loop in find_top_level_loops(cfg):
_logger.debug("top-level loop: %s", loop)
if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and
cfg.entry_point() not in loop.entries):
candidates.append(loop)
_logger.debug("add candidate: %s", loop)
return candidates
def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids):
"""Find input and output variables to a block region.
"""
inputs = livemap[callfrom]
outputs = livemap[returnto]
# ensure live variables are actually used in the blocks, else remove,
# saves having to create something valid to run through postproc
# to achieve similar
loopblocks = {}
for k in body_block_ids:
loopblocks[k] = blocks[k]
used_vars = set()
def_vars = set()
defs = compute_use_defs(loopblocks)
for vs in defs.usemap.values():
used_vars |= vs
for vs in defs.defmap.values():
def_vars |= vs
used_or_defined = used_vars | def_vars
# note: sorted for stable ordering
inputs = sorted(set(inputs) & used_or_defined)
outputs = sorted(set(outputs) & used_or_defined & def_vars)
return inputs, outputs
_loop_lift_info = namedtuple('loop_lift_info',
'loop,inputs,outputs,callfrom,returnto')
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
"""
Returns information on looplifting candidates.
"""
loops = _extract_loop_lifting_candidates(cfg, blocks)
loopinfos = []
for loop in loops:
[callfrom] = loop.entries # requirement checked earlier
an_exit = next(iter(loop.exits)) # any one of the exit blocks
if len(loop.exits) > 1:
# Pre-Py3.8 may have multiple exits
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
else:
# Post-Py3.8 DO NOT have multiple exits
returnto = an_exit
local_block_ids = set(loop.body) | set(loop.entries)
inputs, outputs = find_region_inout_vars(
blocks=blocks,
livemap=livemap,
callfrom=callfrom,
returnto=returnto,
body_block_ids=local_block_ids,
)
lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs,
callfrom=callfrom, returnto=returnto)
loopinfos.append(lli)
return loopinfos
def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto):
"""
Transform calling block from top-level function to call the lifted loop.
"""
scope = block.scope
loc = block.loc
blk = ir.Block(scope=scope, loc=loc)
ir_utils.fill_block_with_call(
newblock=blk,
callee=liftedloop,
label_next=returnto,
inputs=inputs,
outputs=outputs,
)
return blk
def _loop_lift_prepare_loop_func(loopinfo, blocks):
"""
In-place transform of loop blocks for use as a lifted loop.
"""
entry_block = blocks[loopinfo.callfrom]
scope = entry_block.scope
loc = entry_block.loc
# Lowering assumes the first block to be the one with the smallest offset
firstblk = min(blocks) - 1
blocks[firstblk] = ir_utils.fill_callee_prologue(
block=ir.Block(scope=scope, loc=loc),
inputs=loopinfo.inputs,
label_next=loopinfo.callfrom,
)
blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue(
block=ir.Block(scope=scope, loc=loc),
outputs=loopinfo.outputs,
)
def _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
typingctx, targetctx, flags, locals):
"""
Modify the blocks in place to call into the lifted loop.
Returns a dictionary of blocks of the lifted-loop.
"""
from numba.dispatcher import LiftedLoop
# Copy loop blocks
loop = loopinfo.loop
loopblockkeys = set(loop.body) | set(loop.entries)
if len(loop.exits) > 1:
# Pre-Py3.8 may have multiple exits
loopblockkeys |= loop.exits
loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)
# Modify the loop blocks
_loop_lift_prepare_loop_func(loopinfo, loopblocks)
# Create a new IR for the lifted loop
lifted_ir = func_ir.derive(blocks=loopblocks,
arg_names=tuple(loopinfo.inputs),
arg_count=len(loopinfo.inputs),
force_non_generator=True)
liftedloop = LiftedLoop(lifted_ir,
typingctx, targetctx, flags, locals)
# modify for calling into liftedloop
callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom],
loopinfo.inputs, loopinfo.outputs,
loopinfo.returnto)
# remove blocks
for k in loopblockkeys:
del blocks[k]
# update main interpreter callsite into the liftedloop
blocks[loopinfo.callfrom] = callblock
return liftedloop
def loop_lifting(func_ir, typingctx, targetctx, flags, locals):
"""
Loop lifting transformation.
Given an interpreter `func_ir`, returns a 2-tuple of
`(toplevel_interp, [loop0_interp, loop1_interp, ...])`
"""
blocks = func_ir.blocks.copy()
cfg = compute_cfg_from_blocks(blocks)
loopinfos = _loop_lift_get_candidate_infos(cfg, blocks,
func_ir.variable_lifetime.livemap)
loops = []
if loopinfos:
_logger.debug('loop lifting this IR with %d candidates:\n%s',
len(loopinfos), func_ir.dump_to_string())
for loopinfo in loopinfos:
lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
typingctx, targetctx, flags, locals)
loops.append(lifted)
# Make main IR
main = func_ir.derive(blocks=blocks)
return main, loops
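# Usage sketch (illustrative only; `loop_lifting` is normally driven by the
# compiler pipeline rather than called directly):
#   main_ir, lifted = loop_lifting(func_ir, typingctx, targetctx, flags, locals)
# where `main_ir` calls into one LiftedLoop per entry of `lifted`.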
def canonicalize_cfg_single_backedge(blocks):
"""
Rewrite loops that have multiple backedges.
"""
cfg = compute_cfg_from_blocks(blocks)
newblocks = blocks.copy()
def new_block_id():
return max(newblocks.keys()) + 1
def has_multiple_backedges(loop):
count = 0
for k in loop.body:
blk = blocks[k]
edges = blk.terminator.get_targets()
# is a backedge?
if loop.header in edges:
count += 1
if count > 1:
# early exit
return True
return False
def yield_loops_with_multiple_backedges():
for lp in cfg.loops().values():
if has_multiple_backedges(lp):
yield lp
def replace_target(term, src, dst):
def replace(target):
return (dst if target == src else target)
if isinstance(term, ir.Branch):
return ir.Branch(cond=term.cond,
truebr=replace(term.truebr),
falsebr=replace(term.falsebr),
loc=term.loc)
elif isinstance(term, ir.Jump):
return ir.Jump(target=replace(term.target), loc=term.loc)
else:
assert not term.get_targets()
return term
def rewrite_single_backedge(loop):
"""
Add new tail block that gathers all the backedges
"""
header = loop.header
tailkey = new_block_id()
for blkkey in loop.body:
blk = newblocks[blkkey]
if header in blk.terminator.get_targets():
newblk = blk.copy()
# rewrite backedge into jumps to new tail block
newblk.body[-1] = replace_target(blk.terminator, header,
tailkey)
newblocks[blkkey] = newblk
# create new tail block
entryblk = newblocks[header]
tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc)
# add backedge
tailblk.append(ir.Jump(target=header, loc=tailblk.loc))
newblocks[tailkey] = tailblk
for loop in yield_loops_with_multiple_backedges():
rewrite_single_backedge(loop)
return newblocks
def canonicalize_cfg(blocks):
"""
Rewrite the given blocks to canonicalize the CFG.
Returns a new dictionary of blocks.
"""
return canonicalize_cfg_single_backedge(blocks)
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
"""With-lifting transformation
Rewrite the IR to extract all withs.
Only the top-level withs are extracted.
Returns a 2-tuple of (the_new_ir, the_lifted_with_ir).
"""
from numba import postproc
def dispatcher_factory(func_ir, objectmode=False, **kwargs):
from numba.dispatcher import LiftedWith, ObjModeLiftedWith
myflags = flags.copy()
if objectmode:
# Lifted with-block cannot looplift
myflags.enable_looplift = False
# Lifted with-block uses object mode
myflags.enable_pyobject = True
myflags.force_pyobject = True
myflags.no_cpython_wrapper = False
cls = ObjModeLiftedWith
else:
cls = LiftedWith
return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs)
postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime
assert func_ir.variable_lifetime
vlt = func_ir.variable_lifetime
blocks = func_ir.blocks.copy()
# find where with-contexts regions are
withs = find_setupwiths(blocks)
cfg = vlt.cfg
_legalize_withs_cfg(withs, cfg, blocks)
# For each with-regions, mutate them according to
# the kind of contextmanager
sub_irs = []
for (blk_start, blk_end) in withs:
body_blocks = []
for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):
body_blocks.append(node)
_legalize_with_head(blocks[blk_start])
# Find the contextmanager
cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)
# Mutate the body and get new IR
sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory,
extra)
sub_irs.append(sub)
if not sub_irs:
# Unchanged
new_ir = func_ir
else:
new_ir = func_ir.derive(blocks)
return new_ir, sub_irs
def _get_with_contextmanager(func_ir, blocks, blk_start):
"""Get the global object used for the context manager
"""
_illegal_cm_msg = "Illegal use of context-manager."
def get_var_dfn(var):
"""Get the definition given a variable"""
return func_ir.get_definition(var)
def get_ctxmgr_obj(var_ref):
"""Return the context-manager object and extra info.
The extra contains the arguments if the context-manager is used
as a call.
"""
# If the contextmanager is used as a Call
dfn = func_ir.get_definition(var_ref)
if isinstance(dfn, ir.Expr) and dfn.op == 'call':
args = [get_var_dfn(x) for x in dfn.args]
kws = {k: get_var_dfn(v) for k, v in dfn.kws}
extra = {'args': args, 'kwargs': kws}
var_ref = dfn.func
else:
extra = None
ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref)
# check the contextmanager object
if ctxobj is ir.UNDEFINED:
raise errors.CompilerError(
"Undefined variable used as context manager",
loc=blocks[blk_start].loc,
)
if ctxobj is None:
raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc)
return ctxobj, extra
# Scan the start of the with-region for the contextmanager
for stmt in blocks[blk_start].body:
if isinstance(stmt, ir.EnterWith):
var_ref = stmt.contextmanager
ctxobj, extra = get_ctxmgr_obj(var_ref)
if not hasattr(ctxobj, 'mutate_with_body'):
raise errors.CompilerError(
"Unsupported context manager in use",
loc=blocks[blk_start].loc,
)
return ctxobj, extra
# No contextmanager found?
raise errors.CompilerError(
"malformed with-context usage",
loc=blocks[blk_start].loc,
)
def _legalize_with_head(blk):
"""Given *blk*, the head block of the with-context, check that it doesn't
do anything else.
"""
counters = defaultdict(int)
for stmt in blk.body:
counters[type(stmt)] += 1
if counters.pop(ir.EnterWith) != 1:
raise errors.CompilerError(
"with's head-block must have exactly 1 ENTER_WITH",
loc=blk.loc,
)
if counters.pop(ir.Jump) != 1:
raise errors.CompilerError(
"with's head-block must have exactly 1 JUMP",
loc=blk.loc,
)
# Can have any number of del
counters.pop(ir.Del, None)
# There MUST NOT be any other statements
if counters:
raise errors.CompilerError(
"illegal statements in with's head-block",
loc=blk.loc,
)
def _cfg_nodes_in_region(cfg, region_begin, region_end):
"""Find the set of CFG nodes that are in the given region
"""
region_nodes = set()
stack = [region_begin]
while stack:
tos = stack.pop()
succs, _ = zip(*cfg.successors(tos))
nodes = set([node for node in succs
if node not in region_nodes and
node != region_end])
stack.extend(nodes)
region_nodes |= nodes
return region_nodes
def _legalize_withs_cfg(withs, cfg, blocks):
"""Verify the CFG of the with-context(s).
"""
doms = cfg.dominators()
postdoms = cfg.post_dominators()
# Verify that the with-context has no side-exits
for s, e in withs:
loc = blocks[s].loc
if s not in doms[e]:
# Not sure what condition can trigger this error.
msg = "Entry of with-context not dominating the exit."
raise errors.CompilerError(msg, loc=loc)
if e not in postdoms[s]:
msg = (
"Does not support with-context that contain branches "
"(i.e. break/return/raise) that can leave the with-context. "
"Details: exit of with-context not post-dominating the entry. "
)
raise errors.CompilerError(msg, loc=loc)
def find_setupwiths(blocks):
Find all top-level withs.
Returns a list of ranges for the with-regions.
"""
def find_ranges(blocks):
for blk in blocks.values():
for ew in blk.find_insts(ir.EnterWith):
yield ew.begin, ew.end
def previously_occurred(start, known_ranges):
for a, b in known_ranges:
if start >= a and start < b:
return True
return False
known_ranges = []
for s, e in sorted(find_ranges(blocks)):
if not previously_occurred(s, known_ranges):
if e not in blocks:
# this is possible if there's an exit path in the with-block
raise errors.CompilerError(
'unsupported controlflow due to return/raise '
'statements inside with block'
)
assert s in blocks, 'starting offset is not a label'
known_ranges.append((s, e))
return known_ranges
|
frappe/patches/v13_0/remove_web_view.py | chentaoz/frappe | 3,755 | 1463 |
import frappe
def execute():
frappe.delete_doc_if_exists("DocType", "Web View")
frappe.delete_doc_if_exists("DocType", "Web View Component")
frappe.delete_doc_if_exists("DocType", "CSS Class") |
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py | nipunjain099/AutoGuard | 147 | 1469 | import numpy as np
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
class OCROnObjects():
def __init__(self, license_plate):
character_objects = self.identify_boundary_objects(license_plate)
self.get_regions(character_objects, license_plate)
def identify_boundary_objects(self, a_license_plate):
labelImage = measure.label(a_license_plate)
character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
regionLists = regionprops(labelImage)
return regionLists
def get_regions(self, character_objects, a_license_plate):
"""
used to map out regions where the license plate characters are;
the principles of connected component analysis and labelling
were used
Parameters:
-----------
a_license_plate: 2D numpy binary image of the license plate
Returns:
--------
a dictionary containing:
fullscale: 3D array containing the 2D array of each character
columnsVal: 1D array of the starting column of each character
coordinates: array of the bounding boxes (bbox) of each character
"""
cord = []
counter=0
column_list = []
character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
for regions in character_objects:
minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox
character_height = maximumRow - minimumRow
character_width = maximumCol - minimumCol
roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol]
if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth:
if counter == 0:
samples = resize(roi, (20,20))
cord.append(regions.bbox)
counter += 1
elif counter == 1:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
counter+=1
else:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
column_list.append(minimumCol)
if len(column_list) == 0:
self.candidates = {}
else:
self.candidates = {
'fullscale': samples,
'coordinates': np.array(cord),
'columnsVal': column_list
}
return self.candidates |
qiskit/ml/datasets/iris.py | stefan-woerner/aqua | 504 | 1480 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
iris dataset
"""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from qiskit.aqua import MissingOptionalLibraryError
def iris(training_size, test_size, n, plot_data=False):
""" returns iris dataset """
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_iris(return_X_y=True)
sample_train, sample_test, label_train, label_test = \
train_test_split(data, target, test_size=1, random_state=42)
# Now we standardize for gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_test[label_test == k, :])[:test_size]
for k, key in enumerate(class_labels)}
if plot_data:
try:
import matplotlib.pyplot as plt
except ImportError as ex:
raise MissingOptionalLibraryError(
libname='Matplotlib',
name='iris',
pip_install='pip install matplotlib') from ex
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Iris dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
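# Illustrative call (values are examples only); `n` is the feature dimension
# kept by PCA, typically the number of qubits:
#   sample_train, training_input, test_input, class_labels = iris(
#       training_size=20, test_size=10, n=2, plot_data=False)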
|
NAS/PaddleSlim/train_supernet.py | naviocean/SimpleCVReproduction | 923 | 1485 | from paddle.vision.transforms import (
ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose,
HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *
import paddle
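# Note (hedged): several names used below (CrossEntropyLoss, RandomApply,
# ToArray, LRSchedulerM, callbacks) are not imported in this snippet; they
# presumably come from paddle.nn / paddle.callbacks or local helper modules
# of the repository.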
# supernet training, based on the PaddleSlim model compression toolkit
# https://github.com/PaddlePaddle/PaddleSlim (stars are welcome)
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils
channel_list = []
for i in range(1, 21):
if 0 < i <= 7:
# channel_list.append(random.choice([ 4, 8, 12, 16]))
channel_list.append(16)
elif 7 < i <= 13:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
channel_list.append(32)
elif 13 < i <= 19:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
else:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
net = ResNet20(100, channel_list)
net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))
channel_optional = []
for i in range(0, 23):
if i <= 7:
channel_optional.append([4, 8, 12, 16])
# channel_optional.append([12, 16])
elif 7 < i <= 14:
channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
# channel_optional.append([20, 24, 28, 32])
elif 14 < i <= 21:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
else:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
distill_config = DistillConfig(teacher_model=net2)
sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)
ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')
model = paddle.Model(ofa_net)
MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'
model.prepare(
paddle.optimizer.Momentum(
learning_rate=LinearWarmup(
CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
momentum=MOMENTUM,
parameters=model.parameters(),
weight_decay=WEIGHT_DECAY),
CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
transforms = Compose([
RandomCrop(32, padding=4),
RandomApply(BrightnessTransform(0.1)),
RandomApply(ContrastTransform(0.1)),
RandomHorizontalFlip(),
RandomRotation(15),
ToArray(),
Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')]
model.fit(
train_set,
test_set,
epochs=MAX_EPOCH,
batch_size=BATCH_SIZE,
save_dir='checkpoints',
save_freq=100,
shuffle=True,
num_workers=4,
verbose=1,
callbacks=callbacks,
)
|
chainer/_version.py | yumetov/chainer | 3,705 | 1520 | __version__ = '7.8.0'
_optional_dependencies = [
{
'name': 'CuPy',
'packages': [
'cupy-cuda120',
'cupy-cuda114',
'cupy-cuda113',
'cupy-cuda112',
'cupy-cuda111',
'cupy-cuda110',
'cupy-cuda102',
'cupy-cuda101',
'cupy-cuda100',
'cupy-cuda92',
'cupy-cuda91',
'cupy-cuda90',
'cupy-cuda80',
'cupy',
],
'specifier': '>=7.7.0,<8.0.0',
'help': 'https://docs.cupy.dev/en/latest/install.html',
},
{
'name': 'iDeep',
'packages': [
'ideep4py',
],
'specifier': '>=2.0.0.post3, <2.1',
'help': 'https://docs.chainer.org/en/latest/tips.html',
},
]
|
saleor/core/transactions.py | fairhopeweb/saleor | 15,337 | 1534 | from contextlib import contextmanager
from django.db import DatabaseError
from ..core.tracing import traced_atomic_transaction
@contextmanager
def transaction_with_commit_on_errors():
"""Perform transaction and raise an error in any occurred."""
error = None
with traced_atomic_transaction():
try:
yield
except DatabaseError:
raise
except Exception as e:
error = e
if error:
raise error
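# Usage sketch (illustrative; `do_db_writes` is a hypothetical helper): a
# DatabaseError is re-raised inside the atomic block (rolling it back), while
# any other exception is stored and re-raised only after the block commits:
#   with transaction_with_commit_on_errors():
#       do_db_writes()  # its writes commit even if it raises a non-DB error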
|
grr/server/grr_response_server/databases/db_yara_test_lib.py | khanhgithead/grr | 4,238 | 1538 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestYaraMixin(object):
"""A mixin class for testing YARA methods of database implementations."""
def testWriteYaraSignatureReferenceIncorrectUsername(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
with self.assertRaises(db.UnknownGRRUserError) as context:
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux")
self.assertEqual(context.exception.username, "quux")
def testWriteYaraSignatureReferenceDuplicated(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
# Writing duplicated signatures is possible; it should not raise.
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
def testVerifyYaraSignatureReferenceSimple(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))
def testVerifyYaraSignatureReferenceIncorrect(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
|
tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py | gpspelle/pose-estimation | 862 | 1574 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
slim = tf.contrib.slim
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular
# constants.
#
# All but first 2 and last one have two convolutions, and there is one
# extra conv that is not in the spec. (logits)
self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
# Check that depthwise are exposed.
for i in range(2, 17):
self.assertIn('layer_%d/depthwise_output' % i, ep)
def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
num_classes=None)
self.assertIs(net, ep['global_pool'])
def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]:
tf.reset_default_graph()
_, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2)
def testWithSplits(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
(ops.expanded_conv,): dict(split_expansion=2),
}
_, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# All but 3 ops have 3 conv operators, the remaining 3 have one,
# and there is one unaccounted for.
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self):
tf.reset_default_graph()
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16,
min_depth=32)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
1001], s)
def testDivisibleByWithArgScope(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
# All convolutions will be 8->48, except for the last one.
self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self):
tf.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16,
use_explicit_padding=True)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet.training_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet.training_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
|
lib/modeling/VGG16.py | rsumner31/Detectron | 429 | 1591 | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""VGG16 from https://arxiv.org/abs/1409.1556."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.config import cfg
def add_VGG16_conv5_body(model):
model.Conv('data', 'conv1_1', 3, 64, 3, pad=1, stride=1)
model.Relu('conv1_1', 'conv1_1')
model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1)
model.Relu('conv1_2', 'conv1_2')
model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2)
model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1)
model.Relu('conv2_1', 'conv2_1')
model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1, stride=1)
model.Relu('conv2_2', 'conv2_2')
model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2)
model.StopGradient('pool2', 'pool2')
model.Conv('pool2', 'conv3_1', 128, 256, 3, pad=1, stride=1)
model.Relu('conv3_1', 'conv3_1')
model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1)
model.Relu('conv3_2', 'conv3_2')
model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1)
model.Relu('conv3_3', 'conv3_3')
model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2)
model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1)
model.Relu('conv4_1', 'conv4_1')
model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1)
model.Relu('conv4_2', 'conv4_2')
model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1)
model.Relu('conv4_3', 'conv4_3')
model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0, stride=2)
model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1)
model.Relu('conv5_1', 'conv5_1')
model.Conv('conv5_1', 'conv5_2', 512, 512, 3, pad=1, stride=1)
model.Relu('conv5_2', 'conv5_2')
model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1)
blob_out = model.Relu('conv5_3', 'conv5_3')
return blob_out, 512, 1. / 16.
def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale):
model.RoIFeatureTransform(
blob_in,
'pool5',
blob_rois='rois',
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=7,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
model.FC('pool5', 'fc6', dim_in * 7 * 7, 4096)
model.Relu('fc6', 'fc6')
model.FC('fc6', 'fc7', 4096, 4096)
blob_out = model.Relu('fc7', 'fc7')
return blob_out, 4096
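# Usage sketch (hedged; exact wiring depends on the surrounding model builder):
# the two functions are typically chained when building a Fast R-CNN style net,
#   blob, dim, spatial_scale = add_VGG16_conv5_body(model)
#   add_VGG16_roi_fc_head(model, blob, dim, spatial_scale)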
|
setup.py | yangjing1127/xmind2testcase | 537 | 1592 | #!/usr/env/bin python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom
exec(f.read(), about)
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
install_requires = [ # custom
"xmind",
"flask",
"arrow",
]
class PyPiCommand(Command):
""" Build and publish this package and make a tag.
Support: python setup.py pypi
Copied from requests_html
"""
user_options = []
@staticmethod
def status(s):
"""Prints things in green color."""
print('\033[0;32m{0}\033[0m'.format(s))
def initialize_options(self):
""" override
"""
pass
def finalize_options(self):
""" override
"""
pass
def run(self):
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
self.status('Publishing git tags...')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
try:
self.status('Removing current build artifacts...')
rmtree(os.path.join(here, 'dist'))
rmtree(os.path.join(here, 'build'))
rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom
except OSError:
pass
self.status('Congratulations! Upload PyPi and publish git tag successfully...')
sys.exit()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type='text/markdown',
keywords=about['__keywords__'],
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
license=about['__license__'],
packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom
package_data={ # custom
'': ['README.md'],
'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'],
},
install_requires=install_requires,
extras_require={},
python_requires='>=3.0, <4', # custom
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={ # custom
'console_scripts': [
'xmind2testcase=xmind2testcase.cli:cli_main',
]
},
cmdclass={
# python3 setup.py pypi
'pypi': PyPiCommand
}
)
|
AppServer/google/appengine/tools/devappserver2/login.py | loftwah/appscale | 790 | 1602 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handles login/logout pages and dealing with user cookies.
Includes a WSGI application that serves the login page and handles login and
logout HTTP requests. It accepts these GET query parameters:
continue: URL to redirect to after a login or logout has completed.
email: Email address to set for the client.
admin: If 'True', the client should be logged in as an admin.
action: What action to take ('Login' or 'Logout').
To view the current user information and a form for logging in and out,
supply no parameters.
"""
import cgi
import Cookie
import hashlib
import logging
import os
import sha
import sys
import urllib
import uuid
import webapp2
app_dashboard_lib = '/../../../../../AppDashboard/lib'
sys.path.append(os.path.dirname(__file__) + app_dashboard_lib)
from app_dashboard_helper import AppDashboardHelper
# URL of the login page within the dev appserver.
LOGIN_URL_RELATIVE = '_ah/login'
# CGI parameter constants.
CONTINUE_PARAM = 'continue'
_EMAIL_PARAM = 'email'
_ADMIN_PARAM = 'admin'
ACTION_PARAM = 'action'
# Values for the action parameter.
LOGOUT_ACTION = 'logout'
LOGIN_ACTION = 'login'
# Name of the cookie that stores the user info.
_COOKIE_NAME = 'dev_appserver_login'
# Indicates that the user has admin access to all applications.
CLOUD_ADMIN_MARKER = 'CLOUD_ADMIN'
# The port that the AppDashboard serves HTTPS traffic on.
DASHBOARD_HTTPS_PORT = "1443"
def get_user_info(http_cookie, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from an HTTP Cookie header.
Args:
http_cookie: The value of the 'Cookie' HTTP request header.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
try:
cookie = Cookie.SimpleCookie(http_cookie)
except Cookie.CookieError:
return '', False, ''
cookie_dict = dict((k, v.value) for k, v in cookie.iteritems())
return _get_user_info_from_dict(cookie_dict, cookie_name)
def _get_user_info_from_dict(cookie_dict, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from a cookie dictionary.
Args:
cookie_dict: A dictionary mapping cookie names onto values.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
cookie_secret = os.environ['COOKIE_SECRET']
cookie_value = cookie_dict.get(cookie_name, '')
cookie_value = cookie_value.replace("%3A",":")
cookie_value = cookie_value.replace("%40",'@')
cookie_value = cookie_value.replace("%2C",",")
email, nickname, admin, hsh = (cookie_value.split(':') + ['', '', '', ''])[:4]
if email == '':
nickname = ''
admin = ''
return '', False, ''
else:
vhsh = sha.new(email+nickname+admin+cookie_secret).hexdigest()
if hsh != vhsh:
logging.info("{0} has an invalid cookie, so ignoring it.".format(email))
return '', False, ''
admin_apps = admin.split(',')
current_app = os.environ['APPLICATION_ID']
is_admin = current_app in admin_apps or CLOUD_ADMIN_MARKER in admin_apps
return email, is_admin, nickname
def _create_cookie_data(email, admin):
"""Creates cookie payload data.
Args:
email: The user's email address.
admin: True if the user is an admin; False otherwise.
Returns:
A string containing the cookie payload.
"""
if email:
user_id_digest = hashlib.md5(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return '%s:%s:%s' % (email, admin, user_id)
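# Illustrative payload produced above (hedged; the user_id digits depend on
# the md5 digest of the lowercased email):
#   _create_cookie_data('test@example.com', True)
#   -> 'test@example.com:True:1<20 digits derived from the md5 digest>'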
def _set_user_info_cookie(email, admin, cookie_name=_COOKIE_NAME):
"""Creates a cookie to set the user information for the requestor.
Args:
email: The email to set for the user.
admin: True if the user should be admin; False otherwise.
cookie_name: The name of the cookie that stores the user info.
Returns:
Set-Cookie value for setting the user info of the requestor.
"""
cookie_value = _create_cookie_data(email, admin)
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = cookie_value
cookie[cookie_name]['path'] = '/'
return cookie[cookie_name].OutputString()
def _clear_user_info_cookie(cookie_name=_COOKIE_NAME):
"""Clears the user info cookie from the requestor, logging them out.
Args:
cookie_name: The name of the cookie that stores the user info.
Returns:
A Set-Cookie value for clearing the user info of the requestor.
"""
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['path'] = '/'
cookie[cookie_name]['max-age'] = '0'
if AppDashboardHelper.USE_SHIBBOLETH:
cookie[cookie_name]['domain'] = AppDashboardHelper.\
SHIBBOLETH_COOKIE_DOMAIN
return cookie[cookie_name].OutputString()
_LOGIN_TEMPLATE = """<html>
<head>
<title>Login</title>
</head>
<body>
<form method="get" action="%(login_url)s"
style="text-align:center; font: 13px sans-serif">
<div style="width: 20em; margin: 1em auto;
text-align:left;
padding: 0 2em 1.25em 2em;
background-color: #d6e9f8;
border: 2px solid #67a7e3">
<h3>%(login_message)s</h3>
<p style="padding: 0; margin: 0">
<label for="email" style="width: 3em">Email:</label>
<input name="email" type="email" value="%(email)s" id="email"/>
</p>
<p style="margin: .5em 0 0 3em; font-size:12px">
<input name="admin" type="checkbox" value="True"
%(admin_checked)s id="admin"/>
<label for="admin">Sign in as Administrator</label>
</p>
<p style="margin-left: 3em">
<input name="action" value="Login" type="submit"
id="submit-login" />
<input name="action" value="Logout" type="submit"
id="submit-logout" />
</p>
</div>
<input name="continue" type="hidden" value="%(continue_url)s"/>
</form>
</body>
</html>
"""
def _render_login_template(login_url, continue_url, email, admin):
"""Renders the login page.
Args:
login_url: The parameter to _login_response.
continue_url: The parameter to _login_response.
email: The email address of the current user, if any.
admin: True if the user is currently an admin; False otherwise.
Returns:
A string containing the contents of the login page.
"""
if email:
login_message = 'Logged in'
else:
login_message = 'Not logged in'
email = 'test\x40example.com'
admin_checked = 'checked' if admin else ''
template_dict = {
'email': cgi.escape(email, quote=True),
'admin_checked': admin_checked,
'login_message': login_message,
'login_url': cgi.escape(login_url, quote=True),
'continue_url': cgi.escape(continue_url, quote=True),
}
return _LOGIN_TEMPLATE % template_dict
def login_redirect(application_url, continue_url, start_response):
"""Writes a login redirection URL to a user.
This redirects to login_url with a continue parameter to return to
continue_url. The login_url should be on the canonical front-end server,
regardless of the host:port the user connected to.
Args:
application_url: The URL of the dev appserver domain
(e.g., 'http://localhost:8080').
continue_url: The URL to continue to after the user logs in.
start_response: A WSGI start_response function.
Returns:
An (empty) iterable over strings containing the body of the HTTP response.
"""
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = '{0}:{1}/login?{2}={3}'.format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR,
AppDashboardHelper.SHIBBOLETH_CONNECTOR_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url)
)
else:
hostname = os.environ['NGINX_HOST']
redirect_url = 'https://{0}:{1}/login?{2}={3}'.format(
hostname,
DASHBOARD_HTTPS_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url))
start_response('302 Requires login',
[('Location', redirect_url)])
return []
def fake_admin():
""" Generate the fake admin login secret
Returns:
A string containing the fake login secret
"""
return hashlib.sha1('{}/{}'.format(
os.environ.get('APPNAME', str(uuid.uuid4())),
os.environ.get('COOKIE_SECRET', str(uuid.uuid4())))).hexdigest()
class Handler(webapp2.RequestHandler):
"""The request handler for the login and logout pages."""
def get(self):
action = self.request.get(ACTION_PARAM)
set_email = self.request.get(_EMAIL_PARAM)
set_admin = self.request.get(_ADMIN_PARAM).lower() == 'true'
continue_url = self.request.get(CONTINUE_PARAM)
login_url = self.request.path_url
if action:
redirect_url = continue_url or login_url
# Perform the action.
if action.lower() == LOGOUT_ACTION.lower():
self.response.headers['Set-Cookie'] = _clear_user_info_cookie()
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = AppDashboardHelper.SHIBBOLETH_LOGOUT_URL
elif action.lower() == LOGIN_ACTION.lower() and set_email:
self.response.headers['Set-Cookie'] = _set_user_info_cookie(set_email,
set_admin)
# URLs should be ASCII-only byte strings.
if isinstance(redirect_url, unicode):
redirect_url = redirect_url.encode('ascii')
# Redirect the user after performing the action.
self.response.status = 302
self.response.status_message = 'Redirecting to continue URL'
self.response.headers['Location'] = redirect_url
else:
# Send the user to the AppDashboard to log in before letting them view the
# specified URL.
if AppDashboardHelper.USE_SHIBBOLETH:
appscale_login_url = "{0}:{1}/login".format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR, DASHBOARD_HTTPS_PORT)
else:
appscale_login_url = "https://{0}:{1}/login".format(
os.environ['NGINX_HOST'], DASHBOARD_HTTPS_PORT)
redirect_url = '{0}?{1}={2}'.format(appscale_login_url, CONTINUE_PARAM,
continue_url)
self.response.status = 302
self.response.status_message = 'Redirecting to login service URL'
self.response.headers['Location'] = redirect_url
application = webapp2.WSGIApplication([('/.*', Handler)], debug=True)
|
stanza/models/common/dropout.py | rasimuvaikas/stanza | 3,633 | 1615 |
import torch
import torch.nn as nn
class WordDropout(nn.Module):
""" A word dropout layer that's designed for embedded inputs (e.g., any inputs to an LSTM layer).
Given a batch of embedded inputs, this layer randomly sets some of them to a replacement state.
Note that this layer assumes the last dimension of the input to be the hidden dimension of a unit.
"""
def __init__(self, dropprob):
super().__init__()
self.dropprob = dropprob
def forward(self, x, replacement=None):
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
masksize[-1] = 1
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, 0)
if replacement is not None:
res = res + dropmask.float() * replacement
return res
def extra_repr(self):
return 'p={}'.format(self.dropprob)
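# Usage sketch (illustrative; assumes an embedded input of shape
# [batch, seq_len, hidden]):
#   drop = WordDropout(0.1)
#   out = drop(torch.randn(8, 20, 100))        # zeroes whole hidden vectors with prob 0.1
#   out = drop(torch.randn(8, 20, 100), repl)  # or swaps them for a replacement vector `repl`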
class LockedDropout(nn.Module):
"""
A variant of dropout layer that consistently drops out the same parameters over time. Also known as the variational dropout.
This implementation was modified from the LockedDropout implementation in the flair library (https://github.com/zalandoresearch/flair).
"""
def __init__(self, dropprob, batch_first=True):
super().__init__()
self.dropprob = dropprob
self.batch_first = batch_first
def forward(self, x):
if not self.training or self.dropprob == 0:
return x
if not self.batch_first:
m = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
else:
m = x.new_empty(x.size(0), 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
mask = m.div(1 - self.dropprob).expand_as(x)
return mask * x
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class SequenceUnitDropout(nn.Module):
""" A unit dropout layer that's designed for input of sequence units (e.g., word sequence, char sequence, etc.).
Given a sequence of unit indices, this layer randomly sets some of them to a replacement id (usually set to be <UNK>).
"""
def __init__(self, dropprob, replacement_id):
super().__init__()
self.dropprob = dropprob
self.replacement_id = replacement_id
def forward(self, x):
""" :param: x must be a LongTensor of unit indices. """
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, self.replacement_id)
return res
def extra_repr(self):
return 'p={}, replacement_id={}'.format(self.dropprob, self.replacement_id)
|
k2/python/host/k2host/properties.py | Jarvan-Wang/k2 | 144 | 1631 |
# Copyright (c) 2020 Xiaomi Corporation (author: <NAME>)
# See ../../../LICENSE for clarification regarding multiple authors
import torch
from torch.utils.dlpack import to_dlpack
from .fsa import Fsa
from _k2host import _is_valid
from _k2host import _is_top_sorted
from _k2host import _is_arc_sorted
from _k2host import _has_self_loops
from _k2host import _is_acyclic
from _k2host import _is_deterministic
from _k2host import _is_epsilon_free
from _k2host import _is_connected
from _k2host import _is_empty
def is_valid(fsa: Fsa) -> bool:
return _is_valid(fsa.get_base())
def is_top_sorted(fsa: Fsa) -> bool:
return _is_top_sorted(fsa.get_base())
def is_arc_sorted(fsa: Fsa) -> bool:
return _is_arc_sorted(fsa.get_base())
def has_self_loops(fsa: Fsa) -> bool:
return _has_self_loops(fsa.get_base())
def is_acyclic(fsa: Fsa) -> bool:
return _is_acyclic(fsa.get_base())
def is_deterministic(fsa: Fsa) -> bool:
return _is_deterministic(fsa.get_base())
def is_epsilon_free(fsa: Fsa) -> bool:
return _is_epsilon_free(fsa.get_base())
def is_connected(fsa: Fsa) -> bool:
return _is_connected(fsa.get_base())
def is_empty(fsa: Fsa) -> bool:
return _is_empty(fsa.get_base())
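# Usage sketch (illustrative; `fsa` would be a k2host.Fsa built elsewhere from
# arcs): each predicate takes an Fsa and returns a bool, e.g.
#   ok = is_valid(fsa) and is_arc_sorted(fsa)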
|
server/www/packages/packages-windows/x86/ldap3/utils/asn1.py | zhoulhb/teleport | 640 | 1635 | """
"""
# Created on 2015.08.19
#
# Author: <NAME>
#
# Copyright 2015 - 2018 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from pyasn1 import __version__ as pyasn1_version
from pyasn1.codec.ber import decoder # for usage in other modules
from pyasn1.codec.ber.encoder import Encoder # for monkeypatching of boolean value
from ..core.results import RESULT_CODES
from ..utils.conv import to_unicode
from ..protocol.convert import referrals_to_list
CLASSES = {(False, False): 0, # Universal
(False, True): 1, # Application
(True, False): 2, # Context
(True, True): 3} # Private
# Monkeypatching of pyasn1 for encoding Boolean with the value 0xFF for TRUE
# THIS IS NOT PART OF THE FAST BER DECODER
if pyasn1_version == 'xxx0.2.3':
from pyasn1.codec.ber.encoder import tagMap, BooleanEncoder, encode
from pyasn1.type.univ import Boolean
from pyasn1.compat.octets import ints2octs
class BooleanCEREncoder(BooleanEncoder):
_true = ints2octs((255,))
tagMap[Boolean.tagSet] = BooleanCEREncoder()
else:
from pyasn1.codec.ber.encoder import tagMap, typeMap, AbstractItemEncoder
from pyasn1.type.univ import Boolean
from copy import deepcopy
class LDAPBooleanEncoder(AbstractItemEncoder):
supportIndefLenMode = False
if pyasn1_version <= '0.2.3':
from pyasn1.compat.octets import ints2octs
_true = ints2octs((255,))
_false = ints2octs((0,))
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return value and self._true or self._false, 0
elif pyasn1_version <= '0.3.1':
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return value and (255,) or (0,), False, False
elif pyasn1_version <= '0.3.4':
def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
return value and (255,) or (0,), False, False
elif pyasn1_version <= '0.3.7':
def encodeValue(self, value, encodeFun, **options):
return value and (255,) or (0,), False, False
else:
def encodeValue(self, value, asn1Spec, encodeFun, **options):
return value and (255,) or (0,), False, False
customTagMap = deepcopy(tagMap)
customTypeMap = deepcopy(typeMap)
customTagMap[Boolean.tagSet] = LDAPBooleanEncoder()
customTypeMap[Boolean.typeId] = LDAPBooleanEncoder()
encode = Encoder(customTagMap, customTypeMap)
# end of monkey patching
# a fast BER decoder for LDAP responses only
def compute_ber_size(data):
"""
Compute size according to BER definite length rules
Returns size of value and value offset
"""
if data[1] <= 127: # BER definite length - short form. Highest bit of byte 1 is 0, message length is in the last 7 bits - Value can be up to 127 bytes long
return data[1], 2
else: # BER definite length - long form. Highest bit of byte 1 is 1, last 7 bits counts the number of following octets containing the value length
bytes_length = data[1] - 128
value_length = 0
cont = bytes_length
for byte in data[2: 2 + bytes_length]:
cont -= 1
value_length += byte * (256 ** cont)
return value_length, bytes_length + 2
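# Illustrative values for the two definite-length forms handled above:
#   compute_ber_size(bytearray([0x30, 0x05]))              -> (5, 2)    # short form
#   compute_ber_size(bytearray([0x30, 0x82, 0x01, 0x00]))  -> (256, 4)  # long form, 2 length octets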
def decode_message_fast(message):
ber_len, ber_value_offset = compute_ber_size(get_bytes(message[:10])) # get start of sequence, at maximum 3 bytes for length
decoded = decode_sequence(message, ber_value_offset, ber_len + ber_value_offset, LDAP_MESSAGE_CONTEXT)
return {
'messageID': decoded[0][3],
'protocolOp': decoded[1][2],
'payload': decoded[1][3],
'controls': decoded[2][3] if len(decoded) == 3 else None
}
def decode_sequence(message, start, stop, context_decoders=None):
decoded = []
while start < stop:
octet = get_byte(message[start])
ber_class = CLASSES[(bool(octet & 0b10000000), bool(octet & 0b01000000))]
ber_constructed = bool(octet & 0b00100000)
ber_type = octet & 0b00011111
ber_decoder = DECODERS[(ber_class, octet & 0b00011111)] if ber_class < 2 else None
ber_len, ber_value_offset = compute_ber_size(get_bytes(message[start: start + 10]))
start += ber_value_offset
if ber_decoder:
value = ber_decoder(message, start, start + ber_len, context_decoders) # call value decode function
else:
# try:
value = context_decoders[ber_type](message, start, start + ber_len) # call value decode function for context class
# except KeyError:
# if ber_type == 3: # Referral in result
# value = decode_sequence(message, start, start + ber_len)
# else:
# raise # re-raise, should never happen
decoded.append((ber_class, ber_constructed, ber_type, value))
start += ber_len
return decoded
def decode_integer(message, start, stop, context_decoders=None):
first = message[start]
value = -1 if get_byte(first) & 0x80 else 0
for octet in message[start: stop]:
value = value << 8 | get_byte(octet)
return value
def decode_octet_string(message, start, stop, context_decoders=None):
return message[start: stop]
def decode_boolean(message, start, stop, context_decoders=None):
return False if message[start: stop] == 0 else True
def decode_bind_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, BIND_RESPONSE_CONTEXT)
def decode_extended_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, EXTENDED_RESPONSE_CONTEXT)
def decode_intermediate_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, INTERMEDIATE_RESPONSE_CONTEXT)
def decode_controls(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, CONTROLS_CONTEXT)
def ldap_result_to_dict_fast(response):
response_dict = dict()
response_dict['result'] = int(response[0][3]) # resultCode
response_dict['description'] = RESULT_CODES[response_dict['result']]
response_dict['dn'] = to_unicode(response[1][3], from_server=True) # matchedDN
response_dict['message'] = to_unicode(response[2][3], from_server=True) # diagnosticMessage
if len(response) == 4:
response_dict['referrals'] = referrals_to_list([to_unicode(referral[3], from_server=True) for referral in response[3][3]]) # referrals
else:
response_dict['referrals'] = None
return response_dict
######
if str is not bytes: # Python 3
def get_byte(x):
return x
def get_bytes(x):
return x
else: # Python 2
def get_byte(x):
return ord(x)
def get_bytes(x):
return bytearray(x)
DECODERS = {
# Universal
(0, 1): decode_boolean, # Boolean
(0, 2): decode_integer, # Integer
(0, 4): decode_octet_string, # Octet String
(0, 10): decode_integer, # Enumerated
(0, 16): decode_sequence, # Sequence
(0, 17): decode_sequence, # Set
# Application
(1, 1): decode_bind_response, # Bind response
(1, 4): decode_sequence, # Search result entry
(1, 5): decode_sequence, # Search result done
(1, 7): decode_sequence, # Modify response
(1, 9): decode_sequence, # Add response
(1, 11): decode_sequence, # Delete response
(1, 13): decode_sequence, # ModifyDN response
(1, 15): decode_sequence, # Compare response
(1, 19): decode_sequence, # Search result reference
(1, 24): decode_extended_response, # Extended response
(1, 25): decode_intermediate_response, # intermediate response
(2, 3): decode_octet_string #
}
BIND_RESPONSE_CONTEXT = {
7: decode_octet_string # SaslCredentials
}
EXTENDED_RESPONSE_CONTEXT = {
10: decode_octet_string, # ResponseName
11: decode_octet_string # Response Value
}
INTERMEDIATE_RESPONSE_CONTEXT = {
0: decode_octet_string, # IntermediateResponseName
1: decode_octet_string # IntermediateResponseValue
}
LDAP_MESSAGE_CONTEXT = {
0: decode_controls, # Controls
3: decode_sequence # Referral
}
CONTROLS_CONTEXT = {
0: decode_sequence # Control
}
|
iota/commands/core/get_node_info.py | EasonC13/iota.py | 347 | 1660 | import filters as f
from iota import TransactionHash, Address
from iota.commands import FilterCommand, RequestFilter, ResponseFilter
from iota.filters import Trytes
__all__ = [
'GetNodeInfoCommand',
]
class GetNodeInfoCommand(FilterCommand):
"""
Executes `getNodeInfo` command.
See :py:meth:`iota.api.StrictIota.get_node_info`.
"""
command = 'getNodeInfo'
def get_request_filter(self):
return GetNodeInfoRequestFilter()
def get_response_filter(self):
return GetNodeInfoResponseFilter()
class GetNodeInfoRequestFilter(RequestFilter):
def __init__(self) -> None:
# ``getNodeInfo`` does not accept any parameters.
# Using a filter here just to enforce that the request is empty.
super(GetNodeInfoRequestFilter, self).__init__({})
class GetNodeInfoResponseFilter(ResponseFilter):
def __init__(self) -> None:
super(GetNodeInfoResponseFilter, self).__init__({
'coordinatorAddress':
f.ByteString(encoding='ascii') | Trytes(Address),
'latestMilestone':
f.ByteString(encoding='ascii') | Trytes(TransactionHash),
'latestSolidSubtangleMilestone':
f.ByteString(encoding='ascii') | Trytes(TransactionHash),
})
|
apps/interface/settings/config.py | rainydaygit/testtcloudserver | 349 | 1670 | try:
from public_config import *
except ImportError:
pass
PORT = 9028
SERVICE_NAME = 'interface'
|
synapse/models/infotech.py | vertexproject/synapse | 216 | 1695 | import asyncio
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
class Cpe23Str(s_types.Str):
'''
CPE 2.3 Formatted String
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
(Section 6.2)
cpe:2.3: part : vendor : product : version : update : edition :
language : sw_edition : target_sw : target_hw : other
* = "any"
- = N/A
'''
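    # For example, "cpe:2.3:a:microsoft:internet_explorer:8.0.6001:beta:*:*:*:*:*:*"
    # carries part "a" (application), vendor "microsoft", product "internet_explorer",
    # version "8.0.6001" and update "beta"; a colon escaped as "\:" inside a field is
    # kept as part of that field by _splitCpe23 rather than treated as a separator.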
def __init__(self, modl, name, info, opts):
opts['lower'] = True
s_types.Str.__init__(self, modl, name, info, opts)
def _splitCpe23(self, text):
part = ''
parts = []
genr = iter(text)
try:
while True:
c = next(genr)
if c == '\\':
c += next(genr)
if c == ':':
parts.append(part)
part = ''
continue
part += c
except StopIteration:
parts.append(part)
return parts
def _normPyStr(self, valu):
if not valu.startswith('cpe:2.3:'):
mesg = 'CPE 2.3 string is expected to start with "cpe:2.3:"'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
text, info = s_types.Str._normPyStr(self, valu)
parts = self._splitCpe23(text)
if len(parts) != 13:
mesg = f'CPE 2.3 string has {len(parts)} parts, expected 13.'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
subs = {
'part': parts[2],
'vendor': parts[3],
'product': parts[4],
'version': parts[5],
'update': parts[6],
'edition': parts[7],
'language': parts[8],
'sw_edition': parts[9],
'target_sw': parts[10],
'target_hw': parts[11],
'other': parts[12],
}
return ':'.join(parts), {'subs': subs}
class SemVer(s_types.Int):
'''
Provides support for parsing a semantic version string into its component
parts. This normalizes a version string into an integer to allow version
ordering. Prerelease information is disregarded for integer comparison
    purposes, as we cannot map an arbitrary pre-release version into an integer
    value.
Major, minor and patch levels are represented as integers, with a max
width of 20 bits. The comparable integer value representing the semver
is the bitwise concatenation of the major, minor and patch levels.
Prerelease and build information will be parsed out and available as
strings if that information is present.
'''
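    # With the 20-bit packing described above, "1.2.3" normalizes to
    # (1 << 40) | (2 << 20) | 3 == 1099513724931, so packed values sort in the
    # same order as their versions.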
def postTypeInit(self):
s_types.Int.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(int, self._normPyInt)
def _normPyStr(self, valu):
valu = valu.strip()
if not valu:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='No text left after stripping whitespace')
subs = s_version.parseSemver(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Unable to parse string as a semver.')
valu = s_version.packVersion(subs.get('major'), subs.get('minor'), subs.get('patch'))
return valu, {'subs': subs}
def _normPyInt(self, valu):
if valu < 0:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a negative integer as a semver.')
if valu > s_version.mask60:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a integer larger than 1152921504606846975 as a semver.')
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.packVersion(major, minor, patch)
subs = {'major': major,
'minor': minor,
'patch': patch}
return valu, {'subs': subs}
def repr(self, valu):
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.fmtVersion(major, minor, patch)
return valu
loglevels = (
(10, 'debug'),
(20, 'info'),
(30, 'notice'),
(40, 'warning'),
(50, 'err'),
(60, 'crit'),
(70, 'alert'),
(80, 'emerg'),
)
class ItModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.form('it:dev:str').onAdd(self._onFormItDevStr)
self.model.form('it:dev:pipe').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:mutex').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:regkey').onAdd(self._onFormMakeDevStr)
self.model.prop('it:prod:softver:arch').onSet(self._onPropSoftverArch)
self.model.prop('it:prod:softver:vers').onSet(self._onPropSoftverVers)
self.model.prop('it:prod:softver:software').onSet(self._onPropSoftverSoft)
def bruteVersionStr(self, valu):
'''
Brute force the version out of a string.
Args:
valu (str): String to attempt to get version information for.
Notes:
This first attempts to parse strings using the it:semver normalization
before attempting to extract version parts out of the string.
Returns:
int, dict: The system normalized version integer and a subs dictionary.
'''
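        # For example, a strict string such as '1.2.3' normalizes directly via it:semver,
        # while looser strings fall back to s_version.parseVersionParts below; any missing
        # minor or patch parts default to 0 when the value is packed.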
try:
valu, info = self.core.model.type('it:semver').norm(valu)
subs = info.get('subs')
return valu, subs
except s_exc.BadTypeValu:
# Try doing version part extraction by noming through the string
subs = s_version.parseVersionParts(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',
mesg='Unable to brute force version parts out of the string')
if subs:
valu = s_version.packVersion(subs.get('major'),
subs.get('minor', 0),
subs.get('patch', 0))
return valu, subs
async def _onFormItDevStr(self, node):
await node.set('norm', node.ndef[1])
async def _onFormMakeDevStr(self, node):
pprop = node.ndef[1]
await node.snap.addNode('it:dev:str', pprop)
async def _onPropSoftverSoft(self, node, oldv):
# Check to see if name is available and set it if possible
prop = node.get('software')
if prop:
opts = {'vars': {'soft': prop}}
nodes = await node.snap.nodes('it:prod:soft=$soft', opts=opts)
if nodes:
name = nodes[0].get('name')
if name:
await node.set('software:name', name)
async def _onPropSoftverArch(self, node, oldv):
# make it:dev:str for arch
prop = node.get('arch')
if prop:
await node.snap.addNode('it:dev:str', prop)
async def _onPropSoftverVers(self, node, oldv):
# Set vers:norm and make it's normed valu
prop = node.get('vers')
if not prop:
return
await node.set('vers:norm', prop)
# Make it:dev:str from version str
await node.snap.addNode('it:dev:str', prop)
# form the semver properly or bruteforce parts
try:
valu, subs = self.bruteVersionStr(prop)
await node.set('semver', valu)
for k, v in subs.items():
await node.set(f'semver:{k}', v)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception('Failed to brute force version string [%s]', prop)
def getModelDefs(self):
modl = {
'ctors': (
('it:semver', 'synapse.models.infotech.SemVer', {}, {
'doc': 'Semantic Version type.',
}),
('it:sec:cpe', 'synapse.models.infotech.Cpe23Str', {}, {
'doc': 'A NIST CPE 2.3 Formatted String',
}),
),
'types': (
('it:hostname', ('str', {'strip': True, 'lower': True}), {
'doc': 'The name of a host or system.',
}),
('it:host', ('guid', {}), {
'doc': 'A GUID that represents a host or system.'
}),
('it:log:event', ('guid', {}), {
'doc': 'A GUID representing an individual log event.',
'interfaces': ('it:host:activity',),
}),
('it:network', ('guid', {}), {
'doc': 'A GUID that represents a logical network.'
}),
('it:domain', ('guid', {}), {
'doc': 'A logical boundary of authentication and configuration such as a windows domain.'
}),
('it:account', ('guid', {}), {
'doc': 'A GUID that represents an account on a host or network.'
}),
('it:group', ('guid', {}), {
'doc': 'A GUID that represents a group on a host or network.'
}),
('it:logon', ('guid', {}), {
'doc': 'A GUID that represents an individual logon/logoff event.'
}),
('it:hosturl', ('comp', {'fields': (('host', 'it:host'), ('url', 'inet:url'))}), {
'doc': 'A url hosted on or served by a host or system.',
}),
('it:sec:cve', ('str', {'lower': True, 'regex': r'(?i)^CVE-[0-9]{4}-[0-9]{4,}$'}), {
'doc': 'A vulnerability as designated by a Common Vulnerabilities and Exposures (CVE) number.',
'ex': 'cve-2012-0158'
}),
('it:sec:cwe', ('str', {'regex': r'^CWE-[0-9]{1,8}$'}), {
'doc': 'NIST NVD Common Weaknesses Enumeration Specification',
'ex': 'CWE-120',
}),
('it:mitre:attack:status', ('str', {'enums': 'current,deprecated,withdrawn'}), {
'doc': 'A Mitre ATT&CK element status.',
'ex': 'current',
}),
('it:mitre:attack:group', ('str', {'regex': r'^G[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Group ID.',
'ex': 'G0100',
}),
('it:mitre:attack:tactic', ('str', {'regex': r'^TA[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Tactic ID.',
'ex': 'TA0040',
}),
('it:mitre:attack:technique', ('str', {'regex': r'^T[0-9]{4}(.[0-9]{3})?$'}), {
'doc': 'A Mitre ATT&CK Technique ID.',
'ex': 'T1548',
}),
('it:mitre:attack:mitigation', ('str', {'regex': r'^M[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Mitigation ID.',
'ex': 'M1036',
}),
('it:mitre:attack:software', ('str', {'regex': r'^S[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Software ID.',
'ex': 'S0154',
}),
('it:dev:str', ('str', {}), {
'doc': 'A developer-selected string.'
}),
('it:dev:pipe', ('str', {}), {
'doc': 'A string representing a named pipe.',
}),
('it:dev:mutex', ('str', {}), {
'doc': 'A string representing a mutex.',
}),
('it:dev:int', ('int', {}), {
'doc': 'A developer selected integer constant.',
}),
('it:dev:regkey', ('str', {}), {
'doc': 'A Windows registry key.',
'ex': 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
}),
('it:dev:regval', ('guid', {}), {
'doc': 'A Windows registry key/value pair.',
}),
('it:prod:soft', ('guid', {}), {
                    'doc': 'An arbitrary, unversioned software product.',
}),
('it:adid', ('str', {'lower': True, 'strip': True}), {
'doc': 'An advertising identification string.'}),
('it:os:windows:sid', ('str', {'regex': r'^S-1-[0-59]-\d{2}-\d{8,10}-\d{8,10}-\d{8,10}-[1-9]\d{3}$'}), {
'doc': 'A Microsoft Windows Security Identifier.',
'ex': 'S-1-5-21-1220945662-1202665555-839525555-5555',
}),
('it:os:ios:idfa', ('it:adid', {}), {
'doc': 'An iOS advertising identification string.'}),
('it:os:android:aaid', ('it:adid', {}), {
'doc': 'An android advertising identification string.'}),
('it:os:android:perm', ('str', {}), {
'doc': 'An android permission string.'}),
('it:os:android:intent', ('str', {}), {
'doc': 'An android intent string.'}),
('it:os:android:reqperm', ('comp', {'fields': (
('app', 'it:prod:soft'),
('perm', 'it:os:android:perm'))}), {
'doc': 'The given software requests the android permission.'}),
('it:os:android:ilisten', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent'))}), {
'doc': 'The given software listens for an android intent.'}),
('it:os:android:ibroadcast', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent')
)}), {
'doc': 'The given software broadcasts the given Android intent.'}),
('it:prod:softver', ('guid', {}), {
'doc': 'A specific version of a software product.'}),
('it:prod:softfile', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('file', 'file:bytes'))}), {
'doc': 'A file is distributed by a specific software version.'}),
('it:prod:softlib', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('lib', 'it:prod:softver'))}), {
'doc': 'A software version contains a library software version.'}),
('it:prod:softos', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('os', 'it:prod:softver'))}), {
'doc': 'The software version is known to be compatible with the given os software version.'}),
('it:hostsoft', ('comp', {'fields': (('host', 'it:host'), ('softver', 'it:prod:softver'))}), {
'doc': 'A version of a software product which is present on a given host.',
}),
('it:av:sig', ('comp', {'fields': (('soft', 'it:prod:soft'), ('name', ('str', {'lower': True})))}), {
'doc': 'A signature name within the namespace of an antivirus engine name.'
}),
('it:av:filehit', ('comp', {'fields': (('file', 'file:bytes'), ('sig', 'it:av:sig'))}), {
'doc': 'A file that triggered an alert on a specific antivirus signature.',
}),
('it:av:prochit', ('guid', {}), {
'doc': 'An instance of a process triggering an alert on a specific antivirus signature.'
}),
('it:auth:passwdhash', ('guid', {}), {
'doc': 'An instance of a password hash.',
}),
('it:exec:proc', ('guid', {}), {
'doc': 'A process executing on a host. May be an actual (e.g., endpoint) or virtual (e.g., malware sandbox) host.',
}),
('it:exec:thread', ('guid', {}), {
'doc': 'A thread executing in a process.',
}),
('it:exec:loadlib', ('guid', {}), {
'doc': 'A library load event in a process.',
}),
('it:exec:mmap', ('guid', {}), {
'doc': 'A memory mapped segment located in a process.',
}),
('it:cmd', ('str', {'strip': True}), {
'doc': 'A unique command-line string.',
'ex': 'foo.exe --dostuff bar',
}),
('it:exec:mutex', ('guid', {}), {
'doc': 'A mutex created by a process at runtime.',
}),
('it:exec:pipe', ('guid', {}), {
'doc': 'A named pipe created by a process at runtime.',
}),
('it:exec:url', ('guid', {}), {
'doc': 'An instance of a host requesting a URL.',
}),
('it:exec:bind', ('guid', {}), {
'doc': 'An instance of a host binding a listening port.',
}),
('it:fs:file', ('guid', {}), {
'doc': 'A file on a host.'
}),
('it:exec:file:add', ('guid', {}), {
'doc': 'An instance of a host adding a file to a filesystem.',
}),
('it:exec:file:del', ('guid', {}), {
'doc': 'An instance of a host deleting a file from a filesystem.',
}),
('it:exec:file:read', ('guid', {}), {
'doc': 'An instance of a host reading a file from a filesystem.',
}),
('it:exec:file:write', ('guid', {}), {
'doc': 'An instance of a host writing a file to a filesystem.',
}),
('it:exec:reg:get', ('guid', {}), {
'doc': 'An instance of a host getting a registry key.',
}),
('it:exec:reg:set', ('guid', {}), {
'doc': 'An instance of a host creating or setting a registry key.',
}),
('it:exec:reg:del', ('guid', {}), {
'doc': 'An instance of a host deleting a registry key.',
}),
('it:app:yara:rule', ('guid', {}), {
'doc': 'A YARA rule unique identifier.',
}),
('it:app:yara:match', ('comp', {'fields': (('rule', 'it:app:yara:rule'), ('file', 'file:bytes'))}), {
'doc': 'A YARA rule match to a file.',
}),
('it:app:yara:procmatch', ('guid', {}), {
'doc': 'An instance of a YARA rule match to a process.',
}),
('it:app:snort:rule', ('guid', {}), {
'doc': 'A snort rule unique identifier.',
}),
('it:app:snort:hit', ('guid', {}), {
'doc': 'An instance of a snort rule hit.',
}),
('it:reveng:function', ('guid', {}), {
'doc': 'A function inside an executable.',
}),
('it:reveng:filefunc', ('comp', {'fields': (('file', 'file:bytes'), ('function', 'it:reveng:function'))}), {
'doc': 'An instance of a function in an executable.',
}),
('it:reveng:funcstr', ('comp', {'fields': (('function', 'it:reveng:function'), ('string', 'str'))}), {
'deprecated': True,
'doc': 'A reference to a string inside a function.',
}),
('it:reveng:impfunc', ('str', {'lower': 1}), {
'doc': 'A function from an imported library.',
}),
),
'interfaces': (
('it:host:activity', {
'props': (
('exe', ('file:bytes', {}), {
'doc': 'The executable file which caused the activity.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The host process which caused the activity.'}),
('thread', ('it:exec:thread', {}), {
'doc': 'The host thread which caused the activity.'}),
('host', ('it:host', {}), {
'doc': 'The host on which the activity occurred.'}),
('time', ('time', {}), {
'doc': 'The time that the activity started.'}),
),
}),
),
'forms': (
('it:hostname', {}, ()),
('it:host', {}, (
('name', ('it:hostname', {}), {
'doc': 'The name of the host or system.',
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the host.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain that the host is a member of.',
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The last known ipv4 address for the host.'
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known location for the host.'
}),
('place', ('geo:place', {}), {
'doc': 'The place where the host resides.',
}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.',
}),
('os', ('it:prod:softver', {}), {
'doc': 'The operating system of the host.'
}),
('manu', ('str', {}), {
'doc': 'The manufacturer of the host.',
}),
('model', ('str', {}), {
'doc': 'The product model of the host.',
}),
('serial', ('str', {}), {
'doc': 'The serial number of the host.',
}),
('operator', ('ps:contact', {}), {
'doc': 'The operator of the host.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given host.',
}),
)),
('it:log:event', {}, (
('mesg', ('str', {}), {
                        'doc': 'The log message text.',
}),
('severity', ('int', {'enums': loglevels}), {
'doc': 'A log level integer that increases with severity.',
}),
('data', ('data', {}), {
'doc': 'A raw JSON record of the log event.',
}),
)),
('it:domain', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the domain.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the domain.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given domain.',
}),
)),
('it:network', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the network.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the network.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that owns/operates the network.',
}),
('net4', ('inet:net4', {}), {
'doc': 'The optional contiguous IPv4 address range of this network.',
}),
('net6', ('inet:net6', {}), {
'doc': 'The optional contiguous IPv6 address range of this network.',
}),
)),
('it:account', {}, (
('user', ('inet:user', {}), {
'doc': 'The username associated with the account',
}),
('contact', ('ps:contact', {}), {
'doc': 'Additional contact information associated with this account.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the account is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the account is registered.',
}),
('posix:uid', ('int', {}), {
'doc': 'The user ID of the account.',
'ex': '1001',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('posix:gecos', ('int', {}), {
'doc': 'The GECOS field for the POSIX account.',
}),
('posix:home', ('file:path', {}), {
'doc': "The path to the POSIX account's home directory.",
'ex': '/home/visi',
}),
('posix:shell', ('file:path', {}), {
'doc': "The path to the POSIX account's default shell.",
'ex': '/bin/bash',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the account.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'An array of groups that the account is a member of.',
}),
)),
('it:group', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the group.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the group.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the group is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the group is registered.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'Groups that are a member of this group.',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the group.',
}),
)),
('it:logon', {}, (
('time', ('time', {}), {
                        'doc': 'The time the logon occurred.',
}),
('success', ('bool', {}), {
'doc': 'Set to false to indicate an unsuccessful logon attempt.',
}),
('logoff:time', ('time', {}), {
'doc': 'The time the logon session ended.',
}),
('host', ('it:host', {}), {
'doc': 'The host that the account logged in to.',
}),
('account', ('it:account', {}), {
'doc': 'The account that logged in.',
}),
('creds', ('auth:creds', {}), {
'doc': 'The credentials that were used for the logon.',
}),
('duration', ('duration', {}), {
'doc': 'The duration of the logon session.',
}),
('client:host', ('it:host', {}), {
'doc': 'The host where the logon originated.',
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 where the logon originated.',
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 where the logon originated.',
}),
)),
('it:hosturl', {}, (
('host', ('it:host', {}), {
'ro': True,
'doc': 'Host serving a url.',
}),
('url', ('inet:url', {}), {
'ro': True,
'doc': 'URL available on the host.',
}),
)),
('it:dev:str', {}, (
('norm', ('str', {'lower': True}), {
'doc': 'Lower case normalized version of the it:dev:str.',
}),
)),
('it:sec:cve', {}, (
('desc', ('str', {}), {
'doc': 'A free-form description of the CVE vulnerability.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CVE to a full description.',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the CVE ID.',
}),
)),
('it:sec:cpe', {}, (
('part', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "part" field from the CPE 2.3 string.'}),
('vendor', ('ou:name', {}), {
'ro': True,
'doc': 'The "vendor" field from the CPE 2.3 string.'}),
('product', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "product" field from the CPE 2.3 string.'}),
('version', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "version" field from the CPE 2.3 string.'}),
('update', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "update" field from the CPE 2.3 string.'}),
('edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "edition" field from the CPE 2.3 string.'}),
('language', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "language" field from the CPE 2.3 string.'}),
('sw_edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "sw_edition" field from the CPE 2.3 string.'}),
('target_sw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_sw" field from the CPE 2.3 string.'}),
('target_hw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_hw" field from the CPE 2.3 string.'}),
('other', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "other" field from the CPE 2.3 string.'}),
)),
('it:sec:cwe', {}, (
('name', ('str', {}), {
'doc': 'The CWE description field.',
'ex': 'Buffer Copy without Checking Size of Input (Classic Buffer Overflow)',
}),
('desc', ('str', {}), {
'doc': 'The CWE description field.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CWE to a full description.',
}),
('parents', ('array', {'type': 'it:sec:cwe',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ChildOf CWE Relationships.'
}),
)),
('it:mitre:attack:group', {}, (
('org', ('ou:org', {}), {
'doc': 'Used to map an ATT&CK group to a synapse ou:org.',
}),
('name', ('ou:name', {}), {
'doc': 'The primary name for the ATT&CK group.',
}),
('names', ('array', {'type': 'ou:name', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the ATT&CK group.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK group.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK group.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK group ID.',
'ex': 'cno.mitre.g0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK group.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs used by the group.',
}),
('software', ('array', {'type': 'it:mitre:attack:software',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK software IDs used by the group.',
}),
)),
('it:mitre:attack:tactic', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK tactic.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK tactic.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK tactic.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK tactic.',
'ex': 'cno.mitre.ta0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK tactic.',
}),
)),
('it:mitre:attack:technique', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK technique.',
}),
('status', ('it:mitre:attack:status', {}), {
'doc': 'The status of this ATT&CK technique.',
}),
('isnow', ('it:mitre:attack:technique', {}), {
'doc': 'If deprecated, this field may contain the current value for the technique.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK technique.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK technique.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK technique.',
'ex': 'cno.mitre.t0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK technique.',
}),
('parent', ('it:mitre:attack:technique', {}), {
'doc': 'The parent ATT&CK technique on this sub-technique.',
}),
('tactics', ('array', {'type': 'it:mitre:attack:tactic',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK tactics that include this technique.',
}),
)),
('it:mitre:attack:software', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Used to map an ATT&CK software to a synapse it:prod:soft.',
}),
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK software.',
}),
('names', ('array', {'type': 'str', 'uniq': True, 'sorted': True}), {
'doc': 'Associated names for the ATT&CK software.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK software.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK software.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK software.',
'ex': 'cno.mitre.s0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK software.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of techniques used by the software.',
}),
)),
('it:mitre:attack:mitigation', {}, (
# TODO map to an eventual risk:mitigation
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK mitigation.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK mitigation.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK mitigation.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK mitigation.',
'ex': 'cno.mitre.m0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK mitigation.',
}),
('addresses', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs addressed by the mitigation.',
}),
)),
('it:dev:int', {}, ()),
('it:dev:pipe', {}, ()),
('it:dev:mutex', {}, ()),
('it:dev:regkey', {}, ()),
('it:dev:regval', {}, (
('key', ('it:dev:regkey', {}), {
'doc': 'The Windows registry key.',
}),
('str', ('it:dev:str', {}), {
'doc': 'The value of the registry key, if the value is a string.',
}),
('int', ('it:dev:int', {}), {
'doc': 'The value of the registry key, if the value is an integer.',
}),
('bytes', ('file:bytes', {}), {
'doc': 'The file representing the value of the registry key, if the value is binary data.',
}),
)),
('it:prod:soft', {}, (
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'Name of the software.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software.',
}),
('desc', ('str', {}), {
'doc': 'A description of the software.',
'disp': {'hint': 'text'},
}),
('desc:short', ('str', {'lower': True}), {
'doc': 'A short description of the software.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software.',
}),
('author', ('ps:contact', {}), {
'doc': 'The contact information of the org or person who authored the software.',
}),
('author:org', ('ou:org', {}), {
'deprecated': True,
'doc': 'Organization which authored the software.',
}),
('author:acct', ('inet:web:acct', {}), {
'deprecated': True,
'doc': 'Web account of the software author.',
}),
('author:email', ('inet:email', {}), {
'deprecated': True,
                        'doc': 'Email address of the software author.',
}),
('author:person', ('ps:person', {}), {
'deprecated': True,
'doc': 'Person who authored the software.',
}),
('url', ('inet:url', {}), {
'doc': 'URL relevant for the software.',
}),
('isos', ('bool', {}), {
'doc': 'Set to True if the software is an operating system.'}),
('islib', ('bool', {}), {
'doc': 'Set to True if the software is a library.'}),
)),
('it:adid', {}, ()),
('it:os:ios:idfa', {}, ()),
('it:os:android:aaid', {}, ()),
('it:os:android:perm', {}, ()),
('it:os:android:intent', {}, ()),
('it:os:android:reqperm', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The android app which requests the permission.'}),
('perm', ('it:os:android:perm', {}), {'ro': True,
'doc': 'The android permission requested by the app.'}),
)),
('it:prod:softos', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which can run on the operating system.'}),
('os', ('it:prod:softver', {}), {'ro': True,
'doc': 'The operating system which the software can run on.'}),
)),
('it:os:android:ilisten', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which listens for the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is listened for by the app.'}),
)),
('it:os:android:ibroadcast', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which broadcasts the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is broadcast by the app.'}),
)),
('it:prod:softver', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Software associated with this version instance.',
}),
('software:name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name of the software at a particular version.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software version.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software version',
}),
('cves', ('array', {'type': 'it:sec:cve', 'uniq': True, 'sorted': True}), {
'doc': 'A list of CVEs that apply to this software version.',
}),
('vers', ('it:dev:str', {}), {
'doc': 'Version string associated with this version instance.',
}),
('vers:norm', ('str', {'lower': True}), {
'doc': 'Normalized version of the version string.',
}),
('arch', ('it:dev:str', {}), {
'doc': 'Software architecture.',
}),
('released', ('time', {}), {
'doc': 'Timestamp for when this version of the software was released.',
}),
('semver', ('it:semver', {}), {
'doc': 'System normalized semantic version number.',
}),
('semver:major', ('int', {}), {
'doc': 'Version major number.',
}),
('semver:minor', ('int', {}), {
'doc': 'Version minor number.',
}),
('semver:patch', ('int', {}), {
'doc': 'Version patch number.',
}),
('semver:pre', ('str', {}), {
'doc': 'Semver prerelease string.',
}),
('semver:build', ('str', {}), {
'doc': 'Semver build string.',
}),
('url', ('inet:url', {}), {
'doc': 'URL where a specific version of the software is available from.',
}),
)),
('it:prod:softlib', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software version that contains the library.'}),
('lib', ('it:prod:softver', {}), {'ro': True,
'doc': 'The library software version.'}),
)),
('it:prod:softfile', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which distributes the file.'}),
('file', ('file:bytes', {}), {'ro': True,
'doc': 'The file distributed by the software.'}),
('path', ('file:path', {}), {
'doc': 'The default installation path of the file.'}),
)),
('it:hostsoft', {}, (
('host', ('it:host', {}), {'ro': True,
'doc': 'Host with the software.'}),
('softver', ('it:prod:softver', {}), {'ro': True,
'doc': 'Software on the host.'})
)),
('it:av:sig', {}, (
('soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
('name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.'
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the signature.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A reference URL for information about the signature.',
})
)),
('it:av:filehit', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'ro': True,
'doc': 'The signature that the file triggered on.'
}),
('sig:name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.',
}),
('sig:soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
)),
('it:av:prochit', {}, (
('proc', ('it:exec:proc', {}), {
                        'doc': 'The process that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
                        'doc': 'The signature that the process triggered on.'
}),
('time', ('time', {}), {
'doc': 'The time that the AV engine detected the signature.'
}),
)),
('it:auth:passwdhash', {}, (
('salt', ('hex', {}), {
'doc': 'The (optional) hex encoded salt value used to calculate the password hash.',
}),
('hash:md5', ('hash:md5', {}), {
'doc': 'The MD5 password hash value.',
}),
('hash:sha1', ('hash:sha1', {}), {
'doc': 'The SHA1 password hash value.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'The SHA256 password hash value.',
}),
('hash:sha512', ('hash:sha512', {}), {
'doc': 'The SHA512 password hash value.',
}),
('hash:lm', ('hash:lm', {}), {
'doc': 'The LM password hash value.',
}),
('hash:ntlm', ('hash:ntlm', {}), {
'doc': 'The NTLM password hash value.',
}),
('passwd', ('inet:passwd', {}), {
'doc': 'The (optional) clear text password for this password hash.',
}),
)),
('it:cmd', {}, ()),
('it:exec:proc', {}, (
('host', ('it:host', {}), {
'doc': 'The host that executed the process. May be an actual or a virtual / notional host.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The file considered the "main" executable for the process. For example, rundll32.exe may be considered the "main" executable for DLLs loaded by that program.',
}),
('cmd', ('it:cmd', {}), {
'doc': 'The command string used to launch the process, including any command line parameters.',
'disp': {'hint': 'text'},
}),
('pid', ('int', {}), {
'doc': 'The process ID.',
}),
('time', ('time', {}), {
'doc': 'The start time for the process.',
}),
('exited', ('time', {}), {
'doc': 'The time the process exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code for the process.',
}),
('user', ('inet:user', {}), {
'doc': 'The user name of the process owner.',
}),
('path', ('file:path', {}), {
'doc': 'The path to the executable of the process.',
}),
('src:exe', ('file:path', {}), {
'doc': 'The path to the executable which started the process.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'The process which created the process.'
}),
('killedby', ('it:exec:proc', {}), {
'doc': 'The process which killed this process.',
}),
)),
('it:exec:thread', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process which contains the thread.',
}),
('created', ('time', {}), {
'doc': 'The time the thread was created.',
}),
('exited', ('time', {}), {
'doc': 'The time the thread exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code or return value for the thread.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'An external process which created the thread.',
}),
('src:thread', ('it:exec:thread', {}), {
'doc': 'The thread which created this thread.',
}),
)),
('it:exec:loadlib', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the library was loaded.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the library was loaded in the process.',
}),
('loaded', ('time', {}), {
'doc': 'The time the library was loaded.',
}),
('unloaded', ('time', {}), {
'doc': 'The time the library was unloaded.',
}),
('path', ('file:path', {}), {
'doc': 'The path that the library was loaded from.',
}),
('file', ('file:bytes', {}), {
'doc': 'The library file that was loaded.',
}),
)),
('it:exec:mmap', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the memory was mapped.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the map was created in the process.',
}),
('size', ('int', {}), {
'doc': 'The size of the memory map in bytes.',
}),
('perms:read', ('bool', {}), {
'doc': 'True if the mmap is mapped with read permissions.',
}),
('perms:write', ('bool', {}), {
'doc': 'True if the mmap is mapped with write permissions.',
}),
('perms:execute', ('bool', {}), {
'doc': 'True if the mmap is mapped with execute permissions.',
}),
('created', ('time', {}), {
'doc': 'The time the memory map was created.',
}),
('deleted', ('time', {}), {
'doc': 'The time the memory map was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The file path if the mmap is a mapped view of a file.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'A SHA256 hash of the memory map. Bytes may optionally be present in the axon.',
}),
)),
('it:exec:mutex', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the mutex.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the mutex was created.',
}),
('name', ('it:dev:mutex', {}), {
'doc': 'The mutex string.',
}),
)),
('it:exec:pipe', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the named pipe.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the named pipe was created.',
}),
('name', ('it:dev:pipe', {}), {
'doc': 'The named pipe string.',
}),
)),
('it:exec:url', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that requested the URL.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the URL was requested.',
}),
('url', ('inet:url', {}), {
'doc': 'The URL that was requested.',
}),
('client', ('inet:client', {}), {
'doc': 'The address of the client during the URL retrieval.'
}),
('client:ipv4', ('inet:ipv4', {}), {
                        'doc': 'The IPv4 address of the client during the URL retrieval.'
}),
('client:ipv6', ('inet:ipv6', {}), {
                        'doc': 'The IPv6 address of the client during the URL retrieval.'
}),
('client:port', ('inet:port', {}), {
                        'doc': 'The client port during the URL retrieval.'
}),
)),
('it:exec:bind', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that bound the listening port.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that bound the listening port. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that bound the listening port. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the port was bound.',
}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server when binding the port.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address specified to bind().'
}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address specified to bind().'
}),
('server:port', ('inet:port', {}), {
'doc': 'The bound (listening) TCP port.'
}),
)),
('it:fs:file', {}, (
('host', ('it:host', {}), {
'doc': 'The host containing the file.',
}),
('path', ('file:path', {}), {
'doc': 'The path for the file.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file on the host.',
}),
('ctime', ('time', {}), {
'doc': 'The file creation time.',
}),
('mtime', ('time', {}), {
'doc': 'The file modification time.',
}),
('atime', ('time', {}), {
'doc': 'The file access time.',
}),
('user', ('inet:user', {}), {
'doc': 'The owner of the file.',
}),
('group', ('inet:user', {}), {
'doc': 'The group owner of the file.',
}),
)),
('it:exec:file:add', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the new file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the new file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was created.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was created.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was created.',
}),
)),
('it:exec:file:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was deleted.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was deleted.',
}),
)),
('it:exec:file:read', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was read.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was read.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was read.',
}),
)),
('it:exec:file:write', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to / modified the existing file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was written to/modified.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was written to/modified.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was modified.',
}),
)),
('it:exec:reg:get', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was read.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was read.',
}),
)),
('it:exec:reg:set', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was written to.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was written to.',
}),
)),
('it:exec:reg:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted data from the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted data from the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted data from the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the data from the registry was deleted.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was deleted.',
}),
)),
('it:app:snort:rule', {}, (
('text', ('str', {}), {
'doc': 'The snort rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the snort rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
)),
('it:app:snort:hit', {}, (
('rule', ('it:app:snort:rule', {}), {
                        'doc': 'The snort rule that caused the hit.'}),
('flow', ('inet:flow', {}), {
'doc': 'The inet:flow that matched the snort rule.'}),
('src', ('inet:addr', {}), {
'doc': 'The source address of flow that caused the hit.'}),
('src:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the flow that caused the hit.'}),
('src:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the flow that caused the hit.'}),
('src:port', ('inet:port', {}), {
'doc': 'The source port of the flow that caused the hit.'}),
('dst', ('inet:addr', {}), {
'doc': 'The destination address of the trigger.'}),
('dst:ipv4', ('inet:ipv4', {}), {
'doc': 'The destination IPv4 address of the flow that caused the hit.'}),
('dst:ipv6', ('inet:ipv6', {}), {
                        'doc': 'The destination IPv6 address of the flow that caused the hit.'}),
('dst:port', ('inet:port', {}), {
'doc': 'The destination port of the flow that caused the hit.'}),
('time', ('time', {}), {
'doc': 'The time of the network flow that caused the hit.'}),
('sensor', ('it:host', {}), {
'doc': 'The sensor host node that produced the hit.'}),
('version', ('it:semver', {}), {
'doc': 'The version of the rule at the time of match.'}),
)),
('it:app:yara:rule', {}, (
('text', ('str', {}), {
'doc': 'The YARA rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the YARA rule.'}),
('author', ('ps:contact', {}), {
'doc': 'Contact info for the author of the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
('enabled', ('bool', {}), {
'doc': 'The rule enabled status to be used for YARA evaluation engines.'}),
)),
('it:app:yara:match', {}, (
('rule', ('it:app:yara:rule', {}), {
'ro': True,
'doc': 'The YARA rule that matched the file.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that matched the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:app:yara:procmatch', {}, (
('rule', ('it:app:yara:rule', {}), {
                        'doc': 'The YARA rule that matched the process.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The process that matched the YARA rule.'}),
('time', ('time', {}), {
'doc': 'The time that the YARA engine matched the process to the rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:reveng:function', {}, (
('name', ('str', {}), {
'doc': 'The name of the function.'}),
('description', ('str', {}), {
'doc': 'Notes concerning the function.'}),
('impcalls', ('array', {'type': 'it:reveng:impfunc'}), {
'doc': 'Calls to imported library functions within the scope of the function.',
}),
('strings', ('array', {'type': 'it:dev:str', 'uniq': True}), {
'doc': 'An array of strings referenced within the function.',
}),
)),
('it:reveng:filefunc', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that contains the function.'}),
('va', ('int', {}), {
'doc': 'The virtual address of the first codeblock of the function.'}),
('rank', ('int', {}), {
'doc': 'The function rank score used to evaluate if it exhibits interesting behavior.'}),
('complexity', ('int', {}), {
'doc': 'The complexity of the function.'}),
('funccalls', ('array', {'type': 'it:reveng:filefunc'}), {
'doc': 'Other function calls within the scope of the function.',
}),
)),
('it:reveng:funcstr', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('string', ('str', {}), {
'ro': True,
'doc': 'The string that the function references.'}),
)),
('it:reveng:impfunc', {}, ()),
),
}
name = 'it'
return ((name, modl), )
|
tests/sources/test_clang_format.py | Justin-Fisher/webots | 1,561 | 1697 | #!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the C, C++ and shader source code is compliant with ClangFormat."""
import unittest
import difflib
import os
import subprocess
from io import open
from distutils.spawn import find_executable
class TestClangFormat(unittest.TestCase):
"""Unit test for ClangFormat compliance."""
def setUp(self):
"""Set up called before each test."""
self.WEBOTS_HOME = os.environ['WEBOTS_HOME']
def _runClangFormat(self, f):
"""Run clang format on 'f' file."""
return subprocess.check_output(['clang-format', '-style=file', f])
def test_clang_format_is_correctly_installed(self):
"""Test ClangFormat is correctly installed."""
self.assertTrue(
find_executable('clang-format') is not None,
msg='ClangFormat is not installed on this computer.'
)
clangFormatConfigFile = self.WEBOTS_HOME + os.sep + '.clang-format'
self.assertTrue(
os.path.exists(clangFormatConfigFile),
msg=clangFormatConfigFile + ' not found.'
)
def test_sources_are_clang_format_compliant(self):
"""Test that sources are ClangFormat compliant."""
directories = [
'include/controller',
'projects',
'resources/projects',
'resources/wren/shaders',
'tests',
'include/wren',
'src/controller/c',
'src/controller/cpp',
'src/license/sign',
'src/webots',
'src/wren'
]
skippedPaths = [
'projects/default/controllers/ros/include',
'projects/robots/gctronic/e-puck/transfer',
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba',
'projects/robots/mobsya/thymio/libraries/dashel',
'projects/robots/mobsya/thymio/libraries/dashel-src',
'projects/robots/robotis/darwin-op/libraries/libssh',
'projects/robots/robotis/darwin-op/libraries/libzip',
'projects/robots/robotis/darwin-op/libraries/robotis-op2/robotis',
'projects/robots/robotis/darwin-op/remote_control/libjpeg-turbo',
'projects/vehicles/controllers/ros_automobile/include',
'src/webots/external'
]
skippedFiles = [
'projects/robots/robotis/darwin-op/plugins/remote_controls/robotis-op2_tcpip/stb_image.h'
]
skippedDirectories = [
'build',
'python',
'java'
]
extensions = ['c', 'h', 'cpp', 'hpp', 'cc', 'hh', 'c++', 'h++', 'vert', 'frag']
modified_files = os.path.join(self.WEBOTS_HOME, 'tests', 'sources', 'modified_files.txt')
sources = []
if os.path.isfile(modified_files):
with open(modified_files, 'r') as file:
for line in file:
line = line.strip()
extension = os.path.splitext(line)[1][1:].lower()
if extension not in extensions:
continue
found = False
for directory in directories:
if line.startswith(directory):
found = True
break
if not found:
continue
found = False
for directory in skippedPaths + skippedFiles:
if line.startswith(directory):
found = True
break
if found:
continue
for directory in skippedDirectories:
currentDirectories = line.split(os.sep)
if directory in currentDirectories:
found = True
if found:
continue
sources.append(line.replace('/', os.sep))
else:
for directory in directories:
path = self.WEBOTS_HOME + os.sep + directory.replace('/', os.sep)
for rootPath, dirNames, fileNames in os.walk(path):
shouldContinue = False
for path in skippedPaths:
if rootPath.startswith(self.WEBOTS_HOME + os.sep + path.replace('/', os.sep)):
shouldContinue = True
break
for directory in skippedDirectories:
currentDirectories = rootPath.replace(self.WEBOTS_HOME, '').split(os.sep)
if directory in currentDirectories:
shouldContinue = True
break
if shouldContinue:
continue
for fileName in fileNames:
extension = os.path.splitext(fileName)[1][1:].lower()
if extension not in extensions:
continue
path = os.path.normpath(os.path.join(rootPath, fileName))
skipFile = False
for file in skippedFiles:
if os.path.normpath((self.WEBOTS_HOME + os.sep + file.replace('/', os.sep))) == path:
skipFile = True
break
if not skipFile:
sources.append(path)
curdir = os.getcwd()
os.chdir(self.WEBOTS_HOME)
for source in sources:
diff = ''
with open(source, encoding='utf8') as file:
try:
for line in difflib.context_diff(self._runClangFormat(source).decode('utf-8').splitlines(),
file.read().splitlines()):
diff += line + '\n'
except UnicodeDecodeError:
self.assertTrue(False, msg='utf-8 decode problem in %s' % source)
self.assertTrue(
len(diff) == 0,
msg='Source file "%s" is not compliant with ClangFormat:\n\nDIFF:%s' % (source, diff)
)
os.chdir(curdir)
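# --- Illustrative sketch, not part of the original test suite ---
# The compliance check above treats a source file as compliant when the context
# diff between clang-format's output and the file contents is empty. The helper
# below reproduces that comparison on two in-memory strings, so the mechanism
# can be inspected without running clang-format; both arguments are assumed to
# be complete file contents supplied by the caller.
def _example_diff_is_empty(formatted, original):
    """Return True when 'original' already matches 'formatted'."""
    diff = ''
    for line in difflib.context_diff(formatted.splitlines(), original.splitlines()):
        diff += line + '\n'
    return len(diff) == 0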
if __name__ == '__main__':
unittest.main()
|
src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/__init__.py | Mannan2812/azure-cli-extensions | 207 | 1705 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .available_operation_display import AvailableOperationDisplay
from .error_details_model import ErrorDetailsModel
from .error_error_model import ErrorErrorModel
from .error_model import ErrorModel, ErrorModelException
from .operation_result import OperationResult
from .provisioned_resource_properties import ProvisionedResourceProperties
from .proxy_resource import ProxyResource
from .managed_proxy_resource import ManagedProxyResource
from .resource import Resource
from .tracked_resource import TrackedResource
from .secret_resource_properties import SecretResourceProperties
from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties
from .secret_resource_properties_base import SecretResourcePropertiesBase
from .secret_resource_description import SecretResourceDescription
from .secret_value import SecretValue
from .secret_value_properties import SecretValueProperties
from .secret_value_resource_description import SecretValueResourceDescription
from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile
from .volume_properties import VolumeProperties
from .volume_reference import VolumeReference
from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters
from .application_scoped_volume import ApplicationScopedVolume
from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk
from .volume_resource_description import VolumeResourceDescription
from .network_resource_properties import NetworkResourceProperties
from .local_network_resource_properties import LocalNetworkResourceProperties
from .endpoint_ref import EndpointRef
from .network_ref import NetworkRef
from .network_resource_properties_base import NetworkResourcePropertiesBase
from .network_resource_description import NetworkResourceDescription
from .gateway_destination import GatewayDestination
from .tcp_config import TcpConfig
from .http_route_match_path import HttpRouteMatchPath
from .http_route_match_header import HttpRouteMatchHeader
from .http_route_match_rule import HttpRouteMatchRule
from .http_route_config import HttpRouteConfig
from .http_host_config import HttpHostConfig
from .http_config import HttpConfig
from .gateway_properties import GatewayProperties
from .gateway_resource_description import GatewayResourceDescription
from .image_registry_credential import ImageRegistryCredential
from .environment_variable import EnvironmentVariable
from .setting import Setting
from .container_label import ContainerLabel
from .endpoint_properties import EndpointProperties
from .resource_requests import ResourceRequests
from .resource_limits import ResourceLimits
from .resource_requirements import ResourceRequirements
from .diagnostics_ref import DiagnosticsRef
from .reliable_collections_ref import ReliableCollectionsRef
from .container_state import ContainerState
from .container_event import ContainerEvent
from .container_instance_view import ContainerInstanceView
from .container_code_package_properties import ContainerCodePackageProperties
from .auto_scaling_trigger import AutoScalingTrigger
from .auto_scaling_mechanism import AutoScalingMechanism
from .auto_scaling_policy import AutoScalingPolicy
from .service_resource_description import ServiceResourceDescription
from .diagnostics_sink_properties import DiagnosticsSinkProperties
from .diagnostics_description import DiagnosticsDescription
from .application_properties import ApplicationProperties
from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription
from .application_resource_description import ApplicationResourceDescription
from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism
from .auto_scaling_metric import AutoScalingMetric
from .auto_scaling_resource_metric import AutoScalingResourceMetric
from .service_properties import ServiceProperties
from .service_replica_properties import ServiceReplicaProperties
from .service_replica_description import ServiceReplicaDescription
from .average_load_scaling_trigger import AverageLoadScalingTrigger
from .container_logs import ContainerLogs
from .operation_result_paged import OperationResultPaged
from .secret_resource_description_paged import SecretResourceDescriptionPaged
from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged
from .volume_resource_description_paged import VolumeResourceDescriptionPaged
from .network_resource_description_paged import NetworkResourceDescriptionPaged
from .gateway_resource_description_paged import GatewayResourceDescriptionPaged
from .application_resource_description_paged import ApplicationResourceDescriptionPaged
from .service_resource_description_paged import ServiceResourceDescriptionPaged
from .service_replica_description_paged import ServiceReplicaDescriptionPaged
from .service_fabric_mesh_management_client_enums import (
ResourceStatus,
HealthState,
SecretKind,
VolumeProvider,
SizeTypes,
ApplicationScopedVolumeKind,
NetworkKind,
HeaderMatchType,
OperatingSystemType,
DiagnosticsSinkKind,
AutoScalingMechanismKind,
AutoScalingMetricKind,
AutoScalingResourceMetricName,
AutoScalingTriggerKind,
)
__all__ = [
'AvailableOperationDisplay',
'ErrorDetailsModel',
'ErrorErrorModel',
'ErrorModel', 'ErrorModelException',
'OperationResult',
'ProvisionedResourceProperties',
'ProxyResource',
'ManagedProxyResource',
'Resource',
'TrackedResource',
'SecretResourceProperties',
'InlinedValueSecretResourceProperties',
'SecretResourcePropertiesBase',
'SecretResourceDescription',
'SecretValue',
'SecretValueProperties',
'SecretValueResourceDescription',
'VolumeProviderParametersAzureFile',
'VolumeProperties',
'VolumeReference',
'ApplicationScopedVolumeCreationParameters',
'ApplicationScopedVolume',
'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk',
'VolumeResourceDescription',
'NetworkResourceProperties',
'LocalNetworkResourceProperties',
'EndpointRef',
'NetworkRef',
'NetworkResourcePropertiesBase',
'NetworkResourceDescription',
'GatewayDestination',
'TcpConfig',
'HttpRouteMatchPath',
'HttpRouteMatchHeader',
'HttpRouteMatchRule',
'HttpRouteConfig',
'HttpHostConfig',
'HttpConfig',
'GatewayProperties',
'GatewayResourceDescription',
'ImageRegistryCredential',
'EnvironmentVariable',
'Setting',
'ContainerLabel',
'EndpointProperties',
'ResourceRequests',
'ResourceLimits',
'ResourceRequirements',
'DiagnosticsRef',
'ReliableCollectionsRef',
'ContainerState',
'ContainerEvent',
'ContainerInstanceView',
'ContainerCodePackageProperties',
'AutoScalingTrigger',
'AutoScalingMechanism',
'AutoScalingPolicy',
'ServiceResourceDescription',
'DiagnosticsSinkProperties',
'DiagnosticsDescription',
'ApplicationProperties',
'AzureInternalMonitoringPipelineSinkDescription',
'ApplicationResourceDescription',
'AddRemoveReplicaScalingMechanism',
'AutoScalingMetric',
'AutoScalingResourceMetric',
'ServiceProperties',
'ServiceReplicaProperties',
'ServiceReplicaDescription',
'AverageLoadScalingTrigger',
'ContainerLogs',
'OperationResultPaged',
'SecretResourceDescriptionPaged',
'SecretValueResourceDescriptionPaged',
'VolumeResourceDescriptionPaged',
'NetworkResourceDescriptionPaged',
'GatewayResourceDescriptionPaged',
'ApplicationResourceDescriptionPaged',
'ServiceResourceDescriptionPaged',
'ServiceReplicaDescriptionPaged',
'ResourceStatus',
'HealthState',
'SecretKind',
'VolumeProvider',
'SizeTypes',
'ApplicationScopedVolumeKind',
'NetworkKind',
'HeaderMatchType',
'OperatingSystemType',
'DiagnosticsSinkKind',
'AutoScalingMechanismKind',
'AutoScalingMetricKind',
'AutoScalingResourceMetricName',
'AutoScalingTriggerKind',
]
|
sdk/python/pulumi_azure/desktopvirtualization/workspace.py | henriktao/pulumi-azure | 109 | 1708 | <filename>sdk/python/pulumi_azure/desktopvirtualization/workspace.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['WorkspaceArgs', 'Workspace']
@pulumi.input_type
class WorkspaceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Workspace resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _WorkspaceState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Workspace resources.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
A friendly name for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Workspace(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Virtual Desktop Workspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.core.ResourceGroup("example", location="West Europe")
workspace = azure.desktopvirtualization.Workspace("workspace",
location=example.location,
resource_group_name=example.name,
friendly_name="FriendlyName",
description="A description of my workspace")
```
## Import
Virtual Desktop Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkspaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Virtual Desktop Workspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.core.ResourceGroup("example", location="West Europe")
workspace = azure.desktopvirtualization.Workspace("workspace",
location=example.location,
resource_group_name=example.name,
friendly_name="FriendlyName",
description="A description of my workspace")
```
## Import
Virtual Desktop Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace
```
:param str resource_name: The name of the resource.
:param WorkspaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
super(Workspace, __self__).__init__(
'azure:desktopvirtualization/workspace:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.
:param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WorkspaceState.__new__(_WorkspaceState)
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
A friendly name for the Virtual Desktop Workspace.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Virtual Desktop Workspace. Changing the name
forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to
create the Virtual Desktop Workspace. Changing the resource group name forces
a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
|
tests/pytorch_pfn_extras_tests/onnx/test_load_model.py | kmaehashi/pytorch-pfn-extras | 243 | 1716 | <reponame>kmaehashi/pytorch-pfn-extras<gh_stars>100-1000
import os
import pytest
import torch
import pytorch_pfn_extras.onnx as tou
from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net
@pytest.mark.filterwarnings("ignore:Named tensors .* experimental:UserWarning")
def test_onnx_load_model():
model = Net()
outdir = "out/load_model_test"
tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
training=True, do_constant_folding=False)
tou.load_model(os.path.join(outdir, "model.onnx"))
@pytest.mark.filterwarnings("ignore:.*ONNX contains stripped .*:UserWarning")
def test_stripped_onnx_load_model():
model = Net()
outdir = "out/stripped_load_model_test"
tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
strip_large_tensor_data=True, training=True,
do_constant_folding=False)
tou.load_model(os.path.join(outdir, "model.onnx"))
|
language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py | Xtuden-com/language | 1,199 | 1727 | <filename>language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py<gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sentencize the raw wikitext103."""
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("wiki103_raw", None,
"Path to raw wikitext103 train corpus.")
flags.DEFINE_string("output_path", None,
"Path to output the processed dataset.")
FLAGS = flags.FLAGS
def main(_):
with open(FLAGS.wiki103_raw, "r") as f:
data = f.read().strip().split("\n")
data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="]
sentences = []
for para in data:
for sent in para:
sentences.append(sent + ".")
data = "\n".join(sentences)
data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
data = data.replace(" ;", ";")
data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3])
logging.info("length = %d", len(data.split("\n")))
with open(FLAGS.output_path, "w") as f:
f.write(data)
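# Illustrative sketch, not part of the original script: main() detokenizes
# wikitext-style spacing before writing the corpus out. The helper below applies
# the same replacement chain to a single string so the effect is visible without
# a full wikitext103 dump; the input string is whatever the caller supplies.
def example_detokenize(text):
  """Apply the wikitext cleanup replacements used in main() to one string."""
  text = text.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
  text = text.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
  text = text.replace(" ;", ";")
  return text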
if __name__ == "__main__":
app.run(main)
|
torchvision/datasets/samplers/__init__.py | yoshitomo-matsubara/vision | 12,063 | 1754 | <gh_stars>1000+
from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler
__all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py | ishtjot/susereumutep | 14,668 | 1775 | # -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:<EMAIL>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities for creating VCG and Dot diagrams"""
from logilab.common.vcgutils import VCGPrinter
from logilab.common.graph import DotBackend
from pylint.pyreverse.utils import is_exception
class DiagramWriter(object):
"""base class for writing project diagrams
"""
def __init__(self, config, styles):
self.config = config
self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles
self.printer = None # defined in set_printer
def write(self, diadefs):
"""write files for <project> according to <diadefs>
"""
for diagram in diadefs:
basename = diagram.title.strip().replace(' ', '_')
file_name = '%s.%s' % (basename, self.config.output_format)
self.set_printer(file_name, basename)
if diagram.TYPE == 'class':
self.write_classes(diagram)
else:
self.write_packages(diagram)
self.close_graph()
def write_packages(self, diagram):
"""write a package diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)):
self.printer.emit_node(i, label=self.get_title(obj), shape='box')
obj.fig_id = i
# package dependencies
for rel in diagram.get_relationships('depends'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.pkg_edges)
def write_classes(self, diagram):
"""write a class diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)):
self.printer.emit_node(i, **self.get_values(obj))
obj.fig_id = i
# inheritance links
for rel in diagram.get_relationships('specialization'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.inh_edges)
# implementation links
for rel in diagram.get_relationships('implements'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.imp_edges)
# generate associations
for rel in diagram.get_relationships('association'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
label=rel.name, **self.ass_edges)
def set_printer(self, file_name, basename):
"""set printer"""
raise NotImplementedError
def get_title(self, obj):
"""get project title"""
raise NotImplementedError
def get_values(self, obj):
"""get label and shape for classes."""
raise NotImplementedError
def close_graph(self):
"""finalize the graph"""
raise NotImplementedError
class DotWriter(DiagramWriter):
"""write dot graphs from a diagram definition and a project
"""
def __init__(self, config):
styles = [dict(arrowtail='none', arrowhead="open"),
dict(arrowtail='none', arrowhead='empty'),
                  dict(arrowtail='none', arrowhead='empty', style='dashed'),
dict(fontcolor='green', arrowtail='none',
arrowhead='diamond', style='solid'),
]
DiagramWriter.__init__(self, config, styles)
def set_printer(self, file_name, basename):
"""initialize DotWriter and add options for layout.
"""
layout = dict(rankdir="BT")
self.printer = DotBackend(basename, additionnal_param=layout)
self.file_name = file_name
def get_title(self, obj):
"""get project title"""
return obj.title
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
label = obj.title
if obj.shape == 'interface':
label = u'«interface»\\n%s' % label
if not self.config.only_classnames:
label = r'%s|%s\l|' % (label, r'\l'.join(obj.attrs))
for func in obj.methods:
label = r'%s%s()\l' % (label, func.name)
label = '{%s}' % label
if is_exception(obj.node):
return dict(fontcolor='red', label=label, shape='record')
return dict(label=label, shape='record')
def close_graph(self):
"""print the dot graph into <file_name>"""
self.printer.generate(self.file_name)
class VCGWriter(DiagramWriter):
"""write vcg graphs from a diagram definition and a project
"""
def __init__(self, config):
styles = [dict(arrowstyle='solid', backarrowstyle='none',
backarrowsize=0),
dict(arrowstyle='solid', backarrowstyle='none',
backarrowsize=10),
dict(arrowstyle='solid', backarrowstyle='none',
linestyle='dotted', backarrowsize=10),
dict(arrowstyle='solid', backarrowstyle='none',
textcolor='green'),
]
DiagramWriter.__init__(self, config, styles)
def set_printer(self, file_name, basename):
"""initialize VCGWriter for a UML graph"""
self.graph_file = open(file_name, 'w+')
self.printer = VCGPrinter(self.graph_file)
self.printer.open_graph(title=basename, layoutalgorithm='dfs',
late_edge_labels='yes', port_sharing='no',
manhattan_edges='yes')
self.printer.emit_node = self.printer.node
self.printer.emit_edge = self.printer.edge
def get_title(self, obj):
"""get project title in vcg format"""
return r'\fb%s\fn' % obj.title
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
if is_exception(obj.node):
label = r'\fb\f09%s\fn' % obj.title
else:
label = r'\fb%s\fn' % obj.title
if obj.shape == 'interface':
shape = 'ellipse'
else:
shape = 'box'
if not self.config.only_classnames:
attrs = obj.attrs
methods = [func.name for func in obj.methods]
# box width for UML like diagram
maxlen = max(len(name) for name in [obj.title] + methods + attrs)
line = '_' * (maxlen + 2)
label = r'%s\n\f%s' % (label, line)
for attr in attrs:
label = r'%s\n\f08%s' % (label, attr)
if attrs:
label = r'%s\n\f%s' % (label, line)
for func in methods:
label = r'%s\n\f10%s()' % (label, func)
return dict(label=label, shape=shape)
def close_graph(self):
"""close graph and file"""
self.printer.close_graph()
self.graph_file.close()
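# Illustrative sketch, not part of the original module: DotWriter.get_values()
# assembles a dot "record" label of the form {title|attr\l...|method()\l...}.
# The helper below builds such a label from plain lists so the expected output
# is easy to inspect; the title, attribute and method names are supplied by the
# caller and carry no special meaning.
def _example_record_label(title, attrs, methods):
    """Build a dot record label the way DotWriter.get_values() does."""
    label = r'%s|%s\l|' % (title, r'\l'.join(attrs))
    for name in methods:
        label = r'%s%s()\l' % (label, name)
    return '{%s}' % label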
|
dffml/operation/mapping.py | SGeetansh/dffml | 171 | 1779 | from typing import Dict, List, Any
from ..df.types import Definition
from ..df.base import op
from ..util.data import traverse_get
MAPPING = Definition(name="mapping", primitive="map")
MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]")
MAPPING_KEY = Definition(name="key", primitive="str")
MAPPING_VALUE = Definition(name="value", primitive="generic")
@op(
name="dffml.mapping.extract",
inputs={"mapping": MAPPING, "traverse": MAPPING_TRAVERSE},
outputs={"value": MAPPING_VALUE},
)
def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):
"""
Extracts value from a given mapping.
Parameters
----------
mapping : dict
The mapping to extract the value from.
traverse : list[str]
A list of keys to traverse through the mapping dictionary and extract the values.
Returns
-------
dict
A dictionary containing the value of the keys.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle)
>>>
>>> dataflow.seed.append(
... Input(
... value=[mapping_extract_value.op.outputs["value"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value={"key1": {"key2": 42}},
... definition=mapping_extract_value.op.inputs["mapping"],
... ),
... Input(
... value=["key1", "key2"],
... definition=mapping_extract_value.op.inputs["traverse"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'value': 42}
"""
return {"value": traverse_get(mapping, *traverse)}
@op(
name="dffml.mapping.create",
inputs={"key": MAPPING_KEY, "value": MAPPING_VALUE},
outputs={"mapping": MAPPING},
)
def create_mapping(key: str, value: Any):
"""
Creates a mapping of a given key and value.
Parameters
----------
key : str
The key for the mapping.
value : Any
The value for the mapping.
Returns
-------
dict
A dictionary containing the mapping created.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(create_mapping, GetSingle)
>>> dataflow.seed.append(
... Input(
... value=[create_mapping.op.outputs["mapping"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value="key1", definition=create_mapping.op.inputs["key"],
... ),
... Input(
... value=42, definition=create_mapping.op.inputs["value"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'mapping': {'key1': 42}}
"""
return {"mapping": {key: value}}
|
juriscraper/oral_args/united_states/federal_appellate/scotus.py | EvandoBlanco/juriscraper | 228 | 1781 | <reponame>EvandoBlanco/juriscraper<filename>juriscraper/oral_args/united_states/federal_appellate/scotus.py<gh_stars>100-1000
"""Scraper for Supreme Court of U.S.
CourtID: scotus
Court Short Name: scotus
History:
- 2014-07-20 - Created by <NAME>, reviewed by MLR
- 2017-10-09 - Updated by MLR.
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = (
"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx"
)
self.back_scrape_iterable = list(range(2010, 2015))
def _get_download_urls(self):
path = "id('list')//tr//a/text()"
return list(map(self._return_download_url, self.html.xpath(path)))
@staticmethod
def _return_download_url(d):
file_type = "mp3" # or 'wma' is also available for any case.
download_url = "http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}".format(
type=file_type, docket_number=d
)
return download_url
def _get_case_names(self):
path = "id('list')//tr/td/span/text()"
return [s.lstrip(". ") for s in self.html.xpath(path)]
def _get_case_dates(self):
path = "id('list')//tr/td[2]//text()"
return [
datetime.strptime(s, "%m/%d/%y").date()
for s in self.html.xpath(path)
if not "Date" in s
]
def _get_docket_numbers(self):
path = "id('list')//tr//a/text()"
return list(self.html.xpath(path))
def _download_backwards(self, year):
self.url = (
"http://www.supremecourt.gov/oral_arguments/argument_audio/%s"
% year
)
self.html = self._download()
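# Illustrative sketch, not part of the original scraper: _return_download_url()
# only formats a docket number into the audio URL, so it can be exercised
# without fetching the argument audio page. The docket number below is just a
# sample value.
def _example_download_url(docket_number="14-556"):
    """Show the mp3 URL produced for a sample docket number."""
    # e.g. 'http://www.supremecourt.gov/media/audio/mp3files/14-556.mp3'
    return Site._return_download_url(docket_number)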
|
script_tests/maf_extract_ranges_indexed_tests.py | lldelisle/bx-python | 122 | 1789 | import unittest
import base
class Test(base.BaseScriptTest, unittest.TestCase):
command_line = "./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8."
input_stdin = base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.bed")
output_stdout = base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.maf")
|
qstklearn/1knn.py | elxavicio/QSTK | 339 | 1790 | <filename>qstklearn/1knn.py<gh_stars>100-1000
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Feb 20, 2011
@author: <NAME>
@organization: Georgia Institute of Technology
@contact: <EMAIL>
@summary: This is an implementation of the 1-KNN algorithm for ranking features quickly.
It uses the knn implementation.
@status: oneKNN functions correctly, optimized to use n^2/2 algorithm.
'''
import matplotlib.pyplot as plt
from pylab import gca
import itertools
import string
import numpy as np
import math
import knn
from time import clock
'''
@summary: Query function for 1KNN, return value is a double between 0 and 1.
@param naData: A 2D numpy array. Each row is a data point with the final column containing the classification.
'''
def oneKnn( naData ):
if naData.ndim != 2:
raise Exception( "Data should have two dimensions" )
lLen = naData.shape[0]
''' # of dimensions, subtract one for classification '''
lDim = naData.shape[1] - 1
''' Start best distances as very large '''
ldDistances = [1E300] * lLen
llIndexes = [-1] * lLen
dDistance = 0.0;
''' Loop through finding closest neighbors '''
for i in range( lLen ):
for j in range( i+1, lLen ):
dDistance = 0.0
for k in range( 0, lDim ):
dDistance += (naData[i][k] - naData[j][k])**2
dDistance = math.sqrt( dDistance )
''' Two distances to check, for i's best, and j's best '''
if dDistance < ldDistances[i]:
ldDistances[i] = dDistance
llIndexes[i] = j
if dDistance < ldDistances[j]:
ldDistances[j] = dDistance
llIndexes[j] = i
lCount = 0
''' Now count # of matching pairs '''
for i in range( lLen ):
if naData[i][-1] == naData[ llIndexes[i] ][-1]:
lCount = lCount + 1
return float(lCount) / lLen
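'''
@summary: Illustrative sketch, not part of the original module. oneKnn expects a
2D array whose last column holds the class label and returns the fraction of
points whose single nearest neighbour shares that label. The two small clusters
below are made up; numpy is already imported above as np.
'''
def _exampleOneKnn():
    ''' Run oneKnn on two well separated, labelled clusters (result near 1.0). '''
    naA = np.hstack( (np.random.normal( loc=0.0, scale=.1, size=[20,2] ), np.zeros(20).reshape(-1,1)) )
    naB = np.hstack( (np.random.normal( loc=5.0, scale=.1, size=[20,2] ), np.ones(20).reshape(-1,1)) )
    return oneKnn( np.vstack( (naA, naB) ) )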
''' Test function to plot results '''
def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ):
plt.clf()
plt.subplot(311)
plt.scatter( naDist1[:,0], naDist1[:,1] )
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
#plt.ylabel( 'Feature 2' )
#plt.xlabel( 'Feature 1' )
#gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) )
gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) )
plt.title( 'Data Distribution' )
plt.subplot(312)
plt.plot( range( len(lfOneKnn) ), lfOneKnn )
plt.ylabel( '1-KNN Value' )
#plt.xlabel( 'Distribution Merge' )
plt.title( '1-KNN Performance' )
plt.subplot(313)
plt.plot( range( len(lf5Knn) ), lf5Knn )
plt.ylabel( '% Correct Classification' )
#plt.xlabel( 'Distribution Merge' )
plt.title( '5-KNN Performance' )
plt.subplots_adjust()
plt.show()
''' Function to plot 2 distributions '''
def _plotDist( naDist1, naDist2, i ):
plt.clf()
plt.scatter( naDist1[:,0], naDist1[:,1] )
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
plt.ylabel( 'Feature 2' )
plt.xlabel( 'Feature 1' )
plt.title( 'Iteration ' + str(i) )
plt.show()
''' Function to test KNN performance '''
def _knnResult( naData ):
''' Split up data into training/testing '''
lSplit = naData.shape[0] * .7
naTrain = naData[:lSplit, :]
naTest = naData[lSplit:, :]
knn.addEvidence( naTrain.astype(float), 1 );
''' Query with last column omitted and 5 nearest neighbors '''
naResults = knn.query( naTest[:,:-1], 5, 'mode')
''' Count returns which are correct '''
lCount = 0
for i, dVal in enumerate(naResults):
if dVal == naTest[i,-1]:
lCount = lCount + 1
dResult = float(lCount) / naResults.size
return dResult
''' Tests performance of 1-KNN '''
def _test1():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
for i in range(3):
''' Select one of three distributions '''
if i == 0:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
elif i == 1:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
else:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] )
naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) )
naOrig = np.vstack( (naTest1, naTest2) )
naBoth = np.vstack( (naTest1, naTest2) )
''' Keep track of runtimes '''
t = clock()
cOneRuntime = t-t;
cKnnRuntime = t-t;
lfResults = []
lfKnnResults = []
for i in range( 15 ):
#_plotDist( naTest1, naBoth[100:,:], i )
t = clock()
lfResults.append( oneKnn( naBoth ) )
cOneRuntime = cOneRuntime + (clock() - t)
t = clock()
lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) )
cKnnRuntime = cKnnRuntime + (clock() - t)
naBoth[500:,0] = naBoth[500:,0] - .1
print 'Runtime OneKnn:', cOneRuntime
print 'Runtime 5-KNN:', cKnnRuntime
_plotResults( naTest1, naTest2, lfResults, lfKnnResults )
''' Tests performance of 1-KNN '''
def _test2():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
np.random.seed( 12345 )
''' Create 5 distributions for each of the 5 attributes '''
dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
lDists = [ dist1, dist2, dist3, dist4, dist5 ]
''' All features used except for distribution 4 '''
distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 )
distY = distY.reshape( -1, 1 )
for i, fVal in enumerate( distY ):
if fVal >= 0:
distY[i] = 1
else:
distY[i] = 0
for i in range( 1, 6 ):
lsNames = []
lf1Vals = []
lfVals = []
for perm in itertools.combinations( '12345', i ):
''' set test distribution to first element '''
naTest = lDists[ int(perm[0]) - 1 ]
sPerm = perm[0]
''' stack other distributions on '''
for j in range( 1, len(perm) ):
sPerm = sPerm + str(perm[j])
naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) )
''' finally stack y values '''
naTest = np.hstack( (naTest, distY) )
lf1Vals.append( oneKnn( naTest ) )
lfVals.append( _knnResult( np.random.permutation(naTest) ) )
lsNames.append( sPerm )
''' Plot results '''
plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' )
plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' )
plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') )
plt.ylabel('1-KNN Value/KNN Classification')
plt.xlabel('Feature Set')
plt.title('Combinations of ' + str(i) + ' Features')
plt.ylim( (0,1) )
if len(lf1Vals) < 2:
plt.xlim( (-1,1) )
gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 )
gca().xaxis.set_ticklabels( lsNames )
plt.show()
if __name__ == '__main__':
_test1()
#_test2()
|
classification/model/build_gen.py | LittleWat/MCD_DA | 464 | 1793 | import svhn2mnist
import usps
import syn2gtrsb
import syndig2svhn
def Generator(source, target, pixelda=False):
if source == 'usps' or target == 'usps':
return usps.Feature()
elif source == 'svhn':
return svhn2mnist.Feature()
elif source == 'synth':
return syn2gtrsb.Feature()
def Classifier(source, target):
if source == 'usps' or target == 'usps':
return usps.Predictor()
if source == 'svhn':
return svhn2mnist.Predictor()
if source == 'synth':
return syn2gtrsb.Predictor()
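# Illustrative sketch, not part of the original module: Generator() and
# Classifier() dispatch purely on the dataset names, so wiring up one adaptation
# task looks like the snippet below. It assumes the svhn2mnist module imported
# above is available; the 'svhn' -> 'mnist' pair and the use of two classifiers
# mirror the usual MCD setup but are only an example.
def example_build(source='svhn', target='mnist'):
    G = Generator(source, target)
    C1 = Classifier(source, target)
    C2 = Classifier(source, target)
    return G, C1, C2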
|
samples/modules/tensorflow/magic_wand/train/data_split_person.py | lviala-zaack/zephyr | 6,224 | 1799 | # Lint as: python3
# coding=utf-8
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split data into train, validation and test dataset according to person.
That is, use some people's data as train, some other people's data as
validation, and the rest ones' data as test. These data would be saved
separately under "/person_split".
It will generate new files with the following structure:
├──person_split
│ ├── test
│ ├── train
│ └──valid
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from data_split import read_data
from data_split import write_data
def person_split(whole_data, train_names, valid_names, test_names):
"""Split data by person."""
random.seed(30)
random.shuffle(whole_data)
train_data = []
valid_data = []
test_data = []
for idx, data in enumerate(whole_data): # pylint: disable=unused-variable
if data["name"] in train_names:
train_data.append(data)
elif data["name"] in valid_names:
valid_data.append(data)
elif data["name"] in test_names:
test_data.append(data)
print("train_length:" + str(len(train_data)))
print("valid_length:" + str(len(valid_data)))
print("test_length:" + str(len(test_data)))
return train_data, valid_data, test_data
if __name__ == "__main__":
data = read_data("./data/complete_data")
train_names = [
"hyw", "shiyun", "tangsy", "dengyl", "jiangyh", "xunkai", "negative3",
"negative4", "negative5", "negative6"
]
valid_names = ["lsj", "pengxl", "negative2", "negative7"]
test_names = ["liucx", "zhangxy", "negative1", "negative8"]
train_data, valid_data, test_data = person_split(data, train_names,
valid_names, test_names)
if not os.path.exists("./person_split"):
os.makedirs("./person_split")
write_data(train_data, "./person_split/train")
write_data(valid_data, "./person_split/valid")
write_data(test_data, "./person_split/test")
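# Illustrative sketch, not part of the original script: person_split() only
# needs dicts carrying a "name" key, so the split logic can be checked on a tiny
# in-memory list. The person names and the "gesture" field below are
# placeholders.
def example_person_split():
  """Split six fake records between train, validation and test people."""
  fake_data = [{"name": n, "gesture": "wing"}
               for n in ["a", "a", "b", "b", "c", "c"]]
  return person_split(fake_data, ["a"], ["b"], ["c"])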
|
tests/k8s_handler.py | josebalius/go-spacemesh | 586 | 1800 | from datetime import datetime
from kubernetes import client
from kubernetes.client.rest import ApiException
import os
import time
import yaml
from tests import config as conf
import tests.utils as ut
def remove_clusterrole_binding(shipper_name, crb_name):
# remove clusterrolebind
k8s_client = client.RbacAuthorizationV1Api()
try:
k8s_client.delete_cluster_role_binding(crb_name)
print(f"\nsuccessfully deleted: {crb_name}")
except Exception as e:
print(f"\n{shipper_name} cluster role binding deletion has failed, please manually delete {crb_name}:")
print(f"kubectl delete clusterrolebinding {crb_name}")
def filebeat_teardown(namespace):
# remove clusterrolebind
# TODO: find a solution for sharing the name both here and in the kube object
crb_name = f"filebeat-cluster-role-binding-{namespace}"
remove_clusterrole_binding("filebeat", crb_name)
def fluent_bit_teardown(namespace):
# remove clusterrolebind
# TODO: find a solution for sharing the name both here and in the kube object
crb_name = f"fluent-bit-clusterrole-binding-{namespace}"
remove_clusterrole_binding("fluent-bit", crb_name)
def add_elastic_cluster(namespace):
print("\nDeploying ElasticSearch\n")
add_deployment_dir(namespace, conf.ELASTIC_CONF_DIR)
def add_filebeat_cluster(namespace):
print("\nDeploying FileBeat\n")
add_deployment_dir(namespace, conf.FILEBEAT_CONF_DIR)
def add_fluent_bit_cluster(namespace):
print("\nDeploying Fluent-bit\n")
add_deployment_dir(namespace, conf.FLUENT_BIT_CONF_DIR)
def add_kibana_cluster(namespace):
print("\nDeploying Kibana\n")
add_deployment_dir(namespace, conf.KIBANA_CONF_DIR)
def add_logstash_cluster(namespace):
print("\nDeploying LogStash\n")
add_deployment_dir(namespace, conf.LOGSTASH_CONF_DIR)
def add_deployment_dir(namespace, dir_path, delete=False):
with open(os.path.join(dir_path, 'dep_order.txt')) as f:
dep_order = f.readline()
dep_lst = [x.strip() for x in dep_order.split(',')]
print(dep_lst)
phrases_to_replace = ["(?<!_)NAMESPACE", "REP_ES_USER", "REP_ES_PASS"]
values_for_replacement = [namespace, conf.ES_USER_LOCAL, conf.ES_PASS_LOCAL]
for filename in dep_lst:
# replace all phrases with the actual values if exists
modified_file_path, is_change = ut.duplicate_file_and_replace_phrases(
dir_path, filename, f"{namespace}_{filename}", phrases_to_replace, values_for_replacement
)
print(f"applying file: {filename}")
with open(modified_file_path) as f:
dep = yaml.safe_load(f)
if modified_file_path != os.path.join(dir_path, filename) and is_change:
# remove modified file
ut.delete_file(modified_file_path)
name = dep["metadata"]["name"]
if dep['kind'] == 'StatefulSet':
k8s_client = client.AppsV1Api()
if not delete:
k8s_client.create_namespaced_stateful_set(body=dep, namespace=namespace)
else:
k8s_client.delete_namespaced_stateful_set(name=name, namespace=namespace)
elif dep['kind'] == 'DaemonSet':
k8s_client = client.AppsV1Api()
k8s_client.create_namespaced_daemon_set(body=dep, namespace=namespace)
elif dep['kind'] == 'Deployment':
k8s_client = client.AppsV1Api()
k8s_client.create_namespaced_deployment(body=dep, namespace=namespace)
elif dep['kind'] == 'Service':
try:
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_service(body=dep, namespace=namespace)
except ApiException as e:
if e.status == 409:
print(f"Service exists: {dep['metadata']['name']}")
continue
raise e
elif dep['kind'] == 'PodDisruptionBudget':
k8s_client = client.PolicyV1beta1Api()
k8s_client.create_namespaced_pod_disruption_budget(body=dep, namespace=namespace)
elif dep["kind"] == 'Role':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.create_namespaced_role(body=dep, namespace=namespace)
elif dep["kind"] == 'ClusterRole':
try:
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.create_cluster_role(body=dep)
except ApiException as e:
if e.status == 409:
print(f"cluster role already exists")
continue
raise e
elif dep["kind"] == 'RoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
dep["subjects"][0]["namespace"] = namespace
k8s_client.create_namespaced_role_binding(body=dep, namespace=namespace)
elif dep["kind"] == 'ClusterRoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
try:
k8s_client.create_cluster_role_binding(body=dep)
except ApiException as e:
if e.status == 409:
print(f"cluster role binding already exists")
continue
raise e
elif dep["kind"] == 'ConfigMap':
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_config_map(body=dep, namespace=namespace)
elif dep["kind"] == 'ServiceAccount':
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_service_account(body=dep, namespace=namespace)
print("\nDone\n")
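# A minimal sketch of the dep_order.txt file that add_deployment_dir expects: a
# single comma-separated line listing the YAML files to apply, in order. The
# filenames below are hypothetical examples, not the actual files shipped in the
# conf directories:
#
#   elasticsearch-service.yaml, elasticsearch-statefulset.yaml, kibana-deployment.yaml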
def remove_deployment_dir(namespace, dir_path):
with open(os.path.join(dir_path, 'dep_order.txt')) as f:
dep_order = f.readline()
dep_lst = [x.strip() for x in dep_order.split(',')]
print(dep_lst)
for filename in dep_lst:
print(f"deleting {filename}")
with open(os.path.join(dir_path, filename)) as f:
dep = yaml.safe_load(f)
name = dep["metadata"]["name"]
if dep['kind'] == 'StatefulSet':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_stateful_set(name=name, namespace=namespace)
elif dep['kind'] == 'DaemonSet':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_daemon_set(name=name, namespace=namespace)
elif dep['kind'] == 'Deployment':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_deployment(name=name, namespace=namespace)
elif dep['kind'] == 'Service':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_service(name=name, namespace=namespace, grace_period_seconds=0)
delete_func = k8s_client.delete_namespaced_service
list_func = k8s_client.list_namespaced_service
wait_for_namespaced_deletion(name, namespace, delete_func, list_func)
elif dep['kind'] == 'PodDisruptionBudget':
k8s_client = client.PolicyV1beta1Api()
k8s_client.delete_namespaced_pod_disruption_budget(name=name, namespace=namespace)
elif dep["kind"] == 'Role':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_namespaced_role(name=name, namespace=namespace)
elif dep["kind"] == 'RoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_namespaced_role_binding(name=name, namespace=namespace)
elif dep["kind"] == 'ClusterRoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_cluster_role_binding(name=name)
elif dep["kind"] == 'ConfigMap':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_config_map(name=name, namespace=namespace)
elif dep["kind"] == 'ServiceAccount':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_service_account(name=name, namespace=namespace)
print("\nDone\n")
def wait_for_namespaced_deletion(name, namespace, deletion_func, list_func, timeout=15):
deleted = False
orig_timeout = timeout
while not deleted:
# find by name and delete requested item
for item in list_func(namespace).items:
if item.metadata.name == name:
if timeout < 0:
                    raise TimeoutError(f"{orig_timeout} seconds were not enough for deleting item:\n{item}\n")
                deletion_func(name=name, namespace=namespace)
                print(f"item {name} was not deleted yet, retrying")
time.sleep(1)
timeout -= 1
# validate item was deleted
for item in list_func(namespace).items:
deleted = True
if item.metadata.name == name:
deleted = False
return deleted
def wait_for_daemonset_to_be_ready(name, namespace, timeout=None):
wait_for_to_be_ready("daemonset", name, namespace, timeout=timeout)
def resolve_read_status_func(obj_name):
if obj_name == "daemonset":
return client.AppsV1Api().read_namespaced_daemon_set_status
else:
raise ValueError(f"resolve_read_status_func: {obj_name} is not a valid value")
def wait_for_to_be_ready(obj_name, name, namespace, timeout=None):
start = datetime.now()
while True:
read_func = resolve_read_status_func(obj_name)
resp = read_func(name=name, namespace=namespace)
total_sleep_time = (datetime.now()-start).total_seconds()
number_ready = resp.status.number_ready
updated_number_scheduled = resp.status.updated_number_scheduled
if number_ready and updated_number_scheduled and number_ready == updated_number_scheduled:
            print(f"Total time waiting for {obj_name} {name} [size: {number_ready}]: {total_sleep_time} sec")
            break
        print(f"{number_ready}/{updated_number_scheduled} pods ready {total_sleep_time} sec ", end="\r")
time.sleep(1)
if timeout and total_sleep_time > timeout:
raise Exception(f"Timeout waiting for {obj_name} to be ready")
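# A minimal usage sketch for the helpers above; the namespace and the daemonset
# name "fluent-bit" are hypothetical and depend on the deployed manifests:
#
#   add_fluent_bit_cluster("logging-test")
#   wait_for_daemonset_to_be_ready("fluent-bit", "logging-test", timeout=300)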
|
onmt/keyphrase/pke/unsupervised/graph_based/expandrank.py | NaomiatLibrary/OpenNMT-kpg-release | 152 | 1808 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 10-02-2018
"""ExpandRank keyphrase extraction model.
Graph-based ranking approach to keyphrase extraction described in:
* <NAME> and <NAME>.
Single Document Keyphrase Extraction Using Neighborhood Knowledge.
*In proceedings of AAAI*, pages 855-860, 2008.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from onmt.keyphrase.pke.unsupervised import SingleRank
from onmt.keyphrase.pke.base import LoadFile
import networkx as nx
import logging
class ExpandRank(SingleRank):
"""ExpandRank keyphrase extraction model.
Parameterized example::
import pke
import string
from nltk.corpus import stopwords
# 1. create an ExpandRank extractor.
extractor = pke.unsupervised.ExpandRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input.xml')
        # 3. select the longest sequences of nouns and adjectives, that do
# not contain punctuation marks or stopwords as candidates.
pos = {'NOUN', 'PROPN', 'ADJ'}
stoplist = list(string.punctuation)
stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']
stoplist += stopwords.words('english')
extractor.candidate_selection(pos=pos, stoplist=stoplist)
# 4. weight the candidates using the sum of their word's scores that are
# computed using random walk. In the graph, nodes are words (nouns
# and adjectives only) that are connected if they occur in a window
# of 10 words. A set of extra documents should be provided to expand
# the graph.
expanded_documents = [('path/to/input1.xml', similarity1),
('path/to/input2.xml', similarity2)]
extractor.candidate_weighting(window=10,
pos=pos,
expanded_documents=expanded_documents,
format='corenlp')
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
""" Redefining initializer for ExpandRank. """
super(ExpandRank, self).__init__()
def expand_word_graph(self,
input_file,
similarity,
window=10,
pos=None):
"""Expands the word graph using the given document.
Args:
input_file (str): path to the input file.
similarity (float): similarity for weighting edges.
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
"""
# define default pos tags set
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# initialize document loader
doc = LoadFile()
# read document
doc.load_document(input=input_file,
language=self.language,
normalization=self.normalization)
# flatten document and initialize nodes
sequence = []
for sentence in doc.sentences:
for j, node in enumerate(sentence.stems):
if node not in self.graph and sentence.pos[j] in pos:
self.graph.add_node(node)
sequence.append((node, sentence.pos[j]))
# loop through sequence to build the edges in the graph
for j, node_1 in enumerate(sequence):
for k in range(j + 1, min(j + window, len(sequence))):
node_2 = sequence[k]
if node_1[1] in pos and node_2[1] in pos \
and node_1[0] != node_2[0]:
if not self.graph.has_edge(node_1[0], node_2[0]):
self.graph.add_edge(node_1[0], node_2[0], weight=0)
self.graph[node_1[0]][node_2[0]]['weight'] += similarity
def candidate_weighting(self,
window=10,
pos=None,
expanded_documents=None,
normalized=False):
"""Candidate ranking using random walk.
Args:
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
expanded_documents (list): the set of documents to expand the graph,
should be a list of tuples (input_path, similarity). Defaults to
empty list, i.e. no expansion.
normalized (False): normalize keyphrase score by their length,
defaults to False.
"""
# define default pos tags set
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
if expanded_documents is None:
expanded_documents = []
logging.warning('No neighbor documents provided for ExpandRank.')
# build the word graph
self.build_word_graph(window=window, pos=pos)
# expand the word graph
for input_file, similarity in expanded_documents:
self.expand_word_graph(input_file=input_file,
similarity=similarity,
window=window,
pos=pos)
# compute the word scores using random walk
w = nx.pagerank_scipy(self.graph, alpha=0.85, weight='weight')
# loop through the candidates
for k in self.candidates.keys():
tokens = self.candidates[k].lexical_form
self.weights[k] = sum([w[t] for t in tokens])
if normalized:
self.weights[k] /= len(tokens)
|
dash/long_callback/managers/celery_manager.py | nickmelnikov82/dash | 17,143 | 1810 | import json
import inspect
import hashlib
from _plotly_utils.utils import PlotlyJSONEncoder
from dash.long_callback.managers import BaseLongCallbackManager
class CeleryLongCallbackManager(BaseLongCallbackManager):
def __init__(self, celery_app, cache_by=None, expire=None):
"""
Long callback manager that runs callback logic on a celery task queue,
and stores results using a celery result backend.
:param celery_app:
A celery.Celery application instance that must be configured with a
result backend. See the celery documentation for information on
configuration options.
:param cache_by:
A list of zero-argument functions. When provided, caching is enabled and
the return values of these functions are combined with the callback
function's input arguments and source code to generate cache keys.
:param expire:
If provided, a cache entry will be removed when it has not been accessed
for ``expire`` seconds. If not provided, the lifetime of cache entries
is determined by the default behavior of the celery result backend.
"""
try:
import celery # pylint: disable=import-outside-toplevel,import-error
from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error
DisabledBackend,
)
except ImportError as missing_imports:
raise ImportError(
"""\
CeleryLongCallbackManager requires extra dependencies which can be installed by running
$ pip install "dash[celery]"\n"""
) from missing_imports
if not isinstance(celery_app, celery.Celery):
raise ValueError("First argument must be a celery.Celery object")
if isinstance(celery_app.backend, DisabledBackend):
raise ValueError("Celery instance must be configured with a result backend")
super().__init__(cache_by)
self.handle = celery_app
self.expire = expire
def terminate_job(self, job):
if job is None:
return
self.handle.control.terminate(job)
def terminate_unhealthy_job(self, job):
task = self.get_task(job)
if task and task.status in ("FAILURE", "REVOKED"):
return self.terminate_job(job)
return False
def job_running(self, job):
future = self.get_task(job)
return future and future.status in (
"PENDING",
"RECEIVED",
"STARTED",
"RETRY",
"PROGRESS",
)
def make_job_fn(self, fn, progress, args_deps):
return _make_job_fn(fn, self.handle, progress, args_deps)
def get_task(self, job):
if job:
return self.handle.AsyncResult(job)
return None
def clear_cache_entry(self, key):
self.handle.backend.delete(key)
def call_job_fn(self, key, job_fn, args):
task = job_fn.delay(key, self._make_progress_key(key), args)
return task.task_id
def get_progress(self, key):
progress_key = self._make_progress_key(key)
progress_data = self.handle.backend.get(progress_key)
if progress_data:
return json.loads(progress_data)
return None
def result_ready(self, key):
return self.handle.backend.get(key) is not None
def get_result(self, key, job):
# Get result value
result = self.handle.backend.get(key)
if result is None:
return None
result = json.loads(result)
# Clear result if not caching
if self.cache_by is None:
self.clear_cache_entry(key)
else:
if self.expire:
# Set/update expiration time
self.handle.backend.expire(key, self.expire)
self.clear_cache_entry(self._make_progress_key(key))
self.terminate_job(job)
return result
def _make_job_fn(fn, celery_app, progress, args_deps):
cache = celery_app.backend
# Hash function source and module to create a unique (but stable) celery task name
fn_source = inspect.getsource(fn)
fn_str = fn_source
fn_hash = hashlib.sha1(fn_str.encode("utf-8")).hexdigest()
@celery_app.task(name=f"long_callback_{fn_hash}")
def job_fn(result_key, progress_key, user_callback_args, fn=fn):
def _set_progress(progress_value):
cache.set(progress_key, json.dumps(progress_value, cls=PlotlyJSONEncoder))
maybe_progress = [_set_progress] if progress else []
if isinstance(args_deps, dict):
user_callback_output = fn(*maybe_progress, **user_callback_args)
elif isinstance(args_deps, (list, tuple)):
user_callback_output = fn(*maybe_progress, *user_callback_args)
else:
user_callback_output = fn(*maybe_progress, user_callback_args)
cache.set(result_key, json.dumps(user_callback_output, cls=PlotlyJSONEncoder))
return job_fn
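# A minimal usage sketch, assuming a Redis broker/result backend at a
# hypothetical local URL; the manager is then passed to a Dash app so that
# long callbacks run on the Celery queue:
#
#   import celery
#   import dash
#   celery_app = celery.Celery(
#       __name__,
#       broker="redis://localhost:6379/0",
#       backend="redis://localhost:6379/1",
#   )
#   long_callback_manager = CeleryLongCallbackManager(celery_app, expire=60)
#   app = dash.Dash(__name__, long_callback_manager=long_callback_manager)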
|
saleor/order/migrations/0081_auto_20200406_0456.py | fairhopeweb/saleor | 15,337 | 1813 | <gh_stars>1000+
# Generated by Django 3.0.4 on 2020-04-06 09:56
from django.db import migrations
from saleor.order import OrderStatus
def match_orders_with_users(apps, *_args, **_kwargs):
Order = apps.get_model("order", "Order")
User = apps.get_model("account", "User")
orders_without_user = Order.objects.filter(
user_email__isnull=False, user=None
).exclude(status=OrderStatus.DRAFT)
for order in orders_without_user:
try:
new_user = User.objects.get(email=order.user_email)
except User.DoesNotExist:
continue
order.user = new_user
order.save(update_fields=["user"])
class Migration(migrations.Migration):
dependencies = [
("order", "0080_invoice"),
]
operations = [
migrations.RunPython(match_orders_with_users),
]
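# Note: RunPython is used without a reverse function, so this migration cannot be
# unapplied cleanly; if reversibility were needed, a no-op reverse could be given:
#   migrations.RunPython(match_orders_with_users, migrations.RunPython.noop)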
|
function/python/brightics/function/textanalytics/regex.py | jhpark428/studio | 202 | 1814 | """
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.utils import check_required_parameters
from brightics.common.exception import BrighticsFunctionException
from .data import regex_format_dict
import re
def regex(table, **params):
check_required_parameters(_regex, params, ['table'])
return _regex(table, **params)
def _regex(table, input_cols, transformation_mode='extract', find_mode='all', pattern='',
user_dict_pattern='', custom_pattern='', replacement_string='', user_dict=None):
out_table = table.copy()
pattern_dict = regex_format_dict.pattern_dict
user_pattern_dict = {}
if user_dict is not None:
user_patterns = user_dict.values
for user_pattern in user_patterns:
user_pattern_name = user_pattern[0]
user_pattern_content = user_pattern[1]
user_pattern_dict[user_pattern_name] = user_pattern_dict.get(user_pattern_name, []) + [user_pattern_content]
user_pattern_dict = {key: r'|'.join(value) for key, value in user_pattern_dict.items()}
if pattern == '':
raise BrighticsFunctionException.from_errors([{'0100': "Please choose a pattern."}])
if pattern == 'custom':
raw_pattern = custom_pattern
elif pattern == 'user_dictionary':
raw_pattern = user_pattern_dict.get(user_dict_pattern)
if raw_pattern is None:
raise BrighticsFunctionException.from_errors(
[{'0100': user_dict_pattern + " is not a valid pattern name in the user dictionary."}])
else:
raw_pattern = pattern_dict.get(pattern)
regex_pattern = re.compile(raw_pattern)
def transformation(text):
if transformation_mode == 'extract':
if find_mode == 'first':
result = regex_pattern.search(text)
if result is None:
return ""
else:
return result.group()
else: # find_mode == 'all'
return regex_pattern.findall(text)
elif transformation_mode == 'replace':
if find_mode == 'first':
return regex_pattern.sub(replacement_string, text, 1)
else: # find_mode == 'all'
return regex_pattern.sub(replacement_string, text)
elif transformation_mode == 'remove':
if find_mode == 'first':
return regex_pattern.sub("", text, 1)
else: # find_mode == 'all'
return regex_pattern.sub("", text)
else: # transformation_mode == 'split'
if find_mode == 'first':
return regex_pattern.split(text, 1)
else: # find_mode == 'all'
return regex_pattern.split(text)
for col in input_cols:
result_col = table[col].apply(transformation)
out_table['regex_' + col] = result_col
return {'out_table': out_table}
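# A minimal usage sketch; the column name 'text' is hypothetical and the
# 'custom' pattern mode is used so nothing is assumed about the keys available
# in regex_format_dict.pattern_dict:
#
#   import pandas as pd
#   df = pd.DataFrame({'text': ['call 555-1234 now', 'no number here']})
#   res = regex(table=df, input_cols=['text'], transformation_mode='extract',
#               find_mode='all', pattern='custom', custom_pattern=r'\d{3}-\d{4}')
#   res['out_table']['regex_text']  # a list of matches per row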
|
applications/CSharpWrapperApplication/tests/test_CSharpWrapperApplication.py | lkusch/Kratos | 778 | 1816 | # import Kratos
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.CSharpWrapperApplication as CSharpWrapperApplication
import run_cpp_unit_tests
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import subprocess
import subprocess
# Using kratos_utilities
import KratosMultiphysics.kratos_utilities as kratos_utilities
if kratos_utilities.CheckIfApplicationsAvailable("ExternalSolversApplication"):
has_external_solvers_application = True
else:
has_external_solvers_application = False
# Import the tests o test_classes to create the suits
## SMALL TESTS
## NIGHTLY TESTS
## VALIDATION TESTS
def AssembleTestSuites():
''' Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"
Return
------
suites: A dictionary of suites
The set of suites with its test_cases added.
'''
suites = KratosUnittest.KratosSuites
# Create a test suit with the selected tests (Small tests):
smallSuite = suites['small']
# Create a test suit with the selected tests plus all small tests
nightlySuite = suites['nightly']
### BEGIN SMALL SUITE ###
### END SMALL SUITE ###
### BEGIN NIGHTLY SUITE ###
### END VALIDATION SUITE ###
### BEGIN VALIDATION SUITE ###
    # For very long tests that should not be in nightly and that you can use to validate
validationSuite = suites['validation']
validationSuite.addTests(nightlySuite)
### END VALIDATION ###
# Create a test suit that contains all the tests:
allSuite = suites['all']
allSuite.addTests(nightlySuite) # Already contains the smallSuite
validationSuite.addTests(allSuite) # Validation contains all
# Manual list for debugging
#allSuite.addTests(
#KratosUnittest.TestLoader().loadTestsFromTestCases([
#### STANDALONE
#### SMALL
#### NIGTHLY
#### VALIDATION
#])
#)
return suites
if __name__ == '__main__':
KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning cpp unit tests ...")
run_cpp_unit_tests.run()
KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished running cpp unit tests!")
KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning python tests ...")
KratosUnittest.runTests(AssembleTestSuites())
KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished python tests!")
|
spektral/datasets/qm9.py | JonaBecher/spektral | 2,145 | 1838 | import os
import os.path as osp
import numpy as np
from joblib import Parallel, delayed
from tensorflow.keras.utils import get_file
from tqdm import tqdm
from spektral.data import Dataset, Graph
from spektral.utils import label_to_one_hot, sparse
from spektral.utils.io import load_csv, load_sdf
ATOM_TYPES = [1, 6, 7, 8, 9]
BOND_TYPES = [1, 2, 3, 4]
class QM9(Dataset):
"""
The QM9 chemical data set of small molecules.
In this dataset, nodes represent atoms and edges represent chemical bonds.
There are 5 possible atom types (H, C, N, O, F) and 4 bond types (single,
double, triple, aromatic).
Node features represent the chemical properties of each atom and include:
- The atomic number, one-hot encoded;
- The atom's position in the X, Y, and Z dimensions;
- The atomic charge;
- The mass difference from the monoisotope;
The edge features represent the type of chemical bond between two atoms,
one-hot encoded.
    Each graph has a 19-dimensional label for regression.
**Arguments**
- `amount`: int, load this many molecules instead of the full dataset
(useful for debugging).
- `n_jobs`: number of CPU cores to use for reading the data (-1, to use all
available cores).
"""
url = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"
def __init__(self, amount=None, n_jobs=1, **kwargs):
self.amount = amount
self.n_jobs = n_jobs
super().__init__(**kwargs)
def download(self):
get_file(
"qm9.tar.gz",
self.url,
extract=True,
cache_dir=self.path,
cache_subdir=self.path,
)
os.remove(osp.join(self.path, "qm9.tar.gz"))
def read(self):
print("Loading QM9 dataset.")
sdf_file = osp.join(self.path, "gdb9.sdf")
data = load_sdf(sdf_file, amount=self.amount) # Internal SDF format
def read_mol(mol):
x = np.array([atom_to_feature(atom) for atom in mol["atoms"]])
a, e = mol_to_adj(mol)
return x, a, e
data = Parallel(n_jobs=self.n_jobs)(
delayed(read_mol)(mol) for mol in tqdm(data, ncols=80)
)
x_list, a_list, e_list = list(zip(*data))
# Load labels
labels_file = osp.join(self.path, "gdb9.sdf.csv")
labels = load_csv(labels_file)
labels = labels.set_index("mol_id").values
if self.amount is not None:
labels = labels[: self.amount]
return [
Graph(x=x, a=a, e=e, y=y)
for x, a, e, y in zip(x_list, a_list, e_list, labels)
]
def atom_to_feature(atom):
atomic_num = label_to_one_hot(atom["atomic_num"], ATOM_TYPES)
coords = atom["coords"]
charge = atom["charge"]
iso = atom["iso"]
return np.concatenate((atomic_num, coords, [charge, iso]), -1)
def mol_to_adj(mol):
row, col, edge_features = [], [], []
for bond in mol["bonds"]:
start, end = bond["start_atom"], bond["end_atom"]
row += [start, end]
col += [end, start]
edge_features += [bond["type"]] * 2
a, e = sparse.edge_index_to_matrix(
edge_index=np.array((row, col)).T,
edge_weight=np.ones_like(row),
edge_features=label_to_one_hot(edge_features, BOND_TYPES),
)
return a, e
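# A minimal usage sketch; the first instantiation downloads and parses the full
# SDF/CSV files, so `amount` is handy for quick experiments:
#
#   dataset = QM9(amount=100, n_jobs=1)
#   g = dataset[0]
#   print(g.x.shape, g.a.shape, g.e.shape, g.y.shape)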
|
01_basics/01_building_expressions/02_vector_mat_soln.py | johny-c/theano_exercises | 711 | 1866 | <filename>01_basics/01_building_expressions/02_vector_mat_soln.py
import numpy as np
from theano import function
import theano.tensor as T
def make_vector():
"""
Returns a new Theano vector.
"""
return T.vector()
def make_matrix():
"""
Returns a new Theano matrix.
"""
return T.matrix()
def elemwise_mul(a, b):
"""
a: A theano matrix
b: A theano matrix
Returns the elementwise product of a and b
"""
return a * b
def matrix_vector_mul(a, b):
"""
a: A theano matrix
b: A theano vector
Returns the matrix-vector product of a and b
"""
return T.dot(a, b)
if __name__ == "__main__":
a = make_vector()
b = make_vector()
c = elemwise_mul(a, b)
d = make_matrix()
e = matrix_vector_mul(d, c)
f = function([a, b, d], e)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(5).astype(a.dtype)
b_value = rng.rand(5).astype(b.dtype)
c_value = a_value * b_value
d_value = rng.randn(5, 5).astype(d.dtype)
expected = np.dot(d_value, c_value)
actual = f(a_value, b_value, d_value)
assert np.allclose(actual, expected)
    print("SUCCESS!")
|
tf_agents/bandits/agents/examples/v2/trainer.py | howards11/agents | 3,175 | 1868 | <reponame>howards11/agents<filename>tf_agents/bandits/agents/examples/v2/trainer.py
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generic TF-Agents training function for bandits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import dynamic_step_driver
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import policy_saver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
tf = tf.compat.v2
AGENT_CHECKPOINT_NAME = 'agent'
STEP_CHECKPOINT_NAME = 'step'
CHECKPOINT_FILE_PREFIX = 'ckpt'
def get_replay_buffer(data_spec,
batch_size,
steps_per_loop):
"""Return a `TFUniformReplayBuffer` for the given `agent`."""
buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=data_spec,
batch_size=batch_size,
max_length=steps_per_loop)
return buf
def set_expected_shape(experience, num_steps):
def set_time_dim(input_tensor, steps):
tensor_shape = input_tensor.shape.as_list()
tensor_shape[1] = steps
input_tensor.set_shape(tensor_shape)
tf.nest.map_structure(lambda t: set_time_dim(t, num_steps), experience)
def get_training_loop_fn(driver, replay_buffer, agent, steps):
"""Returns a `tf.function` that runs the driver and training loops.
Args:
driver: an instance of `Driver`.
replay_buffer: an instance of `ReplayBuffer`.
agent: an instance of `TFAgent`.
steps: an integer indicating how many driver steps should be
executed and presented to the trainer during each training loop.
"""
def training_loop():
"""Returns a `tf.function` that runs the training loop."""
driver.run()
batch_size = driver.env.batch_size
dataset = replay_buffer.as_dataset(
sample_batch_size=batch_size,
num_steps=steps,
single_deterministic_pass=True)
experience, unused_info = tf.data.experimental.get_single_element(dataset)
set_expected_shape(experience, steps)
loss_info = agent.train(experience)
replay_buffer.clear()
return loss_info
return training_loop
def restore_and_get_checkpoint_manager(root_dir, agent, metrics, step_metric):
"""Restores from `root_dir` and returns a function that writes checkpoints."""
trackable_objects = {metric.name: metric for metric in metrics}
trackable_objects[AGENT_CHECKPOINT_NAME] = agent
trackable_objects[STEP_CHECKPOINT_NAME] = step_metric
checkpoint = tf.train.Checkpoint(**trackable_objects)
checkpoint_manager = tf.train.CheckpointManager(checkpoint=checkpoint,
directory=root_dir,
max_to_keep=5)
latest = checkpoint_manager.latest_checkpoint
if latest is not None:
logging.info('Restoring checkpoint from %s.', latest)
checkpoint.restore(latest)
logging.info('Successfully restored to step %s.', step_metric.result())
else:
logging.info('Did not find a pre-existing checkpoint. '
'Starting from scratch.')
return checkpoint_manager
def train(root_dir,
agent,
environment,
training_loops,
steps_per_loop,
additional_metrics=(),
training_data_spec_transformation_fn=None):
"""Perform `training_loops` iterations of training.
Checkpoint results.
If one or more baseline_reward_fns are provided, the regret is computed
  against each one of them. Here is an example baseline_reward_fn:
def baseline_reward_fn(observation, per_action_reward_fns):
rewards = ... # compute reward for each arm
optimal_action_reward = ... # take the maximum reward
return optimal_action_reward
Args:
root_dir: path to the directory where checkpoints and metrics will be
written.
agent: an instance of `TFAgent`.
environment: an instance of `TFEnvironment`.
training_loops: an integer indicating how many training loops should be run.
steps_per_loop: an integer indicating how many driver steps should be
executed and presented to the trainer during each training loop.
additional_metrics: Tuple of metric objects to log, in addition to default
metrics `NumberOfEpisodes`, `AverageReturnMetric`, and
`AverageEpisodeLengthMetric`.
training_data_spec_transformation_fn: Optional function that transforms the
data items before they get to the replay buffer.
"""
# TODO(b/127641485): create evaluation loop with configurable metrics.
if training_data_spec_transformation_fn is None:
data_spec = agent.policy.trajectory_spec
else:
data_spec = training_data_spec_transformation_fn(
agent.policy.trajectory_spec)
replay_buffer = get_replay_buffer(data_spec, environment.batch_size,
steps_per_loop)
# `step_metric` records the number of individual rounds of bandit interaction;
# that is, (number of trajectories) * batch_size.
step_metric = tf_metrics.EnvironmentSteps()
metrics = [
tf_metrics.NumberOfEpisodes(),
tf_metrics.AverageEpisodeLengthMetric(batch_size=environment.batch_size)
] + list(additional_metrics)
if isinstance(environment.reward_spec(), dict):
metrics += [tf_metrics.AverageReturnMultiMetric(
reward_spec=environment.reward_spec(),
batch_size=environment.batch_size)]
else:
metrics += [
tf_metrics.AverageReturnMetric(batch_size=environment.batch_size)]
if training_data_spec_transformation_fn is not None:
add_batch_fn = lambda data: replay_buffer.add_batch( # pylint: disable=g-long-lambda
training_data_spec_transformation_fn(data))
else:
add_batch_fn = replay_buffer.add_batch
observers = [add_batch_fn, step_metric] + metrics
driver = dynamic_step_driver.DynamicStepDriver(
env=environment,
policy=agent.collect_policy,
num_steps=steps_per_loop * environment.batch_size,
observers=observers)
training_loop = get_training_loop_fn(
driver, replay_buffer, agent, steps_per_loop)
checkpoint_manager = restore_and_get_checkpoint_manager(
root_dir, agent, metrics, step_metric)
train_step_counter = tf.compat.v1.train.get_or_create_global_step()
saver = policy_saver.PolicySaver(agent.policy, train_step=train_step_counter)
summary_writer = tf.summary.create_file_writer(root_dir)
summary_writer.set_as_default()
for i in range(training_loops):
training_loop()
metric_utils.log_metrics(metrics)
for metric in metrics:
metric.tf_summaries(train_step=step_metric.result())
checkpoint_manager.save()
if i % 100 == 0:
saver.save(os.path.join(root_dir, 'policy_%d' % step_metric.result()))
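# A minimal invocation sketch; `agent` and `environment` are assumed to be
# pre-built TF-Agents bandit objects and are not constructed here:
#
#   train(root_dir='/tmp/bandit_training',
#         agent=agent,
#         environment=environment,
#         training_loops=100,
#         steps_per_loop=2)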
|
SLHCUpgradeSimulations/Configuration/python/aging.py | ckamtsikis/cmssw | 852 | 1924 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# handle normal mixing or premixing
def getHcalDigitizer(process):
if hasattr(process,'mixData'):
return process.mixData
if hasattr(process,'mix') and hasattr(process.mix,'digitizers') and hasattr(process.mix.digitizers,'hcal'):
return process.mix.digitizers.hcal
return None
def getHGCalDigitizer(process,section):
if hasattr(process,'mix') and hasattr(process.mix,'digitizers'):
if section == 'EE' and hasattr(process.mix.digitizers,'hgceeDigitizer'):
return process.mix.digitizers.hgceeDigitizer
elif section == 'FH' and hasattr(process.mix.digitizers,'hgchefrontDigitizer'):
return process.mix.digitizers.hgchefrontDigitizer
elif section == 'BH' and hasattr(process.mix.digitizers,'hgchebackDigitizer'):
return process.mix.digitizers.hgchebackDigitizer
elif section == 'HFNose' and hasattr(process.mix.digitizers,'hfnoseDigitizer'):
return process.mix.digitizers.hfnoseDigitizer
return None
# change assumptions about lumi rate
def setScenarioHLLHC(module,scenarioHLLHC):
if scenarioHLLHC=="nominal":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_nominal
module.years = _years_LHC + _years_HLLHC_nominal
elif scenarioHLLHC=="ultimate":
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import _years_LHC, _years_HLLHC_ultimate
module.years = _years_LHC + _years_HLLHC_ultimate
return module
# turnon = True enables default, False disables
# recalibration and darkening always together
def ageHB(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HBDarkeningEP
process.HBDarkeningEP = HBDarkeningEP
process.HBDarkeningEP = setScenarioHLLHC(process.HBDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HBDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HBRecalibration = cms.bool(turnon)
return process
def ageHE(process,turnon,scenarioHLLHC):
if turnon:
from CalibCalorimetry.HcalPlugins.HBHEDarkening_cff import HEDarkeningEP
process.HEDarkeningEP = HEDarkeningEP
process.HEDarkeningEP = setScenarioHLLHC(process.HEDarkeningEP,scenarioHLLHC)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HEDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HERecalibration = cms.bool(turnon)
return process
def ageHF(process,turnon):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.bool(turnon)
if hasattr(process,'es_hardcode'):
process.es_hardcode.HFRecalibration = cms.bool(turnon)
return process
def agedHFNose(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HFNose_setEndOfLifeNoise
process = HFNose_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def agedHGCal(process,algo=0):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setEndOfLifeNoise
process = HGCal_setEndOfLifeNoise(process,byDose=True,byDoseAlgo=algo)
return process
def realisticHGCalStartup(process):
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCal_setRealisticStartupNoise
process = HGCal_setRealisticStartupNoise(process)
return process
# needs lumi to set proper ZS thresholds (tbd)
def ageSiPM(process,turnon,lumi):
process.es_hardcode.hbUpgrade.doRadiationDamage = turnon
process.es_hardcode.heUpgrade.doRadiationDamage = turnon
# todo: determine ZS threshold adjustments
# adjust PF thresholds for increased noise
# based on: https://baylor.box.com/s/w32ja75krcbxcycyifexu28dwlgrj7wg
hcal_lumis = [300, 1000, 3000, 4500, 1e10]
hcal_thresholds = {
300: {
"seed": [0.5, 0.625, 0.75, 0.75],
"rec": [0.4, 0.5, 0.6, 0.6],
},
1000: {
"seed": [1.0, 1.5, 1.5, 1.5],
"rec": [0.8, 1.2, 1.2, 1.2],
},
3000: {
"seed": [1.25, 2.5, 2.5, 2.5],
"rec": [1.0, 2.0, 2.0, 2.0],
},
4500: {
"seed": [1.5, 3.0, 3.0, 3.0],
"rec": [1.25, 2.5, 2.5, 2.5],
},
}
ctmodules = ['calotowermaker','caloTowerForTrk','caloTowerForTrkPreSplitting','towerMaker','towerMakerWithHO']
for ilumi, hcal_lumi in enumerate(hcal_lumis[:-1]):
if lumi >= hcal_lumi and lumi < hcal_lumis[ilumi+1]:
if hasattr(process,'particleFlowClusterHBHE'):
process.particleFlowClusterHBHE.seedFinder.thresholdsByDetector[0].seedingThreshold = hcal_thresholds[hcal_lumi]["seed"]
process.particleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
process.particleFlowClusterHBHE.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowClusterHCAL'):
process.particleFlowClusterHCAL.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator = hcal_thresholds[hcal_lumi]["rec"]
if hasattr(process,'particleFlowRecHitHBHE'):
process.particleFlowRecHitHBHE.producers[0].qualityTests[0].cuts[0].threshold = hcal_thresholds[hcal_lumi]["rec"]
for ctmod in ctmodules:
if hasattr(process,ctmod):
getattr(process,ctmod).HBThreshold1 = hcal_thresholds[hcal_lumi]["rec"][0]
getattr(process,ctmod).HBThreshold2 = hcal_thresholds[hcal_lumi]["rec"][1]
getattr(process,ctmod).HBThreshold = hcal_thresholds[hcal_lumi]["rec"][-1]
break
return process
def ageHcal(process,lumi,instLumi,scenarioHLLHC):
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.DelivLuminosity = cms.double(float(lumi)) # integrated lumi in fb-1
# these lines need to be further activated by turning on 'complete' aging for HF
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.InstLuminosity = cms.double(float(instLumi))
process.g4SimHits.HCalSD.DelivLuminosity = cms.double(float(lumi))
# recalibration and darkening always together
if hasattr(process,'es_hardcode'):
process.es_hardcode.iLumi = cms.double(float(lumi))
# functions to enable individual subdet aging
process = ageHB(process,True,scenarioHLLHC)
process = ageHE(process,True,scenarioHLLHC)
process = ageHF(process,True)
process = ageSiPM(process,True,lumi)
return process
def turn_on_HB_aging(process):
process = ageHB(process,True,"")
return process
def turn_off_HB_aging(process):
process = ageHB(process,False,"")
return process
def turn_on_HE_aging(process):
process = ageHE(process,True,"")
return process
def turn_off_HE_aging(process):
process = ageHE(process,False,"")
return process
def turn_on_HF_aging(process):
process = ageHF(process,True)
return process
def turn_off_HF_aging(process):
process = ageHF(process,False)
return process
def turn_off_SiPM_aging(process):
process = ageSiPM(process,False,0.0)
return process
def hf_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.HCalSD.HFDarkening = cms.untracked.bool(True)
hcaldigi = getHcalDigitizer(process)
if hcaldigi is not None: hcaldigi.HFDarkening = cms.untracked.bool(False)
return process
def ageEcal(process,lumi,instLumi):
if hasattr(process,'g4SimHits'):
        # these lines need to be further activated by turning on 'complete' aging for ecal
process.g4SimHits.ECalSD.InstLuminosity = cms.double(instLumi)
process.g4SimHits.ECalSD.DelivLuminosity = cms.double(float(lumi))
# available conditions
ecal_lumis = [300,1000,3000,4500]
ecal_conditions = [
['EcalIntercalibConstantsRcd','EcalIntercalibConstants_TL{:d}_upgrade_8deg_v2_mc'],
['EcalIntercalibConstantsMCRcd','EcalIntercalibConstantsMC_TL{:d}_upgrade_8deg_v2_mc'],
['EcalLaserAPDPNRatiosRcd','EcalLaserAPDPNRatios_TL{:d}_upgrade_8deg_mc'],
['EcalPedestalsRcd','EcalPedestals_TL{:d}_upgradeTIA_8deg_mc'],
['EcalTPGLinearizationConstRcd','EcalTPGLinearizationConst_TL{:d}_upgrade_8deg_mc'],
]
# update PF thresholds, based on https://indico.cern.ch/event/653123/contributions/2659235/attachments/1491385/2318364/170711_upsg_ledovskoy.pdf
ecal_thresholds = {
300 : 0.103,
1000 : 0.175,
3000 : 0.435,
4500 : 0.707,
}
ecal_seed_multiplier = 2.5
# try to get conditions
if int(lumi) in ecal_lumis:
if not hasattr(process.GlobalTag,'toGet'):
process.GlobalTag.toGet=cms.VPSet()
for ecal_condition in ecal_conditions:
process.GlobalTag.toGet.append(cms.PSet(
record = cms.string(ecal_condition[0]),
tag = cms.string(ecal_condition[1].format(int(lumi))),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
)
)
if hasattr(process,"particleFlowClusterECALUncorrected"):
_seeds = process.particleFlowClusterECALUncorrected.seedFinder.thresholdsByDetector
for iseed in range(0,len(_seeds)):
if _seeds[iseed].detector.value()=="ECAL_BARREL":
_seeds[iseed].seedingThreshold = cms.double(ecal_thresholds[int(lumi)]*ecal_seed_multiplier)
_clusters = process.particleFlowClusterECALUncorrected.initialClusteringStep.thresholdsByDetector
for icluster in range(0,len(_clusters)):
if _clusters[icluster].detector.value()=="ECAL_BARREL":
_clusters[icluster].gatheringThreshold = cms.double(ecal_thresholds[int(lumi)])
return process
def ecal_complete_aging(process):
if hasattr(process,'g4SimHits'):
process.g4SimHits.ECalSD.AgeingWithSlopeLY = cms.untracked.bool(True)
if hasattr(process,'ecal_digi_parameters'):
process.ecal_digi_parameters.UseLCcorrection = cms.untracked.bool(False)
return process
def customise_aging_300(process):
process=ageHcal(process,300,5.0e34,"nominal")
process=ageEcal(process,300,5.0e34)
return process
def customise_aging_1000(process):
process=ageHcal(process,1000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,1000,5.0e34)
return process
def customise_aging_3000(process):
process=ageHcal(process,3000,5.0e34,"nominal")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,5.0e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_3000_ultimate(process):
process=ageHcal(process,3000,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,3000,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
def customise_aging_4500_ultimate(process):
process=ageHcal(process,4500,7.5e34,"ultimate")
process=turn_off_HE_aging(process) #avoid conflict between HGCal and Hcal in phase2 geom configuration
process=ageEcal(process,4500,7.5e34)
process=agedHGCal(process)
process=agedHFNose(process)
return process
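# A minimal usage sketch: these customise functions are normally attached to a
# cmsDriver.py workflow via the --customise flag, or applied directly to a
# process object in a configuration fragment (the import path below mirrors this
# file's location in the release):
#
#   from SLHCUpgradeSimulations.Configuration.aging import customise_aging_3000
#   process = customise_aging_3000(process)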
|
tests/python/correctness/simple_test_aux_index.py | dubey/weaver | 163 | 1937 | <filename>tests/python/correctness/simple_test_aux_index.py
#! /usr/bin/env python
#
# ===============================================================
# Description: Sanity check for fresh install.
#
# Created: 2014-08-12 16:42:52
#
# Author: <NAME>, <EMAIL>
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
# for licensing agreement
# ===============================================================
#
import sys
try:
import weaver.client as client
except ImportError:
import client
config_file=''
if len(sys.argv) > 1:
config_file = sys.argv[1]
# create client object
c = client.Client('172.16.17.32', 2002, config_file)
# check aux index
assert c.aux_index()
# 1. create node for user ayush
c.begin_tx()
c.create_node('ayush')
c.set_node_properties({'type': 'user', 'age': '25'}, 'ayush')
c.end_tx()
# 2. create node for user egs
c.begin_tx()
c.create_node('egs')
c.set_node_property('type', 'user', 'egs')
c.end_tx()
# 3. ayush follows egs
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
c.set_edge_property(edge='e1', key='type', value='follows')
c.create_edge('egs', 'ayush', 'e2')
c.set_edge_property(edge='e2', key='type', value='followed_by')
c.end_tx()
# 4. add a post and restrict visibility to followers only
c.begin_tx()
c.create_node('post')
c.set_node_property('type', 'post', 'post')
c.set_node_property('visibility', 'followers', 'post')
e3 = c.create_edge('egs', 'post')
c.set_edge_property(edge=e3, key='type', value='posted')
c.end_tx()
# 5. 'like' the post
c.begin_tx()
e4 = c.create_edge('post', 'ayush')
c.set_edge_property(edge=e4, key='type', value='liked_by')
c.end_tx()
# 6. list all the people who like egs's post
return_nodes = c.traverse('egs', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'
# 7. try to create node with same handle as before
c.begin_tx()
c.create_node('ayush')
try:
c.end_tx()
assert False, 'create node passed'
except client.WeaverError:
pass
# 8. try to create edge with same handle as before
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
try:
c.end_tx()
assert False, 'create edge passed'
except client.WeaverError:
pass
# 9. add auxiliary handles to nodes
c.begin_tx()
c.add_alias('ad688', 'ayush')
c.add_alias('el33th4x0r', 'egs')
c.end_tx()
# 10. list all the people who like egs's post
# this time with aliases instead of handles
return_nodes = c.traverse('el33th4x0r', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'
# 11. get node and check it is valid
ad = c.get_node('ayush')
assert 'ad688' in ad.aliases
assert 'type' in ad.properties
assert 'user' in ad.properties['type']
assert 'age' in ad.properties
assert '25' in ad.properties['age']
assert 'e1' in ad.out_edges
print('Correctly executed 11 transactions of varying complexity, pass simple_test.')
print('Success, you have a working Weaver setup!')
|
tests/test-scripts/threadpools.py | whalesalad/filprofiler | 521 | 1958 | <gh_stars>100-1000
"""Validate that number of threads in thread pools is set to 1."""
import numexpr
import blosc
import threadpoolctl
# APIs that return previous number of threads:
assert numexpr.set_num_threads(2) == 1
assert blosc.set_nthreads(2) == 1
for d in threadpoolctl.threadpool_info():
assert d["num_threads"] == 1, d
|
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 2,728 | 1970 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class Permission(msrest.serialization.Model):
"""Role definition permissions.
:param actions: Allowed actions.
:type actions: list[str]
:param not_actions: Denied actions.
:type not_actions: list[str]
:param data_actions: Allowed Data actions.
:type data_actions: list[str]
:param not_data_actions: Denied Data actions.
:type not_data_actions: list[str]
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[str]'},
'not_actions': {'key': 'notActions', 'type': '[str]'},
'data_actions': {'key': 'dataActions', 'type': '[str]'},
'not_data_actions': {'key': 'notDataActions', 'type': '[str]'},
}
def __init__(
self,
*,
actions: Optional[List[str]] = None,
not_actions: Optional[List[str]] = None,
data_actions: Optional[List[str]] = None,
not_data_actions: Optional[List[str]] = None,
**kwargs
):
super(Permission, self).__init__(**kwargs)
self.actions = actions
self.not_actions = not_actions
self.data_actions = data_actions
self.not_data_actions = not_data_actions
class PermissionGetResult(msrest.serialization.Model):
"""Permissions information.
:param value: An array of permissions.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Permission]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Permission"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PermissionGetResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ProviderOperation(msrest.serialization.Model):
"""Operation.
:param name: The operation name.
:type name: str
:param display_name: The operation display name.
:type display_name: str
:param description: The operation description.
:type description: str
:param origin: The operation origin.
:type origin: str
:param properties: The operation properties.
:type properties: any
:param is_data_action: The dataAction flag to specify the operation type.
:type is_data_action: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
origin: Optional[str] = None,
properties: Optional[Any] = None,
is_data_action: Optional[bool] = None,
**kwargs
):
super(ProviderOperation, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.description = description
self.origin = origin
self.properties = properties
self.is_data_action = is_data_action
class ProviderOperationsMetadata(msrest.serialization.Model):
"""Provider Operations metadata.
:param id: The provider id.
:type id: str
:param name: The provider name.
:type name: str
:param type: The provider type.
:type type: str
:param display_name: The provider display name.
:type display_name: str
:param resource_types: The provider resource types.
:type resource_types: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ResourceType]
:param operations: The provider operations.
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'resource_types': {'key': 'resourceTypes', 'type': '[ResourceType]'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
name: Optional[str] = None,
type: Optional[str] = None,
display_name: Optional[str] = None,
resource_types: Optional[List["ResourceType"]] = None,
operations: Optional[List["ProviderOperation"]] = None,
**kwargs
):
super(ProviderOperationsMetadata, self).__init__(**kwargs)
self.id = id
self.name = name
self.type = type
self.display_name = display_name
self.resource_types = resource_types
self.operations = operations
class ProviderOperationsMetadataListResult(msrest.serialization.Model):
"""Provider operations metadata list.
:param value: The list of providers.
:type value:
list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperationsMetadata]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ProviderOperationsMetadata]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ProviderOperationsMetadata"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ProviderOperationsMetadataListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceType(msrest.serialization.Model):
"""Resource Type.
:param name: The resource type name.
:type name: str
:param display_name: The resource type display name.
:type display_name: str
:param operations: The resource type operations.
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
operations: Optional[List["ProviderOperation"]] = None,
**kwargs
):
super(ResourceType, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.operations = operations
class RoleAssignment(msrest.serialization.Model):
"""Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment ID.
:vartype id: str
:ivar name: The role assignment name.
:vartype name: str
:ivar type: The role assignment type.
:vartype type: str
:param scope: The role assignment scope.
:type scope: str
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignment, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.can_delegate = can_delegate
class RoleAssignmentCreateParameters(msrest.serialization.Model):
"""Role assignment create parameters.
All required parameters must be populated in order to send to Azure.
:param role_definition_id: Required. The role definition ID used in the role assignment.
:type role_definition_id: str
:param principal_id: Required. The principal ID assigned to the role. This maps to the ID
inside the Active Directory. It can point to a user, service principal, or security group.
:type principal_id: str
:param can_delegate: The delegation flag used for creating a role assignment.
:type can_delegate: bool
"""
_validation = {
'role_definition_id': {'required': True},
'principal_id': {'required': True},
}
_attribute_map = {
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
role_definition_id: str,
principal_id: str,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignmentCreateParameters, self).__init__(**kwargs)
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.can_delegate = can_delegate
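# A minimal construction sketch; the identifiers below are placeholders rather
# than real subscription, role-definition, or principal IDs:
#
#   params = RoleAssignmentCreateParameters(
#       role_definition_id=(
#           "/subscriptions/<subscription-id>/providers/"
#           "Microsoft.Authorization/roleDefinitions/<role-definition-id>"
#       ),
#       principal_id="<principal-object-id>",
#       can_delegate=False,
#   )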
class RoleAssignmentFilter(msrest.serialization.Model):
"""Role Assignments filter.
:param principal_id: Returns role assignment of the specific principal.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'can_delegate': {'key': 'canDelegate', 'type': 'bool'},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
can_delegate: Optional[bool] = None,
**kwargs
):
super(RoleAssignmentFilter, self).__init__(**kwargs)
self.principal_id = principal_id
self.can_delegate = can_delegate
class RoleAssignmentListResult(msrest.serialization.Model):
"""Role assignment list operation result.
:param value: Role assignment list.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleAssignment]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RoleAssignment"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(RoleAssignmentListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleDefinition(msrest.serialization.Model):
"""Role definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role definition ID.
:vartype id: str
:ivar name: The role definition name.
:vartype name: str
:ivar type: The role definition type.
:vartype type: str
:param role_name: The role name.
:type role_name: str
:param description: The role definition description.
:type description: str
:param role_type: The role type.
:type role_type: str
:param permissions: Role definition permissions.
:type permissions: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission]
:param assignable_scopes: Role definition assignable scopes.
:type assignable_scopes: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'role_name': {'key': 'properties.roleName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'role_type': {'key': 'properties.type', 'type': 'str'},
'permissions': {'key': 'properties.permissions', 'type': '[Permission]'},
'assignable_scopes': {'key': 'properties.assignableScopes', 'type': '[str]'},
}
def __init__(
self,
*,
role_name: Optional[str] = None,
description: Optional[str] = None,
role_type: Optional[str] = None,
permissions: Optional[List["Permission"]] = None,
assignable_scopes: Optional[List[str]] = None,
**kwargs
):
super(RoleDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.role_name = role_name
self.description = description
self.role_type = role_type
self.permissions = permissions
self.assignable_scopes = assignable_scopes
class RoleDefinitionFilter(msrest.serialization.Model):
"""Role Definitions filter.
:param role_name: Returns role definition with the specific name.
:type role_name: str
:param type: Returns role definition with the specific type.
:type type: str
"""
_attribute_map = {
'role_name': {'key': 'roleName', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
role_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(RoleDefinitionFilter, self).__init__(**kwargs)
self.role_name = role_name
self.type = type
class RoleDefinitionListResult(msrest.serialization.Model):
"""Role definition list operation result.
:param value: Role definition list.
:type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleDefinition]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RoleDefinition"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(RoleDefinitionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
|
netesto/local/psPlot.py | fakeNetflix/facebook-repo-fbkutils | 346 | 1973 | #!/usr/bin/env python2
import sys
import random
import os.path
import shutil
import commands
import types
import math
#gsPath = '/usr/local/bin/gs'
gsPath = 'gs'
logFile = '/dev/null'
#logFile = 'plot.log'
#--- class PsPlot(fname, pageHeader, pageSubHeader, plotsPerPage)
#
class PsPlot(object):
def __init__(self, fname, pageHeader, pageSubHeader, plotsPerPage):
self.foutPath = os.path.dirname(fname)+'/'
if self.foutPath == '/':
self.foutPath = ''
self.foutName = os.path.basename(fname)
self.fname = fname+'.ps'
self.pageHeader = pageHeader
self.pageSubHeader = pageSubHeader
self.plotsPerPage = plotsPerPage
self.yfix1 = ''
self.yfix2 = ''
self.xGrid = 1
self.yGrid = 1
self.xUniform = False
self.xLen = 6.5 #inches
self.seriesTitle = ' '
self.x0 = 0
self.xInc = 0
self.xCount = 0
self.xList = []
self.xDict = {}
self.y1Inc = 0
self.y1Count = 0
self.y1LogScale = 0
self.y2Inc = 0
self.y2Count = 0
self.y2LogScale = 0
self.xOffset = 0
self.colors = [ (0.7,0.7,0.7), (0,0,0.8), (0.8,0,0),
(0.42,0.55,0.14), (0.6,0.5,0.3), (0.6,0.2,0.8),
(0,0.8,0),
(0.4,0.3,0.5), (0.5,0.5,0.5), (0.8,0.0,0.0), (0,0,0) ]
self.colorsN = 11
self.colorRed = (0.8,0,0)
self.colorGreen = (0,0.8,0)
self.colorBlue = (0,0,0.8)
self.colorAqua = (0,0.5,0.5)
self.colorWhite = (1,1,1)
self.ColorBlack = (0,0,0)
self.xSize = 1800
self.ySize = 900
shutil.copy('plot-header.ps', self.fname)
self.fout = open(self.fname, 'a')
self.flog = open(logFile, 'a')
# self.flog = open('./psPlot.out', 'a')
if plotsPerPage == 4:
print >>self.fout, '/doGraph { graph4v } def'
print >>self.fout, '/nextGraph { nextGraph4v } def'
elif plotsPerPage == 3:
print >>self.fout, '/doGraph { graph3v } def'
print >>self.fout, '/nextGraph { nextGraph3v } def'
elif plotsPerPage == 2:
print >>self.fout, '/doGraph { graph2v } def'
print >>self.fout, '/nextGraph { nextGraph2v } def'
else:
print >>self.fout, '/doGraph { graph1v } def'
print >>self.fout, '/nextGraph { nextGraph1v } def'
print >>self.fout, '/showpage {\n 40 742 moveto'
print >>self.fout, '/Helvetica findfont 12 scalefont setfont'
if self.pageHeader != '':
print >>self.fout, '(',self.pageHeader,') show'
if self.pageSubHeader != '':
print >>self.fout, '40 726 moveto\n (',self.pageSubHeader,') show'
print >>self.fout, 'showpage\n} bind def'
print >>self.fout, 'doGraph'
#--- End()
#
def End(self):
print >>self.fout, '\nshowpage\nend'
self.fout.close()
#--- GetInc(vMin, vMax)
def GetInc(self,vMin, vMax):
ff = 1.0
while vMax <= 1 and vMax > 0:
ff *= 0.10
vMin *= 10
vMax *= 10
v0 = int(vMin)
v1 = int(vMax+0.99)
f = 1
w = v1 - v0
if w == 0:
v1 = v0 + 1
w = 1
while w/f >= 100:
f *= 10
# w = int(w/f)
v0 = int(v0/f)
v1 = int(v1/f)
if (vMin % f) != 0 and vMax == v1:
v1 += 1
w = v1 - v0
if w <= 10:
vInc = 1
elif w <= 20:
vInc = 2
else:
m = 10
while w/m > 100:
m *= 10
if (v0 >= 0) and (v0 % m) != 0:
v0 = int(v0 / m) * m
if (v1 % m) != 0:
v1 = int(v1 / m) * m + m
w = v1 - v0
if w <= 5*m:
vInc = m/2
else:
vInc = m
else:
vInc = m
# if (vMax/f)%vInc != 0 or v1 % vInc != 0:
if v1 % vInc != 0:
v1 = int(v1/vInc)*vInc + vInc
if (v0 % vInc) != 0:
v0 = int(v0/vInc)*vInc
v0 += vInc
v0 *= (f*ff)
v1 *= (f*ff)
vInc *= (f*ff)
return v0, v1, vInc
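    # Hand-traced illustration (added for clarity, not in the original code):
    # for a data range of 0..100000, GetInc(0, 100000) works out to roughly
    # (0, 100000, 10000), i.e. an axis running 0..100000 with ticks every 10000.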
#--- ValueConvert(v)
#
def ValueConvert(self, v, inc):
if inc > 0:
logInc = int(math.log10(v/inc))
d = math.pow(10,logInc)
if d == 0:
d = 10.0
else:
d = 10.0
if d == 1 and float(v)/inc > 1.0:
d = 10.0
if v >= 1000000000 and inc > 1:
s = int(v/(1000000000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'G'
elif v >= 1000000 and inc > 1:
s = int(v/(1000000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'M'
elif v >= 1000 and inc > 1:
s = int(v/(1000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'K'
elif v >= 1:
s = int(v*d)/d
if s*d == int(s)*d:
s = int(s)
r = str(s)
else:
r = str(int(v*100)/100.0)
return r
#--- GetAxis(vBeg, vEnd, vInc, logFlag)
#
def GetAxis(self, vBeg, vEnd, vInc, logFlag):
fix = '{ 0 add }'
if isinstance(vBeg,list):
vList = vBeg
vList.append(' ')
self.xUniform = True
v0 = 1
v1 = len(vList)
vi = 1
fix = '{ '+str(v0-vi)+' sub '+str(vi)+' div }'
logFlag = 0
else:
if vInc == 0:
v0,v1,vi = self.GetInc(vBeg,vEnd)
else:
v0 = vBeg
v1 = vEnd
vi = vInc
if vBeg > 0 and (logFlag==1 or (logFlag==0 and (vEnd/vBeg > 100))):
v0 = vBeg
v1 = vEnd
logFlag = 1
v0Log = math.log10(v0)
t = math.ceil(v0Log)
ff = math.modf(v0Log)
if math.fabs(ff[0]) < math.fabs(v0Log)/1000 and t < 0:
t += 1
logOffset = 0
while t < 1:
logOffset += 1
t += 1
v0 = math.pow(10,math.floor(v0Log)+1)
v1 = math.pow(10,math.ceil(math.log10(v1)))
vi = 1
vList = []
v = v0
while v <= v1:
vList.append(self.ValueConvert(v,0))
v *= 10
if v0 > 1:
logOffset -= (math.log10(v0) - 1)
                # subtract 1 from above inside parent?
fix = '{ dup 0 eq { } { log '+str(logOffset)+' add } ifelse }'
else:
logFlag = 0
v = v0
vList = []
n = 0
while True:
vList.append(self.ValueConvert(v,vi))
if v > vEnd:
break
n += 1
v = v0 + n*vi
fix = '{ '+str(v0-vi)+' sub '+str(vi)+' div }'
print >>self.flog, 'v0:',v0,' vi:',vi,' v1:',v1,' (',vEnd,')'
print >>self.flog, 'vList: ', vList
print >>self.flog, 'logFlag: ', logFlag, ' fix: ', fix
return v0,v1,vi,vList,fix,logFlag
#--- SetXLen(xlen)
def SetXLen(self, xlen):
self.xLen = xlen
print >>self.fout, '/xAxisLen %.2f def' % self.xLen
print >>self.fout, 'doGraph'
return
#--- SetXSize(xsize)
def SetXSize(self, xsize):
self.xSize = xsize
return
#--- SetYSize(ysize)
def SetYSize(self, ysize):
self.ySize = ysize
return
#--- SetPlotBgLevel(level)
#
def SetPlotBgLevel(self,level):
print >>self.fout, '/plotBgLevel ', level, 'def\n'
return
#--- SetPlotPercentDir(value)
def SetPlotPercentDir(self,value):
if value == 'Vertical':
print >>self.fout, '/plotNumPercentDir 1 def\n'
else:
print >>self.fout, '/plotNumPercentDir 0 def\n'
return
#--- SetPlotYLogScale(axis,value)
#
def SetPlotYLogScale(self,axis,value):
if value == 'Off':
v = -1
elif value == 'On':
v = 1
else:
v = 0;
if axis == 1:
self.y1LogScale = v
else:
self.y2LogScale = v
return
#--- SetPlot(xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title)
#
def SetPlot(self,xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title):
print >>self.fout, '\n\nnextGraph\n1 setlinewidth\n'
(x0,x1,xi,xList,fix,logFlag) = self.GetAxis(xbeg,xend,xinc,0)
self.x0 = x0
self.xInc = xi
self.xCount = len(xList)
self.xList = xList
self.xDict = {}
k = 1
for x in xList:
self.xDict[x] = k
k=k+1
print >>self.fout, '/xfix ', fix, ' def\n'
(y0,y1,yi,yList,fix,logFlag) = self.GetAxis(ybeg,yend,yinc,
self.y1LogScale)
self.y1Inc = yi
self.y1Count = len(yList)
self.yfix1 = '/yfix '+fix+' def\n /yinc yinc1 def'
print >>self.fout, self.yfix1
print >>self.fout, '[ '
for x in xList:
self.fout.write('('+str(x)+') ')
self.fout.write(' ]\n[ ')
for y in yList:
self.fout.write('('+str(y)+') ')
print >>self.fout, ' ]'
print >>self.fout, '('+xtitle+')\n('+ytitle+')\naxes\n'
print >>self.fout, self.xGrid, self.yGrid, ' grid\n'
print >>self.fout, '/ymtitle ypos ylen add 10 add def\n'
# Multiple lines in title are separated by '|'
print >>self.flog, 'Main Title: '+title
titleLines = title.split('|')
for t in titleLines:
if len(t) > 0:
print >>self.flog, ' '+t
print >>self.fout, '('+t+')\n'
print >>self.fout, 'Mtitles\n'
# print >>self.fout, '('+title+')\nMtitles\n'
if logFlag == 1:
print >>self.fout, 'beginFunction\n'
for ys in yList:
factor = 1
if ys[-1:] == 'K':
yss = ys[:-1]
factor = 1000
elif ys[-1:] == 'M':
yss = ys[:-1]
factor = 1000000
else:
yss = ys
y = float(yss)*factor/10.0
k = 2
while k < 10:
print >>self.fout, 0, k*y
k += 1
print >>self.fout, 'endFunction\n'
print >>self.fout, '19 { 0 0 0 setrgbcolor } plotSymbolsC\n'
return y1
#--- SetPlot2(xbeg,xend,xinc,ybeg,yend,yinc,zbeg,zend,zinc,
# xtitle,ytitle,ztitle,title)
#
def SetPlot2(self,xbeg,xend,xinc,ybeg,yend,yinc,zbeg,zend,zinc,
xtitle,ytitle,ztitle,title):
rv = self.SetPlot(xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title)
(z0,z1,zi,zList,fix,logFlag) = self.GetAxis(zbeg,zend,zinc,self.y2LogScale)
self.y2Inc = zi
self.y2Count = len(zList)
print >>self.fout, '/Flag2Yaxes 1 def'
self.yfix2 = '/yfix '+fix+' def\n/yinc yinc2 def'
print >>self.fout, 'axpos axlen add aypos aylen'
self.fout.write('[ ')
for z in zList:
self.fout.write('('+str(z)+') ')
self.fout.write(' ]')
if ztitle != '':
print >>self.fout, '('+ztitle+') vaxis2'
if logFlag == 1:
print >>self.fout, self.yfix2
print >>self.fout, 'beginFunction\n'
for zs in zList:
factor = 1
if zs[-1:] == 'K':
zss = zs[:-1]
factor = 1000
elif zs[-1:] == 'M':
zss = zs[:-1]
factor = 1000000
else:
zss = zs
y = float(zss)*factor/10.0
k = 2
while k < 10:
print >>self.fout, self.xCount, k*y
k += 1
print >>self.fout, 'endFunction\n'
print >>self.fout, '18 { 0.72 0.52 0.5 setrgbcolor } plotSymbolsC\n'
return rv
#--- SetColor(color)
#
def SetColor(self, color):
rv = ' { '+str(color[0])+' '+str(color[1])+' '+str(color[2])+ \
' setrgbcolor } '
return rv
#--- GetColorIndx(indx)
#
def GetColorIndx(self, indx):
color = self.colors[indx % self.colorsN]
rv = ' { '+str(color[0])+' '+str(color[1])+' '+str(color[2])+ \
' setrgbcolor } '
return rv
#--- SetColorIndx(indx, r, g, b)
#
    def SetColorIndx(self, indx, r, g, b):
        # color entries are tuples, so replace the whole entry rather than
        # assigning to its elements; return the setrgbcolor string in the
        # same style as GetColorIndx
        self.colors[indx % self.colorsN] = (r, g, b)
        return self.GetColorIndx(indx)
#--- outputPS(string)
#
def outputPS(self, s):
print >>self.fout, s
#--- SeriesNames(names)
#
def SeriesNames(self, names):
indx = len(names) - 1
if indx == 0:
return
print >>self.fout, '('+self.seriesTitle+')'
while indx >= 0:
if names[indx] != None:
print >>self.fout, '('+names[indx]+') '
print >>self.fout, self.SetColor(self.colors[indx % self.colorsN])
indx -= 1
print >>self.fout, 'fdescriptionsC'
#--- PlotVBars(xList, type)
#
def PlotVBars(self, xList, type):
flog = self.flog
print >>self.fout, self.yfix1
print >>self.fout, 'beginFunction\n'
endFun = 'endFunction\n'
indx = 0
for x in xList:
if x == ' ' and indx == len(xList)-1:
continue
indx += 1
print >>self.fout, x, 0.0
if (indx != 0) and (indx % 1000) == 0:
print >>self.fout, endFun+type+'\nbeginFunction\n'
print >>self.fout, x
print >>self.fout, endFun, type, '\n'
return
#--- PlotData(axis, xList, yList, zList, id, type)
#
def PlotData(self, axis, xList, yList, zList, id, type):
flog = self.flog
print >>flog, 'graph xList: ', self.xList, ' xList: ', xList, \
' yList: ', yList
print >>self.fout, '%\n% Plot '+id+'\n%\n'
print >>self.fout, '/xfix { ', self.x0 - self.xInc - self.xOffset,' sub ', self.xInc, ' div ', 0.0,' add } def\n'
if axis == 2:
print >>self.fout, self.yfix2
elif axis == 1:
print >>self.fout, self.yfix1
# else:
# print >>self.fout, '/yfix { 0 add } def\n'
print >>self.fout, 'beginFunction\n'
if isinstance(zList,list):
endFun = 'endFunctionW\n'
else:
endFun = 'endFunction\n'
indx = 0
for x in xList:
if x == ' ' and indx == len(xList)-1:
continue
if len(yList) <= indx:
continue
y = yList[indx]
if isinstance(zList,list):
if len(zList) <= indx:
continue
z = zList[indx]
else:
z = ''
indx += 1
if self.xUniform == True:
g_indx = self.xDict[x]
print >>self.fout, g_indx, y, z
else:
print >>self.fout, x, y, z
if (indx != 0) and (indx % 1000) == 0:
print >>self.fout, endFun+type+'\nbeginFunction\n'
if self.xUniform == True:
print >>self.fout, g_indx, y, z
else:
print >>self.fout, x, y, z
print >>self.fout, endFun, type, '\n'
return
#--- GetImage()
#
def GetImage(self):
flog = self.flog
print >>self.fout, 'showpage\n'
self.fout.flush()
os.fsync(self.fout)
if self.plotsPerPage == 1:
# size = ' -g1200x550 '
size = ' -g%dx%d ' % (self.xSize, self.ySize)
xres = int(100 * self.xSize * 6.5 / (1200 * self.xLen))
yres = int(110 * self.ySize / 550)
res = ' -r%dx%d ' % (xres, yres)
cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'.jpg -dNOPAUSE '+ res +self.fname+' -c quit'
# cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'.jpg -dNOPAUSE -r100x100 '+self.fname+' -c quit'
else:
size = ' -g1200x1100 '
cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'%d.jpg -dNOPAUSE -r100x100 '+self.fname+' -c quit'
print >>flog, 'cmdStr: ', cmdStr
output = commands.getoutput(cmdStr)
print >>flog, 'output from gs command: ', output
return self.foutPath+self.foutName+'.jpg'
#--- Main
#
def main():
tMin = 0
tMax = 100000
stateList = [0,1,2,2,3,3,3,3,4]
fname = 'sched.txt'
if len(sys.argv) == 2:
fname = sys.argv[1]
elif len(sys.argv) == 3:
tMin = int(sys.argv[1])
tMax = int(sys.argv[2])
elif len(sys.argv) == 4:
tMin = int(sys.argv[1])
tMax = int(sys.argv[2])
fname = sys.argv[3]
elif len(sys.argv) != 1:
print 'USAGE: psPlot.py [tMin tMax] [fname]'
sys.exit(1)
print 'tMin,tMax: ', tMin, tMax, 'fname: ', fname
p = PsPlot('./p', 'Header', 'SubHeader', 1)
fromStateList = []
toStateList = []
time1List = []
time2List = []
indx = 0
oldTime = 0
fin = open(fname, 'r')
for inputLine in fin:
inputLine = inputLine.replace(' ','')
inputLine = inputLine.replace("'", '')
i1 = inputLine.find('(')
i2 = inputLine.find(')')
inputList = inputLine[i1+1:i2-1].split(',')
s1 = stateList[int(inputList[0])]
s2 = stateList[int(inputList[1])]
t = int(inputList[2])
if indx != 0 and t >= tMin and t <= tMax:
fromStateList.append(s1)
toStateList.append(s2)
time1List.append(oldTime)
time2List.append(t)
oldTime = t
indx += 1
p.SetPlot(tMin, tMax, 0, 0, 2, 0, 'Time', 'Socket/State', 'Chavey\'s Plot')
state = 0
while state <= 4:
t1List = []
t2List = []
sList = []
indx = 0
for s in toStateList:
if s == state:
t1List.append(time1List[indx])
t2List.append(time2List[indx])
sList.append(0.10 + s*0.20)
indx += 1
        p.PlotData(1, t1List, t2List, sList, 'Test',
            '0.1 in 0 '+p.SetColor(p.colors[state])+' plotWbarsC')
state += 1
    image = p.GetImage()
print 'Image file: ', image
p.End()
if __name__ == "__main__":
main()
|
func-button/klSigmode.py | xcgoo/uiKLine | 232 | 1983 | <gh_stars>100-1000
# coding: utf-8
"""
Import all required libraries and functions
"""
#----------------------------------------------------------------------
def klSigmode(self):
"""查找模式"""
if self.mode == 'deal':
self.canvas.updateSig(self.signalsOpen)
self.mode = 'dealOpen'
else:
self.canvas.updateSig(self.signals)
self.mode = 'deal'
|
mmdnn/conversion/caffe/writer.py | 2yz/MMdnn | 3,442 | 1991 | <reponame>2yz/MMdnn
import base64
from google.protobuf import json_format
from importlib import import_module
import json
import numpy as np
import os
import sys
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.caffe.common_graph import fetch_attr_value
from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
class JsonFormatter(object):
    '''Dump a DL graph into a JSON file.'''
def __init__(self, graph):
self.graph_def = graph.as_graph_def()
def dump(self, json_path):
json_txt = json_format.MessageToJson(self.graph_def)
parsed = json.loads(json_txt)
formatted = json.dumps(parsed, indent=4, sort_keys=True)
with open(json_path, 'w') as f:
f.write(formatted)
class PyWriter(object):
    '''Dump a DL graph into a Python script.'''
def __init__(self, graph, data, target):
self.graph = graph
self.data = data
self.tab = ' ' * 4
self.prefix = ''
target = target.lower()
if target == 'tensorflow':
self.target = target
self.net = 'TensorFlowNetwork'
elif target == 'keras':
self.target = target
self.net = 'KerasNetwork'
elif target == 'caffe':
self.target = target
self.net = 'CaffeNetwork'
else:
raise ConversionError('Target %s is not supported yet.' % target)
def indent(self):
self.prefix += self.tab
def outdent(self):
self.prefix = self.prefix[:-len(self.tab)]
def statement(self, s):
return self.prefix + s + '\n'
def emit_imports(self):
return self.statement('from dlconv.%s import %s\n' % (self.target, self.net))
def emit_class_def(self, name):
return self.statement('class %s(%s):' % (name, self.net))
def emit_setup_def(self):
return self.statement('def setup(self):')
def emit_node(self, node):
'''Emits the Python source for this node.'''
def pair(key, value):
return '%s=%s' % (key, value)
args = []
for input in node.input:
input = input.strip().split(':')
name = ''.join(input[:-1])
idx = int(input[-1])
assert name in self.graph.node_dict
parent = self.graph.get_node(name)
args.append(parent.output[idx])
#FIXME:
output = [node.output[0]]
# output = node.output
for k, v in node.attr:
if k == 'cell_type':
args.append(pair(k, "'" + fetch_attr_value(v) + "'"))
else:
args.append(pair(k, fetch_attr_value(v)))
args.append(pair('name', "'" + node.name + "'")) # Set the node name
args = ', '.join(args)
return self.statement('%s = self.%s(%s)' % (', '.join(output), node.op, args))
def dump(self, code_output_dir):
if not os.path.exists(code_output_dir):
os.makedirs(code_output_dir)
file_name = get_lower_case(self.graph.name)
code_output_path = os.path.join(code_output_dir, file_name + '.py')
data_output_path = os.path.join(code_output_dir, file_name + '.npy')
with open(code_output_path, 'w') as f:
f.write(self.emit())
with open(data_output_path, 'wb') as f:
np.save(f, self.data)
return code_output_path, data_output_path
def emit(self):
# Decompose DAG into chains
chains = []
for node in self.graph.topologically_sorted():
attach_to_chain = None
if len(node.input) == 1:
parent = get_real_name(node.input[0])
for chain in chains:
if chain[-1].name == parent: # Node is part of an existing chain.
attach_to_chain = chain
break
if attach_to_chain is None: # Start a new chain for this node.
attach_to_chain = []
chains.append(attach_to_chain)
attach_to_chain.append(node)
# Generate Python code line by line
source = self.emit_imports()
source += self.emit_class_def(self.graph.name)
self.indent()
source += self.emit_setup_def()
self.indent()
blocks = []
for chain in chains:
b = ''
for node in chain:
b += self.emit_node(node)
blocks.append(b[:-1])
source += '\n\n'.join(blocks)
return source
class ModelSaver(object):
def __init__(self, code_output_path, data_output_path):
self.code_output_path = code_output_path
self.data_output_path = data_output_path
def dump(self, model_output_dir):
'''Return the file path containing graph in generated model files.'''
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
sys.path.append(os.path.dirname(self.code_output_path))
file_name = os.path.splitext(os.path.basename(self.code_output_path))[0]
module = import_module(file_name)
class_name = get_upper_case(file_name)
net = getattr(module, class_name)
return net.dump(self.data_output_path, model_output_dir)
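# A hypothetical usage sketch (not part of the original module), assuming a
# parsed graph and its weight data are already available from the surrounding
# mmdnn Caffe conversion pipeline:
#
#   code_path, data_path = PyWriter(graph, weights, 'keras').dump('./generated')
#   ModelSaver(code_path, data_path).dump('./model')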
class GraphDrawer(object):
def __init__(self, toolkit, meta_path):
self.toolkit = toolkit.lower()
self.meta_path = meta_path
def dump(self, graph_path):
if self.toolkit == 'tensorflow':
from dlconv.tensorflow.visualizer import TensorFlowVisualizer
if self._is_web_page(graph_path):
TensorFlowVisualizer(self.meta_path).dump_html(graph_path)
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
elif self.toolkit == 'keras':
from dlconv.keras.visualizer import KerasVisualizer
png_path, html_path = (None, None)
if graph_path.endswith('.png'):
png_path = graph_path
elif self._is_web_page(graph_path):
png_path = graph_path + ".png"
html_path = graph_path
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
KerasVisualizer(self.meta_path).dump_png(png_path)
if html_path:
self._png_to_html(png_path, html_path)
os.remove(png_path)
else:
raise NotImplementedError('Visualization of %s is unsupported!' % self.toolkit)
def _is_web_page(self, path):
return path.split('.')[-1] in ('html', 'htm')
def _png_to_html(self, png_path, html_path):
with open(png_path, "rb") as f:
encoded = base64.b64encode(f.read()).decode('utf-8')
source = """<!DOCTYPE>
<html>
<head>
<meta charset="utf-8">
<title>Keras</title>
</head>
<body>
<img alt="Model Graph" src="data:image/png;base64,{base64_str}" />
</body>
</html>""".format(base64_str=encoded)
with open(html_path, 'w', encoding='utf-8') as f:
f.write(source) |
examples/server/models/image_file_upload.py | ParikhKadam/django-angular | 941 | 1996 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# start tutorial
from django.db import models
from djng.forms import NgModelFormMixin, NgFormValidationMixin
from djng.styling.bootstrap3.forms import Bootstrap3ModelForm
class SubscribeUser(models.Model):
full_name = models.CharField(
"<NAME>",
max_length=99)
avatar = models.ImageField("Avatar", blank=False, null=True)
permit = models.FileField("Permit", blank=True, null=True)
class SubscribeForm(NgModelFormMixin, NgFormValidationMixin, Bootstrap3ModelForm):
use_required_attribute = False
scope_prefix = 'subscribe_data'
form_name = 'my_form'
class Meta:
model = SubscribeUser
fields = ['full_name', 'avatar', 'permit']
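# A hypothetical usage sketch (not part of the tutorial code): inside a view the
# form is handled like any other Django ModelForm, e.g.
#
#   form = SubscribeForm(request.POST or None, request.FILES or None)
#   if form.is_valid():
#       form.save()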
|
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py | yangulei/tvm | 4,640 | 1997 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for add, multiply, subtract slice op
Please note the following assumptions made by the implementation:
1) The inputs will be multiple of crouton layout except for the axis that needs broadcasting."""
from tvm import te
from tvm import tir
from tvm import topi
from ..utils import get_layout_transform_fn
def add_broadcast_compute(input_a, input_b):
"""Call the add op from topi"""
return topi.add(input_a, input_b)
def subtract_broadcast_compute(input_a, input_b):
"""Call the subtract op from topi"""
return topi.subtract(input_a, input_b)
def multiply_broadcast_compute(input_a, input_b):
"""Call the multiply op from topi"""
return topi.multiply(input_a, input_b)
def tir_broadcast_schedule(
out_m,
input_a,
input_b,
output_layout: str,
input_a_layout: str,
input_b_layout: str,
op_name: str,
):
"""Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast"""
func = te.create_prim_func([input_a, input_b, out_m])
s = tir.Schedule(func)
block_dict = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"}
block = s.get_block(block_dict[op_name])
if input_a_layout == "nhwc-8h2w32c2w-2d":
input_a_transformed_layout = get_layout_transform_fn(input_a_layout)
s.transform_layout(block, buffer=("read", 0), index_map=input_a_transformed_layout)
if input_b_layout == "nhwc-8h2w32c2w-2d":
input_b_transformed_layout = get_layout_transform_fn(input_b_layout)
s.transform_layout(block, buffer=("read", 1), index_map=input_b_transformed_layout)
output_transformed_layout = get_layout_transform_fn(output_layout)
s.transform_layout(block, buffer=("write", 0), index_map=output_transformed_layout)
n, h, w, c = s.get_loops(block)
h_o, h_i = s.split(h, [None, 8])
w_o, w_i = s.split(w, [None, 4])
c_o, c_i = s.split(c, [None, 32])
wio, wii = s.split(w_i, [None, 2])
s.reorder(n, h_o, w_o, c_o, h_i, wio, c_i, wii)
fused = s.fuse(c_i, wii)
s.vectorize(fused)
return s
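# A minimal sketch of how these hooks might be wired together (illustrative
# only; the shape, dtype and layout strings below are assumptions, not taken
# from this file):
#
#   a = te.placeholder((1, 8, 8, 32), dtype="float16", name="A")
#   b = te.placeholder((1, 8, 8, 32), dtype="float16", name="B")
#   out = add_broadcast_compute(a, b)
#   sch = tir_broadcast_schedule(out, a, b,
#                                output_layout="nhwc-8h2w32c2w-2d",
#                                input_a_layout="nhwc-8h2w32c2w-2d",
#                                input_b_layout="nhwc-8h2w32c2w-2d",
#                                op_name="add")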
|
chrome/test/telemetry/chromeos/login_unittest.py | Fusion-Rom/android_external_chromium_org | 231 | 1999 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import unittest
from telemetry.core import browser_finder
from telemetry.core import exceptions
from telemetry.core import extension_to_load
from telemetry.core import util
from telemetry.core.backends.chrome import cros_interface
from telemetry.unittest import options_for_unittests
class CrOSAutoTest(unittest.TestCase):
def setUp(self):
options = options_for_unittests.GetCopy()
self._cri = cros_interface.CrOSInterface(options.cros_remote,
options.cros_ssh_identity)
self._is_guest = options.browser_type == 'cros-chrome-guest'
self._username = '' if self._is_guest else options.browser_options.username
self._password = options.browser_options.password
def _IsCryptohomeMounted(self):
"""Returns True if cryptohome is mounted"""
cryptohomeJSON, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome',
'--action=status'])
cryptohomeStatus = json.loads(cryptohomeJSON)
return (cryptohomeStatus['mounts'] and
cryptohomeStatus['mounts'][0]['mounted'])
def _CreateBrowser(self, autotest_ext=False, auto_login=True):
"""Finds and creates a browser for tests. if autotest_ext is True,
also loads the autotest extension"""
options = options_for_unittests.GetCopy()
if autotest_ext:
extension_path = os.path.join(os.path.dirname(__file__), 'autotest_ext')
self._load_extension = extension_to_load.ExtensionToLoad(
path=extension_path,
browser_type=options.browser_type,
is_component=True)
options.extensions_to_load = [self._load_extension]
browser_to_create = browser_finder.FindBrowser(options)
self.assertTrue(browser_to_create)
options.browser_options.create_browser_with_oobe = True
options.browser_options.auto_login = auto_login
b = browser_to_create.Create()
b.Start()
return b
def _GetAutotestExtension(self, browser):
"""Returns the autotest extension instance"""
extension = browser.extensions[self._load_extension]
self.assertTrue(extension)
return extension
def _GetLoginStatus(self, browser):
extension = self._GetAutotestExtension(browser)
self.assertTrue(extension.EvaluateJavaScript(
"typeof('chrome.autotestPrivate') != 'undefined'"))
extension.ExecuteJavaScript('''
window.__login_status = null;
chrome.autotestPrivate.loginStatus(function(s) {
window.__login_status = s;
});
''')
return util.WaitFor(
lambda: extension.EvaluateJavaScript('window.__login_status'), 10)
def testCryptohomeMounted(self):
"""Verifies cryptohome mount status for regular and guest user and when
logged out"""
with self._CreateBrowser() as b:
self.assertEquals(1, len(b.tabs))
self.assertTrue(b.tabs[0].url)
self.assertTrue(self._IsCryptohomeMounted())
chronos_fs = self._cri.FilesystemMountedAt('/home/chronos/user')
self.assertTrue(chronos_fs)
if self._is_guest:
self.assertEquals(chronos_fs, 'guestfs')
else:
home, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome-path',
'user', self._username])
self.assertEquals(self._cri.FilesystemMountedAt(home.rstrip()),
chronos_fs)
self.assertFalse(self._IsCryptohomeMounted())
self.assertEquals(self._cri.FilesystemMountedAt('/home/chronos/user'),
'/dev/mapper/encstateful')
def testLoginStatus(self):
"""Tests autotestPrivate.loginStatus"""
with self._CreateBrowser(autotest_ext=True) as b:
login_status = self._GetLoginStatus(b)
self.assertEquals(type(login_status), dict)
self.assertEquals(not self._is_guest, login_status['isRegularUser'])
self.assertEquals(self._is_guest, login_status['isGuest'])
self.assertEquals(login_status['email'], self._username)
self.assertFalse(login_status['isScreenLocked'])
def _IsScreenLocked(self, browser):
return self._GetLoginStatus(browser)['isScreenLocked']
def _LockScreen(self, browser):
self.assertFalse(self._IsScreenLocked(browser))
extension = self._GetAutotestExtension(browser)
self.assertTrue(extension.EvaluateJavaScript(
"typeof chrome.autotestPrivate.lockScreen == 'function'"))
logging.info('Locking screen')
extension.ExecuteJavaScript('chrome.autotestPrivate.lockScreen();')
logging.info('Waiting for the lock screen')
def ScreenLocked():
return (browser.oobe and
browser.oobe.EvaluateJavaScript("typeof Oobe == 'function'") and
browser.oobe.EvaluateJavaScript(
"typeof Oobe.authenticateForTesting == 'function'"))
util.WaitFor(ScreenLocked, 10)
self.assertTrue(self._IsScreenLocked(browser))
def _AttemptUnlockBadPassword(self, browser):
logging.info('Trying a bad password')
def ErrorBubbleVisible():
return not browser.oobe.EvaluateJavaScript('''
document.getElementById('bubble').hidden
''')
self.assertFalse(ErrorBubbleVisible())
browser.oobe.ExecuteJavaScript('''
Oobe.authenticateForTesting('%s', 'bad');
''' % self._username)
util.WaitFor(ErrorBubbleVisible, 10)
self.assertTrue(self._IsScreenLocked(browser))
def _UnlockScreen(self, browser):
logging.info('Unlocking')
browser.oobe.ExecuteJavaScript('''
Oobe.authenticateForTesting('%s', '%s');
''' % (self._username, self._password))
util.WaitFor(lambda: not browser.oobe, 10)
self.assertFalse(self._IsScreenLocked(browser))
def testScreenLock(self):
"""Tests autotestPrivate.screenLock"""
with self._CreateBrowser(autotest_ext=True) as browser:
self._LockScreen(browser)
self._AttemptUnlockBadPassword(browser)
self._UnlockScreen(browser)
def testLogout(self):
"""Tests autotestPrivate.logout"""
with self._CreateBrowser(autotest_ext=True) as b:
extension = self._GetAutotestExtension(b)
try:
extension.ExecuteJavaScript('chrome.autotestPrivate.logout();')
except (exceptions.BrowserConnectionGoneException,
exceptions.BrowserGoneException):
pass
util.WaitFor(lambda: not self._IsCryptohomeMounted(), 20)
def _SwitchRegion(self, region):
self._cri.RunCmdOnDevice(['stop', 'ui'])
# Change VPD (requires RW-enabled firmware).
# To save time, region and initial_timezone are not set.
vpd = {'initial_locale': region.language_code,
'keyboard_layout': region.keyboard}
for (key, value) in vpd.items():
self._cri.RunCmdOnDevice(['vpd', '-s', '"%s"="%s"' % (key, value)])
# Remove cached files to clear initial locale info and force regeneration.
self._cri.RunCmdOnDevice(['rm', '/home/chronos/Local\ State'])
self._cri.RunCmdOnDevice(['rm', '/home/chronos/.oobe_completed'])
self._cri.RunCmdOnDevice(['dump_vpd_log', '--force'])
self._cri.RunCmdOnDevice(['start', 'ui'])
def _OobeHasOption(self, browser, selectId, value):
hasOptionJs = '''
// Check that the option is present, and selected if it is the default.
(function hasOption(selectId, value, isDefault) {
var options = document.getElementById(selectId).options;
for (var i = 0; i < options.length; i++) {
if (options[i].value == value) {
// The option is present. Make sure it's selected if necessary.
return !isDefault || options.selectedIndex == i;
}
}
return false;
})("%s", "%s", %s);
'''
return browser.oobe.EvaluateJavaScript(
hasOptionJs % (selectId, value, 'true'))
def _ResolveLanguage(self, locale):
# If the locale matches a language but not the country, fall back to
# an existing locale. See ui/base/l10n/l10n_util.cc.
lang, _, region = map(str.lower, locale.partition('-'))
if not region:
return ""
# Map from other countries to a localized country
if lang == 'es' and region == 'es':
return 'es-419'
if lang == 'zh':
if region in ('hk', 'mo'):
return 'zh-TW'
return 'zh-CN'
if lang == 'en':
if region in ('au', 'ca', 'nz', 'za'):
return 'en-GB'
return 'en-US'
# No mapping found
return ""
def testOobeLocalization(self):
"""Tests different region configurations at OOBE"""
# Save the original device localization settings.
# To save time, only read initial_locale and keyboard_layout.
initial_region = self.Region('', '', '', '', '')
initial_region.language_code, _ = self._cri.RunCmdOnDevice(
['vpd', '-g', 'initial_locale'])
initial_region.keyboard, _ = self._cri.RunCmdOnDevice(
['vpd', '-g', 'keyboard_layout'])
for region in self.REGIONS_LIST:
self._SwitchRegion(region)
with self._CreateBrowser(auto_login=False) as browser:
# Ensure the dropdown lists have been created.
util.WaitFor(lambda: browser.oobe.EvaluateJavaScript(
'document.getElementById("language-select") != null'),
10)
# Find the language, or an acceptable fallback value.
languageFound = self._OobeHasOption(browser,
'language-select',
region.language_code)
if not languageFound:
fallback = self._ResolveLanguage(region.language_code)
self.assertTrue(fallback and
self._OobeHasOption(browser,
'language-select',
fallback))
# Find the keyboard layout.
self.assertTrue(self._OobeHasOption(
browser, 'keyboard-select', region.keyboard))
# Test is finished. Restore original region settings.
self._SwitchRegion(initial_region)
# The Region class and region list will be available in regions.py.
class Region(object):
def __init__(self, region_code, keyboard, time_zone, language_code,
keyboard_mechanical_layout, description=None, notes=None):
self.region_code = region_code
self.keyboard = keyboard
self.time_zone = time_zone
self.language_code = language_code
self.keyboard_mechanical_layout = keyboard_mechanical_layout
self.description = description or region_code
self.notes = notes
class Enum(frozenset):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
KeyboardMechanicalLayout = Enum(['ANSI', 'ISO', 'JIS', 'ABNT2'])
_KML = KeyboardMechanicalLayout
REGIONS_LIST = [
Region('au', 'xkb:us::eng', 'Australia/Sydney', 'en-AU', _KML.ANSI,
'Australia'),
Region('ca.ansi', 'xkb:us::eng', 'America/Toronto', 'en-CA', _KML.ANSI,
'Canada (US keyboard)',
'Canada with US (ANSI) keyboard; see http://goto/cros-canada'),
Region('ca.fr', 'xkb:ca::fra', 'America/Toronto', 'fr-CA', _KML.ISO,
'Canada (French keyboard)',
('Canadian French (ISO) keyboard. The most common configuration for '
'Canadian French SKUs. See http://goto/cros-canada')),
Region('ca.hybrid', 'xkb:ca:eng:eng', 'America/Toronto', 'en-CA', _KML.ISO,
'Canada (hybrid)',
('Canada with hybrid xkb:ca:eng:eng + xkb:ca::fra keyboard (ISO), '
'defaulting to English language and keyboard. Used only if there '
'needs to be a single SKU for all of Canada. See '
'http://goto/cros-canada')),
Region('ca.multix', 'xkb:ca:multix:fra', 'America/Toronto', 'fr-CA',
_KML.ISO, 'Canada (multilingual)',
("Canadian Multilingual keyboard; you probably don't want this. See "
"http://goto/cros-canada")),
Region('de', 'xkb:de::ger', 'Europe/Berlin', 'de', _KML.ISO, 'Germany'),
Region('fi', 'xkb:fi::fin', 'Europe/Helsinki', 'fi', _KML.ISO, 'Finland'),
Region('fr', 'xkb:fr::fra', 'Europe/Paris', 'fr', _KML.ISO, 'France'),
Region('gb', 'xkb:gb:extd:eng', 'Europe/London', 'en-GB', _KML.ISO, 'UK'),
Region('ie', 'xkb:gb:extd:eng', 'Europe/Dublin', 'en-GB', _KML.ISO,
'Ireland'),
Region('in', 'xkb:us::eng', 'Asia/Calcutta', 'en-US', _KML.ANSI, 'India'),
Region('my', 'xkb:us::eng', 'Asia/Kuala_Lumpur', 'ms', _KML.ANSI,
'Malaysia'),
Region('nl', 'xkb:us:intl:eng', 'Europe/Amsterdam', 'nl', _KML.ANSI,
'Netherlands'),
Region('nordic', 'xkb:se::swe', 'Europe/Stockholm', 'en-US', _KML.ISO,
'Nordics',
('Unified SKU for Sweden, Norway, and Denmark. This defaults '
'to Swedish keyboard layout, but starts with US English language '
'for neutrality. Use if there is a single combined SKU for Nordic '
'countries.')),
Region('se', 'xkb:se::swe', 'Europe/Stockholm', 'sv', _KML.ISO, 'Sweden',
("Use this if there separate SKUs for Nordic countries (Sweden, "
"Norway, and Denmark), or the device is only shipping to Sweden. "
"If there is a single unified SKU, use 'nordic' instead.")),
Region('sg', 'xkb:us::eng', 'Asia/Singapore', 'en-GB', _KML.ANSI,
'Singapore'),
Region('us', 'xkb:us::eng', 'America/Los_Angeles', 'en-US', _KML.ANSI,
'United States'),
]
|
src/aprl/agents/monte_carlo.py | fkamrani/adversarial-policies | 211 | 2002 | <filename>src/aprl/agents/monte_carlo.py
"""Monte Carlo receding horizon control."""
from abc import ABC, abstractmethod
from multiprocessing import Pipe, Process
import gym
from stable_baselines.common.vec_env import CloudpickleWrapper
from aprl.common.mujoco import MujocoState, ResettableEnv
class MujocoResettableWrapper(ResettableEnv, gym.Wrapper):
"""Converts a MujocoEnv into a ResettableEnv.
Note all MuJoCo environments are resettable."""
def __init__(self, env):
"""Wraps a MujocoEnv, adding get_state and set_state methods.
:param env: a MujocoEnv. NOTE: it must not be wrapped in a TimeLimit."""
if hasattr(env, "_max_episode_steps"):
raise TypeError(
"Environment must not have a time limit " "(try passing in env.unwrapped instead)."
)
gym.Wrapper.__init__(self, env)
self.sim = env.unwrapped.sim
def get_state(self):
"""Serializes the qpos and qvel state of the MuJoCo emulator."""
return MujocoState.from_mjdata(self.sim.data).flatten()
def set_state(self, x):
"""Restores qpos and qvel, calling forward() to derive other values."""
state = MujocoState.from_flattened(x, self.sim)
state.set_mjdata(self.sim.data)
self.sim.forward() # put mjData in consistent state
def reset(self):
"""See base class."""
return self.env.reset()
def step(self, a):
"""See base class."""
return self.env.step(a)
class MonteCarlo(ABC):
"""Selects an action for a ResettableEnv by random search. Randomly samples
fixed-length sequences of actions. Evaluates each trajectory in the
environment, resetting the state to the original after each trajectory."""
@abstractmethod
def __init__(self, horizon, trajectories):
"""Constructs a MonteCarlo instance for env.
:param horizon: the length of the trajectories to search over.
:param trajectories: the number of trajectories to evaluate."""
self.horizon = horizon
self.trajectories = trajectories
@abstractmethod
def seed(self, seed):
"""Sets a seed for the PRNG for the action sequences.
:param seed (int): a seed."""
pass
@abstractmethod
def best_action(self, state):
"""Returns the best action out of a random search of action sequences.
Generates self.trajectories action sequences, each of length
self.horizon. The cumulative reward of each action sequence is computed,
starting from state. The function returns the first action and the
cumulative reward of the action sequences with the largest cumulative
reward.
:param state: a value returned by env.get_state().
:return (action, reward): the best action found and associated reward."""
pass
class MonteCarloSingle(MonteCarlo):
"""Selects an action for a ResettableEnv by random search.
See base class for details. This implementation is not parallelized."""
def __init__(self, env, horizon, trajectories):
"""See base class."""
super().__init__(horizon, trajectories)
self.env = env
def seed(self, seed):
"""Sets a seed for the PRNG for the action sequences.
:param seed (int): a seed."""
self.env.action_space.np_random.seed(seed)
def best_action(self, state):
"""Returns the best action out of a random search of action sequences.
See base class for details.
Search takes place in a single environment, which is reset to state
before evaluating each action sequence."""
res = []
for _ in range(self.trajectories):
self.env.set_state(state)
us = [self.env.action_space.sample() for _ in range(self.horizon)]
total_rew = 0
for u in us:
_ob, rew, done, _info = self.env.step(u)
total_rew += rew
if done:
break
res.append((us[0], total_rew))
self.env.set_state(state)
best = max(res, key=lambda x: x[1])
return best
def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories):
parent_remote.close()
dynamics = dynamic_fn_wrapper.var()
dynamics.reset()
mc = MonteCarloSingle(dynamics, horizon, trajectories)
try:
while True:
cmd, x = remote.recv()
if cmd == "seed":
mc.seed(x)
elif cmd == "search":
best_u, best_r = mc.best_action(x)
remote.send((best_u, best_r))
elif cmd == "close":
remote.close()
break
else:
raise NotImplementedError
except KeyboardInterrupt:
print("MonteCarloParallel worker: got KeyboardInterrupt")
finally:
dynamics.close()
class MonteCarloParallel(MonteCarlo):
"""Like MonteCarlo, but performs the random search in parallel."""
# This implementation is inspired by Baselines SubprocVecEnv.
def __init__(self, env_fns, horizon, trajectories, seed=0):
"""Launch subprocess workers and store configuration parameters.
:param env_fns (list<()->ResettableEnv>): list of thunks.
:param horizon (int): length of trajectories to search over.
:param trajectories (int): minimum number of trajectories to evaluate.
It will be rounded up to the nearest multiple of len(make_env)."""
super().__init__(horizon, trajectories)
nremotes = len(env_fns)
# Integer ceiling of self.trajectories / nworkers
traj_per_worker = (self.trajectories - 1) // nremotes + 1
pipes = [Pipe() for _ in range(nremotes)]
self.remotes, self.work_remotes = zip(*pipes)
worker_cfgs = zip(self.work_remotes, self.remotes, env_fns)
self.ps = []
for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs):
args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker)
process = Process(target=_worker, args=args)
process.daemon = True
# If the main process crashes, we should not cause things to hang
process.start()
self.ps.append(process)
for remote in self.work_remotes:
remote.close()
def seed(self, seed):
"""See base class."""
for i, remote in enumerate(self.remotes):
remote.send(("seed", seed + i))
def best_action(self, state):
"""Returns the best action out of a random search of action sequences."""
for remote in self.remotes:
remote.send(("search", state))
results = [remote.recv() for remote in self.remotes]
best = max(results, key=lambda x: x[1])
return best
def close(self):
"""Shuts down parallel workers."""
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
def receding_horizon(monte_carlo, env):
"""Receding horizon control
:param monte_carlo(MonteCarlo): a Monte Carlo controller for env or a clone of env.
:param env(ResettableEnv): a resettable environment."""
while True:
state = env.get_state()
a, _seq_rew = monte_carlo.best_action(state)
ob, rew, done, info = env.step(a)
yield a, ob, rew, done, info
if done:
break
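# A hypothetical end-to-end sketch (not part of the original module); the
# environment id is an illustrative assumption:
#
#   env = MujocoResettableWrapper(gym.make("HalfCheetah-v3").unwrapped)
#   mc = MonteCarloSingle(env, horizon=20, trajectories=100)
#   mc.seed(0)
#   env.reset()
#   for action, ob, rew, done, info in receding_horizon(mc, env):
#       pass  # consume the roll-out here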
|
book/trees/binary_search_tree.py | Web-Dev-Collaborative/algos | 153 | 2020 | # -*- coding: utf-8 -*-
"""
The `TreeNode` class provides many helper functions that make the work
done in the `BinarySearchTree` class methods much easier. The
constructor for a `TreeNode`, along with these helper functions, is
shown below. As you can see, many of these helper functions help to
classify a node according to its own position as a child, (left or
right) and the kind of children the node has. The `TreeNode` class will
also explicitly keep track of the parent as an attribute of each node.
You will see why this is important when we discuss the implementation
for the `del` operator.
One of the more interesting methods of `TreeNode` provides an interface
to simply iterate over all the keys in the tree in order. You already
know how to traverse a binary tree in order, using the `inorder`
traversal algorithm. However, because we want our iterator to operate
lazily, in this case we use the `yield` keyword to define our `__iter__`
method as a Python generator. Pay close attention to the `__iter__`
implementation as at first glance you might think that the code is
not recursive: in fact, because `__iter__` overrides the `for x
in` operation for iteration, it really is recursive!
Our full implementation of `TreeNode` is provided below. It includes
three further methods `find_successor`, `find_min` and `splice_out`
which you can ignore for now as we will return to them later when
discussing deletion.
"""
class TreeNode(object):
def __init__(self, key, val, left=None, right=None, parent=None):
self.key = key
self.val = val
self.left = left
self.right = right
self.parent = parent
def is_left_child(self):
return self.parent and self.parent.left == self
def is_right_child(self):
return self.parent and self.parent.right == self
def is_leaf(self):
return not (self.right or self.left)
def has_any_children(self):
return self.right or self.left
def has_both_children(self):
return self.right and self.left
def has_one_child(self):
return self.has_any_children() and not self.has_both_children()
def replace_node_data(self, key, val, left, right):
self.key = key
self.val = val
self.left = left
self.right = right
if self.left:
self.left.parent = self
if self.right:
self.right.parent = self
def __iter__(self):
if self is None:
return
if self.left:
# `in` calls `__iter__` so is recursive
for elem in self.left:
yield elem
yield self.key
if self.right:
# recurse again
for elem in self.right:
yield elem
def find_successor(self):
if self.right:
return self.right.find_min()
if self.parent is None:
return None
if self.is_left_child():
return self.parent
self.parent.right = None
successor = self.parent.find_successor()
self.parent.right = self
return successor
def find_min(self):
current = self
while current.left:
current = current.left
return current
def splice_out(self):
if self.is_leaf():
if self.is_left_child():
self.parent.left = None
else:
self.parent.right = None
else:
promoted_node = self.left or self.right
if self.is_left_child():
self.parent.left = promoted_node
else:
self.parent.right = promoted_node
promoted_node.parent = self.parent
"""
Now that we have our `TreeNode` class we can begin to write
`BinarySearchTree` itself. Recall that the core functionality of this
class will be to enable `put`ing to and `get`ing from the tree, so we
begin our implementation with the `put` functionality.
In order to enable the `tree[1] = 'foo'` style assignment interface for
our `BinarySearchTree` instances, we override the `__setitem__` magic
method. In this method we first check to see if the tree already has a
root. If there is not a root then we create a new `TreeNode` and set it
as the root of the tree. If a root node is already in place then `put`
calls the private, recursive, helper function `_put` to search the tree
according to the following algorithm:
- Starting at the root of the tree, search the binary tree comparing
the new key to the key in the current node. If the new key is less
than the current node, search the left subtree. If the new key is
greater than the current node, search the right subtree.
- When there is no left (or right) child to search, we have found the
position in the tree where the new node should be installed.
- To add a node to the tree, create a new `TreeNode` object and insert
the object at the point discovered in the previous step.
The code below shows the Python code for inserting a new
node in the tree. The `_put` function is written recursively following
the steps outlined above. Notice that when a new child is inserted into
the tree, the `node` is passed to the new tree as the parent.
One important problem with our implementation of insert is that
duplicate keys are not handled properly. As our tree is implemented, a
duplicate key will create a new node with the same key value in the
right subtree of the node having the original key. The result of this is
that the node with the new key will never be found during a search. A
better way to handle the insertion of a duplicate key is for the value
associated with the new key to replace the old value. We leave fixing
this bug as an exercise for you.
"""
class BinarySearchTree(object):
TreeNodeClass = TreeNode
def __init__(self):
self.root = None
self.size = 0
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def __setitem__(self, key, val):
if self.root:
self._put(key, val, self.root)
else:
self.root = self.TreeNodeClass(key, val)
self.size = self.size + 1
def _put(self, key, val, node):
if key < node.key:
if node.left:
self._put(key, val, node.left)
else:
node.left = self.TreeNodeClass(key, val, parent=node)
else:
if node.right:
self._put(key, val, node.right)
else:
node.right = self.TreeNodeClass(key, val, parent=node)
"""
The diagram below illustrates the process for inserting a new
node into a binary search tree. The lightly shaded nodes indicate the
nodes that were visited during the insertion process.
![Inserting a node with key = 19](figures/binary-search-tree-put.png)
Once the tree is constructed, the next task is to implement the
retrieval of a value for a given key. The `get` functionality is even easier
than the `put` functionality because we simply search the tree recursively
until we get to a non-matching leaf node or find a matching key. When
a matching key is found, the value stored in the val of the node is
returned.
Again, in order to enable a `tree[1]` retrieval interface, we overload
one of Python’s magic methods, in this case `__getitem__`. Just like with
`__setitem__`, the primary purpose of this method is to handle the presence
or absence of a root node; it delegates the core `get` functionality
to `_get`.
The search code in the `_get` method uses the same logic
for choosing the left or right child as the `_put` method. Notice that
the `_get` method returns a `TreeNode` to `__getitem__`, this allows `_get` to
be used as a flexible helper method for other `BinarySearchTree` methods
that may need to make use of other data from the `TreeNode` besides the
val.
"""
def __getitem__(self, key):
if self.root:
result = self._get(key, self.root)
if result:
return result.val
raise KeyError
def _get(self, key, node):
if not node:
return None
if node.key == key:
return node
if key < node.key:
return self._get(key, node.left)
return self._get(key, node.right)
"""
Using `_get`, we can implement the `in` operation by writing a
`__contains__` method for the `BinarySearchTree`. The `__contains__`
method will simply call `_get` and return `True` if `_get` returns a
value, or `False` if it returns `None`. The code for `__contains__` is
shown below.
"""
def __contains__(self, key):
return bool(self._get(key, self.root))
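# A short usage sketch of the interface built so far (added for illustration;
# the keys and values are arbitrary):
#
#   tree = BinarySearchTree()
#   tree[17] = 'seventeen'
#   tree[5] = 'five'
#   tree[35] = 'thirty five'
#   assert tree[5] == 'five'
#   assert 17 in tree
#   assert 99 not in tree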
"""
Finally, we turn our attention to the most challenging method in the
binary search tree: the deletion of a key. The first task is
to find the node to delete by searching the tree. If the tree has more
than one node we search using the `_get` method to find the `TreeNode`
that needs to be removed. If the tree only has a single node, that means
we are removing the root of the tree, but we still must check to make
sure the key of the root matches the key that is to be deleted. In
either case if the key is not found the `del` operator raises an error.
"""
def delete(self, key):
if self.size > 1:
node_to_remove = self._get(key, self.root)
if node_to_remove:
self.remove(node_to_remove)
self.size = self.size - 1
return
elif self.size == 1 and self.root.key == key:
self.root = None
self.size = self.size - 1
return
raise KeyError('Error, key not in tree')
def __delitem__(self, key):
self.delete(key)
"""
Once we’ve found the node containing the key we want to delete, there
are three cases that we must consider:
1. The node to be deleted has no children
2. The node to be deleted has only one child
3. The node to be deleted has two children
The first case is straightforward. If
the current node has no children, all we need to do is delete the node
and remove the reference to this node in the parent. The code for this
case is shown below.
"""
def remove(self, node):
if node.is_leaf() and node.parent is not None:
if node == node.parent.left:
node.parent.left = None
else:
node.parent.right = None
"""
![Deleting Node 16, a node without
children](figures/binary-search-tree-delete-1.png)
The second case is only slightly more complicated (see below). If a node
has only a single child, then we can simply promote the child to take
the place of its parent. The code for this case is shown in the next
code sample. As you look at this code you will see that there are six
cases to consider. Since the cases are symmetric with respect to either
having a left or right child we will just discuss the case where the
current node has a left child. The decision proceeds as follows:
1. If the current node is a left child then we only need to update the
parent reference of the left child to point to the parent of the
current node, and then update the left child reference of the parent
to point to the current node’s left child.
2. If the current node is a right child then we only need to update the
parent reference of the right child to point to the parent of the
current node, and then update the right child reference of the
parent to point to the current node’s right child.
3. If the current node has no parent, it must be the root. In this case
we will just replace the `key`, `val`, `left`, and
`right` data by calling the `replace_node_data` method on
the root.
Code for this decision process may look like:
"""
elif node.has_one_child():
promoted_node = node.left or node.right
if node.is_left_child():
promoted_node.parent = node.parent
node.parent.left = promoted_node
elif node.is_right_child():
promoted_node.parent = node.parent
node.parent.right = promoted_node
else:
node.replace_node_data(
promoted_node.key,
promoted_node.val,
promoted_node.left,
promoted_node.right
)
"""
![Deleting node 25, a node that has a single
child](figures/binary-search-tree-delete-2.png)
The third case is the most difficult case to handle (see below). If a
node has two children, then it is unlikely that we can simply promote
one of them to take the node’s place. We can, however, search the tree
for a node that can be used to replace the one scheduled for deletion.
What we need is a node that will preserve the binary search tree
relationships for both of the existing left and right subtrees. The node
that will do this is the node that has the next-largest key in the tree.
We call this node the **successor**, and we will look at a way to find
the successor shortly. The successor is guaranteed to have no more than
one child, so we know how to remove it using the two cases for deletion
that we have already implemented. Once the successor has been removed,
we simply put it in the tree in place of the node to be deleted.
![Deleting node 5, a node with two
children](figures/binary-search-tree-delete-3.png)
The code to handle the third case is shown below. Notice
that we make use of the helper methods `find_successor` and `find_min` to
find the successor. To remove the successor, we make use of the method
`splice_out`. The reason we use `splice_out` is that it goes directly to
the node we want to splice out and makes the right changes. We could
call `delete` recursively, but then we would waste time re-searching for
the key node.
"""
else: # has both children
successor = node.find_successor()
if successor:
successor.splice_out()
node.key = successor.key
node.val = successor.val
"""
The code to find the successor is shown above and as you can see is a
method of the `TreeNode` class. This code makes use of the same
properties of binary search trees that cause an inorder traversal to
print out the nodes in the tree from smallest to largest. There are
three cases to consider when looking for the successor:
1. If the node has a right child, then the successor is the smallest
key in the right subtree.
2. If the node has no right child and is the left child of its parent,
then the parent is the successor.
3. If the node is the right child of its parent, and itself has no
right child, then the successor to this node is the successor of its
parent, excluding this node.
The first condition is the only one that matters for us when deleting a
node from a binary search tree.
The `find_min` method is called to find the minimum key in a subtree. You
should convince yourself that the minimum valued key in any binary
search tree is the leftmost child of the tree. Therefore the `find_min`
method simply follows the `left` references in each node of the
subtree until it reaches a node that does not have a left child.
"""
|
util/headers.py | giuseppe/quay | 2,027 | 2023 | <gh_stars>1000+
import base64
def parse_basic_auth(header_value):
"""
Attempts to parse the given header value as a Base64-encoded Basic auth header.
"""
if not header_value:
return None
parts = header_value.split(" ")
if len(parts) != 2 or parts[0].lower() != "basic":
return None
try:
        # b64decode returns bytes on Python 3; decode before splitting on ':'
        basic_parts = base64.b64decode(parts[1]).decode("utf-8").split(":", 1)
if len(basic_parts) != 2:
return None
return basic_parts
except ValueError:
return None
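# Illustrative usage sketch (hypothetical header values, not from the quay
# code base): a well-formed header decodes to a [username, password] pair,
# anything else yields None.
def _demo_parse_basic_auth():
    good = "Basic " + base64.b64encode(b"someuser:somepass").decode("utf-8")
    print(parse_basic_auth(good))            # ['someuser', 'somepass']
    print(parse_basic_auth("Bearer sometoken"))  # None
    print(parse_basic_auth(None))            # None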
|
caffe2/python/operator_test/partition_ops_test.py | KevinKecc/caffe2 | 585 | 2032 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase, rand_array
class TestPartitionOps(TestCase):
def test_configs(self):
# (main dims, partitions, main type, [list of (extra dims, type)])
configs = [
((10, ), 3),
((4, ), 10),
((10, 10), 4),
((100, ), 2),
((5, ), 1),
((1, ), 1),
((2, 10), 2),
]
suffixes = [
[],
[((2, 2), np.float32)],
[((3, ), np.int64), ((2, ), np.float32)],
]
return [
(main_dims, parts, main_type, extra, pack)
for main_dims, parts in configs
for main_type in [np.int32, np.int64] for extra in suffixes
for pack in [False, True]
]
def testPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
ins = ['in' + str(i) for i in range(1 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(1 + len(extra_ins))
]
op = core.CreateOperator(
'Partition', ins, outs, pack_first_input=(1 if pack else 0))
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i], d)
x.append(d)
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
print(x)
print(ref)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
# test inverse operation (GatherByKey)
if len(main_dims) == 1:
# currently only 1D key tensor supported
for i in range(len(extra_ins)):
expected_out = ins[i + 1]
gather_ins = [ins[0]] + [
outs[len(ins) * p + i + 1] for p in range(parts)]
actual_out = expected_out + '_actual'
op = core.CreateOperator(
'GatherByKey', gather_ins, actual_out)
workspace.RunOperatorOnce(op)
expected = workspace.FetchBlob(expected_out)
actual = workspace.FetchBlob(actual_out)
np.testing.assert_array_equal(expected, actual)
def testLengthsPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
# For LengthsSharding only 1-D tensors supported as a first input
if len(main_dims) > 1:
continue
ins = ['in' + str(i) for i in range(2 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(2 + len(extra_ins))
]
op = core.CreateOperator(
'LengthsPartition', ins, outs,
pack_first_input=(1 if pack else 0)
)
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i + 1], d)
x.append(d)
# Randomly generate length tensor as well
elements = np.random.randint(2, 10)
lengths = []
total_length = 0
for _ in range(elements - 1):
lengths.append(np.random.randint(main_dims[0] - total_length))
total_length += lengths[-1]
lengths.append(main_dims[0] - total_length)
workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
if __name__ == "__main__":
import unittest
unittest.main()
|
jupytext/kernels.py | st--/jupytext | 5,378 | 2038 | """Find kernel specifications for a given language"""
import os
import sys
from .languages import same_language
from .reraise import reraise
try:
# I prefer not to take a dependency on jupyter_client
from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec
except ImportError as err:
find_kernel_specs = reraise(err)
get_kernel_spec = reraise(err)
def set_kernelspec_from_language(notebook):
"""Set the kernel specification based on the 'main_language' metadata"""
language = notebook.metadata.get("jupytext", {}).get("main_language")
if "kernelspec" not in notebook.metadata and language:
try:
kernelspec = kernelspec_from_language(language)
except ValueError:
return
notebook.metadata["kernelspec"] = kernelspec
notebook.metadata.get("jupytext", {}).pop("main_language")
def kernelspec_from_language(language):
"""Return the python kernel that matches the current env, or the first kernel that matches the given language"""
if language == "python":
# Return the kernel that matches the current Python executable
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
cmd = kernel_specs.argv[0]
if (
kernel_specs.language == "python"
and os.path.isfile(cmd)
and os.path.samefile(cmd, sys.executable)
):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError(
"No kernel found that matches the current python executable {}\n".format(
sys.executable
)
+ "Install one with 'python -m ipykernel install --name kernel_name [--user]'"
)
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
if same_language(kernel_specs.language, language):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError("No kernel found for the language {}".format(language))
|
tests/test_base_table.py | stjordanis/datar | 110 | 2057 | <filename>tests/test_base_table.py<gh_stars>100-1000
import pytest
from datar import stats
from datar.base import *
from datar import f
from datar.datasets import warpbreaks, state_division, state_region, airquality
from .conftest import assert_iterable_equal
def test_table():
# https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table
z = stats.rpois(100, 5)
x = table(z)
assert sum(x.values.flatten()) == 100
#-----------------
with data_context(warpbreaks) as _:
tab = table(f.wool, f.tension)
assert tab.columns.tolist() == ['H', 'L', 'M']
assert tab.index.tolist() == ['A', 'B']
assert_iterable_equal(tab.values.flatten(), [9] * 6)
tab = table(warpbreaks.loc[:, ['wool', 'tension']])
assert tab.columns.tolist() == ['H', 'L', 'M']
assert tab.index.tolist() == ['A', 'B']
assert_iterable_equal(tab.values.flatten(), [9] * 6)
#-----------------
tab = table(state_division, state_region)
assert tab.loc['New England', 'Northeast'] == 6
#-----------------
with data_context(airquality) as _:
qt = stats.quantile(f.Temp)
ct = cut(f.Temp, qt)
tab = table(ct, f.Month)
assert tab.iloc[0,0] == 24
#-----------------
a = letters[:3]
tab = table(a, sample(a))
assert sum(tab.values.flatten()) == 3
#-----------------
tab = table(a, sample(a), dnn=['x', 'y'])
assert tab.index.name == 'x'
assert tab.columns.name == 'y'
#-----------------
a = c(NA, Inf, (1.0/(i+1) for i in range(3)))
a = a * 10
# tab = table(a)
# assert_iterable_equal(tab.values.flatten(), [10] * 4)
tab = table(a, exclude=None)
assert_iterable_equal(tab.values.flatten(), [10] * 5)
#------------------
b = as_factor(rep(c("A","B","C"), 10))
tab = table(b)
assert tab.shape == (1, 3)
assert_iterable_equal(tab.values.flatten(), [10] * 3)
tab = table(b, exclude="B")
assert tab.shape == (1, 2)
assert_iterable_equal(tab.values.flatten(), [10] * 2)
assert 'B' not in tab.columns
#-------------------
d = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E"))
tab = table(d, exclude="B", dnn=['x'])
assert_iterable_equal(tab.columns.to_list(), ["A", "C", "D", "E"])
assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0])
d2 = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E"))
tab = table(d, d2, exclude="B")
assert tab.shape == (4, 4)
tab = table("abc", "cba", dnn='x')
assert tab.shape == (3,3)
assert sum(tab.values.flatten()) == 3
with data_context(airquality) as _:
tab = table(f.Ozone, f.Solar_R, exclude=None)
assert '<NA>' in tab.columns
assert '<NA>' in tab.index
def test_table_error():
from datar.datasets import iris, warpbreaks
with pytest.raises(ValueError):
table(iris)
with pytest.raises(ValueError):
table(warpbreaks, iris)
with pytest.raises(ValueError):
table(warpbreaks.wool, iris)
with pytest.raises(ValueError):
table(iris.iloc[:, []])
with pytest.raises(ValueError):
table(iris.iloc[:, [1,2]], iris)
with pytest.raises(ValueError):
table(iris.iloc[:, [1]], iris, iris)
with pytest.raises(ValueError):
table(iris.iloc[:, [1]], iris.iloc[:, []])
|
src/sol/handle_metaplex.py | terra-dashboard/staketaxcsv | 140 | 2063 | from common.make_tx import make_swap_tx
from sol.handle_simple import handle_unknown_detect_transfers
def handle_metaplex(exporter, txinfo):
transfers_in, transfers_out, _ = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
else:
handle_unknown_detect_transfers(exporter, txinfo)
def is_nft_mint(txinfo):
log_instructions = txinfo.log_instructions
transfers_in, transfers_out, _ = txinfo.transfers_net
if "MintTo" in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 0:
return True
elif ("MintTo" in log_instructions
and len(transfers_out) == 1
and len(transfers_in) == 1
and transfers_in[0][0] == 1):
return True
else:
return False
def handle_nft_mint(exporter, txinfo):
transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
return
handle_unknown_detect_transfers(exporter, txinfo)
|
tianshou/utils/logger/tensorboard.py | Aceticia/tianshou | 4,714 | 2082 | <reponame>Aceticia/tianshou<filename>tianshou/utils/logger/tensorboard.py
import warnings
from typing import Any, Callable, Optional, Tuple
from tensorboard.backend.event_processing import event_accumulator
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger
class TensorboardLogger(BaseLogger):
"""A logger that relies on tensorboard SummaryWriter by default to visualize \
and log statistics.
:param SummaryWriter writer: the writer to log data.
:param int train_interval: the log interval in log_train_data(). Default to 1000.
:param int test_interval: the log interval in log_test_data(). Default to 1.
:param int update_interval: the log interval in log_update_data(). Default to 1000.
:param int save_interval: the save interval in save_data(). Default to 1 (save at
the end of each epoch).
"""
def __init__(
self,
writer: SummaryWriter,
train_interval: int = 1000,
test_interval: int = 1,
update_interval: int = 1000,
save_interval: int = 1,
) -> None:
super().__init__(train_interval, test_interval, update_interval)
self.save_interval = save_interval
self.last_save_step = -1
self.writer = writer
def write(self, step_type: str, step: int, data: LOG_DATA_TYPE) -> None:
for k, v in data.items():
self.writer.add_scalar(k, v, global_step=step)
def save_data(
self,
epoch: int,
env_step: int,
gradient_step: int,
save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None,
) -> None:
if save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval:
self.last_save_step = epoch
save_checkpoint_fn(epoch, env_step, gradient_step)
self.write("save/epoch", epoch, {"save/epoch": epoch})
self.write("save/env_step", env_step, {"save/env_step": env_step})
self.write(
"save/gradient_step", gradient_step,
{"save/gradient_step": gradient_step}
)
def restore_data(self) -> Tuple[int, int, int]:
ea = event_accumulator.EventAccumulator(self.writer.log_dir)
ea.Reload()
try: # epoch / gradient_step
epoch = ea.scalars.Items("save/epoch")[-1].step
self.last_save_step = self.last_log_test_step = epoch
gradient_step = ea.scalars.Items("save/gradient_step")[-1].step
self.last_log_update_step = gradient_step
except KeyError:
epoch, gradient_step = 0, 0
try: # offline trainer doesn't have env_step
env_step = ea.scalars.Items("save/env_step")[-1].step
self.last_log_train_step = env_step
except KeyError:
env_step = 0
return epoch, env_step, gradient_step
class BasicLogger(TensorboardLogger):
"""BasicLogger has changed its name to TensorboardLogger in #427.
This class is for compatibility.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
warnings.warn(
"Deprecated soon: BasicLogger has renamed to TensorboardLogger in #427."
)
super().__init__(*args, **kwargs)
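# Hypothetical usage sketch (not part of the original module): wire a
# SummaryWriter into the logger and record one scalar. The log directory
# name and the data keys below are illustrative only.
def _example_tensorboard_logger():
    writer = SummaryWriter("log/example_run")
    logger = TensorboardLogger(writer, train_interval=1, update_interval=1)
    logger.write("train/env_step", step=10, data={"train/reward": 1.5})
    writer.close()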
|
tempest/tests/lib/services/compute/test_security_group_default_rules_client.py | mail2nsrajesh/tempest | 254 | 2092 | <reponame>mail2nsrajesh/tempest<filename>tempest/tests/lib/services/compute/test_security_group_default_rules_client.py
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.compute import security_group_default_rules_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest):
FAKE_RULE = {
"from_port": 80,
"id": 1,
"ip_protocol": "TCP",
"ip_range": {
"cidr": "10.10.10.0/24"
},
"to_port": 80
}
def setUp(self):
super(TestSecurityGroupDefaultRulesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = (security_group_default_rules_client.
SecurityGroupDefaultRulesClient(fake_auth, 'compute',
'regionOne'))
def _test_list_security_group_default_rules(self, bytes_body=False):
self.check_service_client_function(
self.client.list_security_group_default_rules,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group_default_rules": [self.FAKE_RULE]},
to_utf=bytes_body)
def test_list_security_group_default_rules_with_str_body(self):
self._test_list_security_group_default_rules()
def test_list_security_group_default_rules_with_bytes_body(self):
self._test_list_security_group_default_rules(bytes_body=True)
def _test_show_security_group_default_rule(self, bytes_body=False):
self.check_service_client_function(
self.client.show_security_group_default_rule,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group_default_rule": self.FAKE_RULE},
to_utf=bytes_body,
security_group_default_rule_id=1)
def test_show_security_group_default_rule_with_str_body(self):
self._test_show_security_group_default_rule()
def test_show_security_group_default_rule_with_bytes_body(self):
self._test_show_security_group_default_rule(bytes_body=True)
def _test_create_security_default_group_rule(self, bytes_body=False):
request_body = {
"to_port": 80,
"from_port": 80,
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
}
self.check_service_client_function(
self.client.create_security_default_group_rule,
'tempest.lib.common.rest_client.RestClient.post',
{"security_group_default_rule": self.FAKE_RULE},
to_utf=bytes_body, **request_body)
def test_create_security_default_group_rule_with_str_body(self):
self._test_create_security_default_group_rule()
def test_create_security_default_group_rule_with_bytes_body(self):
self._test_create_security_default_group_rule(bytes_body=True)
def test_delete_security_group_default_rule(self):
self.check_service_client_function(
self.client.delete_security_group_default_rule,
'tempest.lib.common.rest_client.RestClient.delete',
{}, status=204, security_group_default_rule_id=1)
|
demo/cnn_predict.py | huynhtnhut97/keras-video-classifier | 108 | 2101 | import numpy as np
from keras import backend as K
import os
import sys
K.set_image_dim_ordering('tf')
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
data_dir_path = patch_path('very_large_data')
model_dir_path = patch_path('models/UCF-101')
from keras_video_classifier.library.convolutional import CnnVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
config_file_path = CnnVideoClassifier.get_config_file_path(model_dir_path)
weight_file_path = CnnVideoClassifier.get_weight_file_path(model_dir_path)
np.random.seed(42)
load_ucf(data_dir_path)
predictor = CnnVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()])
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path)
print('predicted: ' + predicted_label + ' actual: ' + label)
if __name__ == '__main__':
main() |
pmdarima/preprocessing/endog/boxcox.py | tuomijal/pmdarima | 736 | 2103 | <reponame>tuomijal/pmdarima<gh_stars>100-1000
# -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
import warnings
from ...compat import check_is_fitted, pmdarima as pm_compat
from .base import BaseEndogTransformer
__all__ = ['BoxCoxEndogTransformer']
class BoxCoxEndogTransformer(BaseEndogTransformer):
r"""Apply the Box-Cox transformation to an endogenous array
The Box-Cox transformation is applied to non-normal data to coerce it more
towards a normal distribution. It's specified as::
(((y + lam2) ** lam1) - 1) / lam1, if lmbda != 0, else
log(y + lam2)
Parameters
----------
lmbda : float or None, optional (default=None)
The lambda value for the Box-Cox transformation, if known. If not
specified, it will be estimated via MLE.
lmbda2 : float, optional (default=0.)
The value to add to ``y`` to make it non-negative. If, after adding
``lmbda2``, there are still negative values, a ValueError will be
raised.
neg_action : str, optional (default="raise")
How to respond if any values in ``y <= 0`` after adding ``lmbda2``.
One of ('raise', 'warn', 'ignore'). If anything other than 'raise',
values <= 0 will be truncated to the value of ``floor``.
floor : float, optional (default=1e-16)
A positive value that truncate values to if there are values in ``y``
that are zero or negative and ``neg_action`` is not 'raise'. Note that
if values are truncated, invertibility will not be preserved, and the
transformed array may not be perfectly inverse-transformed.
"""
def __init__(self, lmbda=None, lmbda2=0, neg_action="raise", floor=1e-16):
self.lmbda = lmbda
self.lmbda2 = lmbda2
self.neg_action = neg_action
self.floor = floor
def fit(self, y, X=None, **kwargs): # TODO: kwargs go away
"""Fit the transformer
Learns the value of ``lmbda``, if not specified in the constructor.
If defined in the constructor, is not re-learned.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
"""
lam1 = self.lmbda
lam2 = self.lmbda2
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
if lam2 < 0:
raise ValueError("lmbda2 must be a non-negative scalar value")
if lam1 is None:
y, _ = self._check_y_X(y, X)
_, lam1 = stats.boxcox(y + lam2, lmbda=None, alpha=None)
self.lam1_ = lam1
self.lam2_ = lam2
return self
def transform(self, y, X=None, **kwargs):
"""Transform the new array
Apply the Box-Cox transformation to the array after learning the
lambda parameter.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y_transform : array-like or None
The Box-Cox transformed y array
X : array-like or None
The X array
"""
check_is_fitted(self, "lam1_")
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_X(y, X)
y += lam2
neg_mask = y <= 0.
if neg_mask.any():
action = self.neg_action
msg = "Negative or zero values present in y"
if action == "raise":
raise ValueError(msg)
elif action == "warn":
warnings.warn(msg, UserWarning)
y[neg_mask] = self.floor
if lam1 == 0:
return np.log(y), exog
return (y ** lam1 - 1) / lam1, exog
def inverse_transform(self, y, X=None, **kwargs): # TODO: kwargs go away
"""Inverse transform a transformed array
Inverse the Box-Cox transformation on the transformed array. Note that
if truncation happened in the ``transform`` method, invertibility will
not be preserved, and the transformed array may not be perfectly
inverse-transformed.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
X : array-like or None
The inverse-transformed X array
"""
check_is_fitted(self, "lam1_")
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_X(y, X)
if lam1 == 0:
return np.exp(y) - lam2, exog
numer = y * lam1 # remove denominator
numer += 1. # add 1 back to it
de_exp = numer ** (1. / lam1) # de-exponentiate
return de_exp - lam2, exog
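# Hypothetical round-trip sketch (not part of the original module): fit the
# transformer on a small positive series, transform it, and invert the
# transform. The data values are illustrative only.
def _example_boxcox_roundtrip():
    y = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    transformer = BoxCoxEndogTransformer(lmbda2=1e-6).fit(y)
    y_t, _ = transformer.transform(y)
    y_back, _ = transformer.inverse_transform(y_t)
    # the inverse transform recovers the original series (up to float error)
    np.testing.assert_array_almost_equal(y, y_back)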
|
neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py | congnt95/neutron | 1,080 | 2131 | <gh_stars>1000+
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa
from neutron.db import rbac_db_models
"""rbac_qos_policy
Revision ID: c6c112992c9
Revises: e3278ee65050
Create Date: 2015-11-25 18:45:03.831359
"""
# revision identifiers, used by Alembic.
revision = 'c6c112992c9'
down_revision = 'e3278ee65050'
depends_on = ('15e43b934f81',)
qos_rbacs = sa.Table(
'qospolicyrbacs', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=255),
nullable=True),
sa.Column('target_tenant', sa.String(length=255),
nullable=False),
sa.Column('action', sa.String(length=255), nullable=False),
sa.Column('object_id', sa.String(length=36), nullable=False))
# A simple model of the qos_policies table with only the fields needed for
# the migration.
qos_policy = sa.Table('qos_policies', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id',
sa.String(length=255)),
sa.Column('shared', sa.Boolean(), nullable=False))
def upgrade():
op.bulk_insert(qos_rbacs, get_values())
op.drop_column('qos_policies', 'shared')
def get_values():
session = sa.orm.Session(bind=op.get_bind())
values = []
for row in session.query(qos_policy).filter(qos_policy.c.shared).all():
values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0],
'tenant_id': row[1], 'target_tenant': '*',
'action': rbac_db_models.ACCESS_SHARED})
session.commit()
return values
|
boto3_type_annotations/boto3_type_annotations/guardduty/client.py | cowboygneox/boto3_type_annotations | 119 | 2147 | <filename>boto3_type_annotations/boto3_type_annotations/guardduty/client.py
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def accept_invitation(self, DetectorId: str, InvitationId: str, MasterId: str) -> Dict:
pass
def archive_findings(self, DetectorId: str, FindingIds: List) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def create_detector(self, Enable: bool, ClientToken: str = None, FindingPublishingFrequency: str = None) -> Dict:
pass
def create_filter(self, DetectorId: str, FindingCriteria: Dict, Name: str, Action: str = None, ClientToken: str = None, Description: str = None, Rank: int = None) -> Dict:
pass
def create_ip_set(self, Activate: bool, DetectorId: str, Format: str, Location: str, Name: str, ClientToken: str = None) -> Dict:
pass
def create_members(self, AccountDetails: List, DetectorId: str) -> Dict:
pass
def create_sample_findings(self, DetectorId: str, FindingTypes: List = None) -> Dict:
pass
def create_threat_intel_set(self, Activate: bool, DetectorId: str, Format: str, Location: str, Name: str, ClientToken: str = None) -> Dict:
pass
def decline_invitations(self, AccountIds: List) -> Dict:
pass
def delete_detector(self, DetectorId: str) -> Dict:
pass
def delete_filter(self, DetectorId: str, FilterName: str) -> Dict:
pass
def delete_invitations(self, AccountIds: List) -> Dict:
pass
def delete_ip_set(self, DetectorId: str, IpSetId: str) -> Dict:
pass
def delete_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def delete_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str) -> Dict:
pass
def disassociate_from_master_account(self, DetectorId: str) -> Dict:
pass
def disassociate_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_detector(self, DetectorId: str) -> Dict:
pass
def get_filter(self, DetectorId: str, FilterName: str) -> Dict:
pass
def get_findings(self, DetectorId: str, FindingIds: List, SortCriteria: Dict = None) -> Dict:
pass
def get_findings_statistics(self, DetectorId: str, FindingStatisticTypes: List, FindingCriteria: Dict = None) -> Dict:
pass
def get_invitations_count(self) -> Dict:
pass
def get_ip_set(self, DetectorId: str, IpSetId: str) -> Dict:
pass
def get_master_account(self, DetectorId: str) -> Dict:
pass
def get_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def invite_members(self, AccountIds: List, DetectorId: str, DisableEmailNotification: bool = None, Message: str = None) -> Dict:
pass
def list_detectors(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_filters(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_findings(self, DetectorId: str, FindingCriteria: Dict = None, MaxResults: int = None, NextToken: str = None, SortCriteria: Dict = None) -> Dict:
pass
def list_invitations(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_ip_sets(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_members(self, DetectorId: str, MaxResults: int = None, NextToken: str = None, OnlyAssociated: str = None) -> Dict:
pass
def list_threat_intel_sets(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def start_monitoring_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def stop_monitoring_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def unarchive_findings(self, DetectorId: str, FindingIds: List) -> Dict:
pass
def update_detector(self, DetectorId: str, Enable: bool = None, FindingPublishingFrequency: str = None) -> Dict:
pass
def update_filter(self, DetectorId: str, FilterName: str, Action: str = None, Description: str = None, FindingCriteria: Dict = None, Rank: int = None) -> Dict:
pass
def update_findings_feedback(self, DetectorId: str, Feedback: str, FindingIds: List, Comments: str = None) -> Dict:
pass
def update_ip_set(self, DetectorId: str, IpSetId: str, Activate: bool = None, Location: str = None, Name: str = None) -> Dict:
pass
def update_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str, Activate: bool = None, Location: str = None, Name: str = None) -> Dict:
pass
|
alipay/aop/api/domain/MetroOdItem.py | antopen/alipay-sdk-python-all | 213 | 2153 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CloudbusUserInfo import CloudbusUserInfo
class MetroOdItem(object):
def __init__(self):
self._dest_geo = None
self._od = None
self._time = None
self._user_info = None
self._week_od = None
self._work_od = None
@property
def dest_geo(self):
return self._dest_geo
@dest_geo.setter
def dest_geo(self, value):
self._dest_geo = value
@property
def od(self):
return self._od
@od.setter
def od(self, value):
self._od = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
self._time = value
@property
def user_info(self):
return self._user_info
@user_info.setter
def user_info(self, value):
if isinstance(value, CloudbusUserInfo):
self._user_info = value
else:
self._user_info = CloudbusUserInfo.from_alipay_dict(value)
@property
def week_od(self):
return self._week_od
@week_od.setter
def week_od(self, value):
self._week_od = value
@property
def work_od(self):
return self._work_od
@work_od.setter
def work_od(self, value):
self._work_od = value
def to_alipay_dict(self):
params = dict()
if self.dest_geo:
if hasattr(self.dest_geo, 'to_alipay_dict'):
params['dest_geo'] = self.dest_geo.to_alipay_dict()
else:
params['dest_geo'] = self.dest_geo
if self.od:
if hasattr(self.od, 'to_alipay_dict'):
params['od'] = self.od.to_alipay_dict()
else:
params['od'] = self.od
if self.time:
if hasattr(self.time, 'to_alipay_dict'):
params['time'] = self.time.to_alipay_dict()
else:
params['time'] = self.time
if self.user_info:
if hasattr(self.user_info, 'to_alipay_dict'):
params['user_info'] = self.user_info.to_alipay_dict()
else:
params['user_info'] = self.user_info
if self.week_od:
if hasattr(self.week_od, 'to_alipay_dict'):
params['week_od'] = self.week_od.to_alipay_dict()
else:
params['week_od'] = self.week_od
if self.work_od:
if hasattr(self.work_od, 'to_alipay_dict'):
params['work_od'] = self.work_od.to_alipay_dict()
else:
params['work_od'] = self.work_od
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MetroOdItem()
if 'dest_geo' in d:
o.dest_geo = d['dest_geo']
if 'od' in d:
o.od = d['od']
if 'time' in d:
o.time = d['time']
if 'user_info' in d:
o.user_info = d['user_info']
if 'week_od' in d:
o.week_od = d['week_od']
if 'work_od' in d:
o.work_od = d['work_od']
return o
|
qiskit/ignis/mitigation/measurement/filters.py | paulineollitrault/qiskit-ignis | 182 | 2157 | <reponame>paulineollitrault/qiskit-ignis<gh_stars>100-1000
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=cell-var-from-loop,invalid-name
"""
Measurement correction filters.
"""
from typing import List, Union
from copy import deepcopy
from scipy.optimize import minimize
import scipy.linalg as la
import numpy as np
import qiskit
from qiskit import QiskitError
from qiskit.tools import parallel_map
from qiskit.ignis.verification.tomography import count_keys
class MeasurementFilter():
"""
Measurement error mitigation filter.
Produced from a measurement calibration fitter and can be applied
to data.
"""
def __init__(self,
cal_matrix: np.matrix,
state_labels: list):
"""
Initialize a measurement error mitigation filter using the cal_matrix
from a measurement calibration fitter.
Args:
cal_matrix: the calibration matrix for applying the correction
state_labels: the states for the ordering of the cal matrix
"""
self._cal_matrix = cal_matrix
self._state_labels = state_labels
@property
def cal_matrix(self):
"""Return cal_matrix."""
return self._cal_matrix
@property
def state_labels(self):
"""return the state label ordering of the cal matrix"""
return self._state_labels
@state_labels.setter
def state_labels(self, new_state_labels):
"""set the state label ordering of the cal matrix"""
self._state_labels = new_state_labels
@cal_matrix.setter
def cal_matrix(self, new_cal_matrix):
"""Set cal_matrix."""
self._cal_matrix = new_cal_matrix
def apply(self,
raw_data,
method='least_squares'):
"""Apply the calibration matrix to results.
Args:
raw_data (dict or list): The data to be corrected. Can be in a number of forms:
Form 1: a counts dictionary from results.get_counts
Form 2: a list of counts of `length==len(state_labels)`
Form 3: a list of counts of `length==M*len(state_labels)` where M is an
integer (e.g. for use with the tomography data)
Form 4: a qiskit Result
method (str): fitting method. If `None`, then least_squares is used.
``pseudo_inverse``: direct inversion of the A matrix
``least_squares``: constrained to have physical probabilities
Returns:
dict or list: The corrected data in the same form as `raw_data`
Raises:
QiskitError: if `raw_data` is not an integer multiple
of the number of calibrated states.
"""
# check forms of raw_data
if isinstance(raw_data, dict):
# counts dictionary
for data_label in raw_data.keys():
if data_label not in self._state_labels:
raise QiskitError("Unexpected state label '" + data_label +
"', verify the fitter's state labels "
"correspond to the input data")
data_format = 0
# convert to form2
raw_data2 = [np.zeros(len(self._state_labels), dtype=float)]
for stateidx, state in enumerate(self._state_labels):
raw_data2[0][stateidx] = raw_data.get(state, 0)
elif isinstance(raw_data, list):
size_ratio = len(raw_data)/len(self._state_labels)
if len(raw_data) == len(self._state_labels):
data_format = 1
raw_data2 = [raw_data]
elif int(size_ratio) == size_ratio:
data_format = 2
size_ratio = int(size_ratio)
# make the list into chunks the size of state_labels for easier
# processing
raw_data2 = np.zeros([size_ratio, len(self._state_labels)])
for i in range(size_ratio):
raw_data2[i][:] = raw_data[
i * len(self._state_labels):(i + 1)*len(
self._state_labels)]
else:
raise QiskitError("Data list is not an integer multiple "
"of the number of calibrated states")
elif isinstance(raw_data, qiskit.result.result.Result):
# extract out all the counts, re-call the function with the
# counts and push back into the new result
new_result = deepcopy(raw_data)
new_counts_list = parallel_map(
self._apply_correction,
[resultidx for resultidx, _ in enumerate(raw_data.results)],
task_args=(raw_data, method))
for resultidx, new_counts in new_counts_list:
new_result.results[resultidx].data.counts = new_counts
return new_result
else:
raise QiskitError("Unrecognized type for raw_data.")
if method == 'pseudo_inverse':
pinv_cal_mat = la.pinv(self._cal_matrix)
# Apply the correction
for data_idx, _ in enumerate(raw_data2):
if method == 'pseudo_inverse':
raw_data2[data_idx] = np.dot(
pinv_cal_mat, raw_data2[data_idx])
elif method == 'least_squares':
nshots = sum(raw_data2[data_idx])
def fun(x):
return sum(
(raw_data2[data_idx] - np.dot(self._cal_matrix, x))**2)
x0 = np.random.rand(len(self._state_labels))
x0 = x0 / sum(x0)
cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
bnds = tuple((0, nshots) for x in x0)
res = minimize(fun, x0, method='SLSQP',
constraints=cons, bounds=bnds, tol=1e-6)
raw_data2[data_idx] = res.x
else:
raise QiskitError("Unrecognized method.")
if data_format == 2:
# flatten back out the list
raw_data2 = raw_data2.flatten()
elif data_format == 0:
# convert back into a counts dictionary
new_count_dict = {}
for stateidx, state in enumerate(self._state_labels):
if raw_data2[0][stateidx] != 0:
new_count_dict[state] = raw_data2[0][stateidx]
raw_data2 = new_count_dict
else:
# TODO: should probably change to:
# raw_data2 = raw_data2[0].tolist()
raw_data2 = raw_data2[0]
return raw_data2
def _apply_correction(self, resultidx, raw_data, method):
"""Wrapper to call apply with a counts dictionary."""
new_counts = self.apply(
raw_data.get_counts(resultidx), method=method)
return resultidx, new_counts
class TensoredFilter():
"""
Tensored measurement error mitigation filter.
Produced from a tensored measurement calibration fitter and can be applied
to data.
"""
def __init__(self,
cal_matrices: np.matrix,
substate_labels_list: list,
mit_pattern: list):
"""
Initialize a tensored measurement error mitigation filter using
the cal_matrices from a tensored measurement calibration fitter.
    A simple usage of this class is explained [here]
(https://qiskit.org/documentation/tutorials/noise/3_measurement_error_mitigation.html).
Args:
cal_matrices: the calibration matrices for applying the correction.
substate_labels_list: for each calibration matrix
a list of the states (as strings, states in the subspace)
mit_pattern: for each calibration matrix
a list of the logical qubit indices (as int, states in the subspace)
"""
self._cal_matrices = cal_matrices
self._qubit_list_sizes = []
self._indices_list = []
self._substate_labels_list = []
self.substate_labels_list = substate_labels_list
self._mit_pattern = mit_pattern
@property
def cal_matrices(self):
"""Return cal_matrices."""
return self._cal_matrices
@cal_matrices.setter
def cal_matrices(self, new_cal_matrices):
"""Set cal_matrices."""
self._cal_matrices = deepcopy(new_cal_matrices)
@property
def substate_labels_list(self):
"""Return _substate_labels_list"""
return self._substate_labels_list
@substate_labels_list.setter
def substate_labels_list(self, new_substate_labels_list):
"""Return _substate_labels_list"""
self._substate_labels_list = new_substate_labels_list
# get the number of qubits in each subspace
self._qubit_list_sizes = []
for _, substate_label_list in enumerate(self._substate_labels_list):
self._qubit_list_sizes.append(
int(np.log2(len(substate_label_list))))
# get the indices in the calibration matrix
self._indices_list = []
for _, sub_labels in enumerate(self._substate_labels_list):
self._indices_list.append(
{lab: ind for ind, lab in enumerate(sub_labels)})
@property
def qubit_list_sizes(self):
"""Return _qubit_list_sizes."""
return self._qubit_list_sizes
@property
def nqubits(self):
"""Return the number of qubits. See also MeasurementFilter.apply() """
return sum(self._qubit_list_sizes)
def apply(self,
raw_data: Union[qiskit.result.result.Result, dict],
method: str = 'least_squares',
meas_layout: List[int] = None):
"""
Apply the calibration matrices to results.
Args:
raw_data (dict or Result): The data to be corrected. Can be in one of two forms:
* A counts dictionary from results.get_counts
* A Qiskit Result
method (str): fitting method. The following methods are supported:
* 'pseudo_inverse': direct inversion of the cal matrices.
Mitigated counts can contain negative values
and the sum of counts would not equal to the shots.
Mitigation is conducted qubit wise:
For each qubit, mitigate the whole counts using the calibration matrices
which affect the corresponding qubit.
For example, assume we are mitigating the 3rd bit of the 4-bit counts
                using a `2 x 2` calibration matrix `A_3`.
When mitigating the count of '0110' in this step,
the following formula is applied:
`count['0110'] = A_3^{-1}[1, 0]*count['0100'] + A_3^{-1}[1, 1]*count['0110']`.
The total time complexity of this method is `O(m2^{n + t})`,
where `n` is the size of calibrated qubits,
`m` is the number of sets in `mit_pattern`,
and `t` is the size of largest set of mit_pattern.
If the `mit_pattern` is shaped like `[[0], [1], [2], ..., [n-1]]`,
which corresponds to the tensor product noise model without cross-talk,
then the time complexity would be `O(n2^n)`.
If the `mit_pattern` is shaped like `[[0, 1, 2, ..., n-1]]`,
which exactly corresponds to the complete error mitigation,
then the time complexity would be `O(2^(n+n)) = O(4^n)`.
* 'least_squares': constrained to have physical probabilities.
Instead of directly applying inverse calibration matrices,
this method solve a constrained optimization problem to find
the closest probability vector to the result from 'pseudo_inverse' method.
Sequential least square quadratic programming (SLSQP) is used
in the internal process.
Every updating step in SLSQP takes `O(m2^{n+t})` time.
Since this method is using the SLSQP optimization over
                the vector with length `2^n`, the mitigation for 8 bit counts
with the `mit_pattern = [[0], [1], [2], ..., [n-1]]` would
take 10 seconds or more.
* If `None`, 'least_squares' is used.
meas_layout (list of int): the mapping from classical registers to qubits
* If you measure qubit `2` to clbit `0`, `0` to `1`, and `1` to `2`,
the list becomes `[2, 0, 1]`
* If `None`, flatten(mit_pattern) is used.
Returns:
dict or Result: The corrected data in the same form as raw_data
Raises:
QiskitError: if raw_data is not in a one of the defined forms.
"""
all_states = count_keys(self.nqubits)
num_of_states = 2**self.nqubits
if meas_layout is None:
meas_layout = []
for qubits in self._mit_pattern:
meas_layout += qubits
# check forms of raw_data
if isinstance(raw_data, dict):
# counts dictionary
# convert to list
raw_data2 = [np.zeros(num_of_states, dtype=float)]
for state, count in raw_data.items():
stateidx = int(state, 2)
raw_data2[0][stateidx] = count
elif isinstance(raw_data, qiskit.result.result.Result):
# extract out all the counts, re-call the function with the
# counts and push back into the new result
new_result = deepcopy(raw_data)
new_counts_list = parallel_map(
self._apply_correction,
[resultidx for resultidx, _ in enumerate(raw_data.results)],
task_args=(raw_data, method, meas_layout))
for resultidx, new_counts in new_counts_list:
new_result.results[resultidx].data.counts = new_counts
return new_result
else:
raise QiskitError("Unrecognized type for raw_data.")
if method == 'pseudo_inverse':
pinv_cal_matrices = []
for cal_mat in self._cal_matrices:
pinv_cal_matrices.append(la.pinv(cal_mat))
meas_layout = meas_layout[::-1] # reverse endian
qubits_to_clbits = [-1 for _ in range(max(meas_layout) + 1)]
for i, qubit in enumerate(meas_layout):
qubits_to_clbits[qubit] = i
# Apply the correction
for data_idx, _ in enumerate(raw_data2):
if method == 'pseudo_inverse':
for pinv_cal_mat, pos_qubits, indices in zip(pinv_cal_matrices,
self._mit_pattern,
self._indices_list):
inv_mat_dot_x = np.zeros([num_of_states], dtype=float)
pos_clbits = [qubits_to_clbits[qubit] for qubit in pos_qubits]
for state_idx, state in enumerate(all_states):
first_index = self.compute_index_of_cal_mat(state, pos_clbits, indices)
for i in range(len(pinv_cal_mat)): # i is index of pinv_cal_mat
source_state = self.flip_state(state, i, pos_clbits)
second_index = self.compute_index_of_cal_mat(source_state,
pos_clbits,
indices)
inv_mat_dot_x[state_idx] += pinv_cal_mat[first_index, second_index]\
* raw_data2[data_idx][int(source_state, 2)]
raw_data2[data_idx] = inv_mat_dot_x
elif method == 'least_squares':
def fun(x):
mat_dot_x = deepcopy(x)
for cal_mat, pos_qubits, indices in zip(self._cal_matrices,
self._mit_pattern,
self._indices_list):
res_mat_dot_x = np.zeros([num_of_states], dtype=float)
pos_clbits = [qubits_to_clbits[qubit] for qubit in pos_qubits]
for state_idx, state in enumerate(all_states):
second_index = self.compute_index_of_cal_mat(state, pos_clbits, indices)
for i in range(len(cal_mat)):
target_state = self.flip_state(state, i, pos_clbits)
first_index =\
self.compute_index_of_cal_mat(target_state, pos_clbits, indices)
res_mat_dot_x[int(target_state, 2)]\
+= cal_mat[first_index, second_index] * mat_dot_x[state_idx]
mat_dot_x = res_mat_dot_x
return sum((raw_data2[data_idx] - mat_dot_x) ** 2)
x0 = np.random.rand(num_of_states)
x0 = x0 / sum(x0)
nshots = sum(raw_data2[data_idx])
cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
bnds = tuple((0, nshots) for x in x0)
res = minimize(fun, x0, method='SLSQP',
constraints=cons, bounds=bnds, tol=1e-6)
raw_data2[data_idx] = res.x
else:
raise QiskitError("Unrecognized method.")
# convert back into a counts dictionary
new_count_dict = {}
for state_idx, state in enumerate(all_states):
if raw_data2[0][state_idx] != 0:
new_count_dict[state] = raw_data2[0][state_idx]
return new_count_dict
def flip_state(self, state: str, mat_index: int, flip_poses: List[int]) -> str:
"""Flip the state according to the chosen qubit positions"""
flip_poses = [pos for i, pos in enumerate(flip_poses) if (mat_index >> i) & 1]
flip_poses = sorted(flip_poses)
new_state = ""
pos = 0
for flip_pos in flip_poses:
new_state += state[pos:flip_pos]
new_state += str(int(state[flip_pos], 2) ^ 1) # flip the state
pos = flip_pos + 1
new_state += state[pos:]
return new_state
def compute_index_of_cal_mat(self, state: str, pos_qubits: List[int], indices: dict) -> int:
"""Return the index of (pseudo inverse) calibration matrix for the input quantum state"""
sub_state = ""
for pos in pos_qubits:
sub_state += state[pos]
return indices[sub_state]
def _apply_correction(self,
resultidx: int,
raw_data: qiskit.result.result.Result,
method: str,
meas_layout: List[int]):
"""Wrapper to call apply with a counts dictionary."""
new_counts = self.apply(
raw_data.get_counts(resultidx), method=method, meas_layout=meas_layout)
return resultidx, new_counts
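# Hypothetical usage sketch (not part of the original module): mitigate a
# single-qubit counts dictionary with a hand-written calibration matrix.
# The matrix entries and counts below are illustrative only.
def _example_measurement_filter():
    cal_matrix = np.array([[0.95, 0.08],
                           [0.05, 0.92]])
    meas_filter = MeasurementFilter(cal_matrix, state_labels=['0', '1'])
    raw_counts = {'0': 480, '1': 544}
    # least-squares mitigation keeps the probabilities physical
    mitigated = meas_filter.apply(raw_counts, method='least_squares')
    print(mitigated)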
|
dev/Code/Framework/AzFramework/CodeGen/AzEBusInline.py | jeikabu/lumberyard | 1,738 | 2165 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import os
from az_code_gen.base import *
from AzReflectionCpp import format_cpp_annotations
class AZEBusInline_Driver(TemplateDriver):
def apply_transformations(self, json_object):
format_cpp_annotations(json_object)
def render_templates(self, input_file, **template_kwargs):
input_file_name, input_file_ext = os.path.splitext(input_file)
self.render_template_to_file(
"AzEBusInline.tpl", template_kwargs, '{}.generated.inline'.format(input_file_name))
# Factory function - called from launcher
def create_drivers(env):
return [AZEBusInline_Driver(env)]
|
youtube_dl/extractor/turner.py | jonyg80/youtube-dl | 66,635 | 2171 | # coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
url_or_none,
)
class TurnerBaseIE(AdobePassIE):
_AKAMAI_SPE_TOKEN_CACHE = {}
def _extract_timestamp(self, video_data):
return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
if not token:
query = {
'path': secure_path,
}
if custom_tokenizer_query:
query.update(custom_tokenizer_query)
else:
query['videoId'] = content_id
if ap_data.get('auth_required'):
query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
auth = self._download_xml(
tokenizer_src, content_id, query=query)
error_msg = xpath_text(auth, 'error/msg')
if error_msg:
raise ExtractorError(error_msg, expected=True)
token = xpath_text(auth, 'token')
if not token:
return video_url
self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
return video_url + '?hdnea=' + token
def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
video_data = self._download_xml(
data_src, video_id,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=fatal)
if not video_data:
return {}
video_id = video_data.attrib['id']
title = xpath_text(video_data, 'headline', fatal=True)
content_id = xpath_text(video_data, 'contentId') or video_id
# rtmp_src = xpath_text(video_data, 'akamai/src')
# if rtmp_src:
# split_rtmp_src = rtmp_src.split(',')
# if len(split_rtmp_src) == 2:
# rtmp_src = split_rtmp_src[1]
# aifp = xpath_text(video_data, 'akamai/aifp', default='')
urls = []
formats = []
thumbnails = []
subtitles = {}
rex = re.compile(
r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
# Possible formats locations: files/file, files/groupFiles/files
# and maybe others
for video_file in video_data.findall('.//file'):
video_url = url_or_none(video_file.text.strip())
if not video_url:
continue
ext = determine_ext(video_url)
if video_url.startswith('/mp4:protected/'):
continue
# TODO Correct extraction for these files
# protected_path_data = path_data.get('protected')
# if not protected_path_data or not rtmp_src:
# continue
# protected_path = self._search_regex(
# r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
# auth = self._download_webpage(
# protected_path_data['tokenizer_src'], query={
# 'path': protected_path,
# 'videoId': content_id,
# 'aifp': aifp,
# })
# token = xpath_text(auth, 'token')
# if not token:
# continue
# video_url = rtmp_src + video_url + '?' + token
elif video_url.startswith('/secure/'):
secure_path_data = path_data.get('secure')
if not secure_path_data:
continue
video_url = self._add_akamai_spe_token(
secure_path_data['tokenizer_src'],
secure_path_data['media_src'] + video_url,
content_id, ap_data)
elif not re.match('https?://', video_url):
base_path_data = path_data.get(ext, path_data.get('default', {}))
media_src = base_path_data.get('media_src')
if not media_src:
continue
video_url = media_src + video_url
if video_url in urls:
continue
urls.append(video_url)
format_id = video_file.get('bitrate')
if ext in ('scc', 'srt', 'vtt'):
subtitles.setdefault('en', []).append({
'ext': ext,
'url': video_url,
})
elif ext == 'png':
thumbnails.append({
'id': format_id,
'url': video_url,
})
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
video_url, video_id, fatal=False))
elif re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/', video_url):
formats.extend(self._extract_akamai_formats(
video_url, video_id, {
'hds': path_data.get('f4m', {}).get('host'),
# nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com
# ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com
# ssl.cdn.turner.com
'http': 'pmd.cdn.turner.com',
}))
elif ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4',
m3u8_id=format_id or 'hls', fatal=False)
if '/secure/' in video_url and '?hdnea=' in video_url:
for f in m3u8_formats:
f['_seekable'] = False
formats.extend(m3u8_formats)
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(video_url, {'hdcore': '3.7.0'}),
video_id, f4m_id=format_id or 'hds', fatal=False))
else:
f = {
'format_id': format_id,
'url': video_url,
'ext': ext,
}
mobj = rex.search(video_url)
if mobj:
f.update({
'width': int(mobj.group('width')),
'height': int(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
elif isinstance(format_id, compat_str):
if format_id.isdigit():
f['tbr'] = int(format_id)
else:
mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
if mobj:
if mobj.group(1) == 'audio':
f.update({
'vcodec': 'none',
'ext': 'm4a',
})
else:
f['tbr'] = int(mobj.group(1))
formats.append(f)
self._sort_formats(formats)
for source in video_data.findall('closedCaptions/source'):
for track in source.findall('track'):
track_url = url_or_none(track.get('url'))
if not track_url or track_url.endswith('/big'):
continue
lang = track.get('lang') or track.get('label') or 'en'
subtitles.setdefault(lang, []).append({
'url': track_url,
'ext': {
'scc': 'scc',
'webvtt': 'vtt',
'smptett': 'tt',
}.get(source.get('format'))
})
thumbnails.extend({
'id': image.get('cut') or image.get('name'),
'url': image.text,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in video_data.findall('images/image'))
is_live = xpath_text(video_data, 'isLive') == 'true'
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'thumbnail': xpath_text(video_data, 'poster'),
'description': strip_or_none(xpath_text(video_data, 'description')),
'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
'timestamp': self._extract_timestamp(video_data),
'upload_date': xpath_attr(video_data, 'metas', 'version'),
'series': xpath_text(video_data, 'showTitle'),
'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
'is_live': is_live,
}
def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
streams_data = self._download_json(
'http://medium.ngtv.io/media/%s/tv' % media_id,
media_id)['media']['tv']
duration = None
chapters = []
formats = []
for supported_type in ('unprotected', 'bulkaes'):
stream_data = streams_data.get(supported_type, {})
m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
if not m3u8_url:
continue
if stream_data.get('playlistProtection') == 'spe':
m3u8_url = self._add_akamai_spe_token(
'http://token.ngtv.io/token/token_spe',
m3u8_url, media_id, ap_data or {}, tokenizer_query)
formats.extend(self._extract_m3u8_formats(
m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
duration = float_or_none(stream_data.get('totalRuntime'))
if not chapters:
for chapter in stream_data.get('contentSegments', []):
start_time = float_or_none(chapter.get('start'))
chapter_duration = float_or_none(chapter.get('duration'))
if start_time is None or chapter_duration is None:
continue
chapters.append({
'start_time': start_time,
'end_time': start_time + chapter_duration,
})
self._sort_formats(formats)
return {
'formats': formats,
'chapters': chapters,
'duration': duration,
}
|
tests/zpill.py | al3pht/cloud-custodian | 2,415 | 2173 | <gh_stars>1000+
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import fnmatch
from io import StringIO
import json
import os
import shutil
import zipfile
import re
from datetime import datetime, timedelta, tzinfo
from distutils.util import strtobool
import boto3
import placebo
from botocore.response import StreamingBody
from placebo import pill
from c7n.testing import CustodianTestCore
from .constants import ACCOUNT_ID
# Custodian Test Account. This is used only for testing.
# Access is available for community project maintainers.
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkey patches represent fixes on trunk of that repo that have not been
# released in an extant version; we carry them here. We can drop this when this
# issue is resolved:
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 <NAME>
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if "__class__" in target:
class_name = target.pop("__class__")
if "__module__" in obj:
obj.pop("__module__")
# Use getattr(module, class_name) for custom types if needed
if class_name == "datetime":
return datetime(tzinfo=utc, **target)
if class_name == "StreamingBody":
return StringIO(target["body"])
# Return unrecognized structures as-is
return obj
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {"__class__": obj.__class__.__name__}
try:
result["__module__"] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime):
result["year"] = obj.year
result["month"] = obj.month
result["day"] = obj.day
result["hour"] = obj.hour
result["minute"] = obj.minute
result["second"] = obj.second
result["microsecond"] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
result["body"] = obj.read()
obj._raw_stream = StringIO(result["body"])
obj._amount_read = 0
return result
if isinstance(obj, bytes):
return obj.decode('utf8')
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable")
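# Illustrative round-trip (a sketch, not part of the upstream module): the two
# helpers above are meant to be plugged into the json hooks, e.g.
#   payload = json.dumps({"when": datetime(2021, 1, 1)}, default=serialize)
#   restored = json.loads(payload, object_hook=deserialize)
#   # restored["when"] is a datetime again (re-created with tzinfo=utc)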
pill.FakeHttpResponse.raw = None
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
class BluePill(pill.Pill):
def playback(self):
super(BluePill, self).playback()
self._avail = self.get_available()
def get_available(self):
return {
os.path.join(self.data_path, n)
for n in fnmatch.filter(os.listdir(self.data_path), "*.json")
}
def get_next_file_path(self, service, operation):
fn, format = super(BluePill, self).get_next_file_path(service, operation)
# couple of double use cases
if fn in self._avail:
self._avail.remove(fn)
else:
print("\ndouble use %s\n" % fn)
return (fn, format)
def stop(self):
result = super(BluePill, self).stop()
if self._avail:
print("Unused json files \n %s" % ("\n".join(sorted(self._avail))))
return result
class ZippedPill(pill.Pill):
def __init__(self, path, prefix=None, debug=False):
super(ZippedPill, self).__init__(prefix, debug)
self.path = path
self._used = set()
self.archive = None
def playback(self):
self.archive = zipfile.ZipFile(self.path, "r")
self._files = set(self.archive.namelist())
return super(ZippedPill, self).playback()
def record(self):
self.archive = zipfile.ZipFile(self.path, "a", zipfile.ZIP_DEFLATED)
self._files = set()
files = {n for n in self.archive.namelist() if n.startswith(self.prefix)}
if not files:
return super(ZippedPill, self).record()
# We can't update files in a zip, so copy
self.archive.close()
os.rename(self.path, "%s.tmp" % self.path)
src = zipfile.ZipFile("%s.tmp" % self.path, "r")
self.archive = zipfile.ZipFile(self.path, "w", zipfile.ZIP_DEFLATED)
for n in src.namelist():
if n in files:
continue
self.archive.writestr(n, src.read(n))
os.remove("%s.tmp" % self.path)
return super(ZippedPill, self).record()
def stop(self):
super(ZippedPill, self).stop()
if self.archive:
self.archive.close()
def save_response(self, service, operation, response_data, http_response=200):
filepath = self.get_new_file_path(service, operation)
pill.LOG.debug("save_response: path=%s", filepath)
json_data = {"status_code": http_response, "data": response_data}
self.archive.writestr(
filepath,
json.dumps(json_data, indent=4, default=pill.serialize),
zipfile.ZIP_DEFLATED,
)
self._files.add(filepath)
def load_response(self, service, operation):
response_file = self.get_next_file_path(service, operation)
self._used.add(response_file)
pill.LOG.debug("load_responses: %s", response_file)
response_data = json.loads(
self.archive.read(response_file), object_hook=pill.deserialize
)
return (
pill.FakeHttpResponse(response_data["status_code"]), response_data["data"]
)
def get_new_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_new_file_path: %s", base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + "*")
for file_path in fnmatch.filter(self._files, glob_pattern):
file_name = os.path.basename(file_path)
m = self.filename_re.match(file_name)
if m:
i = int(m.group("index"))
if i > index:
index = i
index += 1
return os.path.join(self._data_path, "{0}_{1}.json".format(base_name, index))
def get_next_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_next_file_path: %s", base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(self._data_path, base_name + "_{0}.json".format(index))
fn = fn.replace('\\', '/')
if fn in self._files:
next_file = fn
self._index[base_name] += 1
self._files.add(fn)
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError("response file ({0}) not found".format(fn))
return fn
def attach(session, data_path, prefix=None, debug=False):
pill = ZippedPill(data_path, prefix=prefix, debug=debug)
pill.attach(session, prefix)
return pill
class RedPill(pill.Pill):
def datetime_converter(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
def save_response(self, service, operation, response_data,
http_response=200):
"""
Override to sanitize response metadata and account_ids
"""
# aws sso setups involve a short lived credential transfer
if service == "portal.sso":
return
if 'ResponseMetadata' in response_data:
response_data['ResponseMetadata'] = {}
response_data = json.dumps(response_data, default=serialize)
response_data = re.sub(r"\b\d{12}\b", ACCOUNT_ID, response_data) # noqa
response_data = json.loads(response_data, object_hook=deserialize)
super(RedPill, self).save_response(service, operation, response_data,
http_response)
class PillTest(CustodianTestCore):
archive_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "placebo_data.zip"
)
placebo_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "placebo"
)
output_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "output"
)
recording = False
def cleanUp(self):
self.pill = None
def record_flight_data(self, test_case, zdata=False, augment=False, region=None):
self.recording = True
test_dir = os.path.join(self.placebo_dir, test_case)
if not (zdata or augment):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.makedirs(test_dir)
session = boto3.Session(region_name=region)
default_region = session.region_name
if not zdata:
pill = RedPill()
pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, debug=True)
pill.record()
self.pill = pill
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
class FakeFactory:
def __call__(fake, region=None, assume=None):
new_session = None
                # slightly experimental for test recording: using cross-account
                # assume-role. Note this will record sts assume-role api call
                # credentials into the test data; they will go stale, but it's
                # best to modify them before committing. Disabled by default.
if 0 and (assume is not False and fake.assume_role):
client = session.client('sts')
creds = client.assume_role(
RoleArn=fake.assume_role,
RoleSessionName='CustodianTest')['Credentials']
new_session = boto3.Session(
aws_access_key_id=creds['AccessKeyId'],
aws_secret_access_key=creds['SecretAccessKey'],
aws_session_token=creds['SessionToken'],
region_name=region or fake.region or default_region)
elif region and region != default_region:
new_session = boto3.Session(region_name=region)
if new_session:
assert not zdata
new_pill = placebo.attach(new_session, test_dir, debug=True)
new_pill.record()
self.addCleanup(new_pill.stop)
return new_session
return session
return FakeFactory()
def replay_flight_data(self, test_case, zdata=False, region=None):
"""
The `region` argument is to allow functional tests to override the
default region. It is unused when replaying stored data.
"""
if strtobool(os.environ.get('C7N_FUNCTIONAL', 'no')):
self.recording = True
return lambda region=region, assume=None: boto3.Session(region_name=region)
if not zdata:
test_dir = os.path.join(self.placebo_dir, test_case)
if not os.path.exists(test_dir):
raise RuntimeError("Invalid Test Dir for flight data %s" % test_dir)
session = boto3.Session(region_name=region)
if not zdata:
pill = placebo.attach(session, test_dir)
# pill = BluePill()
# pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, False)
pill.playback()
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
return lambda region=None, assume=None: session
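    # Illustrative usage in a test case (a sketch; the flight-data name below is
    # hypothetical and does not ship with this file):
    #   session_factory = self.replay_flight_data("test_ec2_query_instances")
    #   session = session_factory()
    #   ec2 = session.client("ec2")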
|
tao_compiler/mlir/disc/tests/glob_op_test.bzl | JamesTheZ/BladeDISC | 328 | 2180 | # Test definitions for Lit, the LLVM test runner.
#
# This is reusing the LLVM Lit test runner in the interim until the new build
# rules are upstreamed.
# TODO(b/136126535): remove this custom rule.
"""Lit runner globbing test
"""
load("//tensorflow:tensorflow.bzl", "filegroup")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts")
# Default values used by the test runner.
_default_test_file_exts = ["mlir", ".pbtxt", ".td"]
_default_driver = "@llvm-project//mlir:run_lit.sh"
_default_size = "small"
_default_tags = []
# These are patterns which we should never match, for tests, subdirectories, or
# test input data files.
_ALWAYS_EXCLUDE = [
"**/LICENSE.txt",
"**/README.txt",
"**/lit.local.cfg",
# Exclude input files that have spaces in their names, since bazel
# cannot cope with such "targets" in the srcs list.
"**/* *",
"**/* */**",
]
def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties):
"""Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir.
Note that, due to Bazel's hermetic builds, lit only sees the tests that
are included in the `data` parameter, regardless of what other tests might
exist in the directory searched.
Args:
      name: str, the name of the test, including extension.
      test_file: [str], label(s) of the test file to run.
data: [str], the data input to the test.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
      features: [str], list of extra features to enable.
      exec_properties: a dictionary of properties to pass on.
"""
name_without_suffix = test_file[0].split('.')[0]
local_test_files = name + ".test_files"
filegroup(
name = local_test_files,
srcs = native.glob([
"data/" + name_without_suffix + "*.mlir",
]),
)
tf_cc_test(
name = name,
srcs = test_file,
size = size,
deps = [
"//tensorflow/compiler/mlir/disc/tests:mlir_feature_test",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
],
data = [":" + local_test_files] + data + [
"//tensorflow/compiler/mlir/disc:disc_compiler_main",
"//tensorflow/compiler/mlir:tf-mlir-translate",
"//tensorflow/compiler/mlir:tf-opt",
],
)
def glob_op_tests(
exclude = [],
test_file_exts = _default_test_file_exts,
default_size = _default_size,
size_override = {},
data = [],
per_test_extra_data = {},
default_tags = _default_tags,
tags_override = {},
driver = _default_driver,
features = [],
exec_properties = {}):
"""Creates all plausible Lit tests (and their inputs) under this directory.
Args:
exclude: [str], paths to exclude (for tests and inputs).
test_file_exts: [str], extensions for files that are tests.
default_size: str, the test size for targets not in "size_override".
size_override: {str: str}, sizes to use for specific tests.
data: [str], additional input data to the test.
per_test_extra_data: {str: [str]}, extra data to attach to a given file.
default_tags: [str], additional tags to attach to the test.
tags_override: {str: str}, tags to add to specific tests.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
exec_properties: a dictionary of properties to pass on.
"""
# Ignore some patterns by default for tests and input data.
exclude = _ALWAYS_EXCLUDE + exclude
tests = native.glob(
["*." + ext for ext in test_file_exts],
exclude = exclude,
)
# Run tests individually such that errors can be attributed to a specific
# failure.
for i in range(len(tests)):
curr_test = tests[i]
# Instantiate this test with updated parameters.
lit_test(
name = curr_test,
data = data + per_test_extra_data.get(curr_test, []),
size = size_override.get(curr_test, default_size),
tags = default_tags + tags_override.get(curr_test, []),
driver = driver,
features = features,
exec_properties = exec_properties,
)
def lit_test(
name,
data = [],
size = _default_size,
tags = _default_tags,
driver = _default_driver,
features = [],
exec_properties = {}):
"""Runs test files under lit.
Args:
name: str, the name of the test.
data: [str], labels that should be provided as data inputs.
size: str, the size of the test.
tags: [str], tags to attach to the test.
driver: str, label of the driver shell script.
Note: use of a custom driver is not currently supported
and specifying a default driver will abort the tests.
features: [str], list of extra features to enable.
"""
_run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
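# Illustrative BUILD usage (an assumed sketch; the load path and targets are
# placeholders, not taken from this repository's BUILD files):
#   load(":glob_op_test.bzl", "glob_op_tests")
#   glob_op_tests(
#       data = [":extra_test_data"],
#       default_size = "medium",
#   )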
|
src/transformers/modeling_tf_pytorch_utils.py | ari-holtzman/transformers | 5,129 | 2182 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
""" Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
            - transpose: boolean indicating whether TF2.0 and PyTorch weight matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
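# Illustrative conversion (a sketch assuming a BERT-style TF 2.0 variable name;
# not an exhaustive specification of the mapping):
#   convert_tf_weight_name_to_pt_weight_name(
#       "tf_bert/bert/encoder/layer_._0/attention/self/query/kernel:0")
#   -> ("bert.encoder.layer.0.attention.self.query.weight", True)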
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
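# Typical entry point from user code is the checkpoint loader above, e.g.
# (an assumed sketch; the path is a placeholder):
#   tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, "/path/to/pytorch_model.bin")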
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beggining
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weight (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
        # Find associated numpy array in the TF 2.0 weights map
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys)
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
|
examples/text_classification/yelp_reviews_polarity/train.py | liorshk/simpletransformers | 3,151 | 2186 | <reponame>liorshk/simpletransformers<gh_stars>1000+
import sys
import pandas as pd
from simpletransformers.classification import ClassificationModel
prefix = "data/"
train_df = pd.read_csv(prefix + "train.csv", header=None)
train_df.head()
eval_df = pd.read_csv(prefix + "test.csv", header=None)
eval_df.head()
train_df[0] = (train_df[0] == 2).astype(int)
eval_df[0] = (eval_df[0] == 2).astype(int)
train_df = pd.DataFrame(
{"text": train_df[1].replace(r"\n", " ", regex=True), "labels": train_df[0]}
)
print(train_df.head())
eval_df = pd.DataFrame(
{"text": eval_df[1].replace(r"\n", " ", regex=True), "labels": eval_df[0]}
)
print(eval_df.head())
model_type = sys.argv[1]
if model_type == "bert":
model_name = "bert-base-cased"
elif model_type == "roberta":
model_name = "roberta-base"
elif model_type == "distilbert":
model_name = "distilbert-base-cased"
elif model_type == "distilroberta":
model_type = "roberta"
model_name = "distilroberta-base"
elif model_type == "electra-base":
model_type = "electra"
model_name = "google/electra-base-discriminator"
elif model_type == "electra-small":
model_type = "electra"
model_name = "google/electra-small-discriminator"
elif model_type == "xlnet":
model_name = "xlnet-base-cased"
train_args = {
"reprocess_input_data": True,
"overwrite_output_dir": True,
"use_cached_eval_features": True,
"output_dir": f"outputs/{model_type}",
"best_model_dir": f"outputs/{model_type}/best_model",
"evaluate_during_training": True,
"max_seq_length": 128,
"num_train_epochs": 3,
"evaluate_during_training_steps": 1000,
"wandb_project": "Classification Model Comparison",
"wandb_kwargs": {"name": model_name},
"save_model_every_epoch": False,
"save_eval_checkpoints": False,
# "use_early_stopping": True,
# "early_stopping_metric": "mcc",
# "n_gpu": 2,
# "manual_seed": 4,
# "use_multiprocessing": False,
"train_batch_size": 128,
"eval_batch_size": 64,
# "config": {
# "output_hidden_states": True
# }
}
if model_type == "xlnet":
train_args["train_batch_size"] = 64
train_args["gradient_accumulation_steps"] = 2
# Create a ClassificationModel
model = ClassificationModel(model_type, model_name, args=train_args)
# Train the model
model.train_model(train_df, eval_df=eval_df)
# # # Evaluate the model
# result, model_outputs, wrong_predictions = model.eval_model(eval_df)
|
gluoncv/data/transforms/block.py | Kh4L/gluon-cv | 5,447 | 2192 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
# pylint: disable= missing-docstring
"Addtional image transforms."
import random
import math
import numpy as np
from mxnet import image, nd
from mxnet.gluon import Block
__all__ = ['RandomCrop', 'RandomErasing']
class RandomCrop(Block):
"""Randomly crop `src` with `size` (width, height).
Padding is optional.
Upsample result if `src` is smaller than `size`.
Parameters
----------
size : int or tuple of (W, H)
Size of the final output.
pad: int or tuple
if int, size of the zero-padding
if tuple, number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all axes.
interpolation : int
Interpolation method for resizing. By default uses bilinear
interpolation. See OpenCV's resize function for available choices.
Inputs:
- **data**: input tensor with (Hi x Wi x C) shape.
Outputs:
- **out**: output tensor with (size[0] x size[1] x C) or (size x size x C) shape.
"""
def __init__(self, size, pad=None, interpolation=2):
super(RandomCrop, self).__init__()
numeric_types = (float, int, np.generic)
if isinstance(size, numeric_types):
size = (size, size)
self._args = (size, interpolation)
self.pad = ((pad, pad), (pad, pad), (0, 0)) if isinstance(pad, int) else pad
def forward(self, x):
if self.pad:
return image.random_crop(nd.array(
np.pad(x.asnumpy(), self.pad, mode='constant', constant_values=0)), *self._args)[0]
else:
return image.random_crop(x, *self._args)[0]
class RandomErasing(Block):
"""Randomly erasing the area in `src` between `s_min` and `s_max` with `probability`.
`ratio` controls the ratio between width and height.
`mean` means the value in erasing area.
Parameters
----------
probability : float
Probability of erasing.
s_min : float
Min area to all area.
s_max : float
Max area to all area.
ratio : float
The ratio between width and height.
mean : int or tuple of (R, G, B)
The value in erasing area.
Inputs:
- **data**: input tensor with (Hi x Wi x C) shape.
Outputs:
- **out**: output tensor with (Hi x Wi x C) shape.
"""
def __init__(self, probability=0.5, s_min=0.02, s_max=0.4, ratio=0.3,
mean=(125.31, 122.96, 113.86)):
super(RandomErasing, self).__init__()
self.probability = probability
self.mean = mean
self.s_min = s_min
self.s_max = s_max
self.ratio = ratio
def forward(self, x):
if not isinstance(self.probability, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.s_min, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.s_max, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.ratio, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.mean, (int, tuple)):
raise TypeError('Got inappropriate size arg')
if random.uniform(0, 1) > self.probability:
return x
width, height, _ = x.shape
area = width * height
target_area = random.uniform(self.s_min, self.s_max) * area
aspect_ratio = random.uniform(self.ratio, 1/self.ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w < width and h < height:
x1 = random.randint(0, width - w)
y1 = random.randint(0, height - h)
x[x1:x1+w, y1:y1+h, 0] = self.mean[0]
x[x1:x1+w, y1:y1+h, 1] = self.mean[1]
x[x1:x1+w, y1:y1+h, 2] = self.mean[2]
return x
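# Illustrative usage (a sketch, not part of this module's examples):
#   from mxnet import nd
#   aug = RandomErasing(probability=1.0)
#   img = nd.zeros((224, 224, 3))  # HWC image, as these blocks expect
#   out = aug(img)                 # a random rectangle is filled with `mean`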
|
moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 966 | 2214 | # -*- coding: utf-8 -*-
"""Classes (Python) to compute the Bandit UCB (Upper Confidence Bound) arm allocation and choosing the arm to pull next.
See :mod:`moe.bandit.bandit_interface` for further details on bandit.
"""
import copy
from abc import abstractmethod
from moe.bandit.bandit_interface import BanditInterface
from moe.bandit.utils import get_winning_arm_names_from_payoff_arm_name_list, get_equal_arm_allocations
class UCBInterface(BanditInterface):
r"""Implementation of the constructor of UCB (Upper Confidence Bound) and method allocate_arms. The method get_ucb_payoff is implemented in subclass.
A class to encapsulate the computation of bandit UCB.
The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf
To inherit this class, a subclass needs to implement get_ucb_payoff
(see :func:`moe.bandit.ucb.ucb1.UCB1.get_ucb_payoff` for an example), everything else is already implemented.
See :mod:`moe.bandit.bandit_interface` docs for further details.
"""
def __init__(
self,
historical_info,
subtype=None,
):
"""Construct a UCB object.
:param historical_info: a dictionary of arms sampled
:type historical_info: dictionary of (str, SampleArm()) pairs (see :class:`moe.bandit.data_containers.SampleArm` for more details)
:param subtype: subtype of the UCB bandit algorithm (default: None)
:type subtype: str
"""
self._historical_info = copy.deepcopy(historical_info)
self._subtype = subtype
@staticmethod
def get_unsampled_arm_names(arms_sampled):
r"""Compute the set of unsampled arm names based on the given ``arms_sampled``..
Throws an exception when arms_sampled is empty.
:param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
:type arms_sampled: dictionary of (str, SampleArm()) pairs
:return: set of names of the unsampled arms
:rtype: frozenset(str)
:raise: ValueError when ``arms_sampled`` are empty.
"""
if not arms_sampled:
raise ValueError('arms_sampled is empty!')
unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.iteritems() if sampled_arm.total == 0]
return frozenset(unsampled_arm_name_list)
@abstractmethod
def get_ucb_payoff(self, sampled_arm, number_sampled):
r"""Compute the expected upper confidence bound payoff using the UCB subtype formula.
See definition in subclasses for details.
:param sampled_arm: a sampled arm
:type sampled_arm: :class:`moe.bandit.data_containers.SampleArm`
:param number_sampled: the overall number of pulls so far
:type number_sampled: int
:return: ucb payoff
:rtype: float64
:raise: ValueError when ``sampled_arm`` is empty.
"""
pass
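        # For reference, a UCB1-style subtype (see moe.bandit.ucb.ucb1) computes
        # something along these lines (an illustrative sketch, not the exact code):
        #   avg_payoff = sampled_arm.win / sampled_arm.total
        #   return avg_payoff + math.sqrt(2.0 * math.log(number_sampled) / sampled_arm.total)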
def allocate_arms(self):
r"""Compute the allocation to each arm given ``historical_info``, running bandit ``subtype`` endpoint.
Computes the allocation to each arm based on the given subtype, and, historical info.
Works with k-armed bandits (k >= 1).
The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf
If there is at least one unsampled arm, this method will choose to pull the unsampled arm
(randomly choose an unsampled arm if there are multiple unsampled arms).
If all arms are pulled at least once, this method will pull the optimal arm
(best expected upper confidence bound payoff).
See :func:`moe.bandit.ucb.ucb_interface.UCBInterface.get_ucb_payoff` for details on how to compute the expected upper confidence bound payoff (expected UCB payoff)
In case of a tie, the method will split the allocation among the optimal arms.
For example, if we have three arms (arm1, arm2, and arm3) with expected UCB payoff 0.5, 0.5, and 0.1 respectively.
We split the allocation between the optimal arms arm1 and arm2.
``{arm1: 0.5, arm2: 0.5, arm3: 0.0}``
:return: the dictionary of (arm, allocation) key-value pairs
:rtype: a dictionary of (str, float64) pairs
:raise: ValueError when ``sample_arms`` are empty.
"""
arms_sampled = self._historical_info.arms_sampled
if not arms_sampled:
raise ValueError('sample_arms are empty!')
return get_equal_arm_allocations(arms_sampled, self.get_winning_arm_names(arms_sampled))
def get_winning_arm_names(self, arms_sampled):
r"""Compute the set of winning arm names based on the given ``arms_sampled``..
Throws an exception when arms_sampled is empty.
:param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
:type arms_sampled: dictionary of (str, SampleArm()) pairs
:return: set of names of the winning arms
:rtype: frozenset(str)
:raise: ValueError when ``arms_sampled`` are empty.
"""
if not arms_sampled:
raise ValueError('arms_sampled is empty!')
# If there exists an unsampled arm, return the names of the unsampled arms
unsampled_arm_names = self.get_unsampled_arm_names(arms_sampled)
if unsampled_arm_names:
return unsampled_arm_names
number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
return get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)
|
forms/snippets/delete_watch.py | soheilv/python-samples | 255 | 2232 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START forms_delete_watch]
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import client, file, tools
SCOPES = "https://www.googleapis.com/auth/drive"
API_KEY = "<YOUR_API_KEY>"
DISCOVERY_DOC = f"https://forms.googleapis.com/$discovery/rest?version=v1beta&key={API_KEY}&labels=FORMS_BETA_TESTERS"
store = file.Storage('credentials.json')
creds = None
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
service = discovery.build('forms', 'v1beta', http=creds.authorize(
Http()), discoveryServiceUrl=DISCOVERY_DOC, static_discovery=False)
form_id = '<YOUR_FORM_ID>'
watch_id = '<YOUR_WATCH_ID>'
# Print JSON response after deleting a form watch
result = service.forms().watches().delete(formId=form_id, watchId=watch_id).execute()
print(result)
# [END forms_delete_watch]
|
corehq/apps/dump_reload/tests/test_sql_dump_load.py | andyasne/commcare-hq | 471 | 2252 | <gh_stars>100-1000
import inspect
import json
import uuid
from collections import Counter
from datetime import datetime
from io import StringIO
import mock
from django.contrib.admin.utils import NestedObjects
from django.db import transaction, IntegrityError
from django.db.models.signals import post_delete, post_save
from django.test import SimpleTestCase, TestCase
from nose.tools import nottest
from casexml.apps.case.mock import CaseFactory, CaseIndex, CaseStructure
from corehq.apps.commtrack.helpers import make_product
from corehq.apps.commtrack.tests.util import get_single_balance_block
from corehq.apps.domain.models import Domain
from corehq.apps.dump_reload.sql import SqlDataDumper, SqlDataLoader
from corehq.apps.dump_reload.sql.dump import (
get_model_iterator_builders_to_dump,
get_objects_to_dump,
)
from corehq.apps.dump_reload.sql.load import (
DefaultDictWithKey,
constraint_checks_deferred,
)
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.products.models import SQLProduct
from corehq.apps.zapier.consts import EventTypes
from corehq.apps.zapier.models import ZapierSubscription
from corehq.apps.zapier.signals.receivers import (
zapier_subscription_post_delete,
)
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.form_processor.models import (
CaseTransaction,
CommCareCaseIndexSQL,
CommCareCaseSQL,
LedgerTransaction,
LedgerValue,
XFormInstanceSQL,
)
from corehq.form_processor.tests.utils import (
FormProcessorTestUtils,
create_form_for_test,
sharded,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
)
class BaseDumpLoadTest(TestCase):
@classmethod
def setUpClass(cls):
post_delete.disconnect(zapier_subscription_post_delete, sender=ZapierSubscription)
super(BaseDumpLoadTest, cls).setUpClass()
cls.domain_name = uuid.uuid4().hex
cls.domain = Domain(name=cls.domain_name)
cls.domain.save()
cls.default_objects_counts = Counter({})
@classmethod
def tearDownClass(cls):
cls.domain.delete()
super(BaseDumpLoadTest, cls).tearDownClass()
post_delete.connect(zapier_subscription_post_delete, sender=ZapierSubscription)
def delete_sql_data(self):
delete_domain_sql_data_for_dump_load_test(self.domain_name)
def tearDown(self):
self.delete_sql_data()
super(BaseDumpLoadTest, self).tearDown()
def _dump_and_load(self, expected_dump_counts, load_filter=None, expected_load_counts=None, dumper_fn=None):
expected_load_counts = expected_load_counts or expected_dump_counts
expected_dump_counts.update(self.default_objects_counts)
models = list(expected_dump_counts)
self._check_signals_handle_raw(models)
output_stream = StringIO()
if dumper_fn:
dumper_fn(output_stream)
else:
SqlDataDumper(self.domain_name, [], []).dump(output_stream)
self.delete_sql_data()
# make sure that there's no data left in the DB
objects_remaining = list(get_objects_to_dump(self.domain_name, [], []))
object_classes = [obj.__class__.__name__ for obj in objects_remaining]
counts = Counter(object_classes)
self.assertEqual([], objects_remaining, 'Not all data deleted: {}'.format(counts))
# Dump
actual_model_counts, dump_lines = self._parse_dump_output(output_stream)
expected_model_counts = _normalize_object_counter(expected_dump_counts)
self.assertDictEqual(dict(expected_model_counts), dict(actual_model_counts))
# Load
loader = SqlDataLoader(object_filter=load_filter)
loaded_model_counts = loader.load_objects(dump_lines)
normalized_expected_loaded_counts = _normalize_object_counter(expected_load_counts, for_loaded=True)
self.assertDictEqual(dict(normalized_expected_loaded_counts), dict(loaded_model_counts))
self.assertEqual(sum(expected_load_counts.values()), sum(loaded_model_counts.values()))
return dump_lines
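        # The tests below drive this helper roughly as (illustrative only):
        #   self._dump_and_load(Counter({SQLProduct: 1}))
        # i.e. dump the domain's SQL data, wipe it, reload the dump, and compare
        # the per-model object counts.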
def _parse_dump_output(self, output_stream):
dump_output = output_stream.getvalue().split('\n')
dump_lines = [line.strip() for line in dump_output if line.strip()]
actual_model_counts = Counter([json.loads(line)['model'] for line in dump_lines])
return actual_model_counts, dump_lines
def _check_signals_handle_raw(self, models):
"""Ensure that any post_save signal handlers have been updated
to handle 'raw' calls."""
whitelist_receivers = [
'django_digest.models._post_save_persist_partial_digests'
]
for model in models:
for receiver in post_save._live_receivers(model):
receiver_path = receiver.__module__ + '.' + receiver.__name__
if receiver_path in whitelist_receivers:
continue
args = inspect.getargspec(receiver).args
message = 'Signal handler "{}" for model "{}" missing raw arg'.format(
receiver, model
)
self.assertIn('raw', args, message)
@nottest
def delete_domain_sql_data_for_dump_load_test(domain_name):
for model_class, builder in get_model_iterator_builders_to_dump(domain_name, [], []):
for iterator in builder.querysets():
with transaction.atomic(using=iterator.db), \
constraint_checks_deferred(iterator.db):
collector = NestedObjects(using=iterator.db)
collector.collect(iterator)
collector.delete()
assert [] == list(get_objects_to_dump(domain_name, [], [])), "Not all SQL objects deleted"
@sharded
class TestSQLDumpLoadShardedModels(BaseDumpLoadTest):
maxDiff = None
@classmethod
def setUpClass(cls):
super(TestSQLDumpLoadShardedModels, cls).setUpClass()
cls.factory = CaseFactory(domain=cls.domain_name)
cls.form_accessors = FormAccessors(cls.domain_name)
cls.case_accessors = CaseAccessors(cls.domain_name)
cls.product = make_product(cls.domain_name, 'A Product', 'prodcode_a')
cls.default_objects_counts.update({SQLProduct: 1})
@classmethod
def tearDownClass(cls):
FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain_name)
super(TestSQLDumpLoadShardedModels, cls).tearDownClass()
def test_dump_load_form(self):
expected_object_counts = Counter({
XFormInstanceSQL: 2,
BlobMeta: 2
})
pre_forms = [
create_form_for_test(self.domain_name),
create_form_for_test(self.domain_name)
]
self._dump_and_load(expected_object_counts)
form_ids = self.form_accessors.get_all_form_ids_in_domain('XFormInstance')
self.assertEqual(set(form_ids), set(form.form_id for form in pre_forms))
for pre_form in pre_forms:
post_form = self.form_accessors.get_form(pre_form.form_id)
self.assertDictEqual(pre_form.to_json(), post_form.to_json())
def test_sql_dump_load_case(self):
expected_object_counts = Counter({
XFormInstanceSQL: 2,
BlobMeta: 2,
CommCareCaseSQL: 2,
CaseTransaction: 3,
CommCareCaseIndexSQL: 1
})
pre_cases = self.factory.create_or_update_case(
CaseStructure(
attrs={'case_name': 'child', 'update': {'age': 3, 'diabetic': False}, 'create': True},
indices=[
CaseIndex(CaseStructure(attrs={'case_name': 'parent', 'update': {'age': 42}, 'create': True})),
]
)
)
pre_cases[0] = self.factory.create_or_update_case(CaseStructure(
case_id=pre_cases[0].case_id,
attrs={'external_id': 'billie jean', 'update': {'name': '<NAME>'}}
))[0]
self._dump_and_load(expected_object_counts)
case_ids = self.case_accessors.get_case_ids_in_domain()
self.assertEqual(set(case_ids), set(case.case_id for case in pre_cases))
for pre_case in pre_cases:
post_case = self.case_accessors.get_case(pre_case.case_id)
self.assertDictEqual(pre_case.to_json(), post_case.to_json())
def test_ledgers(self):
expected_object_counts = Counter({
XFormInstanceSQL: 3,
BlobMeta: 3,
CommCareCaseSQL: 1,
CaseTransaction: 3,
LedgerValue: 1,
LedgerTransaction: 2
})
case = self.factory.create_case()
submit_case_blocks([
get_single_balance_block(case.case_id, self.product._id, 10)
], self.domain_name)
submit_case_blocks([
get_single_balance_block(case.case_id, self.product._id, 5)
], self.domain_name)
pre_ledger_values = LedgerAccessorSQL.get_ledger_values_for_case(case.case_id)
pre_ledger_transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(case.case_id)
self.assertEqual(1, len(pre_ledger_values))
self.assertEqual(2, len(pre_ledger_transactions))
self._dump_and_load(expected_object_counts)
post_ledger_values = LedgerAccessorSQL.get_ledger_values_for_case(case.case_id)
post_ledger_transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(case.case_id)
self.assertEqual(1, len(post_ledger_values))
self.assertEqual(2, len(post_ledger_transactions))
self.assertEqual(pre_ledger_values[0].ledger_reference, post_ledger_values[0].ledger_reference)
self.assertDictEqual(pre_ledger_values[0].to_json(), post_ledger_values[0].to_json())
pre_ledger_transactions = sorted(pre_ledger_transactions, key=lambda t: t.pk)
post_ledger_transactions = sorted(post_ledger_transactions, key=lambda t: t.pk)
for pre, post in zip(pre_ledger_transactions, post_ledger_transactions):
self.assertEqual(str(pre), str(post))
class TestSQLDumpLoad(BaseDumpLoadTest):
def test_case_search_config(self):
from corehq.apps.case_search.models import CaseSearchConfig, FuzzyProperties
expected_object_counts = Counter({
CaseSearchConfig: 1,
FuzzyProperties: 2,
})
pre_config, created = CaseSearchConfig.objects.get_or_create(pk=self.domain_name)
pre_config.enabled = True
pre_fuzzies = [
FuzzyProperties(domain=self.domain, case_type='dog', properties=['breed', 'color']),
FuzzyProperties(domain=self.domain, case_type='owner', properties=['name']),
]
for fuzzy in pre_fuzzies:
fuzzy.save()
pre_config.fuzzy_properties.set(pre_fuzzies)
pre_config.save()
self._dump_and_load(expected_object_counts)
post_config = CaseSearchConfig.objects.get(domain=self.domain_name)
self.assertTrue(post_config.enabled)
self.assertEqual(pre_config.fuzzy_properties, post_config.fuzzy_properties)
post_fuzzies = FuzzyProperties.objects.filter(domain=self.domain_name)
self.assertEqual(set(f.case_type for f in post_fuzzies), {'dog', 'owner'})
def test_users(self):
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.models import WebUser
from django.contrib.auth.models import User
expected_object_counts = Counter({User: 3})
ccuser_1 = CommCareUser.create(
domain=self.domain_name,
username='user_1',
password='<PASSWORD>',
created_by=None,
created_via=None,
email='<EMAIL>',
)
ccuser_2 = CommCareUser.create(
domain=self.domain_name,
username='user_2',
password='<PASSWORD>',
created_by=None,
created_via=None,
email='<EMAIL>',
)
web_user = WebUser.create(
domain=self.domain_name,
username='webuser_t1',
password='<PASSWORD>',
created_by=None,
created_via=None,
email='<EMAIL>',
)
self.addCleanup(ccuser_1.delete, self.domain_name, deleted_by=None)
self.addCleanup(ccuser_2.delete, self.domain_name, deleted_by=None)
self.addCleanup(web_user.delete, self.domain_name, deleted_by=None)
self._dump_and_load(expected_object_counts)
def test_dump_roles(self):
from corehq.apps.users.models import UserRole, Permissions, RoleAssignableBy, RolePermission
expected_object_counts = Counter({
UserRole: 2,
RolePermission: 11,
RoleAssignableBy: 1
})
role1 = UserRole.create(self.domain_name, 'role1')
role2 = UserRole.create(
self.domain_name, 'role1',
permissions=Permissions(edit_web_users=True),
assignable_by=[role1.id]
)
self.addCleanup(role1.delete)
self.addCleanup(role2.delete)
self._dump_and_load(expected_object_counts)
role1_loaded = UserRole.objects.get(id=role1.id)
role2_loaded = UserRole.objects.get(id=role2.id)
self.assertEqual(role1_loaded.permissions.to_list(), Permissions().to_list())
self.assertEqual(role1_loaded.assignable_by, [])
self.assertEqual(role2_loaded.permissions.to_list(), Permissions(edit_web_users=True).to_list())
self.assertEqual(role2_loaded.assignable_by, [role1_loaded.get_id])
def test_device_logs(self):
from corehq.apps.receiverwrapper.util import submit_form_locally
from phonelog.models import DeviceReportEntry, ForceCloseEntry, UserEntry, UserErrorEntry
from corehq.apps.users.models import CommCareUser
from django.contrib.auth.models import User
expected_object_counts = Counter({
User: 1,
DeviceReportEntry: 7,
UserEntry: 1,
UserErrorEntry: 2,
ForceCloseEntry: 1
})
user = CommCareUser.create(
domain=self.domain_name,
username='user_1',
password='<PASSWORD>',
created_by=None,
created_via=None,
email='<EMAIL>',
uuid='428d454aa9abc74e1964e16d3565d6b6' # match ID in devicelog.xml
)
self.addCleanup(user.delete, self.domain_name, deleted_by=None)
with open('corehq/ex-submodules/couchforms/tests/data/devicelogs/devicelog.xml', 'rb') as f:
xml = f.read()
submit_form_locally(xml, self.domain_name)
self._dump_and_load(expected_object_counts)
def test_demo_user_restore(self):
from corehq.apps.users.models import CommCareUser
from corehq.apps.ota.models import DemoUserRestore
from django.contrib.auth.models import User
expected_object_counts = Counter({
User: 1,
DemoUserRestore: 1
})
user_id = uuid.uuid4().hex
user = CommCareUser.create(
domain=self.domain_name,
username='user_1',
password='<PASSWORD>',
created_by=None,
created_via=None,
email='<EMAIL>',
uuid=user_id
)
self.addCleanup(user.delete, self.domain_name, deleted_by=None)
DemoUserRestore(
demo_user_id=user_id,
restore_blob_id=uuid.uuid4().hex,
content_length=1027,
restore_comment="Test migrate demo user restore"
).save()
self._dump_and_load(expected_object_counts)
def test_products(self):
from corehq.apps.products.models import SQLProduct
expected_object_counts = Counter({SQLProduct: 3})
p1 = SQLProduct.objects.create(domain=self.domain_name, product_id='test1', name='test1')
p2 = SQLProduct.objects.create(domain=self.domain_name, product_id='test2', name='test2')
parchived = SQLProduct.objects.create(domain=self.domain_name, product_id='test3', name='test3', is_archived=True)
self._dump_and_load(expected_object_counts)
self.assertEqual(2, SQLProduct.active_objects.filter(domain=self.domain_name).count())
all_active = SQLProduct.active_objects.filter(domain=self.domain_name).all()
self.assertTrue(p1 in all_active)
self.assertTrue(p2 in all_active)
self.assertTrue(parchived not in all_active)
def test_location_type(self):
from corehq.apps.locations.models import LocationType
from corehq.apps.locations.tests.test_location_types import make_loc_type
expected_object_counts = Counter({LocationType: 7})
state = make_loc_type('state', domain=self.domain_name)
district = make_loc_type('district', state, domain=self.domain_name)
section = make_loc_type('section', district, domain=self.domain_name)
block = make_loc_type('block', district, domain=self.domain_name)
center = make_loc_type('center', block, domain=self.domain_name)
county = make_loc_type('county', state, domain=self.domain_name)
city = make_loc_type('city', county, domain=self.domain_name)
self._dump_and_load(expected_object_counts)
hierarchy = LocationType.objects.full_hierarchy(self.domain_name)
desired_hierarchy = {
state.id: (
state,
{
district.id: (
district,
{
section.id: (section, {}),
block.id: (block, {
center.id: (center, {}),
}),
},
),
county.id: (
county,
{city.id: (city, {})},
),
},
),
}
self.assertEqual(hierarchy, desired_hierarchy)
def test_location(self):
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.locations.tests.util import setup_locations_and_types
expected_object_counts = Counter({LocationType: 3, SQLLocation: 11})
location_type_names = ['province', 'district', 'city']
location_structure = [
('Western Cape', [
('Cape Winelands', [
('Stellenbosch', []),
('Paarl', []),
]),
('Cape Town', [
('Cape Town City', []),
])
]),
('Gauteng', [
('Ekurhuleni ', [
('Alberton', []),
('Benoni', []),
('Springs', []),
]),
]),
]
location_types, locations = setup_locations_and_types(
self.domain_name,
location_type_names,
[],
location_structure,
)
self._dump_and_load(expected_object_counts)
names = ['Cape Winelands', 'Paarl', 'Cape Town']
location_ids = [locations[name].location_id for name in names]
result = SQLLocation.objects.get_locations_and_children(location_ids)
self.assertItemsEqual(
[loc.name for loc in result],
['Cape Winelands', 'Stellenbosch', 'Paarl', 'Cape Town', 'Cape Town City']
)
result = SQLLocation.objects.get_locations_and_children([locations['Gauteng'].location_id])
self.assertItemsEqual(
[loc.name for loc in result],
['Gauteng', 'Ekurhuleni ', 'Alberton', 'Benoni', 'Springs']
)
def test_sms(self):
from corehq.apps.sms.models import PhoneNumber, MessagingEvent, MessagingSubEvent
expected_object_counts = Counter({PhoneNumber: 1, MessagingEvent: 1, MessagingSubEvent: 1})
phone_number = PhoneNumber(
domain=self.domain_name,
owner_doc_type='CommCareCase',
owner_id='fake-owner-id1',
phone_number='99912341234',
backend_id=None,
ivr_backend_id=None,
verified=True,
is_two_way=True,
pending_verification=False,
contact_last_modified=datetime.utcnow()
)
phone_number.save()
event = MessagingEvent.objects.create(
domain=self.domain_name,
date=datetime.utcnow(),
source=MessagingEvent.SOURCE_REMINDER,
content_type=MessagingEvent.CONTENT_SMS,
status=MessagingEvent.STATUS_COMPLETED
)
MessagingSubEvent.objects.create(
parent=event,
date=datetime.utcnow(),
recipient_type=MessagingEvent.RECIPIENT_CASE,
content_type=MessagingEvent.CONTENT_SMS,
status=MessagingEvent.STATUS_COMPLETED
)
self._dump_and_load(expected_object_counts)
def test_message_scheduling(self):
AlertScheduleInstance(
schedule_instance_id=uuid.uuid4(),
domain=self.domain_name,
recipient_type='CommCareUser',
recipient_id=uuid.uuid4().hex,
current_event_num=0,
schedule_iteration_num=1,
next_event_due=datetime(2017, 3, 1),
active=True,
alert_schedule_id=uuid.uuid4(),
).save()
self._dump_and_load({AlertScheduleInstance: 1})
def test_mobile_backend(self):
from corehq.apps.sms.models import (
SQLMobileBackend,
SQLMobileBackendMapping,
)
domain_backend = SQLMobileBackend.objects.create(
domain=self.domain_name,
name='test-domain-mobile-backend',
display_name='Test Domain Mobile Backend',
hq_api_id='TDMB',
inbound_api_key='test-domain-mobile-backend-inbound-api-key',
supported_countries=["*"],
backend_type=SQLMobileBackend.SMS,
is_global=False,
)
SQLMobileBackendMapping.objects.create(
domain=self.domain_name,
backend=domain_backend,
backend_type=SQLMobileBackend.SMS,
prefix='123',
)
global_backend = SQLMobileBackend.objects.create(
domain=None,
name='test-global-mobile-backend',
display_name='Test Global Mobile Backend',
hq_api_id='TGMB',
inbound_api_key='test-global-mobile-backend-inbound-api-key',
supported_countries=["*"],
backend_type=SQLMobileBackend.SMS,
is_global=True,
)
SQLMobileBackendMapping.objects.create(
domain=self.domain_name,
backend=global_backend,
backend_type=SQLMobileBackend.SMS,
prefix='*',
)
self._dump_and_load({
SQLMobileBackendMapping: 1,
SQLMobileBackend: 1,
})
self.assertEqual(SQLMobileBackend.objects.first().domain,
self.domain_name)
self.assertEqual(SQLMobileBackendMapping.objects.first().domain,
self.domain_name)
def test_case_importer(self):
from corehq.apps.case_importer.tracking.models import (
CaseUploadFileMeta,
CaseUploadFormRecord,
CaseUploadRecord,
)
upload_file_meta = CaseUploadFileMeta.objects.create(
identifier=uuid.uuid4().hex,
filename='picture.jpg',
length=1024,
)
case_upload_record = CaseUploadRecord.objects.create(
domain=self.domain_name,
upload_id=uuid.uuid4(),
task_id=uuid.uuid4(),
couch_user_id=uuid.uuid4().hex,
case_type='person',
upload_file_meta=upload_file_meta,
)
CaseUploadFormRecord.objects.create(
case_upload_record=case_upload_record,
form_id=uuid.uuid4().hex,
)
self._dump_and_load(Counter({
CaseUploadFileMeta: 1,
CaseUploadRecord: 1,
CaseUploadFormRecord: 1,
}))
def test_transifex(self):
from corehq.apps.translations.models import TransifexProject, TransifexOrganization
org = TransifexOrganization.objects.create(slug='test', name='demo', api_token='<PASSWORD>')
TransifexProject.objects.create(
organization=org, slug='testp', name='demop', domain=self.domain_name
)
TransifexProject.objects.create(
organization=org, slug='testp1', name='demop1', domain=self.domain_name
)
self._dump_and_load(Counter({TransifexOrganization: 1, TransifexProject: 2}))
def test_filtered_dump_load(self):
from corehq.apps.locations.tests.test_location_types import make_loc_type
from corehq.apps.products.models import SQLProduct
from corehq.apps.locations.models import LocationType
make_loc_type('state', domain=self.domain_name)
SQLProduct.objects.create(domain=self.domain_name, product_id='test1', name='test1')
expected_object_counts = Counter({LocationType: 1, SQLProduct: 1})
self._dump_and_load(expected_object_counts, load_filter='sqlproduct', expected_load_counts=Counter({SQLProduct: 1}))
self.assertEqual(0, LocationType.objects.count())
def test_sms_content(self):
from corehq.messaging.scheduling.models import AlertSchedule, SMSContent, AlertEvent
from corehq.messaging.scheduling.scheduling_partitioned.dbaccessors import \
delete_alert_schedule_instances_for_schedule
schedule = AlertSchedule.create_simple_alert(self.domain, SMSContent())
schedule.set_custom_alert(
[
(AlertEvent(minutes_to_wait=5), SMSContent()),
(AlertEvent(minutes_to_wait=15), SMSContent()),
]
)
self.addCleanup(lambda: delete_alert_schedule_instances_for_schedule(AlertScheduleInstance, schedule.schedule_id))
self._dump_and_load(Counter({AlertSchedule: 1, AlertEvent: 2, SMSContent: 2}))
def test_zapier_subscription(self):
ZapierSubscription.objects.create(
domain=self.domain_name,
case_type='case_type',
event_name=EventTypes.NEW_CASE,
url='example.com',
user_id='user_id',
)
self._dump_and_load(Counter({ZapierSubscription: 1}))
@mock.patch("corehq.apps.dump_reload.sql.load.ENQUEUE_TIMEOUT", 1)
class TestSqlLoadWithError(BaseDumpLoadTest):
def setUp(self):
self.products = [
SQLProduct.objects.create(domain=self.domain_name, product_id='test1', name='test1'),
SQLProduct.objects.create(domain=self.domain_name, product_id='test2', name='test2'),
SQLProduct.objects.create(domain=self.domain_name, product_id='test3', name='test3'),
]
def test_load_error_queue_full(self):
"""Blocks when sending 'test3'"""
self._load_with_errors(chunk_size=1)
def test_load_error_queue_full_on_terminate(self):
"""Blocks when sending ``None`` into the queue to 'terminate' it."""
self._load_with_errors(chunk_size=2)
def _load_with_errors(self, chunk_size):
output_stream = StringIO()
SqlDataDumper(self.domain_name, [], []).dump(output_stream)
self.delete_sql_data()
# resave the product to force an error
self.products[0].save()
actual_model_counts, dump_lines = self._parse_dump_output(output_stream)
self.assertEqual(actual_model_counts['products.sqlproduct'], 3)
loader = SqlDataLoader()
with self.assertRaises(IntegrityError),\
mock.patch("corehq.apps.dump_reload.sql.load.CHUNK_SIZE", chunk_size):
# patch the chunk size so that the queue blocks
loader.load_objects(dump_lines)
class DefaultDictWithKeyTests(SimpleTestCase):
def test_intended_use_case(self):
def enlist(item):
return [item]
greasy_spoon = DefaultDictWithKey(enlist)
self.assertEqual(greasy_spoon['spam'], ['spam'])
greasy_spoon['spam'].append('spam')
self.assertEqual(greasy_spoon['spam'], ['spam', 'spam'])
def test_not_enough_params(self):
def empty_list():
return []
greasy_spoon = DefaultDictWithKey(empty_list)
with self.assertRaisesRegex(
TypeError,
r'empty_list\(\) takes 0 positional arguments but 1 was given'
):
greasy_spoon['spam']
def test_too_many_params(self):
def appender(item1, item2):
return [item1, item2]
greasy_spoon = DefaultDictWithKey(appender)
with self.assertRaisesRegex(
TypeError,
r"appender\(\) missing 1 required positional argument: 'item2'"
):
greasy_spoon['spam']
def test_no_factory(self):
greasy_spoon = DefaultDictWithKey()
with self.assertRaisesRegex(
TypeError,
"'NoneType' object is not callable"
):
greasy_spoon['spam']
def _normalize_object_counter(counter, for_loaded=False):
"""Converts a <Model Class> keyed counter to an model label keyed counter"""
def _model_class_to_label(model_class):
label = '{}.{}'.format(model_class._meta.app_label, model_class.__name__)
return label if for_loaded else label.lower()
return Counter({
_model_class_to_label(model_class): count
for model_class, count in counter.items()
})
|
tests/keras/test_activations.py | the-moliver/keras | 150 | 2253 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras import activations
def get_standard_values():
'''
These are just a set of floats used for testing the activation
functions, and are useful in multiple tests.
'''
return np.array([[0, 0.1, 0.5, 0.9, 1.0]], dtype=K.floatx())
def test_softmax():
'''
Test using a reference implementation of softmax
'''
def softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softmax(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_time_distributed_softmax():
x = K.placeholder(shape=(1, 1, 5))
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
f([test_values])[0]
def test_softplus():
'''
Test using a reference softplus implementation
'''
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softplus(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softplus(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_softsign():
'''
Test using a reference softsign implementation
'''
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softsign(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softsign(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_sigmoid():
'''
Test using a numerically stable reference sigmoid implementation
'''
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_hard_sigmoid():
'''
Test using a reference hard sigmoid implementation
'''
def ref_hard_sigmoid(x):
'''
Reference hard sigmoid with slope and shift values from theano, see
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
'''
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.hard_sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_relu():
'''
Relu implementation doesn't depend on the value being
a theano variable. Testing ints, floats and theano tensors.
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.relu(x)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
def test_elu():
x = K.placeholder(ndim=2)
f = K.function([x], [activations.elu(x, 0.5)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=K.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
assert_allclose(result, true_result)
def test_tanh():
test_values = get_standard_values()
x = K.placeholder(ndim=2)
exp = activations.tanh(x)
f = K.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_linear():
'''
    This function does no input validation; it simply returns the value
    that was passed in.
'''
xs = [1, 5, True, None, 'foo']
for x in xs:
assert(x == activations.linear(x))
if __name__ == '__main__':
pytest.main([__file__])
|
setup.py | ouyhlan/fastNLP | 2,693 | 2266 | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('LICENSE', encoding='utf-8') as f:
license = f.read()
with open('requirements.txt', encoding='utf-8') as f:
reqs = f.read()
pkgs = [p for p in find_packages() if p.startswith('fastNLP')]
print(pkgs)
setup(
name='FastNLP',
version='0.7.0',
url='https://gitee.com/fastnlp/fastNLP',
description='fastNLP: Deep Learning Toolkit for NLP, developed by Fudan FastNLP Team',
long_description=readme,
long_description_content_type='text/markdown',
license='Apache License',
author='<NAME>',
python_requires='>=3.6',
packages=pkgs,
install_requires=reqs.strip().split('\n'),
)
|
pydlm/tests/base/testKalmanFilter.py | onnheimm/pydlm | 423 | 2279 | <gh_stars>100-1000
import numpy as np
import unittest
from pydlm.modeler.trends import trend
from pydlm.modeler.seasonality import seasonality
from pydlm.modeler.builder import builder
from pydlm.base.kalmanFilter import kalmanFilter
class testKalmanFilter(unittest.TestCase):
def setUp(self):
self.kf1 = kalmanFilter(discount=[1])
self.kf0 = kalmanFilter(discount=[1e-10])
self.kf11 = kalmanFilter(discount=[1, 1])
self.trend0 = trend(degree=0, discount=1, w=1.0)
self.trend0_90 = trend(degree=0, discount=0.9, w=1.0)
self.trend0_98 = trend(degree=0, discount=0.98, w=1.0, name='a')
self.trend1 = trend(degree=1, discount=1, w=1.0)
def testForwardFilter(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
        # the prior on the mean is zero, but we observe 1; with
        # discount = 1, one should expect the filtered mean to be 0.5
self.kf1.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
self.assertAlmostEqual(dlm.model.sysVar, 0.375)
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0.5)
dlm.initialize()
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
        # the prior on the mean is zero, but we observe 1; with discount = 0
        # one should expect the filtered mean to be close to 1
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 0)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.5)
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 1)
def testForwardFilterMultiDim(self):
dlm = builder()
dlm.add(seasonality(period=2, discount=1, w=1.0))
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], 0.33333333333)
self.assertAlmostEqual(dlm.model.state[1][0, 0], -0.33333333333)
self.kf11.forwardFilter(dlm.model, -1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], -0.5)
self.assertAlmostEqual(dlm.model.state[1][0, 0], 0.5)
def testBackwardSmoother(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
        # with mean being 0 and observing 1 and 0 consecutively, one should
        # expect the smoothed mean at time 1 to be 1/3, for discount = 1
self.kf1.forwardFilter(dlm.model, 1)
self.kf1.forwardFilter(dlm.model, 0)
self.kf1.backwardSmoother(dlm.model, \
np.matrix([[0.5]]), \
np.matrix([[0.375]]))
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0/3)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.18518519)
# second order trend with discount = 1. The smoothed result should be
# equal to a direct fit on the three data points, 0, 1, -1. Thus, the
# smoothed observation should be 0.0
def testBackwardSmootherMultiDim(self):
dlm = builder()
dlm.add(self.trend1)
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
state1 = dlm.model.state
cov1 = dlm.model.sysVar
self.kf11.forwardFilter(dlm.model, -1)
self.kf11.backwardSmoother(dlm.model, \
rawState = state1, \
rawSysVar = cov1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingData(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0], 1.0)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, 0)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingEvaluation(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
dlm.model.evaluation = np.matrix([[None]])
self.kf1.forwardFilter(dlm.model, 1.0, dealWithMissingEvaluation = True)
self.assertAlmostEqual(dlm.model.obs, 0.0)
self.assertAlmostEqual(dlm.model.transition, 1.0)
def testEvolveMode(self):
dlm = builder()
dlm.add(self.trend0_90)
dlm.add(self.trend0_98)
dlm.initialize()
kf2 = kalmanFilter(discount=[0.9, 0.98],
updateInnovation='component',
index=dlm.componentIndex)
kf2.forwardFilter(dlm.model, 1.0)
self.assertAlmostEqual(dlm.model.innovation[0, 1], 0.0)
self.assertAlmostEqual(dlm.model.innovation[1, 0], 0.0)
if __name__ == '__main__':
unittest.main()
|
Tests/Marketplace/prepare_public_index_for_private_testing.py | diCagri/content | 799 | 2312 | import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
shutil.copy(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Printing a message every minute to keep the machine from dying due to no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
|
plaidml2/edsl/__init__.py | ZhouXiaolin/plaidml | 4,535 | 2315 | <gh_stars>1000+
# Copyright 2019 Intel Corporation.
import logging
from collections import namedtuple
import numpy as np
import six
from plaidml2 import DType
from plaidml2.core import TensorShape, Buffer
from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib
logger = logging.getLogger(__name__)
def __init():
"""Docstring for function plaidml2.edsl.__init"""
ffi_call(lib.plaidml_edsl_init)
ffi.init_once(__init, 'plaidml_edsl_init')
class LogicalShape(ForeignObject):
"""Docstring for class LogicalShape"""
__ffi_del__ = lib.plaidml_logical_shape_free
__ffi_repr__ = lib.plaidml_logical_shape_repr
def __init__(self, dtype=None, dims=[], ptr=None):
if ptr:
ffi_obj = ptr
elif dtype is not None:
raw_dims = ffi.new('int64_t[]', [0 if x is None else x for x in dims])
ffi_obj = ffi_call(lib.plaidml_logical_shape_alloc, dtype, len(dims), raw_dims)
else:
raise ValueError('One of dtype= or ptr= must be specified.')
super(LogicalShape, self).__init__(ffi_obj)
@property
def dtype(self):
return DType(ffi_call(lib.plaidml_logical_shape_get_dtype, self.as_ptr()))
@property
def ndims(self):
return ffi_call(lib.plaidml_logical_shape_get_ndims, self.as_ptr())
@property
def int_dims(self):
"""Returns the dimensions of a LogicalShape as a list.
Args:
self (pointer): The object pointer for a LogicalShape
Returns:
list (int): Integer dimensions of the LogicalShape.
"""
return [
ffi_call(lib.plaidml_logical_shape_get_dim_int, self.as_ptr(), i)
for i in range(self.ndims)
]
def into_TensorShape(self):
return TensorShape(
ptr=ffi_call(lib.plaidml_logical_shape_into_tensor_shape, self.as_ptr()))
Constraint = namedtuple('Constraint', ['lhs', 'rhs'])
def wrap_dim(x):
if isinstance(x, six.integer_types):
return TensorDim(expr=ffi_call(lib.plaidml_dim_expr_int, x))
return x
def dim_op(op, *args):
args = [wrap_dim(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return ffi_call(lib.plaidml_dim_expr_op, op, len(args), raw_args)
class TensorDim(ForeignObject):
"""Docstring for class TensorDim"""
__ffi_del__ = lib.plaidml_dim_expr_free
__ffi_repr__ = lib.plaidml_dim_expr_repr
def __init__(self, expr=None):
if expr is None:
expr = ffi_call(lib.plaidml_dim_expr_none)
super(TensorDim, self).__init__(expr)
def _bind(self, expr):
self.take_ptr(expr)
def __neg__(self):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_NEG, self))
def __add__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD, self, other))
def __radd__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD, other, self))
def __sub__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB, self, other))
def __rsub__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB, other, self))
def __mul__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL, self, other))
def __rmul__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL, other, self))
def __floordiv__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV, self, other))
def __rfloordiv__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV, other, self))
def wrap_poly(x):
if isinstance(x, six.integer_types):
return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_literal, x))
if isinstance(x, TensorDim):
return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_dim, x.as_ptr()))
return x
def poly_op(op, *args):
args = [wrap_poly(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return ffi_call(lib.plaidml_poly_expr_op, op, len(args), raw_args)
class TensorIndex(ForeignObject):
"""Docstring for class TensorIndex"""
__ffi_del__ = lib.plaidml_poly_expr_free
__ffi_repr__ = lib.plaidml_poly_expr_repr
def __init__(self, expr=None, name=''):
if expr is None:
expr = ffi_call(lib.plaidml_poly_expr_index, name.encode())
super(TensorIndex, self).__init__(expr)
def __lt__(self, rhs):
return Constraint(self, wrap_dim(rhs))
def __neg__(self):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_NEG, self))
def __add__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD, self, rhs))
def __radd__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD, lhs, self))
def __sub__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB, self, rhs))
def __rsub__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB, lhs, self))
def __mul__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL, self, rhs))
def __rmul__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL, lhs, self))
def __floordiv__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV, self, rhs))
def __rfloordiv__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV, lhs, self))
class _IndexMap(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, ref, key):
if isinstance(key, tuple) or isinstance(key, list):
idxs = key
else:
idxs = [key]
idxs = [wrap_poly(x) for x in idxs]
raw_idxs = [x.as_ptr() for x in idxs]
expr = ffi_call(lib.plaidml_expr_index_map, ref.as_ptr(), len(idxs), raw_idxs)
super(_IndexMap, self).__init__(expr)
class _SizeMap(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, dims):
dims = [wrap_dim(x) for x in dims]
raw_dims = [x.as_ptr() for x in dims]
expr = ffi_call(lib.plaidml_expr_size_map, len(dims), raw_dims)
super(_SizeMap, self).__init__(expr)
class _Contraction(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, agg_op, combo_op, src_idxs, sink_idxs, sink_sizes, name):
src_idxs = [x.as_ptr() for x in src_idxs]
expr = ffi_call(
lib.plaidml_expr_contraction,
agg_op,
combo_op,
sink_idxs.as_ptr(),
sink_sizes.as_ptr(),
len(src_idxs),
src_idxs,
name.encode(),
)
super(_Contraction, self).__init__(expr)
_ContractionPart = namedtuple('_ContractionPart', ['op', 'args'])
class IndexedTensor(object):
"""Docstring for class IndexedTensor"""
def __init__(self, impl, tensor=None):
self._impl = impl
self._tensor = tensor
def __repr__(self):
return repr(self._impl)
# Represents an aggregation_op of SUM in a contraction
def __iadd__(self, rhs):
return IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_SUM, rhs))
# Represents an aggregation_op of PROD in a contraction
def __imul__(self, rhs):
return IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_PROD, rhs))
# Represents an aggregation_op of MAX in a contraction
def __ge__(self, rhs):
self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MAX, rhs))
# Represents an aggregation_op of MIN in a contraction
def __le__(self, rhs):
self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MIN, rhs))
# Represents a combo_op of PLUS in a contraction
def __add__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_ADD, (self, rhs)))
# Represents a combo_op of MULTIPLY in a contraction
def __mul__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_MUL, (self, rhs)))
# Represents a combo_op of EQ in a contraction
def __eq__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_EQ, (self, rhs)))
def _make_contraction(self, agg_op, rhs):
# Extract combo_op and inputs
if isinstance(rhs._impl, _IndexMap):
# Unary op
combo_op = lib.PLAIDML_COMBO_OP_NONE
inputs = [rhs._impl]
elif isinstance(rhs._impl, _ContractionPart):
# Binary/Ternary op
combo_op = rhs._impl.op
inputs = [x._impl for x in rhs._impl.args]
else:
raise ValueError('Invalid impl')
return _Contraction(
agg_op,
combo_op,
inputs,
self._impl,
_SizeMap(self._tensor._dims),
self._tensor._name,
)
class Tensor(ForeignObject):
"""Docstring for class Tensor"""
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
_dims = None
_is_contraction = False
def __init__(self, shape=None, dims=None, expr=None, value=None, name='', buffer=None):
self._name = name
self._buffer = buffer
if shape:
if buffer is None:
raw_buffer = ffi.NULL
else:
raw_buffer = buffer.as_ptr()
expr = ffi_call(lib.plaidml_expr_placeholder, shape.as_ptr(), raw_buffer,
name.encode())
elif dims is not None:
self._dims = dims
expr = None
elif value is not None:
if isinstance(value, six.integer_types):
expr = ffi_call(lib.plaidml_expr_int, value)
elif isinstance(value, float):
expr = ffi_call(lib.plaidml_expr_float, value)
else:
raise TypeError('Invalid type for value={}'.format(value))
elif expr is None:
raise ValueError('One of dims=, shape=, or expr= must be specified.')
super(Tensor, self).__init__(expr)
def set_param_value(self, buffer):
# Changes the value of a parameter tensor (i.e. one explicitly set to a buffer value)
# Illegal on other tensors
ffi_call(lib.plaidml_expr_param_reset, self.__ffi_obj__, buffer.as_ptr())
def __hash__(self):
return hash((self.as_ptr(), self._dims, self._is_contraction))
def __getitem__(self, key):
return IndexedTensor(_IndexMap(self, key), tensor=self)
def __setitem__(self, key, value):
if isinstance(value._impl, _Contraction):
# standard contraction
self._set_contraction(value._impl)
elif isinstance(value, Tensor):
pass
elif isinstance(value._impl, _IndexMap):
# Unary ASSIGN contraction
self._set_contraction(
_Contraction(
lib.PLAIDML_AGG_OP_ASSIGN,
lib.PLAIDML_COMBO_OP_NONE,
[value._impl],
_IndexMap(self, key),
_SizeMap(self._dims),
self._name,
))
elif isinstance(value._impl, _ContractionPart):
# Binary or ternary ASSIGN contraction
self._set_contraction(
_Contraction(
lib.PLAIDML_AGG_OP_ASSIGN,
value._impl.op,
[x._impl for x in value._impl.args],
_IndexMap(self, key),
_SizeMap(self._dims),
self._name,
))
else:
raise ValueError('Invalid impl when assigning to a Tensor (Type: {})'.format(
type(value._impl)))
def _set_contraction(self, cion):
self._is_contraction = True
self.take_ptr(cion)
# Represents an eltwise negation
def __neg__(self):
return call('neg', self)
# Represents an eltwise bit_not
def __invert__(self):
return call('bit_not', self)
# Represents an eltwise addition
def __add__(self, rhs):
return call('add', self, rhs)
def __radd__(self, lhs):
return call('add', lhs, self)
# Represents an eltwise subtraction
def __sub__(self, rhs):
return call('sub', self, rhs)
def __rsub__(self, lhs):
return call('sub', lhs, self)
# Represents an eltwise multiplication
def __mul__(self, rhs):
return call('mul', self, rhs)
def __rmul__(self, lhs):
return call('mul', lhs, self)
# Represents an eltwise division
def __div__(self, rhs):
return call('div', self, rhs)
def __rdiv__(self, lhs):
return call('div', lhs, self)
# Represents an eltwise division
def __truediv__(self, rhs):
return call('div', self, rhs)
def __rtruediv__(self, lhs):
return call('div', lhs, self)
# Represents an eltwise cmp_eq
def __eq__(self, rhs):
return call('cmp_eq', self, rhs)
# Represents an eltwise cmp_ne
def __ne__(self, rhs):
return call('cmp_ne', self, rhs)
# Represents an eltwise cmp_lt
def __lt__(self, rhs):
return call('cmp_lt', self, rhs)
# Represents an eltwise cmp_gt
def __gt__(self, rhs):
return call('cmp_gt', self, rhs)
# Represents an eltwise cmp_le
def __le__(self, rhs):
return call('cmp_le', self, rhs)
# Represents an eltwise cmp_ge
def __ge__(self, rhs):
return call('cmp_ge', self, rhs)
# Represents an eltwise bit_left
def __lshift__(self, rhs):
return call('bit_left', self, rhs)
def __rlshift__(self, lhs):
return call('bit_left', lhs, self)
# Represents an eltwise bit_right
def __rshift__(self, rhs):
return call('bit_right', self, rhs)
def __rrshift__(self, lhs):
return call('bit_right', lhs, self)
# Represents an eltwise bit_and
def __and__(self, rhs):
return call('bit_and', self, rhs)
def __rand__(self, lhs):
return call('bit_and', lhs, self)
# Represents an eltwise bit_or
def __or__(self, rhs):
return call('bit_or', self, rhs)
def __ror__(self, lhs):
return call('bit_or', lhs, self)
# Represents an eltwise bit_xor
def __xor__(self, rhs):
return call('bit_xor', self, rhs)
def __rxor__(self, lhs):
return call('bit_xor', lhs, self)
# Enable no_reduce on a contraction
def no_reduce(self):
if not self._is_contraction:
raise TypeError('no_reduce can only be specified on a contraction.')
ffi_call(lib.plaidml_expr_contraction_set_no_reduce, self.as_ptr(), True)
return self
# Set use_default on a contraction
def use_default(self, rhs):
if not self._is_contraction:
raise TypeError('use_default can only be specified on a contraction.')
ffi_call(lib.plaidml_expr_contraction_set_use_default, self.as_ptr(), rhs.as_ptr())
return self
def add_constraint(self, constraint):
ffi_call(
lib.plaidml_expr_contraction_add_constraint,
self.as_ptr(),
constraint.lhs.as_ptr(),
constraint.rhs.as_ptr(),
)
def add_constraints(self, constraints):
for constraint in constraints:
self.add_constraint(constraint)
# Return the tensor's shape
@property
def shape(self):
return LogicalShape(ptr=ffi_call(lib.plaidml_expr_get_shape, self.as_ptr()))
# Verify that the specified dims match the dims of this tensor.
def bind_dims(self, *dims):
raw_dims = [x.as_ptr() for x in dims]
ffi_call(lib.plaidml_expr_bind_dims, self.as_ptr(), len(raw_dims), raw_dims)
# bind a concrete shape to this tensor
def bind(self, shape):
ffi_call(lib.plaidml_expr_bind_shape, self.as_ptr(), shape.as_ptr())
class TensorRef:
"""Docstring for class TensorRef"""
def __init__(self, tensor):
self.tensor = tensor
def __hash__(self):
return hash(ffi_call(lib.plaidml_expr_ptr, self.tensor.as_ptr()))
def __eq__(self, other):
if isinstance(other, Tensor):
return self.__hash__() == TensorRef(other).__hash__()
return self.__hash__() == other.__hash__()
class Value(ForeignObject):
"""Docstring for class Value"""
__ffi_del__ = lib.plaidml_value_free
__ffi_repr__ = lib.plaidml_value_repr
def __init__(self, value):
# logger.debug('Value({})'.format(value))
if isinstance(value, np.ndarray):
if value.ndim == 0:
value = value.item()
else:
value = value.tolist()
if value is None:
ffi_obj = ffi_call(lib.plaidml_value_none)
elif isinstance(value, (six.integer_types, bool)):
ffi_obj = ffi_call(lib.plaidml_value_int, value)
elif isinstance(value, float):
ffi_obj = ffi_call(lib.plaidml_value_float, value)
elif isinstance(value, TensorDim):
ffi_obj = ffi_call(lib.plaidml_value_dim, value.as_ptr())
elif isinstance(value, Tensor):
ffi_obj = ffi_call(lib.plaidml_value_expr, value.as_ptr())
elif isinstance(value, (list, tuple)):
self._elts = [Value(x) for x in value]
raw_elts = [x.as_ptr() for x in self._elts]
ffi_obj = ffi_call(lib.plaidml_value_tuple, len(raw_elts), raw_elts)
elif isinstance(value, six.string_types):
ffi_obj = ffi_call(lib.plaidml_value_str, value.encode('utf-8'))
elif isinstance(value, ffi.CData) and ffi.typeof(value) is ffi.typeof('plaidml_value*'):
ffi_obj = value
else:
raise TypeError('Unsupported type {} for value={}'.format(type(value), value))
super(Value, self).__init__(ffi_obj)
def as_tensor(self):
return Tensor(expr=ffi_call(lib.plaidml_value_expr_get, self.as_ptr()))
def TensorOutput(*args):
return Tensor(dims=args)
def TensorDims(count):
return [TensorDim() for i in range(count)]
def TensorIndexes(count):
return [TensorIndex() for i in range(count)]
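# The IndexedTensor operators defined above are what make contractions
# expressible. As a rough usage sketch (illustrative only, not part of this
# module's public surface), a matrix multiplication
# C[i, j] = sum_k A[i, k] * B[k, j] could be written as:
#
#   def matmul(A, B):
#       I, J, K = TensorDims(3)
#       i, j, k = TensorIndexes(3)
#       A.bind_dims(I, K)
#       B.bind_dims(K, J)
#       C = TensorOutput(I, J)
#       C[i, j] += A[i, k] * B[k, j]
#       return C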
class ProgramArgument:
"""Docstring for class ProgramArgument"""
def __init__(self, arg):
self.is_input = arg.is_input
self.ref = TensorRef(Tensor(expr=ffi_call(lib.plaidml_expr_clone, arg.tensor)))
self.shape = LogicalShape(ptr=ffi_call(lib.plaidml_logical_shape_clone, arg.shape))
if arg.buffer:
tensor_shape = self.shape.into_TensorShape()
self.buffer = Buffer(tensor_shape, ptr=ffi_call(lib.plaidml_buffer_clone, arg.buffer))
else:
self.buffer = None
class Program(ForeignObject):
"""Docstring for class Program"""
__ffi_del__ = lib.plaidml_program_free
__ffi_repr__ = lib.plaidml_program_repr
def __init__(self, name, outputs, updates=[]):
raw_outputs = [x.as_ptr() for x in outputs]
dst_updates = [x[0].as_ptr() for x in updates]
src_updates = [x[1].as_ptr() for x in updates]
raw_args = ffi.new('plaidml_program_args**')
ffi_obj = ffi_call(
lib.plaidml_program_evaluate,
name.encode(),
len(raw_outputs),
raw_outputs,
len(updates),
src_updates,
dst_updates,
raw_args,
)
self.args = [ProgramArgument(raw_args[0].args[i]) for i in range(raw_args[0].nargs)]
ffi_call(lib.plaidml_program_args_free, raw_args[0])
super(Program, self).__init__(ffi_obj)
@property
def inputs(self):
return [x for x in self.args if x.is_input]
@property
def outputs(self):
return [x for x in self.args if not x.is_input]
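# A hypothetical end-to-end sketch (for illustration, not executed here):
# build an expression from a placeholder tensor and compile it into a Program.
#
#   x = Tensor(LogicalShape(DType.FLOAT32, [3]))
#   y = x + 1
#   program = Program('example', [y])
#   # program.inputs and program.outputs then describe the compiled arguments.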
def wrap_tensor(x):
    if isinstance(x, six.integer_types):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x))
    if np.issubdtype(type(x), np.integer):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x.item()))
    if isinstance(x, float):
        return Tensor(expr=ffi_call(lib.plaidml_expr_float, x))
    if isinstance(x, TensorDim):
        return Tensor(expr=ffi_call(lib.plaidml_expr_dim, x.as_ptr()))
    if isinstance(x, Tensor):
        return x
    raise TypeError('Unexpected type {} for call argument: {}'.format(type(x), x))
def call(fn, *args):
args = [wrap_tensor(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return Tensor(expr=ffi_call(lib.plaidml_expr_call, fn.encode(), len(args), raw_args))
def cast(x, dtype):
return Tensor(expr=ffi_call(lib.plaidml_expr_cast, wrap_tensor(x).as_ptr(), dtype))
def as_bool(x):
return cast(x, DType.BOOLEAN)
def as_float(x, bit_size):
    map = {
        16: DType.FLOAT16,
        32: DType.FLOAT32,
        64: DType.FLOAT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_float')
    return cast(x, dtype)
def as_int(x, bit_size):
    map = {
        8: DType.INT8,
        16: DType.INT16,
        32: DType.INT32,
        64: DType.INT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_int')
    return cast(x, dtype)
def as_uint(x, bit_size):
    map = {
        8: DType.UINT8,
        16: DType.UINT16,
        32: DType.UINT32,
        64: DType.UINT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_uint')
    return cast(x, dtype)
def ceil(x):
return call('ceil', x)
def cond(lhs, rhs, true_case):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_COND, (lhs, rhs, true_case)))
def cos(x):
return call('cos', x)
def exp(x):
return call('exp', x)
def floor(x):
return call('floor', x)
def gather(x, y):
return call('gather', x, y)
def gradients(loss, variables):
wrts = [x.as_ptr() for x in variables]
raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
ffi_call(
lib.plaidml_expr_gradient,
len(wrts),
wrts,
loss.as_ptr(),
raw_grads,
)
return [Tensor(expr=x) for x in raw_grads]
def ident(x):
return call('ident', x)
def index(x, axis):
return call('index', x, axis)
def jacobian(loss, variables):
wrts = [x.as_ptr() for x in variables]
raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
ffi_call(
lib.plaidml_expr_jacobian,
len(wrts),
wrts,
loss.as_ptr(),
raw_grads,
)
return [Tensor(expr=x) for x in raw_grads]
def log(x):
return call('log', x)
def max(x, y):
return call('max', x, y)
def min(x, y):
return call('min', x, y)
def pow(x, y):
return call('pow', x, y)
def prng(state, shape):
return call('prng', state, *shape)
def reshape(x, dims):
return call('reshape', x, *dims)
def round(x):
return call('round', x)
def scatter(x, y, z):
return call('scatter', x, y, z)
def select(cond, true_case, false_case):
return call('cond', cond, true_case, false_case)
def shape(x):
return call('shape', x)
def sin(x):
return call('sin', x)
def sqrt(x):
return call('sqrt', x)
def tan(x):
return call('tan', x)
def tanh(x):
return call('tanh', x)
|
backend/services/apns_util.py | xuantan/viewfinder | 645 | 2320 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Apple Push Notification service utilities.
Original copyright for this code: https://github.com/jayridge/apnstornado
TokenToBinary(): converts a base64-encoded token into a binary value
CreateMessage(): formats a binary APNs message from parameters
ParseResponse(): parses APNs binary response for status & identifier
ErrorStatusToString(): converts error status to error message
"""
__author__ = '<EMAIL> (<NAME>)'
import base64
import json
import struct
import time
from tornado import escape
_MAX_PAYLOAD_BYTES = 256
"""Maximum number of bytes in the APNS payload."""
_ELLIPSIS_BYTES = escape.utf8(u'…')
"""UTF-8 encoding of the Unicode ellipsis character."""
def TokenToBinary(token):
return base64.b64decode(token)
def TokenFromBinary(bin_token):
return base64.b64encode(bin_token)
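# Rough usage sketch (hypothetical values, not part of the original module):
#
#   token = TokenFromBinary('\x00' * 32)  # placeholder device token
#   frame = CreateMessage(token, alert=u'Hello', badge=1, sound='default')
#   # write `frame` to the APNs socket; a 6-byte error response, if any,
#   # can then be parsed:
#   status, identifier, text = ParseResponse('\x08\x00\x00\x00\x00\x2a')
#   # status == 0, identifier == 42, text == 'No errors encountered'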
def CreateMessage(token, alert=None, badge=None, sound=None,
identifier=0, expiry=None, extra=None, allow_truncate=True):
token = TokenToBinary(token)
if len(token) != 32:
raise ValueError, u'Token must be a 32-byte binary string.'
if (alert is not None) and (not isinstance(alert, (basestring, dict))):
raise ValueError, u'Alert message must be a string or a dictionary.'
if expiry is None:
expiry = long(time.time() + 365 * 86400)
# Start by determining the length of the UTF-8 encoded JSON with no alert text. This allows us to
# determine how much space is left for the message.
# 'content-available': 1 is necessary to trigger iOS 7's background download processing.
aps = { 'alert' : '', 'content-available': 1 }
if badge is not None:
aps['badge'] = badge
if sound is not None:
aps['sound'] = sound
data = { 'aps' : aps }
if extra is not None:
data.update(extra)
# Create compact JSON representation with no extra space and no escaping of non-ascii chars (i.e. use
# direct UTF-8 representation rather than "\u1234" escaping). This maximizes the amount of space that's
# left for the alert text.
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
bytes_left = _MAX_PAYLOAD_BYTES - len(encoded)
if allow_truncate and isinstance(alert, basestring):
alert = _TruncateAlert(alert, bytes_left)
elif alert and len(escape.utf8(alert)) > bytes_left:
raise ValueError, u'max payload(%d) exceeded: %d' % (_MAX_PAYLOAD_BYTES, len(escape.utf8(alert)))
# Now re-encode including the alert text.
aps['alert'] = alert
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
length = len(encoded)
assert length <= _MAX_PAYLOAD_BYTES, (encoded, length)
return struct.pack('!bIIH32sH%(length)ds' % { 'length' : length },
1, identifier, expiry,
32, token, length, encoded)
def ParseResponse(bytes):
if len(bytes) != 6:
raise ValueError, u'response must be a 6-byte binary string.'
command, status, identifier = struct.unpack_from('!bbI', bytes, 0)
if command != 8:
raise ValueError, u'response command must equal 8.'
return status, identifier, ErrorStatusToString(status)
def ErrorStatusToString(status):
  if status == 0:
    return 'No errors encountered'
  elif status == 1:
    return 'Processing error'
  elif status == 2:
    return 'Missing device token'
  elif status == 3:
    return 'Missing topic'
  elif status == 4:
    return 'Missing payload'
  elif status == 5:
    return 'Invalid token size'
  elif status == 6:
    return 'Invalid topic size'
  elif status == 7:
    return 'Invalid payload size'
  elif status == 8:
    return 'Invalid token'
  elif status == 255:
    return 'None (unknown)'
  else:
    return ''
def _TruncateAlert(alert, max_bytes):
"""Converts the alert text to UTF-8 encoded JSON format, which is how
the alert will be stored in the APNS payload. If the number of
resulting bytes exceeds "max_bytes", then truncates the alert text
at a Unicode character boundary, taking care not to split JSON
escape sequences. Returns the truncated UTF-8 encoded alert text,
including a trailing ellipsis character.
"""
alert_json = escape.utf8(json.dumps(escape.recursive_unicode(alert), ensure_ascii=False))
# Strip quotes added by JSON.
alert_json = alert_json[1:-1]
# Check if alert fits with no truncation.
if len(alert_json) <= max_bytes:
return escape.utf8(alert)
# Make room for an appended ellipsis.
assert max_bytes >= len(_ELLIPSIS_BYTES), 'max_bytes must be at least %d' % len(_ELLIPSIS_BYTES)
max_bytes -= len(_ELLIPSIS_BYTES)
# Truncate the JSON UTF8 string at a Unicode character boundary.
truncated = alert_json[:max_bytes].decode('utf-8', errors='ignore')
# If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep
# chopping trailing characters until the truncated string is valid JSON. It may take several
# tries, such as in the case where a "\u1234" sequence has been split.
while True:
try:
alert = json.loads(u'"%s"' % truncated)
break
except Exception:
truncated = truncated[:-1]
# Return the UTF-8 encoding of the alert with the ellipsis appended to it.
return escape.utf8(alert) + _ELLIPSIS_BYTES
|
demonstrations/tutorial_kernels_module.py | jamesellis1999/qml | 216 | 2321 | r"""Training and evaluating quantum kernels
===========================================
.. meta::
:property="og:description": Kernels and alignment training with Pennylane.
:property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png
.. related::
tutorial_kernel_based_training Kernel-based training with scikit-learn
tutorial_data_reuploading_classifier Classification with data reuploading
*Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Posted: 24 June 2021*
Kernel methods are one of the cornerstones of classical machine learning.
Here we are concerned with kernels that can be evaluated on quantum computers,
*quantum kernels* for short.
In this tutorial you will learn how to evaluate kernels, use them for classification
and train them with gradient-based optimization, and all that using the
functionality of PennyLane's
`kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__.
The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own
`QHack <https://qhack.ai/>`__ hackathon.
What are kernel methods?
------------------------
To understand what a kernel method does, let's first revisit
one of the simplest methods to assign binary labels to datapoints:
linear classification.
Imagine we want to discern two different classes of points that lie in
different corners of the plane. A linear classifier corresponds to
drawing a line and assigning different labels to the regions on opposing
sides of the line:
.. figure:: ../demonstrations/kernels_module/linear_classification.png
:align: center
:width: 30%
We can mathematically formalize this by assigning the label :math:`y`
via
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b).
The vector :math:`\boldsymbol{w}` points perpendicular to the line and
thus determines its slope. The independent term :math:`b` specifies the
position of the line in the plane. In this form, linear classification can also be
extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a
line does not divide the entire space into two regions anymore. Instead
one needs a *hyperplane*. It is immediately clear that this method is
not very powerful, as datasets that are not separable by a hyperplane
can't be classified without error.
We can actually sneak around this limitation by performing a neat trick:
if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our
datapoints into a larger *feature space* and then perform linear
classification there, we could actually realise non-linear
classification in our original space!
.. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png
:align: center
:width: 65%
If we go back to the expression for our prediction and include the
embedding, we get
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b).
We will forgo one tiny step, but it can be shown that for the purpose
of optimal classification, we can choose the vector defining the
decision boundary as a linear combination of the embedded datapoints
:math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting
this into the formula yields
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right).
This rewriting might not seem useful at first, but notice the above
formula only contains inner products between vectors in the embedding
space:
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle.
We call this function the *kernel*. It provides the advantage that we can often
find an explicit formula for the kernel :math:`k` that makes it
superfluous to actually perform the (potentially expensive) embedding
:math:`\phi`. Consider for example the following embedding and the
associated kernel:
.. math::
\phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\
k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2.
This means by just replacing the regular scalar product in our linear
classification with the map :math:`k`, we can actually express much more
intricate decision boundaries!
This is very important, because in many interesting cases the embedding :math:`\phi`
will be much costlier to compute than the kernel :math:`k`.
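To convince ourselves that the embedding and the kernel really agree, here is
a small sanity check in plain NumPy (a hypothetical example, not part of the
original demo):
.. code-block:: python
    import numpy as np
    def phi(x):
        # explicit embedding (x_1^2, sqrt(2) x_1 x_2, x_2^2)
        return np.array([x[0] ** 2, np.sqrt(2) * x[0] * x[1], x[1] ** 2])
    x, y = np.array([0.3, -1.2]), np.array([0.7, 0.5])
    print(np.dot(phi(x), phi(y)))  # inner product in the feature space
    print(np.dot(x, y) ** 2)       # kernel evaluated directly -- same value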
In this demo, we will explore one particular kind of kernel
that can be realized on near-term quantum computers, namely *Quantum
Embedding Kernels (QEKs)*. These are kernels that arise from embedding
data into the space of quantum states. We formalize this by considering
a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps
a datapoint :math:`\boldsymbol{x}` to the state
.. math::
|\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle.
The kernel value is then given by the *overlap* of the associated
embedded quantum states
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2.
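Written in terms of the embedding circuit, this overlap is nothing but the
probability of measuring the all-zero state after applying
:math:`U^\dagger(\boldsymbol{x}_j) U(\boldsymbol{x}_i)` to the initial state:
.. math::
    k(\boldsymbol{x}_i, \boldsymbol{x}_j) = |\langle 0 | U^\dagger(\boldsymbol{x}_j) U(\boldsymbol{x}_i) | 0 \rangle|^2.
This is exactly the quantity we will estimate with a quantum circuit below.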
"""
##############################################################################
# A toy problem
# -------------
# In this demo, we will treat a toy problem that showcases the
# inner workings of classification with quantum embedding kernels,
# training variational embedding kernels and the available functionalities
# to do both in PennyLane. We of course need to start with some imports:
from pennylane import numpy as np
import matplotlib as mpl
np.random.seed(1359)
##############################################################################
# And we proceed right away to create a dataset to work with, the
# ``DoubleCake`` dataset. First, we define two functions that generate
# the data.
# The details of these functions are not essential for understanding the demo,
# so feel free to skim them.
def _make_circular_data(num_sectors):
"""Generate datapoints arranged in an even circle."""
center_indices = np.array(range(0, num_sectors))
sector_angle = 2 * np.pi / num_sectors
angles = (center_indices + 0.5) * sector_angle
x = 0.7 * np.cos(angles)
y = 0.7 * np.sin(angles)
labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1
return x, y, labels
def make_double_cake_data(num_sectors):
x1, y1, labels1 = _make_circular_data(num_sectors)
x2, y2, labels2 = _make_circular_data(num_sectors)
# x and y coordinates of the datapoints
x = np.hstack([x1, 0.5 * x2])
y = np.hstack([y1, 0.5 * y2])
# Canonical form of dataset
X = np.vstack([x, y]).T
labels = np.hstack([labels1, -1 * labels2])
# Canonical form of labels
Y = labels.astype(int)
return X, Y
##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:
def plot_double_cake_data(X, Y, ax, num_sectors=None):
"""Plot double cake data and corresponding sectors."""
x, y = X.T
cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")
if num_sectors is not None:
sector_angle = 360 / num_sectors
for i in range(num_sectors):
color = ["#FF0000", "#0000FF"][(i % 2)]
other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
1,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=color,
alpha=0.1,
width=0.5,
)
)
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
0.5,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=other_color,
alpha=0.1,
)
)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
ax.axis("off")
return ax
##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:
import matplotlib.pyplot as plt
num_sectors = 3
X, Y = make_double_cake_data(num_sectors)
ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)
##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as building block. Let's start by defining this layer:
import pennylane as qml
def layer(x, params, wires, i0=0, inc=1):
"""Building block of the embedding ansatz"""
i = i0
for j, wire in enumerate(wires):
qml.Hadamard(wires=[wire])
qml.RZ(x[i % len(x)], wires=[wire])
i += inc
qml.RY(params[0, j], wires=[wire])
qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])
##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.
def ansatz(x, params, wires):
"""The embedding ansatz"""
for j, layer_params in enumerate(params):
layer(x, layer_params, wires, i0=j * len(wires))
adjoint_ansatz = qml.adjoint(ansatz)
def random_params(num_wires, num_layers):
"""Generate random variational parameters in the shape for the ansatz."""
return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)
##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.
dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()
##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.
@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
ansatz(x1, params, wires=wires)
adjoint_ansatz(x2, params, wires=wires)
return qml.probs(wires=wires)
##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:
def kernel(x1, x2, params):
return kernel_circuit(x1, x2, params)[0]
##############################################################################
#
# .. note::
# An alternative way to set up the kernel circuit in PennyLane would be
# to use the observable type
# `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
# This is shown in the
# `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
# background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.
init_params = random_params(num_wires=5, num_layers=6)
##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:
kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")
##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# function, which makes use of the symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.
init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)
with np.printoptions(precision=3, suppress=True):
print(K_init)
##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone cannot be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
from sklearn.svm import SVC
##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
# This step does *not* modify the variational parameters in our circuit
#     ansatz. What it does is solve a different optimization task for the
#     parameters :math:`\alpha` and :math:`b` we introduced in the beginning.
svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)
##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.
def accuracy(classifier, X, Y_target):
return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)
accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")
##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex datasets. To this end we will introduce a
# second helper method.
def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
_xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
_zz = np.zeros_like(_xx)
for idx in np.ndindex(*_xx.shape):
_zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])
plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
ax.contourf(
_xx,
_yy,
_zz,
cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
alpha=0.2,
levels=[-1, 0, 1],
)
plot_double_cake_data(X, Y, ax)
return plot_data
##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:
init_plot_data = plot_decision_boundaries(svm, plt.gca())
##############################################################################
# We see that the outer points in the dataset can be classified correctly, but
# the classifier still struggles with the inner circle. But remember, we have a circuit
# with many free parameters! It is reasonable to believe that we can find
# values for those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
# \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
# Seen from a more theoretical side, :math:`\operatorname{KA}`
# is nothing else than the cosine of the angle between the kernel
# matrices :math:`K_1` and :math:`K_2` if we see them as vectors
# in the space of matrices with the Hilbert-Schmidt (or
# Frobenius) scalar product
# :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
# reinforces the geometric picture of how this measure relates
# to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
# k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
# \operatorname{KTA}_{\boldsymbol{y}}(K)
# = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
# = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
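#
# As a small worked example (using a hypothetical :math:`3 \times 3` kernel
# matrix, not one computed from our dataset), the kernel-target alignment can
# be evaluated directly with NumPy:
#
# .. code-block:: python
#
#     import numpy as np
#
#     K = np.array([[1.0, 0.9, 0.1],
#                   [0.9, 1.0, 0.2],
#                   [0.1, 0.2, 1.0]])
#     y = np.array([1, 1, -1])
#
#     T = np.outer(y, y)  # ideal kernel matrix y y^T
#     kta = np.sum(K * T) / (np.sqrt(np.sum(K * K)) * len(y))
#     print(kta)  # approximately 0.726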
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: a high kernel-target alignment is
# only a necessary, not a sufficient, condition for good
# performance of the kernel [#Alignment]_. In other words, a well-performing kernel is
# guaranteed to have good alignment, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:
kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")
##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step; here we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
#     Currently, the function ``qml.kernels.target_alignment`` is not
#     differentiable, making it unfit for gradient-descent optimization.
# We therefore first define a differentiable version of this function.
def target_alignment(
X,
Y,
kernel,
assume_normalized_kernel=False,
rescale_class_labels=True,
):
"""Kernel-target alignment between kernel and labels."""
K = qml.kernels.square_kernel_matrix(
X,
kernel,
assume_normalized_kernel=assume_normalized_kernel,
)
if rescale_class_labels:
nplus = np.count_nonzero(np.array(Y) == 1)
nminus = len(Y) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
else:
_Y = np.array(Y)
T = np.outer(_Y, _Y)
inner_product = np.sum(K * T)
norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
inner_product = inner_product / norm
return inner_product
params = init_params
opt = qml.GradientDescentOptimizer(0.2)
for i in range(500):
# Choose subset of datapoints to compute the KTA on.
subset = np.random.choice(list(range(len(X))), 4)
# Define the cost function for optimization
cost = lambda _params: -target_alignment(
X[subset],
Y[subset],
lambda x1, x2: kernel(x1, x2, _params),
assume_normalized_kernel=True,
)
# Optimization step
params = opt.step(cost, params)
# Report the alignment on the full dataset every 50 steps.
if (i + 1) % 50 == 0:
current_alignment = target_alignment(
X,
Y,
lambda x1, x2: kernel(x1, x2, params),
assume_normalized_kernel=True,
)
print(f"Step {i+1} - Alignment = {current_alignment:.3f}")
##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:
# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)
# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)
# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)
##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:
accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")
##############################################################################
# We have now achieved perfect classification! 🎆
#
# Since SVMs are known for their good generalisation
# behavior, it will be interesting to inspect the decision boundaries of
# our classifier:
trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())
##############################################################################
# Indeed, we see that now not only every data instance falls within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both worlds: on
# the one hand it can adjust itself to the dataset, and on the other hand it
# is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, and <NAME>.
# "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
# `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
# <NAME>, <NAME>, and <NAME>.
# "An overview of kernel alignment and its applications."
# `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
|
tg/release.py | TurboGears/tg2 | 812 | 2329 | """TurboGears project related information"""
version = "2.4.3"
description = "Next generation TurboGears"
long_description="""
TurboGears brings together best-of-breed Python tools
to create a flexible, full-featured, and easy-to-use web
framework.
TurboGears 2 provides an integrated and well tested set of tools for
everything you need to build dynamic, database driven applications.
It provides a full range of tools for front-end JavaScript
development, back-end database development, and everything in between:
* dynamic javascript powered widgets (ToscaWidgets2)
* automatic JSON generation from your controllers
* powerful, designer friendly XHTML based templating
* object or route based URL dispatching
* powerful Object Relational Mappers (SQLAlchemy)
The latest development version is available in the
`TurboGears Git repositories`_.
.. _TurboGears Git repositories:
https://github.com/TurboGears
"""
url="http://www.turbogears.org/"
author= "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and the TurboGears community"
email = "<EMAIL>"
copyright = """Copyright 2005-2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and contributors"""
license = "MIT"
|
applications/ChimeraApplication/tests/chimera_analysis_base_test.py | lkusch/Kratos | 778 | 2389 | <gh_stars>100-1000
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.ChimeraApplication
from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis
class ChimeraAnalysisBaseTest(UnitTest.TestCase):
def setUp(self):
# Set to true to get post-process files for the test
self.print_output = False
def _run_test(self,settings_file_name):
model = KratosMultiphysics.Model()
with open(settings_file_name,'r') as settings_file:
settings = KratosMultiphysics.Parameters(settings_file.read())
# to check the results: add output settings block if needed
if self.print_output:
settings.AddValue("output_processes", KratosMultiphysics.Parameters(r'''{
"vtk_output" : [{
"python_module" : "vtk_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "VtkOutputProcess",
"help" : "This process writes postprocessing files for Paraview",
"Parameters" : {
"model_part_name" : "FluidModelPart.Parts_background_surface",
"output_control_type" : "step",
"output_frequency" : 1,
"file_format" : "ascii",
"output_precision" : 3,
"output_sub_model_parts" : false,
"write_deformed_configuration" : true,
"folder_name" : "test_vtk_output",
"save_output_files_in_folder" : true,
"nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],
"nodal_data_value_variables" : [],
"element_flags" : ["ACTIVE"],
"nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],
"element_data_value_variables" : [],
"condition_data_value_variables" : []
}
},{
"python_module" : "vtk_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "VtkOutputProcess",
"help" : "This process writes postprocessing files for Paraview",
"Parameters" : {
"model_part_name" : "FluidModelPart.Parts_patch_surface",
"output_control_type" : "step",
"output_frequency" : 1,
"file_format" : "ascii",
"output_precision" : 3,
"output_sub_model_parts" : false,
"write_deformed_configuration" : true,
"folder_name" : "test_vtk_output",
"save_output_files_in_folder" : true,
"nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],
"nodal_data_value_variables" : [],
"element_flags" : ["ACTIVE"],
"nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],
"element_data_value_variables" : [],
"condition_data_value_variables" : []
}
}]
}'''))
analysis = FluidChimeraAnalysis(model,settings)
analysis.Run()
|
exercises/pt/exc_01_03_01.py | Jette16/spacy-course | 2,085 | 2392 | # Importar a classe da língua inglesa (English) e criar um objeto nlp
from ____ import ____
nlp = ____
# Process the text
doc = ____("I like tree kangaroos and narwhals.")
# Select the first token
first_token = doc[____]
# Print the first token's text
print(first_token.____)
|
image-generation/slegan/args.py | AaratiAkkapeddi/nnabla-examples | 228 | 2397 | <filename>image-generation/slegan/args.py
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_args(batch_size=8, image_size=256, max_iter=100000):
"""
Get command line arguments.
Arguments set the default values of command line arguments.
"""
import argparse
import os
description = "Example of Lightweight GAN."
parser = argparse.ArgumentParser(description)
parser.add_argument("-d", "--device-id", type=str, default="0",
help="Device id.")
parser.add_argument("-c", "--context", type=str, default="cudnn",
help="Context.")
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type of computation. e.g. "float", "half".')
parser.add_argument("--img-path", type=str,
default="~/AnimalFace-dog",
help="Image path.")
parser.add_argument("--image-size", type=int, default=image_size,
help="Image size.")
parser.add_argument("--batch-size", "-b", type=int, default=batch_size,
help="Batch size.")
parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
help="Max iterations.")
parser.add_argument("--save-interval", type=int, default=50000,
help="Interval for saving models.")
parser.add_argument("--test-interval", type=int, default=5000,
help="Interval for testing models.")
parser.add_argument("--latent", type=int, default=256,
help="Number of latent variables.")
parser.add_argument("--monitor-path", type=str, default="./result/tmp",
help="Monitor path.")
parser.add_argument("--model-load-path", type=str, default=".",
help="Path to load parameters from")
parser.add_argument("--train-samples", type=int, default=-1,
help="Number of data to be used. When -1 is set all data is used.")
parser.add_argument("--lr", type=float, default=2e-4,
help="Learning rate")
parser.add_argument("--aug-list", nargs="+",
default=["lrflip", "translation", "color"])
args = parser.parse_args()
return args
def save_args(args, mode="train"):
from nnabla import logger
import os
if not os.path.exists(args.monitor_path):
os.makedirs(args.monitor_path)
path = "{}/Arguments-{}.txt".format(args.monitor_path, mode)
logger.info("Arguments are saved to {}.".format(path))
with open(path, "w") as fp:
for k, v in sorted(vars(args).items()):
logger.info("{}={}".format(k, v))
fp.write("{}={}\n".format(k, v))
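# A minimal usage sketch (an illustrative addition, not part of the original
# script): parse the command line arguments and persist them before training.
if __name__ == "__main__":
    args = get_args()
    save_args(args, mode="train")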
|
ignite/handlers/time_profilers.py | iamhardikat11/ignite | 4,119 | 2406 | import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor]
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
# compute on non-zero data:
data = data[data > 0]
out = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
if len(data) > 1:
out += [
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
) # type: Union[int, torch.Tensor]
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)] # type: ignore[list-item]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = [] # type: List[float]
self.processing_times = [] # type: List[float]
self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]]
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float]
min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
mean = "None" # type: Union[str, float]
std = "None" # type: Union[str, float]
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adopted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
|
custom_components/snowtire/__init__.py | borys-kupar/smart-home | 128 | 2408 | #
# Copyright (c) 2020, Andrey "Limych" Khrolenok <<EMAIL>>
# Creative Commons BY-NC-SA 4.0 International Public License
# (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)
#
"""
The Snowtire binary sensor.
For more details about this platform, please refer to the documentation at
https://github.com/Limych/ha-snowtire/
"""
|
tests/test_bayes_classifier.py | manishgit138/pomegranate | 3,019 | 2409 | from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
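# Hedged illustration (added for clarity, not part of the original test suite):
# the posteriors asserted in test_hmm_proba above are simply the exponentials
# of the log-posteriors asserted in test_hmm_log_proba, and each row sums to
# one. The helper below checks that relationship for a single row; the name
# _check_log_proba_row is illustrative only.
def _check_log_proba_row(log_row, proba_row):
    assert_array_almost_equal(numpy.exp(log_row), proba_row)
    assert_almost_equal(numpy.sum(proba_row), 1.0)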
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2) |
tests/engine/knowledge_base.py | roshanmaskey/plaso | 1,253 | 2433 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
class KnowledgeBaseTest(shared_test_lib.BaseTestCase):
"""Tests for the knowledge base."""
# pylint: disable=protected-access
_MACOS_PATHS = [
'/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions',
('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/'
'apdfllckaahabafndbhieahigkjlhalf'),
'/private/var/log/system.log',
'/Users/frank/Library/Application Data/Google/Chrome/Default',
'/Users/hans/Library/Application Data/Google/Chrome/Default',
('/Users/frank/Library/Application Data/Google/Chrome/Default/'
'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'),
'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions']
_MACOS_USERS = [
{'name': 'root', 'path': '/var/root', 'sid': '0'},
{'name': 'frank', 'path': '/Users/frank', 'sid': '4052'},
{'name': 'hans', 'path': '/Users/hans', 'sid': '4352'},
{'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}]
_WINDOWS_PATHS = [
'C:\\Users\\Dude\\SomeFolder\\Chrome\\Default\\Extensions',
('C:\\Users\\Dude\\SomeNoneStandardFolder\\Chrome\\Default\\Extensions\\'
'hmjkmjkepdijhoojdojkdfohbdgmmhki'),
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'blpcfgokakmgnkcojhhkbfbldkacnbeo'),
'C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions',
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'icppfcnhkcmnfdhfhphakoifcfokfdhg'),
'C:\\Windows\\System32',
'C:\\Stuff/with path separator\\Folder']
_WINDOWS_USERS = [
{'name': 'dude', 'path': 'C:\\Users\\dude', 'sid': 'S-1'},
{'name': 'frank', 'path': 'C:\\Users\\frank', 'sid': 'S-2'}]
def _SetUserAccounts(self, knowledge_base_object, users):
"""Sets the user accounts in the knowledge base.
Args:
knowledge_base_object (KnowledgeBase): knowledge base.
      users (list[dict[str, str]]): users.
"""
for user in users:
identifier = user.get('sid', user.get('uid', None))
if not identifier:
continue
user_account = artifacts.UserAccountArtifact(
identifier=identifier, user_directory=user.get('path', None),
username=user.get('name', None))
knowledge_base_object.AddUserAccount(user_account)
def testCodepageProperty(self):
"""Tests the codepage property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.codepage, 'cp1252')
def testHostnameProperty(self):
"""Tests the hostname property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.hostname, '')
def testOperatingSystemProperty(self):
"""Tests the operating_system property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertIsNone(operating_system)
knowledge_base_object.SetValue('operating_system', 'Windows')
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertEqual(operating_system, 'Windows')
def testTimezoneProperty(self):
"""Tests the timezone property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')
def testUserAccountsProperty(self):
"""Tests the user accounts property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(len(knowledge_base_object.user_accounts), 0)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertEqual(len(knowledge_base_object.user_accounts), 1)
def testYearProperty(self):
"""Tests the year property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.year, 0)
def testAddUserAccount(self):
"""Tests the AddUserAccount function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
with self.assertRaises(KeyError):
knowledge_base_object.AddUserAccount(user_account)
def testAddEnvironmentVariable(self):
"""Tests the AddEnvironmentVariable function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
with self.assertRaises(KeyError):
knowledge_base_object.AddEnvironmentVariable(environment_variable)
def testGetEnvironmentVariable(self):
"""Tests the GetEnvironmentVariable functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'SystemRoot')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'sYsTeMrOoT')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'Bogus')
self.assertIsNone(test_environment_variable)
def testGetEnvironmentVariables(self):
"""Tests the GetEnvironmentVariables function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='WinDir', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variables = knowledge_base_object.GetEnvironmentVariables()
self.assertEqual(len(environment_variables), 2)
def testGetHostname(self):
"""Tests the GetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, '')
# TODO: add tests for GetMountPoint.
def testGetSourceConfigurationArtifacts(self):
"""Tests the GetSourceConfigurationArtifacts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
source_configurations = (
knowledge_base_object.GetSourceConfigurationArtifacts())
self.assertEqual(len(source_configurations), 1)
self.assertIsNotNone(source_configurations[0])
system_configuration = source_configurations[0].system_configuration
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
def testGetSystemConfigurationArtifact(self):
"""Tests the _GetSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
system_configuration = (
knowledge_base_object._GetSystemConfigurationArtifact())
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
# TODO: add tests for GetTextPrepend.
def testGetUsernameByIdentifier(self):
"""Tests the GetUsernameByIdentifier function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
    username = knowledge_base_object.GetUsernameByIdentifier('1000')
    self.assertEqual(username, 'testuser')
    username = knowledge_base_object.GetUsernameByIdentifier(1000)
    self.assertEqual(username, '')
    username = knowledge_base_object.GetUsernameByIdentifier('1001')
    self.assertEqual(username, '')
def testGetUsernameForPath(self):
"""Tests the GetUsernameForPath function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[4])
self.assertEqual(username, 'hans')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertIsNone(username)
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[2])
self.assertEqual(username, 'frank')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[2])
self.assertIsNone(username)
def testGetSetValue(self):
"""Tests the Get and SetValue functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
expected_value = 'test value'
knowledge_base_object.SetValue('Test', expected_value)
value = knowledge_base_object.GetValue('Test')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('tEsT')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('Bogus')
self.assertIsNone(value)
def testHasUserAccounts(self):
"""Tests the HasUserAccounts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertFalse(knowledge_base_object.HasUserAccounts())
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertTrue(knowledge_base_object.HasUserAccounts())
def testReadSystemConfigurationArtifact(self):
"""Tests the ReadSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.hostname = artifacts.HostnameArtifact(
name='myhost.mydomain')
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
system_configuration.user_accounts.append(user_account)
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, 'myhost.mydomain')
def testSetActiveSession(self):
"""Tests the SetActiveSession function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')
self.assertEqual(
knowledge_base_object._active_session,
'ddda05bedf324cbd99fa8c24b8a0037a')
knowledge_base_object.SetActiveSession(
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
self.assertEqual(
knowledge_base_object._active_session,
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
def testSetCodepage(self):
"""Tests the SetCodepage function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetCodepage('cp1252')
with self.assertRaises(ValueError):
knowledge_base_object.SetCodepage('bogus')
def testSetHostname(self):
"""Tests the SetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
# TODO: add tests for SetMountPoint.
# TODO: add tests for SetTextPrepend.
def testSetTimeZone(self):
"""Tests the SetTimeZone function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
time_zone_artifact = artifacts.TimeZoneArtifact(
localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112',
name='Eastern Standard Time')
knowledge_base_object.AddAvailableTimeZone(time_zone_artifact)
# Set an IANA time zone name.
knowledge_base_object.SetTimeZone('Europe/Zurich')
self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')
# Set a Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern Standard Time')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a localized Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a MUI form Windows time zone name.
knowledge_base_object.SetTimeZone('@tzres.dll,-112')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
with self.assertRaises(ValueError):
knowledge_base_object.SetTimeZone('Bogus')
if __name__ == '__main__':
unittest.main()
|
Imaging/Core/Testing/Python/TestHSVToRGB.py | forestGzh/VTK | 1,755 | 2454 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.SetSize(320,320)
viewer.Render()
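# Hedged illustration (added for clarity, not part of the original VTK test):
# the pipeline above relies on RGB -> HSV -> RGB being a faithful round trip.
# The check below exercises the same property with Python's standard colorsys
# module on a few of the draw colors used above; it sanity-checks the idea
# only, not VTK's own converters.
import colorsys
def _check_rgb_hsv_round_trip():
    for r, g, b in [(255, 0, 0), (128, 128, 0), (0, 255, 0), (0, 128, 128)]:
        h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
        r2, g2, b2 = [int(round(c * 255.0)) for c in colorsys.hsv_to_rgb(h, s, v)]
        assert (r2, g2, b2) == (r, g, b)
_check_rgb_hsv_round_trip()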
# --- end of script --
|
tests/test_handler_surface_distance.py | dyollb/MONAI | 2,971 | 2456 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
"""
Return a 3D image with a sphere inside. Voxel values will be
1 inside the sphere, and 0 elsewhere.
Args:
radius: radius of sphere (in terms of number of voxels, can be partial)
centre: location of sphere centre.
im_shape: shape of image to create
See also:
:py:meth:`~create_test_image_3d`
"""
# Create image
image = np.zeros(im_shape, dtype=np.int32)
spy, spx, spz = np.ogrid[
-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
]
circle = (spx * spx + spy * spy + spz * spz) <= radius * radius
image[circle] = 1
image[~circle] = 0
return image
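# Hedged illustration (added for clarity, not part of the original test file):
# the mask built above is 1 exactly for voxels whose squared offset from
# ``centre`` is <= radius**2 and 0 elsewhere. The quick check below probes one
# voxel inside and one outside the default sphere using only the helper defined
# above; the name _check_sphere_mask is illustrative only.
def _check_sphere_mask():
    seg = create_spherical_seg_3d(radius=20.0, centre=(49, 49, 49))
    assert seg[49, 49, 49] == 1   # the centre voxel lies inside the sphere
    assert seg[49, 49, 74] == 0   # 25 voxels away along z lies outside (25 > 20)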
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerSurfaceDistance(unittest.TestCase):
# TODO test multi node Surface Distance
def test_compute(self):
sur_metric = SurfaceDistance(include_background=True)
def _val_func(engine, batch):
pass
engine = Engine(_val_func)
sur_metric.attach(engine, "surface_distance")
y_pred, y = TEST_SAMPLE_1
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4)
y_pred, y = TEST_SAMPLE_2
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4)
y_pred, y = TEST_SAMPLE_3
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
y_pred, y = TEST_SAMPLE_4
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
def test_shape_mismatch(self):
sur_metric = SurfaceDistance(include_background=True)
with self.assertRaises((AssertionError, ValueError)):
y_pred = TEST_SAMPLE_1[0]
y = torch.ones((1, 1, 10, 10, 10))
sur_metric.update([y_pred, y])
if __name__ == "__main__":
unittest.main()
|
astroplan/constraints.py | edose/astroplan | 160 | 2488 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Specify constraints to determine which targets are observable for
an observer.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
from abc import ABCMeta, abstractmethod
import datetime
import time
import warnings
# Third-party
from astropy.time import Time
import astropy.units as u
from astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord
from astropy import table
import numpy as np
from numpy.lib.stride_tricks import as_strided
# Package
from .moon import moon_illumination
from .utils import time_grid_from_range
from .target import get_skycoord
__all__ = ["AltitudeConstraint", "AirmassConstraint", "AtNightConstraint",
"is_observable", "is_always_observable", "time_grid_from_range",
"GalacticLatitudeConstraint", "SunSeparationConstraint",
"MoonSeparationConstraint", "MoonIlluminationConstraint",
"LocalTimeConstraint", "PrimaryEclipseConstraint",
"SecondaryEclipseConstraint", "Constraint", "TimeConstraint",
"observability_table", "months_observable", "max_best_rescale",
"min_best_rescale", "PhaseConstraint", "is_event_observable"]
_current_year = time.localtime().tm_year # needed for backward compatibility
_current_year_time_range = Time( # needed for backward compatibility
[str(_current_year) + '-01-01',
str(_current_year) + '-12-31']
)
def _make_cache_key(times, targets):
"""
Make a unique key to reference this combination of ``times`` and ``targets``.
Often, we wish to store expensive calculations for a combination of
``targets`` and ``times`` in a cache on an ``observer``` object. This
routine will provide an appropriate, hashable, key to store these
calculations in a dictionary.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
targets : `~astropy.coordinates.SkyCoord`
Target or list of targets.
Returns
-------
cache_key : tuple
A hashable tuple for use as a cache key
"""
# make a tuple from times
try:
timekey = tuple(times.jd) + times.shape
except BaseException: # must be scalar
timekey = (times.jd,)
# make hashable thing from targets coords
try:
if hasattr(targets, 'frame'):
# treat as a SkyCoord object. Accessing the longitude
# attribute of the frame data should be unique and is
# quicker than accessing the ra attribute.
targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape
else:
# assume targets is a string.
targkey = (targets,)
except BaseException:
targkey = (targets.frame.data.lon,)
return timekey + targkey
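# Hedged illustration (added for clarity, not part of astroplan): _make_cache_key
# flattens ``times`` and ``targets`` into a hashable tuple so expensive alt/az
# results can be memoised in a plain dict on the observer. The sketch below
# builds keys for an array of times paired with a named body and with an array
# SkyCoord; the name _demo_make_cache_key is illustrative only.
def _demo_make_cache_key():
    times = Time(['2020-01-01 00:00', '2020-01-01 01:00'])
    targets = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
    key_for_body = _make_cache_key(times, 'moon')     # string target, as used for the moon cache
    key_for_coords = _make_cache_key(times, targets)  # coordinate targets
    assert key_for_body != key_for_coords             # distinct cache entries
    return key_for_body, key_for_coords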
def _get_altaz(times, observer, targets, force_zero_pressure=False):
"""
Calculate alt/az for ``target`` at times linearly spaced between
the two times in ``time_range`` with grid spacing ``time_resolution``
for ``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets.
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``.
force_zero_pressure : bool
Forcefully use 0 pressure.
Returns
-------
altaz_dict : dict
Dictionary containing two key-value pairs. (1) 'times' contains the
times for the alt/az computations, (2) 'altaz' contains the
corresponding alt/az coordinates at those times.
"""
if not hasattr(observer, '_altaz_cache'):
observer._altaz_cache = {}
# convert times, targets to tuple for hashing
aakey = _make_cache_key(times, targets)
if aakey not in observer._altaz_cache:
try:
if force_zero_pressure:
observer_old_pressure = observer.pressure
observer.pressure = 0
altaz = observer.altaz(times, targets, grid_times_targets=False)
observer._altaz_cache[aakey] = dict(times=times,
altaz=altaz)
finally:
if force_zero_pressure:
observer.pressure = observer_old_pressure
return observer._altaz_cache[aakey]
def _get_moon_data(times, observer, force_zero_pressure=False):
"""
Calculate moon altitude az and illumination for an array of times for
``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``.
force_zero_pressure : bool
Forcefully use 0 pressure.
Returns
-------
moon_dict : dict
Dictionary containing three key-value pairs. (1) 'times' contains the
times for the computations, (2) 'altaz' contains the
        corresponding alt/az coordinates at those times and (3) 'illum' contains
the moon illumination for those times.
"""
if not hasattr(observer, '_moon_cache'):
observer._moon_cache = {}
# convert times to tuple for hashing
aakey = _make_cache_key(times, 'moon')
if aakey not in observer._moon_cache:
try:
if force_zero_pressure:
observer_old_pressure = observer.pressure
observer.pressure = 0
altaz = observer.moon_altaz(times)
illumination = np.array(moon_illumination(times))
observer._moon_cache[aakey] = dict(times=times,
illum=illumination,
altaz=altaz)
finally:
if force_zero_pressure:
observer.pressure = observer_old_pressure
return observer._moon_cache[aakey]
def _get_meridian_transit_times(times, observer, targets):
"""
Calculate next meridian transit for an array of times for ``targets`` and
``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
Returns
-------
time_dict : dict
Dictionary containing a key-value pair. 'times' contains the
meridian_transit times.
"""
if not hasattr(observer, '_meridian_transit_cache'):
observer._meridian_transit_cache = {}
# convert times to tuple for hashing
aakey = _make_cache_key(times, targets)
if aakey not in observer._meridian_transit_cache:
meridian_transit_times = observer.target_meridian_transit_time(times, targets)
observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times)
return observer._meridian_transit_cache[aakey]
@abstractmethod
class Constraint(object):
"""
Abstract class for objects defining observational constraints.
"""
__metaclass__ = ABCMeta
def __call__(self, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour,
grid_times_targets=False):
"""
Compute the constraint for this class
Parameters
----------
observer : `~astroplan.Observer`
the observation location from which to apply the constraints
targets : sequence of `~astroplan.Target`
The targets on which to apply the constraints.
times : `~astropy.time.Time`
The times to compute the constraint.
            If both ``times`` and ``time_range`` are given, ``times`` takes
            precedence and ``time_range`` is ignored.
time_range : `~astropy.time.Time` (length = 2)
Lower and upper bounds on time sequence.
time_grid_resolution : `~astropy.units.quantity`
Time-grid spacing
grid_times_targets : bool
if True, grids the constraint result with targets along the first
index and times along the second. Otherwise, we rely on broadcasting
the shapes together using standard numpy rules.
Returns
-------
constraint_result : 1D or 2D array of float or bool
            The constraints. If 2D, targets are along the first index and times along
the second.
"""
if times is None and time_range is not None:
times = time_grid_from_range(time_range,
time_resolution=time_grid_resolution)
if grid_times_targets:
targets = get_skycoord(targets)
# TODO: these broadcasting operations are relatively slow
# but there is potential for huge speedup if the end user
# disables gridding and re-shapes the coords themselves
# prior to evaluating multiple constraints.
if targets.isscalar:
# ensure we have a (1, 1) shape coord
targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]
else:
targets = targets[..., np.newaxis]
times, targets = observer._preprocess_inputs(times, targets, grid_times_targets=False)
result = self.compute_constraint(times, observer, targets)
# make sure the output has the same shape as would result from
# broadcasting times and targets against each other
if targets is not None:
            # broadcasting times vs. targets is slow due to the
            # complex nature of these objects. We make two
            # simple numpy arrays with the same shapes and
            # broadcast these to find the correct output shape
shp1, shp2 = times.shape, targets.shape
x = np.array([1])
a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
output_shape = np.broadcast(a, b).shape
if output_shape != np.array(result).shape:
result = np.broadcast_to(result, output_shape)
return result
@abstractmethod
def compute_constraint(self, times, observer, targets):
"""
Actually do the real work of computing the constraint. Subclasses
override this.
Parameters
----------
times : `~astropy.time.Time`
The times to compute the constraint
observer : `~astroplan.Observer`
            the observation location from which to apply the constraints
targets : sequence of `~astroplan.Target`
The targets on which to apply the constraints.
Returns
-------
constraint_result : 2D array of float or bool
The constraints, with targets along the first index and times along
the second.
"""
# Should be implemented on each subclass of Constraint
raise NotImplementedError
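# Hedged illustration (added for clarity, not part of astroplan): Constraint.__call__
# above works out the broadcast shape of ``times`` and ``targets`` without
# broadcasting the heavy Time/SkyCoord objects themselves, by pairing zero-stride
# views of a one-element array that merely mimic the two shapes. The sketch
# reproduces that trick for two example shapes; _demo_broadcast_shape is
# illustrative only.
def _demo_broadcast_shape(shp1=(5,), shp2=(3, 1)):
    x = np.array([1])
    a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
    b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
    return np.broadcast(a, b).shape   # -> (3, 5) for the default shapes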
class AltitudeConstraint(Constraint):
"""
Constrain the altitude of the target.
.. note::
This can misbehave if you try to constrain negative altitudes, as
        the `~astropy.coordinates.AltAz` frame tends to mishandle negative altitudes.
Parameters
----------
min : `~astropy.units.Quantity` or `None`
Minimum altitude of the target (inclusive). `None` indicates no limit.
max : `~astropy.units.Quantity` or `None`
Maximum altitude of the target (inclusive). `None` indicates no limit.
boolean_constraint : bool
If True, the constraint is treated as a boolean (True for within the
limits and False for outside). If False, the constraint returns a
float on [0, 1], where 0 is the min altitude and 1 is the max.
"""
def __init__(self, min=None, max=None, boolean_constraint=True):
if min is None:
self.min = -90*u.deg
else:
self.min = min
if max is None:
self.max = 90*u.deg
else:
self.max = max
self.boolean_constraint = boolean_constraint
def compute_constraint(self, times, observer, targets):
cached_altaz = _get_altaz(times, observer, targets)
alt = cached_altaz['altaz'].alt
if self.boolean_constraint:
lowermask = self.min <= alt
uppermask = alt <= self.max
return lowermask & uppermask
else:
return max_best_rescale(alt, self.min, self.max)
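# Hedged illustration (added for clarity, not part of astroplan): with
# boolean_constraint=False the altitude above is mapped onto [0, 1], 0 at ``min``
# and 1 at ``max``, via max_best_rescale. The helper below is a hand-rolled
# linear rescale written only to convey that idea; treat it as an assumption
# about the behaviour of max_best_rescale, not the library's implementation.
def _demo_linear_altitude_score(alt_deg, min_deg=20.0, max_deg=80.0):
    scaled = (np.asarray(alt_deg, dtype=float) - min_deg) / (max_deg - min_deg)
    return np.clip(scaled, 0.0, 1.0)   # e.g. 20 deg -> 0.0, 50 deg -> 0.5, 80 deg -> 1.0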
class AirmassConstraint(AltitudeConstraint):
"""
Constrain the airmass of a target.
In the current implementation the airmass is approximated by the secant of
the zenith angle.
.. note::
The ``max`` and ``min`` arguments appear in the order (max, min)
in this initializer to support the common case for users who care
about the upper limit on the airmass (``max``) and not the lower
limit.
Parameters
----------
max : float or `None`
Maximum airmass of the target. `None` indicates no limit.
min : float or `None`
Minimum airmass of the target. `None` indicates no limit.
    boolean_constraint : bool
        If True, the constraint is treated as a boolean (True when the airmass
        is within the limits, False otherwise). If False, the constraint
        returns a float on [0, 1].
Examples
--------
To create a constraint that requires the airmass be "better than 2",
i.e. at a higher altitude than airmass=2::
AirmassConstraint(2)
"""
def __init__(self, max=None, min=1, boolean_constraint=True):
self.min = min
self.max = max
self.boolean_constraint = boolean_constraint
def compute_constraint(self, times, observer, targets):
cached_altaz = _get_altaz(times, observer, targets)
secz = cached_altaz['altaz'].secz.value
if self.boolean_constraint:
if self.min is None and self.max is not None:
mask = secz <= self.max
elif self.max is None and self.min is not None:
mask = self.min <= secz
elif self.min is not None and self.max is not None:
mask = (self.min <= secz) & (secz <= self.max)
else:
raise ValueError("No max and/or min specified in "
"AirmassConstraint.")
return mask
else:
if self.max is None:
raise ValueError("Cannot have a float AirmassConstraint if max is None.")
else:
mx = self.max
mi = 1 if self.min is None else self.min
# values below 1 should be disregarded
return min_best_rescale(secz, mi, mx, less_than_min=0)
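# Hedged illustration (added for clarity, not part of astroplan): the class above
# approximates airmass by the secant of the zenith angle, sec(z) = 1 / sin(altitude),
# which is what the cached ``secz`` values hold. The sketch evaluates that
# plane-parallel formula for a few altitudes; _demo_secant_airmass is
# illustrative only.
def _demo_secant_airmass(altitude_deg=(90.0, 60.0, 30.0)):
    alt = np.radians(np.asarray(altitude_deg, dtype=float))
    return 1.0 / np.sin(alt)   # 90 deg -> 1.0, 30 deg -> 2.0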
class AtNightConstraint(Constraint):
"""
Constrain the Sun to be below ``horizon``.
"""
@u.quantity_input(horizon=u.deg)
def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True):
"""
Parameters
----------
max_solar_altitude : `~astropy.units.Quantity`
The altitude of the sun below which it is considered to be "night"
(inclusive).
force_pressure_zero : bool (optional)
Force the pressure to zero for solar altitude calculations. This
avoids errors in the altitude of the Sun that can occur when the
Sun is below the horizon and the corrections for atmospheric
refraction return nonsense values.
"""
self.max_solar_altitude = max_solar_altitude
self.force_pressure_zero = force_pressure_zero
@classmethod
def twilight_civil(cls, **kwargs):
"""
Consider nighttime as time between civil twilights (-6 degrees).
"""
return cls(max_solar_altitude=-6*u.deg, **kwargs)
@classmethod
def twilight_nautical(cls, **kwargs):
"""
Consider nighttime as time between nautical twilights (-12 degrees).
"""
return cls(max_solar_altitude=-12*u.deg, **kwargs)
@classmethod
def twilight_astronomical(cls, **kwargs):
"""
Consider nighttime as time between astronomical twilights (-18 degrees).
"""
return cls(max_solar_altitude=-18*u.deg, **kwargs)
def _get_solar_altitudes(self, times, observer, targets):
if not hasattr(observer, '_altaz_cache'):
observer._altaz_cache = {}
aakey = _make_cache_key(times, 'sun')
if aakey not in observer._altaz_cache:
try:
if self.force_pressure_zero:
observer_old_pressure = observer.pressure
observer.pressure = 0
# find solar altitude at these times
altaz = observer.altaz(times, get_sun(times))
altitude = altaz.alt
# cache the altitude
observer._altaz_cache[aakey] = dict(times=times,
altitude=altitude)
finally:
if self.force_pressure_zero:
observer.pressure = observer_old_pressure
else:
altitude = observer._altaz_cache[aakey]['altitude']
return altitude
def compute_constraint(self, times, observer, targets):
solar_altitude = self._get_solar_altitudes(times, observer, targets)
mask = solar_altitude <= self.max_solar_altitude
return mask
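# Hedged illustration (added for clarity, not part of astroplan): the three
# twilight constructors above differ only in the solar-altitude threshold they
# hand to __init__ (-6, -12 and -18 degrees for civil, nautical and astronomical
# twilight). The sketch collects those thresholds; _demo_twilight_thresholds is
# illustrative only.
def _demo_twilight_thresholds():
    return {
        'civil': AtNightConstraint.twilight_civil().max_solar_altitude,
        'nautical': AtNightConstraint.twilight_nautical().max_solar_altitude,
        'astronomical': AtNightConstraint.twilight_astronomical().max_solar_altitude,
    }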
class GalacticLatitudeConstraint(Constraint):
"""
Constrain the distance between the Galactic plane and some targets.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable Galactic latitude of target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
            Maximum acceptable Galactic latitude of target (inclusive).
`None` indicates no limit.
"""
self.min = min
self.max = max
def compute_constraint(self, times, observer, targets):
separation = abs(targets.transform_to(Galactic).b)
if self.min is None and self.max is not None:
mask = self.max >= separation
elif self.max is None and self.min is not None:
mask = self.min <= separation
elif self.min is not None and self.max is not None:
mask = ((self.min <= separation) & (separation <= self.max))
else:
raise ValueError("No max and/or min specified in "
"GalacticLatitudeConstraint.")
return mask
class SunSeparationConstraint(Constraint):
"""
Constrain the distance between the Sun and some targets.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable separation between Sun and target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
Maximum acceptable separation between Sun and target (inclusive).
`None` indicates no limit.
"""
self.min = min
self.max = max
def compute_constraint(self, times, observer, targets):
        # use get_body rather than get_sun here, since
# it returns the Sun's coordinates in an observer
# centred frame, so the separation is as-seen
# by the observer.
# 'get_sun' returns ICRS coords.
sun = get_body('sun', times, location=observer.location)
solar_separation = sun.separation(targets)
if self.min is None and self.max is not None:
mask = self.max >= solar_separation
elif self.max is None and self.min is not None:
mask = self.min <= solar_separation
elif self.min is not None and self.max is not None:
mask = ((self.min <= solar_separation) &
(solar_separation <= self.max))
else:
raise ValueError("No max and/or min specified in "
"SunSeparationConstraint.")
return mask
class MoonSeparationConstraint(Constraint):
"""
Constrain the distance between the Earth's moon and some targets.
"""
def __init__(self, min=None, max=None, ephemeris=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable separation between moon and target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
Maximum acceptable separation between moon and target (inclusive).
`None` indicates no limit.
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
"""
self.min = min
self.max = max
self.ephemeris = ephemeris
def compute_constraint(self, times, observer, targets):
# removed the location argument here, which causes small <1 deg
        # inaccuracies, but it is needed until astropy PR #5897 is released
# which should be astropy 1.3.2
moon = get_moon(times,
ephemeris=self.ephemeris)
# note to future editors - the order matters here
# moon.separation(targets) is NOT the same as targets.separation(moon)
# the former calculates the separation in the frame of the moon coord
# which is GCRS, and that is what we want.
moon_separation = moon.separation(targets)
if self.min is None and self.max is not None:
mask = self.max >= moon_separation
elif self.max is None and self.min is not None:
mask = self.min <= moon_separation
elif self.min is not None and self.max is not None:
mask = ((self.min <= moon_separation) &
(moon_separation <= self.max))
else:
raise ValueError("No max and/or min specified in "
"MoonSeparationConstraint.")
return mask
class MoonIlluminationConstraint(Constraint):
"""
Constrain the fractional illumination of the Earth's moon.
Constraint is also satisfied if the Moon has set.
"""
def __init__(self, min=None, max=None, ephemeris=None):
"""
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
`~astropy.coordinates.solar_system_ephemeris` (which is
set to 'builtin' by default).
"""
self.min = min
self.max = max
self.ephemeris = ephemeris
@classmethod
def dark(cls, min=None, max=0.25, **kwargs):
"""
initialize a `~astroplan.constraints.MoonIlluminationConstraint`
with defaults of no minimum and a maximum of 0.25
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
"""
return cls(min, max, **kwargs)
@classmethod
def grey(cls, min=0.25, max=0.65, **kwargs):
"""
initialize a `~astroplan.constraints.MoonIlluminationConstraint`
with defaults of a minimum of 0.25 and a maximum of 0.65
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
"""
return cls(min, max, **kwargs)
@classmethod
def bright(cls, min=0.65, max=None, **kwargs):
"""
initialize a `~astroplan.constraints.MoonIlluminationConstraint`
with defaults of a minimum of 0.65 and no maximum
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
"""
return cls(min, max, **kwargs)
def compute_constraint(self, times, observer, targets):
# first is the moon up?
cached_moon = _get_moon_data(times, observer)
moon_alt = cached_moon['altaz'].alt
moon_down_mask = moon_alt < 0
moon_up_mask = moon_alt >= 0
illumination = cached_moon['illum']
if self.min is None and self.max is not None:
mask = (self.max >= illumination) | moon_down_mask
elif self.max is None and self.min is not None:
mask = (self.min <= illumination) & moon_up_mask
elif self.min is not None and self.max is not None:
mask = ((self.min <= illumination) &
(illumination <= self.max)) & moon_up_mask
else:
raise ValueError("No max and/or min specified in "
"MoonSeparationConstraint.")
return mask
class LocalTimeConstraint(Constraint):
"""
Constrain the observable hours.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~datetime.time`
Earliest local time (inclusive). `None` indicates no limit.
max : `~datetime.time`
Latest local time (inclusive). `None` indicates no limit.
Examples
--------
Constrain the observations to targets that are observable between
23:50 and 04:08 local time:
>>> from astroplan import Observer
>>> from astroplan.constraints import LocalTimeConstraint
>>> import datetime as dt
>>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii")
>>> # bound times between 23:50 and 04:08 local Hawaiian time
>>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8))
"""
self.min = min
self.max = max
if self.min is None and self.max is None:
raise ValueError("You must at least supply either a minimum or a maximum time.")
if self.min is not None:
if not isinstance(self.min, datetime.time):
raise TypeError("Time limits must be specified as datetime.time objects.")
if self.max is not None:
if not isinstance(self.max, datetime.time):
raise TypeError("Time limits must be specified as datetime.time objects.")
def compute_constraint(self, times, observer, targets):
timezone = None
# get timezone from time objects, or from observer
if self.min is not None:
timezone = self.min.tzinfo
elif self.max is not None:
timezone = self.max.tzinfo
if timezone is None:
timezone = observer.timezone
if self.min is not None:
min_time = self.min
else:
min_time = datetime.time(0, 0, 0)
if self.max is not None:
max_time = self.max
else:
max_time = datetime.time(23, 59, 59)
# If time limits occur on same day:
if min_time < max_time:
try:
mask = np.array([min_time <= t.time() <= max_time for t in times.datetime])
except BaseException: # scalar time: use np.bool_ so shape queries don't cause problems
mask = np.bool_(min_time <= times.datetime.time() <= max_time)
# If time boundaries straddle midnight:
else:
try:
mask = np.array([(t.time() >= min_time) or
(t.time() <= max_time) for t in times.datetime])
except BaseException:
mask = np.bool_((times.datetime.time() >= min_time) or
(times.datetime.time() <= max_time))
return mask
class TimeConstraint(Constraint):
"""Constrain the observing time to be within certain time limits.
An example use case for this class would be to associate an acceptable
time range with a specific observing block. This can be useful if not
all observing blocks are valid over the time limits used in calls
to `is_observable` or `is_always_observable`.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.time.Time`
Earliest time (inclusive). `None` indicates no limit.
max : `~astropy.time.Time`
Latest time (inclusive). `None` indicates no limit.
Examples
--------
Constrain the observations to targets that are observable between
2016-03-28 and 2016-03-30:
>>> from astroplan import Observer
>>> from astropy.time import Time
>>> subaru = Observer.at_site("Subaru")
>>> t1 = Time("2016-03-28T12:00:00")
>>> t2 = Time("2016-03-30T12:00:00")
>>> constraint = TimeConstraint(t1,t2)
"""
self.min = min
self.max = max
if self.min is None and self.max is None:
raise ValueError("You must at least supply either a minimum or a "
"maximum time.")
if self.min is not None:
if not isinstance(self.min, Time):
raise TypeError("Time limits must be specified as "
"astropy.time.Time objects.")
if self.max is not None:
if not isinstance(self.max, Time):
raise TypeError("Time limits must be specified as "
"astropy.time.Time objects.")
def compute_constraint(self, times, observer, targets):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
min_time = Time("1950-01-01T00:00:00") if self.min is None else self.min
max_time = Time("2120-01-01T00:00:00") if self.max is None else self.max
mask = np.logical_and(times > min_time, times < max_time)
return mask
class PrimaryEclipseConstraint(Constraint):
"""
Constrain observations to times during primary eclipse.
"""
def __init__(self, eclipsing_system):
"""
Parameters
----------
eclipsing_system : `~astroplan.periodic.EclipsingSystem`
System which must be in primary eclipse.
"""
self.eclipsing_system = eclipsing_system
def compute_constraint(self, times, observer=None, targets=None):
mask = self.eclipsing_system.in_primary_eclipse(times)
return mask
class SecondaryEclipseConstraint(Constraint):
"""
Constrain observations to times during secondary eclipse.
"""
def __init__(self, eclipsing_system):
"""
Parameters
----------
eclipsing_system : `~astroplan.periodic.EclipsingSystem`
System which must be in secondary eclipse.
"""
self.eclipsing_system = eclipsing_system
def compute_constraint(self, times, observer=None, targets=None):
mask = self.eclipsing_system.in_secondary_eclipse(times)
return mask
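# --------------------------------------------------------------------------
# Editor's sketch (not part of the original astroplan source): how the two
# eclipse constraints above are typically built from an EclipsingSystem.
# The ephemeris values and times are illustrative assumptions.
def _example_eclipse_constraint_usage():
    from astroplan import EclipsingSystem
    from astropy.time import Time
    import astropy.units as u

    epoch = Time("2017-01-01 02:00")  # assumed mid-eclipse epoch
    system = EclipsingSystem(primary_eclipse_time=epoch,
                             orbital_period=3.5 * u.day,
                             duration=2 * u.hour)
    primary = PrimaryEclipseConstraint(system)
    secondary = SecondaryEclipseConstraint(system)
    times = Time(["2017-01-01 02:00", "2017-01-02 02:00"])
    # Each constraint returns a boolean mask: True where the system is eclipsing.
    return primary.compute_constraint(times), secondary.compute_constraint(times)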
class PhaseConstraint(Constraint):
"""
Constrain observations to times in some range of phases for a periodic event
(e.g.~transiting exoplanets, eclipsing binaries).
"""
def __init__(self, periodic_event, min=None, max=None):
"""
Parameters
----------
periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass
System on which to compute the phase. For example, the system
could be an eclipsing or non-eclipsing binary, or exoplanet system.
min : float (optional)
Minimum phase (inclusive) on interval [0, 1). Default is zero.
max : float (optional)
Maximum phase (inclusive) on interval [0, 1). Default is one.
Examples
--------
To constrain observations on orbital phases between 0.4 and 0.6,
>>> from astroplan import PeriodicEvent
>>> from astropy.time import Time
>>> import astropy.units as u
>>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day)
>>> constraint = PhaseConstraint(binary, min=0.4, max=0.6)
The minimum and maximum phase must be described on the interval [0, 1).
To constrain observations on orbital phases between 0.6 and 1.2, for
example, you should subtract one from the second number:
>>> constraint = PhaseConstraint(binary, min=0.6, max=0.2)
"""
self.periodic_event = periodic_event
self.min = min if min is not None else 0.0
self.max = max if max is not None else 1.0
if (self.min < 0) or (self.min > 1) or (self.max < 0) or (self.max > 1):
raise ValueError('The minimum and maximum of the PhaseConstraint must be '
'within the interval [0, 1).')
def compute_constraint(self, times, observer=None, targets=None):
phase = self.periodic_event.phase(times)
mask = np.where(self.max > self.min,
(phase >= self.min) & (phase <= self.max),
(phase >= self.min) | (phase <= self.max))
return mask
def is_always_observable(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
A function to determine whether ``targets`` are always observable throughout
``time_range`` given the ``constraints`` for a
particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
ever_observable : list
List of booleans of same length as ``targets`` for whether or not each
target is observable in the time range given the constraints.
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
return np.all(constraint_arr, axis=1)
def is_observable(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
Determines if the ``targets`` are observable during ``time_range`` given
the ``constraints`` for a particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
ever_observable : list
List of booleans of same length as ``targets`` for whether or not each
target is ever observable in the time range given the constraints.
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
return np.any(constraint_arr, axis=1)
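# --------------------------------------------------------------------------
# Editor's sketch (not part of the original astroplan source): applying the
# two helpers above over one night. Site, targets and constraint values are
# illustrative assumptions.
def _example_is_observable_usage():
    from astroplan import Observer, FixedTarget, AltitudeConstraint, AirmassConstraint
    from astropy.time import Time
    import astropy.units as u

    observer = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("Vega"), FixedTarget.from_name("Sirius")]
    constraints = [AltitudeConstraint(min=30 * u.deg), AirmassConstraint(max=2)]
    time_range = Time(["2016-08-01 06:00", "2016-08-01 12:00"])
    # One boolean per target: ever observable / always observable in the range.
    ever = is_observable(constraints, observer, targets, time_range=time_range)
    always = is_always_observable(constraints, observer, targets, time_range=time_range)
    return ever, always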
def is_event_observable(constraints, observer, target, times=None,
times_ingress_egress=None):
"""
Determines if the ``target`` is observable at each time in ``times``, given
constraints in ``constraints`` for a particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target
times : `~astropy.time.Time` (optional)
Array of mid-event times on which to test the constraints
times_ingress_egress : `~astropy.time.Time` (optional)
Array of ingress and egress times for ``N`` events, with shape
(``N``, 2).
Returns
-------
event_observable : `~numpy.ndarray`
Array of booleans of same length as ``times`` for whether or not the
target is ever observable at each time, given the constraints.
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
if times is not None:
applied_constraints = [constraint(observer, target, times=times,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
else:
times_ing = times_ingress_egress[:, 0]
times_egr = times_ingress_egress[:, 1]
applied_constraints_ing = [constraint(observer, target, times=times_ing,
grid_times_targets=True)
for constraint in constraints]
applied_constraints_egr = [constraint(observer, target, times=times_egr,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing),
np.logical_and.reduce(applied_constraints_egr))
return constraint_arr
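# --------------------------------------------------------------------------
# Editor's sketch (not part of the original astroplan source): checking
# transit events with ingress/egress pairs of shape (N, 2), as documented
# above. The ephemeris, site and target are illustrative assumptions.
def _example_is_event_observable_usage():
    from astroplan import Observer, FixedTarget, AltitudeConstraint
    from astropy.time import Time
    import astropy.units as u

    observer = Observer.at_site("Subaru")
    target = FixedTarget.from_name("HD 189733")
    constraints = [AltitudeConstraint(min=30 * u.deg)]
    # Two assumed events, each given as an (ingress, egress) pair.
    ing_egr = Time([["2016-08-01 06:00", "2016-08-01 08:00"],
                    ["2016-08-03 06:00", "2016-08-03 08:00"]])
    return is_event_observable(constraints, observer, target,
                               times_ingress_egress=ing_egr)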
def months_observable(constraints, observer, targets,
time_range=_current_year_time_range,
time_grid_resolution=0.5*u.hour):
"""
Determines in which months the specified ``targets`` are observable for a
specific ``observer``, given the supplied ``constraints``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence
If ``time_range`` is not specified, defaults to current year (localtime)
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
observable_months : list
List of sets of unique integers representing each month that a target is
observable, one set per target. These integers are 1-based so that
January maps to 1, February maps to 2, etc.
"""
# TODO: This method could be sped up a lot by dropping to the trigonometric
# altitude calculations.
if not hasattr(constraints, '__len__'):
constraints = [constraints]
times = time_grid_from_range(time_range, time_grid_resolution)
applied_constraints = [constraint(observer, targets,
times=times,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
months_observable = []
for target, observable in zip(targets, constraint_arr):
s = set([t.datetime.month for t in times[observable]])
months_observable.append(s)
return months_observable
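# --------------------------------------------------------------------------
# Editor's sketch (not part of the original astroplan source): which months a
# target satisfies a constraint, using the default current-year time range.
# Site, target and the coarse grid resolution are illustrative assumptions.
def _example_months_observable_usage():
    from astroplan import Observer, FixedTarget, AltitudeConstraint
    import astropy.units as u

    observer = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("M31")]
    constraints = [AltitudeConstraint(min=40 * u.deg)]
    # Returns a list with one set of month numbers (1-12) per target.
    return months_observable(constraints, observer, targets,
                             time_grid_resolution=6 * u.hour)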
def observability_table(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
Creates a table with information about observability for all the ``targets``
over the requested ``time_range``, given the ``constraints`` for
``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`. If a single (scalar) time, the table
will be for a 24 hour period centered on that time.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
observability_table : `~astropy.table.Table`
A Table containing the observability information for each of the
``targets``. The table contains four columns with information about the
target and its observability: ``'target name'``, ``'ever observable'``,
``'always observable'``, and ``'fraction of time observable'``. The
column ``'time observable'`` will also be present if the ``time_range``
is given as a scalar. It also contains metadata entries ``'times'``
(with an array of all the times), ``'observer'`` (the
`~astroplan.Observer` object), and ``'constraints'`` (containing the
supplied ``constraints``).
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
is_24hr_table = False
if hasattr(time_range, 'isscalar') and time_range.isscalar:
time_range = (time_range-12*u.hour, time_range+12*u.hour)
is_24hr_table = True
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
colnames = ['target name', 'ever observable', 'always observable',
'fraction of time observable']
target_names = [target.name for target in targets]
ever_obs = np.any(constraint_arr, axis=1)
always_obs = np.all(constraint_arr, axis=1)
frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1]
tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs,
frac_obs])
if times is None and time_range is not None:
times = time_grid_from_range(time_range,
time_resolution=time_grid_resolution)
if is_24hr_table:
tab['time observable'] = tab['fraction of time observable'] * 24*u.hour
tab.meta['times'] = times.datetime
tab.meta['observer'] = observer
tab.meta['constraints'] = constraints
return tab
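# --------------------------------------------------------------------------
# Editor's sketch (not part of the original astroplan source): building the
# summary table for a single night. Site, targets and constraints are
# illustrative assumptions.
def _example_observability_table_usage():
    from astroplan import Observer, FixedTarget, AltitudeConstraint
    from astropy.time import Time
    import astropy.units as u

    observer = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("Vega"), FixedTarget.from_name("M31")]
    constraints = [AltitudeConstraint(min=30 * u.deg)]
    time_range = Time(["2016-08-01 06:00", "2016-08-01 12:00"])
    tab = observability_table(constraints, observer, targets,
                              time_range=time_range)
    # Columns: 'target name', 'ever observable', 'always observable',
    # 'fraction of time observable'.
    return tab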
def min_best_rescale(vals, min_val, max_val, less_than_min=1):
"""
rescales an input array ``vals`` to be a score (between zero and one),
where the ``min_val`` goes to one, and the ``max_val`` goes to zero.
Parameters
----------
vals : array-like
the values that need to be rescaled to be between 0 and 1
min_val : float
worst acceptable value (rescales to 0)
max_val : float
best value cared about (rescales to 1)
less_than_min : 0 or 1
what is returned for ``vals`` below ``min_val``. (in some cases
anything less than ``min_val`` should also return one,
in some cases it should return zero)
Returns
-------
array of floats between 0 and 1 (inclusive), rescaled so that values
equal to ``max_val`` map to 0 and values equal to ``min_val`` map to 1
Examples
--------
rescale airmasses to between 0 and 1, with the best (1)
and worst (2.25). All values outside the range should
return 0.
>>> from astroplan.constraints import min_best_rescale
>>> import numpy as np
>>> airmasses = np.array([1, 1.5, 2, 3, 0])
>>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP
array([ 1. , 0.6, 0.2, 0. , 0. ])
"""
rescaled = (vals - max_val) / (min_val - max_val)
below = vals < min_val
above = vals > max_val
rescaled[below] = less_than_min
rescaled[above] = 0
return rescaled
def max_best_rescale(vals, min_val, max_val, greater_than_max=1):
"""
rescales an input array ``vals`` to be a score (between zero and one),
where the ``max_val`` goes to one, and the ``min_val`` goes to zero.
Parameters
----------
vals : array-like
the values that need to be rescaled to be between 0 and 1
min_val : float
worst acceptable value (rescales to 0)
max_val : float
best value cared about (rescales to 1)
greater_than_max : 0 or 1
what is returned for ``vals`` above ``max_val``. (in some cases
anything higher than ``max_val`` should also return one,
in some cases it should return zero)
Returns
-------
array of floats between 0 and 1 (inclusive), rescaled so that values
equal to ``min_val`` map to 0 and values equal to ``max_val`` map to 1
Examples
--------
rescale an array of altitudes to be between 0 and 1,
with the best (60) going to 1 and worst (35) going to
0. For values outside the range, the rescale should
return 0 below 35 and 1 above 60.
>>> from astroplan.constraints import max_best_rescale
>>> import numpy as np
>>> altitudes = np.array([20, 30, 40, 45, 55, 70])
>>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP
array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ])
"""
rescaled = (vals - min_val) / (max_val - min_val)
below = vals < min_val
above = vals > max_val
rescaled[below] = 0
rescaled[above] = greater_than_max
return rescaled
|
example/image-classification/test_score.py | Vikas-kum/incubator-mxnet | 399 | 2496 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
test pretrained models
"""
from __future__ import print_function
import mxnet as mx
from common import find_mxnet, modelzoo
from score import score
VAL_DATA='data/val-5k-256.rec'
def download_data():
return mx.test_utils.download(
'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)
def test_imagenet1k_resnet(**kwargs):
models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']
accs = [.77, .78]
for (m, g) in zip(models, accs):
acc = mx.metric.create('acc')
(speed,) = score(model=m, data_val=VAL_DATA,
rgb_mean='0,0,0', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s, acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1
def test_imagenet1k_inception_bn(**kwargs):
acc = mx.metric.create('acc')
m = 'imagenet1k-inception-bn'
g = 0.75
(speed,) = score(model=m,
data_val=VAL_DATA,
rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1
if __name__ == '__main__':
gpus = mx.test_utils.list_gpus()
assert len(gpus) > 0
batch_size = 16 * len(gpus)
gpus = ','.join([str(i) for i in gpus])
kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
download_data()
test_imagenet1k_resnet(**kwargs)
test_imagenet1k_inception_bn(**kwargs)
|
dotnet/private/actions/resx_core.bzl | purkhusid/rules_dotnet | 143 | 2567 | <reponame>purkhusid/rules_dotnet
"Actions for compiling resx files"
load(
"@io_bazel_rules_dotnet//dotnet/private:providers.bzl",
"DotnetResourceInfo",
)
def _make_runner_arglist(dotnet, source, output, resgen):
args = dotnet.actions.args()
if type(source) == "Target":
args.add_all(source.files)
else:
args.add(source)
args.add(output)
return args
def emit_resx_core(
dotnet,
name = "",
src = None,
identifier = None,
out = None,
customresgen = None):
"""The function adds an action that compiles a single .resx file into .resources file.
Returns [DotnetResourceInfo](api.md#dotnetresourceinfo).
Args:
dotnet: [DotnetContextInfo](api.md#dotnetcontextinfo).
name: name of the file to generate.
src: The .resx source file that is transformed into .resources file. Only `.resx` files are permitted.
identifier: The logical name for the resource; the name that is used to load the resource. The default is the basename of the file name (no subfolder).
out: An alternative name of the output file (if name should not be used).
customresgen: custom resgen program to use.
Returns:
DotnetResourceInfo: [DotnetResourceInfo](api.md#dotnetresourceinfo).
"""
if name == "" and out == None:
fail("either name or out must be set")
if not out:
result = dotnet.actions.declare_file(name + ".resources")
else:
result = dotnet.actions.declare_file(out)
args = _make_runner_arglist(dotnet, src, result, customresgen.files_to_run.executable.path)
# We use the command to extract the shell path and force runfiles creation
resolve = dotnet._ctx.resolve_tools(tools = [customresgen])
inputs = src.files.to_list() if type(src) == "Target" else [src]
dotnet.actions.run(
inputs = inputs + resolve[0].to_list(),
tools = customresgen.default_runfiles.files,
outputs = [result],
executable = customresgen.files_to_run,
arguments = [args],
env = {"RUNFILES_MANIFEST_FILE": customresgen.files_to_run.runfiles_manifest.path},
mnemonic = "CoreResxCompile",
input_manifests = resolve[1],
progress_message = (
"Compiling resoources" + dotnet.label.package + ":" + dotnet.label.name
),
)
return DotnetResourceInfo(
name = name,
result = result,
identifier = identifier,
)
|
test/jit/test_modules.py | xiaohanhuang/pytorch | 183 | 2568 | # Owner(s): ["oncall: jit"]
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestModules(JitTestCase):
def test_script_module_with_constants_list(self):
"""
Test that a module that has __constants__ set to something
that is not a set can be scripted.
"""
# torch.nn.Linear has a __constants__ attribute defined
# and initialized to a list.
class Net(torch.nn.Linear):
x: torch.jit.Final[int]
def __init__(self):
super().__init__(5, 10)
self.x = 0
self.checkModule(Net(), (torch.randn(5),))
|
salt/grains/nxos.py | babs/salt | 9,425 | 2581 | <gh_stars>1000+
"""
Grains for Cisco NX-OS minions
.. versionadded:: 2016.11.0
For documentation on setting up the nxos proxy minion look in the documentation
for :mod:`salt.proxy.nxos<salt.proxy.nxos>`.
"""
import logging
import salt.utils.nxos
import salt.utils.platform
from salt.exceptions import NxosClientError
log = logging.getLogger(__name__)
__proxyenabled__ = ["nxos"]
__virtualname__ = "nxos"
def __virtual__():
try:
salt.utils.nxos.version_info()
except NxosClientError as err:
return False, err
return __virtualname__
def system_information(proxy=None):
if salt.utils.platform.is_proxy():
if proxy is None:
return {}
if proxy["nxos.initialized"]() is False:
return {}
return {"nxos": proxy["nxos.grains"]()}
else:
data = salt.utils.nxos.version_info()
return salt.utils.nxos.system_info(data)
|
src/oncall/messengers/teams_messenger.py | navoday-91/oncall | 857 | 2587 | <gh_stars>100-1000
import pymsteams
import logging
from oncall.constants import TEAMS_SUPPORT
class teams_messenger(object):
supports = frozenset([TEAMS_SUPPORT])
def __init__(self, config):
self.webhook = config['webhook']
def send(self, message):
heading = message.get("subject")
final_message = "User: " + message.get("user") + " Message: " + message.get("body")
try:
myTeamsMessage = pymsteams.connectorcard(self.webhook)
myTeamsMessage.title(str(heading))
myTeamsMessage.text(str(final_message))
myTeamsMessage.send()
except Exception:
logging.info("An issue occurred while sending message to the Teams messenger")
|
examples/question_answering/qa_sparse_train.py | ebell495/nn_pruning | 250 | 2593 | # coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sparse Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
from nn_pruning.sparse_trainer import SparseTrainer
from .qa_train import QATrainer
# SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer)
class QASparseTrainer(SparseTrainer, QATrainer):
def __init__(self, sparse_args, *args, **kwargs):
QATrainer.__init__(self, *args, **kwargs)
SparseTrainer.__init__(self, sparse_args)
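# --------------------------------------------------------------------------
# Editor's sketch (not part of the original nn_pruning source): a tiny,
# self-contained illustration of why SparseTrainer must come first in the
# base-class list above — under Python's MRO, methods on the first base win.
def _mro_order_sketch():
    class Base:
        def hook(self):
            return "base"

    class Sparse(Base):
        def hook(self):
            return "sparse"

    class QA(Base):
        pass

    class Combined(Sparse, QA):  # mirrors QASparseTrainer's base order
        pass

    assert Combined().hook() == "sparse"
    return Combined.__mro__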
|
scripts/examples/OpenMV/16-Codes/find_barcodes.py | jiskra/openmv | 1,761 | 2603 | # Barcode Example
#
# This example shows off how easy it is to detect bar codes using the
# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.
import sensor, image, time, math
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()
# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
# a lower resolution. That said, barcode detection requires a higher resolution
# to work well so it should always be run at 640x480 in grayscale...
def barcode_name(code):
if(code.type() == image.EAN2):
return "EAN2"
if(code.type() == image.EAN5):
return "EAN5"
if(code.type() == image.EAN8):
return "EAN8"
if(code.type() == image.UPCE):
return "UPCE"
if(code.type() == image.ISBN10):
return "ISBN10"
if(code.type() == image.UPCA):
return "UPCA"
if(code.type() == image.EAN13):
return "EAN13"
if(code.type() == image.ISBN13):
return "ISBN13"
if(code.type() == image.I25):
return "I25"
if(code.type() == image.DATABAR):
return "DATABAR"
if(code.type() == image.DATABAR_EXP):
return "DATABAR_EXP"
if(code.type() == image.CODABAR):
return "CODABAR"
if(code.type() == image.CODE39):
return "CODE39"
if(code.type() == image.PDF417):
return "PDF417"
if(code.type() == image.CODE93):
return "CODE93"
if(code.type() == image.CODE128):
return "CODE128"
while(True):
clock.tick()
img = sensor.snapshot()
codes = img.find_barcodes()
for code in codes:
img.draw_rectangle(code.rect())
print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
if not codes:
print("FPS %f" % clock.fps())
|