| Column | Type | Range / values |
|---|---|---|
| max_stars_repo_path | stringlengths | 4–286 |
| max_stars_repo_name | stringlengths | 5–119 |
| max_stars_count | int64 | 0–191k |
| id | stringlengths | 1–7 |
| content | stringlengths | 6–1.03M |
| content_cleaned | stringlengths | 6–1.03M |
| language | stringclasses | 111 values |
| language_score | float64 | 0.03–1 |
| comments | stringlengths | 0–556k |
| edu_score | float64 | 0.32–5.03 |
| edu_int_score | int64 | 0–5 |
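As a quick orientation, the sketch below shows one way rows with this schema could be loaded and filtered with the Hugging Face `datasets` library. The `data/*.parquet` glob and the score threshold are placeholders, not part of the dataset itself.

```python
# Minimal sketch: load Parquet shards with this schema and keep a subset.
# The file glob and the filter thresholds below are illustrative assumptions.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

# Keep rows whose comments are English and whose educational score is high.
subset = ds.filter(
    lambda row: row["language"] == "en" and row["edu_int_score"] >= 2
)

print(len(subset))
print(subset[0]["max_stars_repo_path"], subset[0]["max_stars_count"])
```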
openff/bespokefit/executor/services/qcgenerator/cache.py | openforcefield/bespoke-f | 12 | 600 |

import hashlib
from typing import TypeVar, Union
import redis
from openff.toolkit.topology import Molecule
from openff.bespokefit.executor.services.qcgenerator import worker
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.bespokefit.utilities.molecule import canonical_order_atoms
_T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask)
def _canonicalize_task(task: _T) -> _T:
task = task.copy(deep=True)
# Ensure the SMILES has a canonical ordering to help ensure cache hits.
canonical_molecule = canonical_order_atoms(
Molecule.from_smiles(task.smiles, allow_undefined_stereo=True)
)
if isinstance(task, Torsion1DTask):
map_to_atom_index = {
j: i for i, j in canonical_molecule.properties["atom_map"].items()
}
central_atom_indices = sorted(
map_to_atom_index[task.central_bond[i]] for i in (0, 1)
)
canonical_molecule.properties["atom_map"] = {
atom_index: (i + 1) for i, atom_index in enumerate(central_atom_indices)
}
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
task.central_bond = (1, 2)
else:
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=False
)
task.smiles = canonical_smiles
return task
def cached_compute_task(
task: Union[HessianTask, OptimizationTask, Torsion1DTask],
redis_connection: redis.Redis,
) -> str:
"""Checks to see if a QC task has already been executed and if not send it to a
worker.
"""
if isinstance(task, Torsion1DTask):
compute = worker.compute_torsion_drive
elif isinstance(task, OptimizationTask):
compute = worker.compute_optimization
elif isinstance(task, HessianTask):
compute = worker.compute_hessian
else:
raise NotImplementedError()
# Canonicalize the task to improve the cache hit rate.
task = _canonicalize_task(task)
task_hash = hashlib.sha512(task.json().encode()).hexdigest()
task_id = redis_connection.hget("qcgenerator:task-ids", task_hash)
if task_id is not None:
return task_id.decode()
task_id = compute.delay(task_json=task.json()).id
redis_connection.hset("qcgenerator:types", task_id, task.type)
# Make sure to only set the hash after the type is set in case the connection
# goes down before this information is entered and subsequently discarded.
redis_connection.hset("qcgenerator:task-ids", task_hash, task_id)
return task_id
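`cached_compute_task` above is a straightforward check-then-enqueue cache: hash the canonicalized task, look the hash up in a Redis hash map, and only dispatch a new worker job on a miss. The sketch below isolates that pattern; `run_task` and the `example:*` key names are illustrative stand-ins, not part of the bespokefit API.

```python
# Stand-alone sketch of the hash-then-cache pattern used by cached_compute_task.
import hashlib
import json

import redis


def run_task(payload: str) -> str:
    """Hypothetical stand-in for dispatching a job and returning its id."""
    return hashlib.md5(payload.encode()).hexdigest()[:8]


def cached_run(task: dict, conn: redis.Redis) -> str:
    # Canonical serialization so equivalent tasks hash identically.
    payload = json.dumps(task, sort_keys=True)
    task_hash = hashlib.sha512(payload.encode()).hexdigest()

    task_id = conn.hget("example:task-ids", task_hash)
    if task_id is not None:  # cache hit: reuse the existing job id
        return task_id.decode()

    task_id = run_task(payload)  # cache miss: do the work once
    conn.hset("example:task-ids", task_hash, task_id)
    return task_id
```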
advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs | 6 | 601 |

'''Copyright Gigaspaces, 2017, All Rights Reserved'''
from cloudify.plugins import lifecycle
OP_START = 'hacker.interfaces.lifecycle.start'
OP_STOP = 'hacker.interfaces.lifecycle.stop'
OP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots'
OP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots'
REQUIRED_OPS = set([OP_START, OP_SS_C, OP_SS_D, OP_STOP])
def build_instance_sequence(instance, operation,
state_start=None, state_end=None):
'''
Builds sequenced subgraph tasks for an instance
.. note::
The sequence will not be built if the instance provided
does not have a node with an operation defined in the
operation parameter.
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param str operation:
Node (lifecycle) operation to execute
:param str state_start:
Verb to describe operation start
    :param str state_end:
Verb to describe operation finish
'''
tasks = list()
# Only build the sequence if the node operation exists
if operation not in instance.node.operations:
return tasks
# Add task starting state
if state_start:
tasks.append(instance.send_event('%s host' % state_start))
tasks.append(instance.set_state(state_start.lower()))
# Add task operation
tasks.append(instance.execute_operation(operation))
# Add task ended state
if state_end:
tasks.append(instance.send_event('%s host' % state_end))
tasks.append(instance.set_state(state_end.lower()))
return tasks
def build_instance_subgraph(instance, graph):
'''
Builds a subgraph for an instance
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param `TaskDependencyGraph` graph:
Task graph to create sequences from
'''
# Init a "stop instance" subgraph
sg_stop = graph.subgraph('stop_subgraph')
seq_stop = sg_stop.sequence()
seq_stop.add(*build_instance_sequence(
instance, OP_STOP, 'Stopping', 'Stopped'))
# Init a "recreate snapshots" subgraph
sg_snap = graph.subgraph('snapshot_subgraph')
seq_snap = sg_snap.sequence()
if OP_SS_D in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_D))
if OP_SS_C in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_C))
# Init a "start instance" subgraph
    sg_start = graph.subgraph('start_subgraph')
seq_start = sg_start.sequence()
seq_start.add(*build_instance_sequence(
instance, OP_START, 'Starting', 'Started'))
# Create subgraph dependencies
graph.add_dependency(sg_snap, sg_stop)
graph.add_dependency(sg_start, sg_snap)
def refresh_snapshots(ctx, **_):
'''
Executes a complex, graph-based set of lifecycle events
to stop all host (compute) instances, delete all
existing instance snapshots, take new snapshots
of all attached volumes, and start the instances
back up when complete.
'''
graph = ctx.graph_mode()
# Find all compute hosts and build a sequence graph
for node in ctx.nodes:
if not REQUIRED_OPS.issubset(node.operations):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node "%s" because '
'it does not have all required operations defined' % node.id)
continue
# Iterate over each node instance
for instance in node.instances:
if not lifecycle.is_host_node(instance):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node instance '
'"%s" because it is not a compute host' % instance.id)
continue
build_instance_subgraph(instance, graph)
# Execute the sequences
return graph.execute()
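`build_instance_subgraph` wires the three subgraphs so that the snapshot subgraph depends on the stop subgraph and the start subgraph depends on the snapshot subgraph, forcing a stop → snapshot → start order per host. The toy snippet below (plain Python, not Cloudify API) just makes that ordering explicit.

```python
# Toy illustration of the per-instance ordering established above:
# add_dependency(sg_snap, sg_stop) and add_dependency(sg_start, sg_snap)
# resolve to: stop -> snapshot -> start.
dependencies = {
    "stop_subgraph": set(),
    "snapshot_subgraph": {"stop_subgraph"},
    "start_subgraph": {"snapshot_subgraph"},
}

order = []
while dependencies:
    ready = [name for name, deps in dependencies.items() if deps <= set(order)]
    order.extend(ready)
    for name in ready:
        del dependencies[name]

print(order)  # ['stop_subgraph', 'snapshot_subgraph', 'start_subgraph']
```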
File Transfer/Flyter/flyter.py | CryptoNyxz/Miscellaneous-Tools | 0 | 602 |

"""
Flyter
Tool for transferring files on the same network using raw sockets.
Doesn't use encryption.
"""
__version__ = (0, 0, 0)
__author__ = "CryptoNyxz"
__license__ = """
MIT License
Copyright (c) 2021 CryptoNyxz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from argparse import ArgumentParser
from base64 import b64encode
from datetime import timedelta
from math import log
from os import altsep, sep, \
mkdir, stat, unlink
from os.path import dirname, exists, join
from random import randint
from secrets import token_bytes
from shutil import get_terminal_size
from socket import \
socket, error, timeout, \
ntohs, ntohl, htons, htonl, \
gethostname, \
AF_INET, SOCK_STREAM
from threading import Thread
from time import time
from warnings import warn
from sys import argv, exit, version_info
if version_info < (3, 6):
    warn('[!] Some features may not be compatible with the version of your '
         'python interpreter')
FROMTERMINAL = False
# Utility Functions
def random_port(host):
"""Return a random available TCP port."""
while True:
        port = randint(10_000, 65535)
with socket(AF_INET, SOCK_STREAM) as sock:
try:
sock.bind((host, port))
except error:
continue
else:
return port
def printerror(errormsg):
"""Print an error message."""
global FROMTERMINAL
if FROMTERMINAL:
print(f'\n[x] {errormsg}')
        exit(-1)
else:
warn(errormsg)
def printalert(alert):
"""Print an alert message."""
global FROMTERMINAL
print(f'[!] {alert}')
def int_to_bytes_s(integer):
    """Convert a 16-bit integer to bytes for packing."""
res = ntohs(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_s(byteseq):
    """Convert a byte sequence to a 16-bit integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htons(res)
def int_to_bytes_l(integer):
    """Convert a 32-bit integer to bytes for packing."""
res = ntohl(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_l(byteseq):
    """Convert a byte sequence to a 32-bit integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htonl(res)
def pack_str(string):
"""Pack a string into a byte sequence."""
return string.encode()
def unpack_str(byteseq):
"""Unpack a byte sequence into a string."""
return byteseq.decode()
# Utility Classes
class ProgressBar:
"""
For displaying progress bars.
Parameters
----------
max_value : int, float
The upper limit of the progress bar.
length : :obj:`int`, optional
The length of the progress bar.
"""
@staticmethod
def byte_rescale(data, precision=1):
scale = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
p = int(log(data, 2)/10) if data else 0
r_bytes = round(data/pow(2, 10*p), precision)
return f"{r_bytes}{scale[p]}"
def __init__(self, max_value, length=50):
self.max_value = max_value
self.current_val = 0
self.length = length
self.rate = None
self.start_time = None
self.start_value = None
self.stopped = False
@property
def done(self):
"""Return if already finished."""
return self.current_val >= self.max_value or self.stopped
def start(self):
"""Start the progress bar."""
self.stopped = False
self.start_time = time()
self.start_value = self.current_val
def stop(self):
"""Stop the progress bar."""
self.stopped = True
def add_progress(self, value):
"""
Count new progress.
Parameter
---------
value : int, float
Added progress value.
"""
if self.stopped:
return
self.current_val += value
def display(self):
"""Display the current progress."""
if self.stopped:
return
d_value = self.current_val - self.start_value
d_max_value = self.max_value - self.start_value
d_time = time() - self.start_time
per = d_value/d_max_value
prog = int(self.length*per)
extra = self.length*round(per) > prog
prog_bar = '█'*prog + '▌'*extra
spaces = ' '*(self.length - (prog + extra))
rate = d_value/d_time if d_time else float('inf')
eta_s = round((d_max_value - d_value)/rate) if rate else \
None
eta = timedelta(seconds=eta_s) if eta_s is not None else '?'
clear_line = " "*(get_terminal_size().columns - 1)
print(f"{clear_line}\r"
"Progress: "
f"|{prog_bar}{spaces}| "
f"{100*per:.1f}% "
f"({ProgressBar.byte_rescale(d_value)}) "
f"[{ProgressBar.byte_rescale(rate)}/s] "
f"ETA: {eta}", end="\r")
# Flyter Classes
class FlyterSender:
"""
Handles Flyter file sending processes.
Note: Sends to FlyterReceiver instances.
    Parameters
----------
recver_ip : str
The IP address of the receiver.
main_port : int
The main TCP port of the receiver.
"""
DEFAULT_PACKET_SIZE = 1024
def __init__(self, recver_ip, main_port):
self.recver_ip = recver_ip
self.main_port = main_port
        self.token = token_bytes(6)
self._recver_hostname = None
self._recver_token = None
self._transfer_type = None
self._worker_ports = None
self._packet_size = FlyterSender.DEFAULT_PACKET_SIZE
self._sending_file = False
self._workers_active = 0
self._progress_bar = None
try:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.settimeout(60)
except:
printerror('Error initializing sockets')
self.param_set = False
def __del__(self):
if isinstance(self.socket, socket):
self.socket.close()
def _send_s(self, filepath, file_size):
"""
Send a file with a single worker.
Parameters
----------
filepath : str
The filepath to the file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
return printerror("File doesn't exist")
self._sending_file = True
try:
fs = file_size
with open(filepath, 'br') as f:
while self._sending_file and fs:
packet = f.read(self._packet_size)
if not packet:
break
self.socket.send(packet)
assert self.socket.recv(1) == b'\x06' # ACK
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
except AssertionError:
self._progress_bar.stop()
return printerror("Receiver rejected packet")
except FileNotFoundError:
self._progress_bar.stop()
return printerror("Couldn't access file")
except PermissionError:
self._progress_bar.stop()
return printerror("Couldn't access file due to permission error")
except timeout:
self._progress_bar.stop()
return printerror("Operation timed out")
except:
self._progress_bar.stop()
return printerror(f"Error while sending file")
else:
self._sending_file = False
return True
def _send_m(self, filepath, file_sizes):
"""
Send a file with multiple workers.
Speeds up transmission rate by using multiple workers.
Parameters
----------
filepath : str
The filepath to the file to be sent.
file_sizes : list(int)
The sizes of the split-up file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
printerror("File doesn't exist")
def threadfunc(worker_num, fpath, start, end):
self._workers_active += 1
try:
with socket(AF_INET, SOCK_STREAM) as sock:
sock.connect(
(self.recver_ip, self._worker_ports[worker_num])
)
sock.send(self.token)
assert sock.recv(1) == b'\x06' # ACK
fs = end - start
with open(fpath, 'br') as f:
f.seek(start)
while self._sending_file and fs:
end_size = f.tell() + self._packet_size
size = (self._packet_size - max(0, end_size - end))
packet = f.read(size)
if not packet:
break
sock.send(packet)
assert sock.recv(1) == b'\x06' # ACK
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
except KeyboardInterrupt:
self._progress_bar.stop()
self._sending_file = False
return printerror("User aborted operation")
except AssertionError:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Receiver rejected packet")
except FileNotFoundError:
self._progress_bar.stop()
self._sending_file = False
return printerror("Couldn't access file")
except PermissionError:
self._progress_bar.stop()
self._sending_file = False
return printerror("Couldn't access file due to permission "
"error")
except timeout:
self._progress_bar.stop()
self._sending_file = False
return printerror("Operation timed out")
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Error while sending file")
finally:
self._workers_active -= 1
num_workers = len(self._worker_ports)
self._sending_file = True
try:
size = 0
for w in range(num_workers):
Thread(
target=threadfunc,
args=(
w, filepath,
size, size + file_sizes[w]
),
).start()
size += file_sizes[w]
except FileNotFoundError:
return printerror("Couldn't access file")
except PermissionError:
return printerror("Couldn't access file due to permission error")
except:
return printerror("Error while starting to send file")
while self._workers_active:
try:
pass
except KeyboardInterrupt:
self._progress_bar.stop()
self._sending_file = False
return printerror("User aborted operation")
self._sending_file = False
return True
def send_file(self, filepath):
"""
Send a file.
Parameters
----------
filepath : str
The filepath of the file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
return printerror("File doesn't exist")
# Headers
try:
tok = self.token
num_w = max(1, len(self._worker_ports))
fpath = filepath.replace(altsep, sep)
fname = fpath.split(sep)[-1]
fsize = stat(fpath).st_size
fsizes = [fsize//num_w for w in range(num_w)]
fsizes[-1] += fsize - sum(fsizes)
fn = pack_str(fname)
len_fn = int_to_bytes_s(len(fn))
fs = [int_to_bytes_l(s) for s in fsizes]
fs = b''.join(fs)
len_fs = int_to_bytes_s(num_w)
headers = b''.join([tok, len_fn, fn, len_fs, fs])
except:
return printerror("Error while preparing headers")
try:
b64_tok = b64encode(self._recver_token).decode()
printalert(f"Sending to {self._recver_hostname}-{b64_tok}:"
f" [ {fname} ]")
self.socket.send(headers)
print("Waiting for receiver to accept file")
assert self.socket.recv(1) == b'\x06' # ACK
except KeyboardInterrupt:
return printerror("User aborted operation")
except AssertionError:
return printerror("Receiver rejected")
except timeout:
return printerror("Operation timed out")
except Exception:
return printerror("Error while sending headers to receiver")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now sending file ({ProgressBar.byte_rescale(fsize)})")
# Progress bar thread
self._progress_bar = ProgressBar(fsize, 40)
self._progress_bar.start()
def progress_thread():
try:
# Wait until sending file
while not self._sending_file:
pass
# Display until file is sent
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
# Start sending
res = None
try:
if self._transfer_type == 'S':
res = self._send_s(fpath, fsize)
elif self._transfer_type == 'M':
res = self._send_m(fpath, fsizes)
assert self.socket.recv(1) == b'\x06' # ACK
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Sending file was unsuccessful")
else:
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully sent: {fname}")
return res
def recv_param_set(self):
"""
Receive and unpack Receiver's parameter settings.
Used to set Sender's parameter settings used during data
transmissions.
"""
try:
self.socket.connect((self.recver_ip, self.main_port))
except error:
return printerror("Can't connect to "
f"{self.recver_ip}:{self.main_port}")
try:
sender_hn = pack_str(gethostname())
len_sender_hn = int_to_bytes_s(len(sender_hn))
self.socket.send(b''.join([len_sender_hn, sender_hn]))
assert self.socket.recv(1) == b'\x06' # ACK
except AssertionError:
return printerror("Receiver rejected handshake")
except timeout:
return printerror('Operation timed out')
except:
return printerror("Error during handshake")
try:
len_hn = bytes_to_int_s(self.socket.recv(2))
self._recver_hostname = unpack_str(self.socket.recv(len_hn))
self._recver_token = self.socket.recv(6)
self._transfer_type = unpack_str(self.socket.recv(1))
len_wp = bytes_to_int_s(self.socket.recv(2))
self._worker_ports = [bytes_to_int_s(self.socket.recv(2))
for w in range(len_wp)]
self.socket.send(b'\x06') # ACK
except error:
return printerror("Error getting connected with socket")
except:
self.socket.send(b'\x15') # NAK
return printerror("Error getting parameters from receiver")
else:
self.param_set = True
class FlyterReciever:
"""
Handles Flyter file receiving processes.
Note: Receives from FlyterSender instances.
Parameters
----------
host_ip : str
The Host IP address to be used.
main_port : int
The main TCP port to be used.
num_workers : int
The amount of workers to be used during transmission.
"""
@staticmethod
def storage_dir(hostname=None):
"""
Return the path of the storage dir for received files.
If storage directory doesn't exist, creates it first.
Parameters
----------
hostname : str
The name of the subdirectory where that
host's sent files are stored.
"""
app_dirname = dirname(__file__)
appfiles_dirname = join(app_dirname, 'Flyter')
if not exists(appfiles_dirname):
mkdir(appfiles_dirname)
storage_dirname = join(appfiles_dirname, 'Received Files')
if not exists(storage_dirname):
mkdir(storage_dirname)
if hostname:
host_storage_dirname = join(storage_dirname, hostname)
if not exists(host_storage_dirname):
mkdir(host_storage_dirname)
return host_storage_dirname
else:
return storage_dirname
DEFAULT_PACKET_SIZE = 512
def __init__(self, host_ip, main_port, num_workers):
self.host_ip = host_ip
self.main_port = main_port
        self.token = token_bytes(6)
self.transfer_type = 'S' if num_workers == 1 else 'M'
self.worker_ports = [
random_port(self.host_ip) for w in range(num_workers)
] if num_workers > 1 else []
self._sender_socket = None
self._sender_hostname = None
self._sender_token = None
self._sender_filename = None
self._sender_filesizes = None
self._packet_size = FlyterSender.DEFAULT_PACKET_SIZE
self._recving_file = False
self._workers_active = 0
self._progress_bar = ProgressBar(None)
try:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.bind((self.host_ip, self.main_port))
self.socket.settimeout(60)
self.workers = [
socket(AF_INET, SOCK_STREAM) for w in range(num_workers)
] if num_workers > 1 else []
if self.workers:
for w in range(num_workers):
self.workers[w].bind((self.host_ip, self.worker_ports[w]))
self.workers[w].settimeout(60)
except:
printerror('Error initializing sockets')
self.param_set = False
def __del__(self):
if isinstance(self.__dict__.get('socket'), socket):
self.socket.close()
if self.__dict__.get('workers'):
for w in self.workers:
w.close()
def _recv_s(self):
"""Receive a file with a single worker."""
if not self.param_set:
return printerror("Sender not yet set with parameters")
try:
self._recving_file = True
path = join(
FlyterReciever.storage_dir(self._sender_hostname),
self._sender_filename
)
fs = self._sender_filesizes[0]
with open(path, 'bw') as f:
while self._recving_file and fs:
packet = self._sender_socket.recv(self._packet_size)
f.write(packet)
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
self._sender_socket.send(b'\x06') # ACK
except timeout:
self._progress_bar.stop()
return printerror("Operation timed out")
except FileNotFoundError:
self._progress_bar.stop()
return printerror("Downloading file has been deleted")
except PermissionError:
self._progress_bar.stop()
return printerror("Couldn't access storage directory")
except error:
self._progress_bar.stop()
return printerror("Error with socket")
except:
self._progress_bar.stop()
return printerror("Error receiving file")
else:
self._recving_file = False
return True
def _recv_m(self):
"""
Receive a file with multiple workers.
Speeds up transmission rate by using multiple workers.
"""
if not self.param_set:
return printerror("Sender not yet set with parameters")
def threadfunc(worker_num, fpath):
self._workers_active += 1
try:
recver_socket = self.workers[worker_num]
recver_socket.listen(1)
sender_socket, hostaddr = recver_socket.accept()
send_tok = sender_socket.recv(6)
if send_tok == self._sender_token:
sender_socket.send(b'\x06') # ACK
else:
sender_socket.send(b'\x15') # NAK
fs = self._sender_filesizes[worker_num]
with open(fpath, 'bw') as f:
while self._recving_file and f.writable() and fs:
packet = sender_socket.recv(self._packet_size)
f.write(packet)
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
sender_socket.send(b'\x06') # ACK
except KeyboardInterrupt:
self._progress_bar.stop()
self._recving_file = False
return printerror("User aborted operation")
except timeout:
self._progress_bar.stop()
self._recving_file = False
return printerror("Operation timed out")
except error:
self._progress_bar.stop()
self._recving_file = False
return printerror("Error with sockets")
except:
self._progress_bar.stop()
self._recving_file = False
return printerror("Error while receiving file")
finally:
self._workers_active -= 1
num_workers = len(self.workers)
self._recving_file = True
try:
for w in range(len(self.worker_ports)):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
Thread(
target=threadfunc,
args=(w, wpath),
).start()
except FileNotFoundError:
return printerror("Couldn't access file")
except PermissionError:
return printerror("Couldn't access file due to permission error")
while self._workers_active:
try:
pass
except KeyboardInterrupt:
self._progress_bar.stop()
self._recving_file = False
printerror("User aborted operation")
self._recving_file = False
try:
# Build the file
path = join(
FlyterReciever.storage_dir(self._sender_hostname),
self._sender_filename
)
with open(path, 'bw') as output:
for w in range(num_workers):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
with open(wpath, 'br') as temp:
packet = True
while packet:
packet = temp.read(self._packet_size)
output.write(packet)
# Clear the contents of the temp file
open(wpath, 'bw').close()
# Delete the temp files
for w in range(num_workers):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
unlink(wpath)
except PermissionError:
self._sender_socket.send(b'\x15') # NAK
return printerror("Couldn't save file due to permissions")
except error:
return printerror("Error with sockets")
except:
self._sender_socket.send(b'\x15') # NAK
return printerror("Error while saving file")
else:
return True
def recv_file(self):
"""Receive a file."""
if not self.param_set:
            return printerror("Sender not yet set with parameters")
# Headers
try:
tok = self._sender_socket.recv(6)
b64_tok = b64encode(tok).decode()
len_fn = bytes_to_int_s(self._sender_socket.recv(2))
fn = unpack_str(self._sender_socket.recv(len_fn))
len_fs = bytes_to_int_s(self._sender_socket.recv(2))
fs = [bytes_to_int_l(self._sender_socket.recv(4))
for s in range(len_fs)]
fs_all = sum(fs)
answer = input(f"{self._sender_hostname}-{b64_tok}"
f" wants to send: {fn} "
f"({ProgressBar.byte_rescale(fs_all)}). "
"Accept? (y/n) ")
if answer.lower() == 'y':
self._sender_socket.send(b'\x06') # ACK
else:
                self._sender_socket.send(b'\x15') # NAK
return printalert("Rejected file transfer")
except error:
return printerror("Sender isn't available anymore")
except:
self._sender_socket.send(b'\x15') # NAK
return printerror("Error while receiving headers")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now receiving file ({ProgressBar.byte_rescale(fs_all)})")
# Progress bar thread
self._progress_bar = ProgressBar(fs_all, 35)
self._progress_bar.start()
def progress_thread():
try:
# Wait until receiving file
while not self._recving_file:
pass
# Display until file is received
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
self._sender_token = tok
self._sender_filename = fn
self._sender_filesizes = fs
# Start receiving
try:
if self.transfer_type == 'S':
res = self._recv_s()
elif self.transfer_type == 'M':
res = self._recv_m()
else:
res = None
except:
self._progress_bar.stop()
self._recving_file = False
return printerror("Receiving file was unsuccessful")
else:
self._sender_socket.send(b'\x06') # ACK
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully received: {self._sender_filename}")
return res
def send_param_set(self):
"""
Pack and send Receiver's parameter settings.
Used to set Sender's parameter settings used during
data transmissions.
"""
try:
printalert("Waiting for sender")
self.socket.listen(1)
self._sender_socket, addrport = self.socket.accept()
except timeout:
return printerror("No sender available")
except:
return printerror("Error while waiting for sender")
try:
len_sender_hn = bytes_to_int_s(self._sender_socket.recv(2))
sender_hn = self._sender_socket.recv(len_sender_hn)
self._sender_hostname = unpack_str(sender_hn)
self._sender_socket.send(b'\x06') # ACK
except timeout:
return printerror("Operation timed out")
except:
return printerror("Error during handshake")
try:
hn = pack_str(gethostname())
len_hn = int_to_bytes_s(len(hn))
tok = self.token
tr_type = pack_str(self.transfer_type)
len_wp = int_to_bytes_s(len(self.worker_ports))
wp = [int_to_bytes_s(port)
for port in self.worker_ports]
wp = b''.join(wp)
headers = b''.join([len_hn, hn, tok, tr_type, len_wp, wp])
except:
return printerror("Error building headers")
try:
self._sender_socket.send(headers)
assert self._sender_socket.recv(1) == b'\x06' # ACK
except:
return printerror("Error while sending headers to sender")
else:
self.param_set = True
# Simplified Functions
def send(ip_address, port, filepath):
"""
Send file to receiver on the same network.
Parameters
----------
ip_address : str
The target receiver's IP address.
port : int
The target receiver's main TCP port.
filepath : str
The path to the file to be sent.
"""
sender = FlyterSender(ip_address, port)
sender.recv_param_set()
return sender.send_file(filepath)
def receive(host_ip_address, port, workers=1):
"""
Receive a file from sender on the same network.
Parameters
----------
host_ip_address : str
The receiver's host IP address.
port : int
The receiver's host port to listen on.
workers : :obj:`int`, optional
The number of workers to use.
"""
receiver = FlyterReciever(host_ip_address, port, workers)
receiver.send_param_set()
receiver.recv_file()
if __name__ == '__main__':
parser = ArgumentParser(
prog="Flyter",
epilog="See '<command> --help' to read about a specific sub-command."
)
subparsers = parser.add_subparsers(
dest="action",
help="The action to be performed"
)
send_parser = subparsers.add_parser("send")
recv_parser = subparsers.add_parser("recv")
send_parser.add_argument('-i', '--ip',
required=True,
help="Target receiver's IP address")
send_parser.add_argument('-p', '--port',
type=int,
required=True,
help="Target receiver's TCP port number")
send_parser.add_argument('-f', '--file',
required=True,
help="Path to the file to be sent")
recv_parser.add_argument('-i', '--ip',
required=True,
help="Host IP address")
recv_parser.add_argument('-p', '--port',
type=int,
required=True,
help="TCP port to listen on")
recv_parser.add_argument('-w', '--workers',
type=int,
default=1,
                             help="Number of worker connections to use")
if len(argv) > 1:
FROMTERMINAL = True
args = parser.parse_args()
if args.action == "send":
send(args.ip, args.port, args.file)
elif args.action == "recv":
receive(args.ip, args.port, args.workers)
else:
parser.print_help()
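For reference, a minimal usage sketch of the simplified `send`/`receive` helpers and the CLI defined above. The IP address, port, and file path are placeholders, and the snippet assumes this file is saved as `flyter.py` on both machines.

```python
# Receiving machine: bind to the local address and wait for a sender.
import flyter  # assumes this module is importable as flyter.py

flyter.receive("192.168.1.20", 5000, workers=4)

# Sending machine (run separately): point at the receiver's address.
flyter.send("192.168.1.20", 5000, "/path/to/file.bin")

# Equivalent CLI invocations:
#   python flyter.py recv -i 192.168.1.20 -p 5000 -w 4
#   python flyter.py send -i 192.168.1.20 -p 5000 -f /path/to/file.bin
```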
| """
Flyter
Tool for transferring files on the same network using raw sockets.
Doesn't use encryption.
"""
__version__ = (0, 0, 0)
__author__ = "CryptoNyxz"
__license__ = """
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from argparse import ArgumentParser
from base64 import b64encode
from datetime import timedelta
from math import log
from os import altsep, sep, \
mkdir, stat, unlink
from os.path import dirname, exists, join
from random import randint
from secrets import token_bytes
from shutil import get_terminal_size
from socket import \
socket, error, timeout, \
ntohs, ntohl, htons, htonl, \
gethostname, \
AF_INET, SOCK_STREAM
from threading import Thread
from time import time
from warnings import warn
from sys import argv, exit, version_info
if version_info < (3, 6):
warn('[!] Some features are not be compatible with the version of your '
'python interpreter')
FROMTERMINAL = False
# Utility Functions
def random_port(host):
"""Return a random available TCP port."""
while True:
port = randint(10_000, 65536)
with socket(AF_INET, SOCK_STREAM) as sock:
try:
sock.bind((host, port))
except error:
continue
else:
return port
def printerror(errormsg):
"""Print an error message."""
global FROMTERMINAL
if FROMTERMINAL:
print(f'\n[x] {errormsg}')
exit(-1)
exit(-1)
exit(-1)
exit(-1)
else:
warn(errormsg)
def printalert(alert):
"""Print an alert message."""
global FROMTERMINAL
print(f'[!] {alert}')
def int_to_bytes_s(integer):
"""Convert 16 - bit integer to bytes for packing."""
res = ntohs(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_s(byteseq):
"""Convert byte sequence to 16 - but integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htons(res)
def int_to_bytes_l(integer):
"""Convert 32 - but integer to bytes for packing."""
res = ntohl(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_l(byteseq):
"""Convert byte sequence to 32 - but integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htonl(res)
def pack_str(string):
"""Pack a string into a byte sequence."""
return string.encode()
def unpack_str(byteseq):
"""Unpack a byte sequence into a string."""
return byteseq.decode()
# Utility Classes
class ProgressBar:
"""
For displaying progress bars.
Parameters
----------
max_value : int, float
The upper limit of the progress bar.
length : :obj:`int`, optional
The length of the progress bar.
"""
@staticmethod
def byte_rescale(data, precision=1):
scale = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
p = int(log(data, 2)/10) if data else 0
r_bytes = round(data/pow(2, 10*p), precision)
return f"{r_bytes}{scale[p]}"
def __init__(self, max_value, length=50):
self.max_value = max_value
self.current_val = 0
self.length = length
self.rate = None
self.start_time = None
self.start_value = None
self.stopped = False
@property
def done(self):
"""Return if already finished."""
return self.current_val >= self.max_value or self.stopped
def start(self):
"""Start the progress bar."""
self.stopped = False
self.start_time = time()
self.start_value = self.current_val
def stop(self):
"""Stop the progress bar."""
self.stopped = True
def add_progress(self, value):
"""
Count new progress.
Parameter
---------
value : int, float
Added progress value.
"""
if self.stopped:
return
self.current_val += value
def display(self):
"""Display the current progress."""
if self.stopped:
return
d_value = self.current_val - self.start_value
d_max_value = self.max_value - self.start_value
d_time = time() - self.start_time
per = d_value/d_max_value
prog = int(self.length*per)
extra = self.length*round(per) > prog
prog_bar = '█'*prog + '▌'*extra
spaces = ' '*(self.length - (prog + extra))
rate = d_value/d_time if d_time else float('inf')
eta_s = round((d_max_value - d_value)/rate) if rate else \
None
eta = timedelta(seconds=eta_s) if eta_s is not None else '?'
clear_line = " "*(get_terminal_size().columns - 1)
print(f"{clear_line}\r"
"Progress: "
f"|{prog_bar}{spaces}| "
f"{100*per:.1f}% "
f"({ProgressBar.byte_rescale(d_value)}) "
f"[{ProgressBar.byte_rescale(rate)}/s] "
f"ETA: {eta}", end="\r")
# Flyter Classes
class FlyterSender:
"""
Handles Flyter file sending processes.
Note: Sends to FlyterReceiver instances.
Parameterss
----------
recver_ip : str
The IP address of the receiver.
main_port : int
The main TCP port of the receiver.
"""
DEFAULT_PACKET_SIZE = 1024
def __init__(self, recver_ip, main_port):
self.recver_ip = recver_ip
self.main_port = main_port
self.token = <PASSWORD>_<PASSWORD>(6)
self._recver_hostname = None
self._recver_token = None
self._transfer_type = None
self._worker_ports = None
self._packet_size = FlyterSender.DEFAULT_PACKET_SIZE
self._sending_file = False
self._workers_active = 0
self._progress_bar = None
try:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.settimeout(60)
except:
printerror('Error initializing sockets')
self.param_set = False
def __del__(self):
if isinstance(self.socket, socket):
self.socket.close()
def _send_s(self, filepath, file_size):
"""
Send a file with a single worker.
Parameters
----------
filepath : str
The filepath to the file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
return printerror("File doesn't exist")
self._sending_file = True
try:
fs = file_size
with open(filepath, 'br') as f:
while self._sending_file and fs:
packet = f.read(self._packet_size)
if not packet:
break
self.socket.send(packet)
assert self.socket.recv(1) == b'\x06' # ACK
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
except AssertionError:
self._progress_bar.stop()
return printerror("Receiver rejected packet")
except FileNotFoundError:
self._progress_bar.stop()
return printerror("Couldn't access file")
except PermissionError:
self._progress_bar.stop()
return printerror("Couldn't access file due to permission error")
except timeout:
self._progress_bar.stop()
return printerror("Operation timed out")
except:
self._progress_bar.stop()
return printerror(f"Error while sending file")
else:
self._sending_file = False
return True
def _send_m(self, filepath, file_sizes):
"""
Send a file with multiple workers.
Speeds up transmission rate by using multiple workers.
Parameters
----------
filepath : str
The filepath to the file to be sent.
file_sizes : list(int)
The sizes of the split-up file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
printerror("File doesn't exist")
def threadfunc(worker_num, fpath, start, end):
self._workers_active += 1
try:
with socket(AF_INET, SOCK_STREAM) as sock:
sock.connect(
(self.recver_ip, self._worker_ports[worker_num])
)
sock.send(self.token)
assert sock.recv(1) == b'\x06' # ACK
fs = end - start
with open(fpath, 'br') as f:
f.seek(start)
while self._sending_file and fs:
end_size = f.tell() + self._packet_size
size = (self._packet_size - max(0, end_size - end))
packet = f.read(size)
if not packet:
break
sock.send(packet)
assert sock.recv(1) == b'\x06' # ACK
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
except KeyboardInterrupt:
self._progress_bar.stop()
self._sending_file = False
return printerror("User aborted operation")
except AssertionError:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Receiver rejected packet")
except FileNotFoundError:
self._progress_bar.stop()
self._sending_file = False
return printerror("Couldn't access file")
except PermissionError:
self._progress_bar.stop()
self._sending_file = False
return printerror("Couldn't access file due to permission "
"error")
except timeout:
self._progress_bar.stop()
self._sending_file = False
return printerror("Operation timed out")
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Error while sending file")
finally:
self._workers_active -= 1
num_workers = len(self._worker_ports)
self._sending_file = True
try:
size = 0
for w in range(num_workers):
Thread(
target=threadfunc,
args=(
w, filepath,
size, size + file_sizes[w]
),
).start()
size += file_sizes[w]
except FileNotFoundError:
return printerror("Couldn't access file")
except PermissionError:
return printerror("Couldn't access file due to permission error")
except:
return printerror("Error while starting to send file")
while self._workers_active:
try:
pass
except KeyboardInterrupt:
self._progress_bar.stop()
self._sending_file = False
return printerror("User aborted operation")
self._sending_file = False
return True
def send_file(self, filepath):
"""
Send a file.
Parameters
----------
filepath : str
The filepath of the file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
return printerror("File doesn't exist")
# Headers
try:
tok = self.token
num_w = max(1, len(self._worker_ports))
fpath = filepath.replace(altsep, sep)
fname = fpath.split(sep)[-1]
fsize = stat(fpath).st_size
fsizes = [fsize//num_w for w in range(num_w)]
fsizes[-1] += fsize - sum(fsizes)
fn = pack_str(fname)
len_fn = int_to_bytes_s(len(fn))
fs = [int_to_bytes_l(s) for s in fsizes]
fs = b''.join(fs)
len_fs = int_to_bytes_s(num_w)
headers = b''.join([tok, len_fn, fn, len_fs, fs])
except:
return printerror("Error while preparing headers")
try:
b64_tok = b64encode(self._recver_token).decode()
printalert(f"Sending to {self._recver_hostname}-{b64_tok}:"
f" [ {fname} ]")
self.socket.send(headers)
print("Waiting for receiver to accept file")
assert self.socket.recv(1) == b'\x06' # ACK
except KeyboardInterrupt:
return printerror("User aborted operation")
except AssertionError:
return printerror("Receiver rejected")
except timeout:
return printerror("Operation timed out")
except Exception:
return printerror("Error while sending headers to receiver")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now sending file ({ProgressBar.byte_rescale(fsize)})")
# Progress bar thread
self._progress_bar = ProgressBar(fsize, 40)
self._progress_bar.start()
def progress_thread():
try:
# Wait until sending file
while not self._sending_file:
pass
# Display until file is sent
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
# Start sending
res = None
try:
if self._transfer_type == 'S':
res = self._send_s(fpath, fsize)
elif self._transfer_type == 'M':
res = self._send_m(fpath, fsizes)
assert self.socket.recv(1) == b'\x06' # ACK
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Sending file was unsuccessful")
else:
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully sent: {fname}")
return res
def recv_param_set(self):
"""
Receive and unpack Receiver's parameter settings.
Used to set Sender's parameter settings used during data
transmissions.
"""
try:
self.socket.connect((self.recver_ip, self.main_port))
except error:
return printerror("Can't connect to "
f"{self.recver_ip}:{self.main_port}")
try:
sender_hn = pack_str(gethostname())
len_sender_hn = int_to_bytes_s(len(sender_hn))
self.socket.send(b''.join([len_sender_hn, sender_hn]))
assert self.socket.recv(1) == b'\x06' # ACK
except AssertionError:
return printerror("Receiver rejected handshake")
except timeout:
return printerror('Operation timed out')
except:
return printerror("Error during handshake")
try:
len_hn = bytes_to_int_s(self.socket.recv(2))
self._recver_hostname = unpack_str(self.socket.recv(len_hn))
self._recver_token = self.socket.recv(6)
self._transfer_type = unpack_str(self.socket.recv(1))
len_wp = bytes_to_int_s(self.socket.recv(2))
self._worker_ports = [bytes_to_int_s(self.socket.recv(2))
for w in range(len_wp)]
self.socket.send(b'\x06') # ACK
except error:
return printerror("Error getting connected with socket")
except:
self.socket.send(b'\x15') # NAK
return printerror("Error getting parameters from receiver")
else:
self.param_set = True
class FlyterReciever:
"""
Handles Flyter file receiving processes.
Note: Receives from FlyterSender instances.
Parameters
----------
host_ip : str
The Host IP address to be used.
main_port : int
The main TCP port to be used.
num_workers : int
The amount of workers to be used during transmission.
"""
@staticmethod
def storage_dir(hostname=None):
"""
Return the path of the storage dir for received files.
If storage directory doesn't exist, creates it first.
Parameters
----------
hostname : str
The name of the subdirectory where that
host's sent files are stored.
"""
app_dirname = dirname(__file__)
appfiles_dirname = join(app_dirname, 'Flyter')
if not exists(appfiles_dirname):
mkdir(appfiles_dirname)
storage_dirname = join(appfiles_dirname, 'Received Files')
if not exists(storage_dirname):
mkdir(storage_dirname)
if hostname:
host_storage_dirname = join(storage_dirname, hostname)
if not exists(host_storage_dirname):
mkdir(host_storage_dirname)
return host_storage_dirname
else:
return storage_dirname
DEFAULT_PACKET_SIZE = 512
def __init__(self, host_ip, main_port, num_workers):
self.host_ip = host_ip
self.main_port = main_port
self.token = <PASSWORD>_<PASSWORD>(6)
self.transfer_type = 'S' if num_workers == 1 else 'M'
self.worker_ports = [
random_port(self.host_ip) for w in range(num_workers)
] if num_workers > 1 else []
self._sender_socket = None
self._sender_hostname = None
self._sender_token = None
self._sender_filename = None
self._sender_filesizes = None
self._packet_size = FlyterSender.DEFAULT_PACKET_SIZE
self._recving_file = False
self._workers_active = 0
self._progress_bar = ProgressBar(None)
try:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.bind((self.host_ip, self.main_port))
self.socket.settimeout(60)
self.workers = [
socket(AF_INET, SOCK_STREAM) for w in range(num_workers)
] if num_workers > 1 else []
if self.workers:
for w in range(num_workers):
self.workers[w].bind((self.host_ip, self.worker_ports[w]))
self.workers[w].settimeout(60)
except:
printerror('Error initializing sockets')
self.param_set = False
def __del__(self):
if isinstance(self.__dict__.get('socket'), socket):
self.socket.close()
if self.__dict__.get('workers'):
for w in self.workers:
w.close()
def _recv_s(self):
"""Receive a file with a single worker."""
if not self.param_set:
return printerror("Sender not yet set with parameters")
try:
self._recving_file = True
path = join(
FlyterReciever.storage_dir(self._sender_hostname),
self._sender_filename
)
fs = self._sender_filesizes[0]
with open(path, 'bw') as f:
while self._recving_file and fs:
packet = self._sender_socket.recv(self._packet_size)
f.write(packet)
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
self._sender_socket.send(b'\x06') # ACK
except timeout:
self._progress_bar.stop()
return printerror("Operation timed out")
except FileNotFoundError:
self._progress_bar.stop()
return printerror("Downloading file has been deleted")
except PermissionError:
self._progress_bar.stop()
return printerror("Couldn't access storage directory")
except error:
self._progress_bar.stop()
return printerror("Error with socket")
except:
self._progress_bar.stop()
return printerror("Error receiving file")
else:
self._recving_file = False
return True
def _recv_m(self):
"""
Receive a file with multiple workers.
Speeds up transmission rate by using multiple workers.
"""
if not self.param_set:
return printerror("Sender not yet set with parameters")
def threadfunc(worker_num, fpath):
self._workers_active += 1
try:
recver_socket = self.workers[worker_num]
recver_socket.listen(1)
sender_socket, hostaddr = recver_socket.accept()
send_tok = sender_socket.recv(6)
if send_tok == self._sender_token:
sender_socket.send(b'\x06') # ACK
else:
sender_socket.send(b'\x15') # NAK
fs = self._sender_filesizes[worker_num]
with open(fpath, 'bw') as f:
while self._recving_file and f.writable() and fs:
packet = sender_socket.recv(self._packet_size)
f.write(packet)
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
sender_socket.send(b'\x06') # ACK
except KeyboardInterrupt:
self._progress_bar.stop()
self._recving_file = False
return printerror("User aborted operation")
except timeout:
self._progress_bar.stop()
self._recving_file = False
return printerror("Operation timed out")
except error:
self._progress_bar.stop()
self._recving_file = False
return printerror("Error with sockets")
except:
self._progress_bar.stop()
self._recving_file = False
return printerror("Error while receiving file")
finally:
self._workers_active -= 1
num_workers = len(self.workers)
self._recving_file = True
try:
for w in range(len(self.worker_ports)):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
Thread(
target=threadfunc,
args=(w, wpath),
).start()
except FileNotFoundError:
return printerror("Couldn't access file")
except PermissionError:
return printerror("Couldn't access file due to permission error")
while self._workers_active:
try:
pass
except KeyboardInterrupt:
self._progress_bar.stop()
self._recving_file = False
printerror("User aborted operation")
self._recving_file = False
try:
# Build the file
path = join(
FlyterReciever.storage_dir(self._sender_hostname),
self._sender_filename
)
with open(path, 'bw') as output:
for w in range(num_workers):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
with open(wpath, 'br') as temp:
packet = True
while packet:
packet = temp.read(self._packet_size)
output.write(packet)
# Clear the contents of the temp file
open(wpath, 'bw').close()
# Delete the temp files
for w in range(num_workers):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
unlink(wpath)
except PermissionError:
self._sender_socket.send(b'\x15') # NAK
return printerror("Couldn't save file due to permissions")
except error:
return printerror("Error with sockets")
except:
self._sender_socket.send(b'\x15') # NAK
return printerror("Error while saving file")
else:
return True
def recv_file(self):
"""Receive a file."""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
# Headers
try:
tok = self._sender_socket.recv(6)
b64_tok = b64encode(tok).decode()
len_fn = bytes_to_int_s(self._sender_socket.recv(2))
fn = unpack_str(self._sender_socket.recv(len_fn))
len_fs = bytes_to_int_s(self._sender_socket.recv(2))
fs = [bytes_to_int_l(self._sender_socket.recv(4))
for s in range(len_fs)]
fs_all = sum(fs)
answer = input(f"{self._sender_hostname}-{b64_tok}"
f" wants to send: {fn} "
f"({ProgressBar.byte_rescale(fs_all)}). "
"Accept? (y/n) ")
if answer.lower() == 'y':
self._sender_socket.send(b'\x06') # ACK
else:
                self._sender_socket.send(b'\x15')  # NAK
return printalert("Rejected file transfer")
except error:
return printerror("Sender isn't available anymore")
except:
self._sender_socket.send(b'\x15') # NAK
return printerror("Error while receiving headers")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now receiving file ({ProgressBar.byte_rescale(fs_all)})")
# Progress bar thread
self._progress_bar = ProgressBar(fs_all, 35)
self._progress_bar.start()
def progress_thread():
try:
# Wait until receiving file
while not self._recving_file:
pass
# Display until file is received
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
self._sender_token = tok
self._sender_filename = fn
self._sender_filesizes = fs
# Start receiving
try:
if self.transfer_type == 'S':
res = self._recv_s()
elif self.transfer_type == 'M':
res = self._recv_m()
else:
res = None
except:
self._progress_bar.stop()
self._recving_file = False
return printerror("Receiving file was unsuccessful")
else:
self._sender_socket.send(b'\x06') # ACK
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully received: {self._sender_filename}")
return res
def send_param_set(self):
"""
Pack and send Receiver's parameter settings.
Used to set Sender's parameter settings used during
data transmissions.
"""
try:
printalert("Waiting for sender")
self.socket.listen(1)
self._sender_socket, addrport = self.socket.accept()
except timeout:
return printerror("No sender available")
except:
return printerror("Error while waiting for sender")
try:
len_sender_hn = bytes_to_int_s(self._sender_socket.recv(2))
sender_hn = self._sender_socket.recv(len_sender_hn)
self._sender_hostname = unpack_str(sender_hn)
self._sender_socket.send(b'\x06') # ACK
except timeout:
return printerror("Operation timed out")
except:
return printerror("Error during handshake")
try:
hn = pack_str(gethostname())
len_hn = int_to_bytes_s(len(hn))
tok = self.token
tr_type = pack_str(self.transfer_type)
len_wp = int_to_bytes_s(len(self.worker_ports))
wp = [int_to_bytes_s(port)
for port in self.worker_ports]
wp = b''.join(wp)
headers = b''.join([len_hn, hn, tok, tr_type, len_wp, wp])
except:
return printerror("Error building headers")
try:
self._sender_socket.send(headers)
assert self._sender_socket.recv(1) == b'\x06' # ACK
except:
return printerror("Error while sending headers to sender")
else:
self.param_set = True
# Simplified Functions
def send(ip_address, port, filepath):
"""
Send file to receiver on the same network.
Parameters
----------
ip_address : str
The target receiver's IP address.
port : int
The target receiver's main TCP port.
filepath : str
The path to the file to be sent.
"""
sender = FlyterSender(ip_address, port)
sender.recv_param_set()
return sender.send_file(filepath)
def receive(host_ip_address, port, workers=1):
"""
Receive a file from sender on the same network.
Parameters
----------
host_ip_address : str
The receiver's host IP address.
port : int
The receiver's host port to listen on.
workers : :obj:`int`, optional
The number of workers to use.
"""
receiver = FlyterReciever(host_ip_address, port, workers)
receiver.send_param_set()
receiver.recv_file()
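# Example pairing (sketch; the IP address, port and filename are illustrative).
# The receiver must be started first so that the parameter exchange in
# send_param_set()/recv_param_set() can take place before the transfer:
#
#   on the receiving machine:  receive("192.168.1.10", 5000, workers=4)
#   on the sending machine:    send("192.168.1.10", 5000, "video.mp4")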
if __name__ == '__main__':
parser = ArgumentParser(
prog="Flyter",
epilog="See '<command> --help' to read about a specific sub-command."
)
subparsers = parser.add_subparsers(
dest="action",
help="The action to be performed"
)
send_parser = subparsers.add_parser("send")
recv_parser = subparsers.add_parser("recv")
send_parser.add_argument('-i', '--ip',
required=True,
help="Target receiver's IP address")
send_parser.add_argument('-p', '--port',
type=int,
required=True,
help="Target receiver's TCP port number")
send_parser.add_argument('-f', '--file',
required=True,
help="Path to the file to be sent")
recv_parser.add_argument('-i', '--ip',
required=True,
help="Host IP address")
recv_parser.add_argument('-p', '--port',
type=int,
required=True,
help="TCP port to listen on")
recv_parser.add_argument('-w', '--workers',
type=int,
default=1,
                             help="Number of workers to use")
if len(argv) > 1:
FROMTERMINAL = True
args = parser.parse_args()
if args.action == "send":
send(args.ip, args.port, args.file)
elif args.action == "recv":
receive(args.ip, args.port, args.workers)
else:
parser.print_help() | en | 0.792057 | Flyter Tool for transferring files on the same network using raw sockets. Doesn't use encryption. MIT License Copyright (c) 2021 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Utility Functions Return a random available TCP port. Print an error message. Print an alert message. Convert 16 - bit integer to bytes for packing. Convert byte sequence to 16 - but integer for unpacking. Convert 32 - but integer to bytes for packing. Convert byte sequence to 32 - but integer for unpacking. Pack a string into a byte sequence. Unpack a byte sequence into a string. # Utility Classes For displaying progress bars. Parameters ---------- max_value : int, float The upper limit of the progress bar. length : :obj:`int`, optional The length of the progress bar. Return if already finished. Start the progress bar. Stop the progress bar. Count new progress. Parameter --------- value : int, float Added progress value. Display the current progress. # Flyter Classes Handles Flyter file sending processes. Note: Sends to FlyterReceiver instances. Parameterss ---------- recver_ip : str The IP address of the receiver. main_port : int The main TCP port of the receiver. Send a file with a single worker. Parameters ---------- filepath : str The filepath to the file to be sent. # ACK Send a file with multiple workers. Speeds up transmission rate by using multiple workers. Parameters ---------- filepath : str The filepath to the file to be sent. file_sizes : list(int) The sizes of the split-up file to be sent. # ACK # ACK Send a file. Parameters ---------- filepath : str The filepath of the file to be sent. # Headers # ACK # Progress bar thread # Wait until sending file # Display until file is sent # Start sending # ACK # Wait for progress bar Receive and unpack Receiver's parameter settings. Used to set Sender's parameter settings used during data transmissions. # ACK # ACK # NAK Handles Flyter file receiving processes. Note: Receives from FlyterSender instances. Parameters ---------- host_ip : str The Host IP address to be used. main_port : int The main TCP port to be used. num_workers : int The amount of workers to be used during transmission. Return the path of the storage dir for received files. If storage directory doesn't exist, creates it first. Parameters ---------- hostname : str The name of the subdirectory where that host's sent files are stored. Receive a file with a single worker. # ACK Receive a file with multiple workers. Speeds up transmission rate by using multiple workers. 
# ACK # NAK # ACK # Build the file # Clear the contents of the temp file # Delete the temp files # NAK # NAK Receive a file. # Headers # ACK # NAK # NAK # Progress bar thread # Wait until receiving file # Display until file is received # Start receiving # ACK # Wait for progress bar Pack and send Receiver's parameter settings. Used to set Sender's parameter settings used during data transmissions. # ACK # ACK # Simplified Functions Send file to receiver on the same network. Parameters ---------- ip_address : str The target receiver's IP address. port : int The target receiver's main TCP port. filepath : str The path to the file to be sent. Receive a file from sender on the same network. Parameters ---------- host_ip_address : str The receiver's host IP address. port : int The receiver's host port to listen on. workers : :obj:`int`, optional The number of workers to use. | 1.959146 | 2 |
tests/test_modeling_tf_led.py | patelrajnath/transformers | 0 | 603 | # coding=utf-8
# Copyright <NAME>, <NAME>, <NAME> and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
config_cls = LEDConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
attention_window=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
self.key_length = self.attention_window + 1
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
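        # Worked example with the defaults above: seq_length=7, attention_window=4
        # -> encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8, i.e. the encoder input
        #    is padded up to the next multiple of the attention window.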
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
attention_window=self.attention_window,
**self.config_updates,
)
inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
global_attention_mask = tf.concat(
[tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
axis=-1,
)
inputs_dict["global_attention_mask"] = global_attention_mask
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFLEDModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.cast(tf.math.not_equal(decoder_input_ids, config.pad_token_id), tf.int8)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
def setUp(self):
self.model_tester = TFLEDModelTester(self)
self.config_tester = ConfigTester(self, config_class=LEDConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
x = model.get_output_layer_with_bias()
assert x is None
name = model.get_prefix_bias_name()
assert name is None
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
num_global_attn_indices = 2
inputs_dict["global_attention_mask"] = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
1,
inputs_dict["global_attention_mask"],
)
config.return_dict = True
seq_length = self.model_tester.seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(outputs):
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
def check_encoder_attentions_output(outputs):
attentions = [t.numpy() for t in outputs.encoder_attentions]
global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, seq_length],
)
self.assertListEqual(
list(global_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
@slow
def test_saved_model_with_attentions_output(self):
# longformer has special attentions which are not
# compatible in graph mode
pass
@slow
def test_saved_model_with_hidden_states_output(self):
# TODO(JPLU, PVP) this test should pass!!! PVP:
# IMO there is a problem with the signature check.
# Test passes for TFLEDModel, but not for TFLEDForConditionalGeneration
# IMO the reason is that the tensor variable name cannot be changed
        # from decoder_input_ids -> input_ids, which poses a BIG restriction
pass
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
    """If tensors are not close, or a and b aren't both tensors, raise a nice AssertionError."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
def test_inference_no_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, 768)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
def test_inference_with_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
| # coding=utf-8
# Copyright <NAME>, <NAME>, <NAME> and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
config_cls = LEDConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
attention_window=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
self.key_length = self.attention_window + 1
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
attention_window=self.attention_window,
**self.config_updates,
)
inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
global_attention_mask = tf.concat(
[tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
axis=-1,
)
inputs_dict["global_attention_mask"] = global_attention_mask
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFLEDModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.cast(tf.math.not_equal(decoder_input_ids, config.pad_token_id), tf.int8)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
def setUp(self):
self.model_tester = TFLEDModelTester(self)
self.config_tester = ConfigTester(self, config_class=LEDConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
x = model.get_output_layer_with_bias()
assert x is None
name = model.get_prefix_bias_name()
assert name is None
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
num_global_attn_indices = 2
inputs_dict["global_attention_mask"] = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
1,
inputs_dict["global_attention_mask"],
)
config.return_dict = True
seq_length = self.model_tester.seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(outputs):
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
def check_encoder_attentions_output(outputs):
attentions = [t.numpy() for t in outputs.encoder_attentions]
global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, seq_length],
)
self.assertListEqual(
list(global_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
@slow
def test_saved_model_with_attentions_output(self):
# longformer has special attentions which are not
# compatible in graph mode
pass
@slow
def test_saved_model_with_hidden_states_output(self):
# TODO(JPLU, PVP) this test should pass!!! PVP:
# IMO there is a problem with the signature check.
# Test passes for TFLEDModel, but not for TFLEDForConditionalGeneration
# IMO the reason is that the tensor variable name cannot be changed
        # from decoder_input_ids -> input_ids, which poses a BIG restriction
pass
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
    """If tensors are not close, or a and b aren't both tensors, raise a nice AssertionError."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
def test_inference_no_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, 768)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
def test_inference_with_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
| en | 0.821307 | # coding=utf-8 # Copyright <NAME>, <NAME>, <NAME> and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests # first forward pass # create hypothetical next token and extent to next_input_ids # append to next input_ids and # select random slice # test that outputs are equal for slice # Check that output attentions can also be changed via the config # Check attention is always last and order is fine # longformer has special attentions which are not # compatible in graph mode # TODO(JPLU, PVP) this test should pass!!! PVP: # IMO there is a problem with the signature check. # Test passes for TFLEDModel, but not for TFLEDForConditionalGeneration # IMO the reason is that the tensor variable name cannot be changed # from decoder_input_ids -> input_ids, which poses a BIG restrictions If tensors not close, or a and b arent both tensors, raise a nice Assertion error. # change to intended input here # change to expected output here # change to intended input here # change to expected output here | 1.795203 | 2 |
src/wann_genetic/individual/numpy/ffnn.py | plonerma/wann-genetic | 0 | 604 | <filename>src/wann_genetic/individual/numpy/ffnn.py
import numpy as np
import sklearn
import logging
from wann_genetic.individual.network_base import BaseFFNN
def softmax(x, axis=-1):
    """Compute softmax values for each set of scores in x.
Returns:
softmax - softmax normalized in dim axis
"""
e_x = np.exp(x - np.expand_dims(np.max(x,axis=axis), axis=axis))
s = (e_x / np.expand_dims(e_x.sum(axis=-1), axis=axis))
return s
def apply_act_function(available_funcs, selected_funcs, x=None):
"""Apply the activation function of the selected nodes to their sums.
    This fulfils the same function as the
:class:`wann_genetic.individual.torch.ffn.MultiActivationModule`.
"""
if x is not None:
result = np.empty(x.shape)
for i, func in enumerate(selected_funcs):
assert func < len(available_funcs)
result[..., i] = available_funcs[func][1](x[..., i])
return result
else:
return np.array([ # return function names
available_funcs[func][0] for func in selected_funcs
])
class Network(BaseFFNN):
    """Numpy implementation of a Feed Forward Neural Network
For an explanation of how propagation works, see :doc:`numpy_network`.
"""
# Definition of the activations functions
available_act_functions = [
('relu', lambda x: np.maximum(0, x)),
('sigmoid', lambda x: (np.tanh(x/2.0) + 1.0)/2.0),
('tanh', lambda x: np.tanh(x)),
('gaussian (standard)', lambda x: np.exp(-np.multiply(x, x) / 2.0)),
('step', lambda x: 1.0*(x>0.0)),
('identity', lambda x: x),
('inverse', lambda x: -x),
('squared', lambda x: x**2), # unstable if applied multiple times
('abs', lambda x: np.abs(x)),
('cos', lambda x: np.cos(np.pi*x)),
('sin ', lambda x: np.sin(np.pi*x)),
]
enabled_act_functions = available_act_functions
def get_measurements(self, weights, x, y_true=None, measures=['predictions']):
assert len(x.shape) == 2 # multiple one dimensional input arrays
assert isinstance(weights, np.ndarray)
# initial activations
act_vec = np.empty((weights.shape[0], x.shape[0], self.n_nodes), dtype=float)
act_vec[..., :self.n_in] = x[...]
act_vec[..., self.n_in] = 1 # bias
# propagate signal through all layers
for active_nodes in self.layers():
act_vec[..., active_nodes] = self.calc_act(act_vec, active_nodes, weights)
        # if any node is NaN, we can't rely on the result
valid = np.all(~np.isnan(act_vec), axis=-1)
act_vec[~valid, :] = np.nan
y_raw = act_vec[..., -self.n_out:]
return self.measurements_from_output(y_raw, y_true, measures)
def measurements_from_output(self, y_raw, y_true, measures):
return_values = dict()
if 'raw' in measures:
return_values['raw'] = y_raw
y_pred = np.argmax(y_raw, axis=-1)
y_prob = softmax(y_raw, axis=-1)
if 'probabilities' in measures:
return_values['probabilities'] = y_prob
if 'predictions' in measures:
return_values['predictions'] = y_pred
y_raw = y_raw.reshape(y_raw.shape[0], -1, self.n_out)
y_prob = y_prob.reshape(y_raw.shape[0], -1, self.n_out)
y_pred = y_pred.reshape(y_raw.shape[0], -1)
if y_true is not None:
y_true = y_true.reshape(-1)
if 'log_loss' in measures:
# nan is same as maximally falsely predicted
y_prob[~np.isfinite(y_prob)] = 0
return_values['log_loss'] = np.array([
sklearn.metrics.log_loss(y_true, prob, labels=np.arange(self.n_out))
for prob in y_prob
])
if 'mse_loss' in measures:
return_values['mse_loss'] = np.array([
sklearn.metrics.mean_squared_error(y_true, raw)
for raw in y_raw
])
if 'accuracy' in measures:
return_values['accuracy'] = np.array([
sklearn.metrics.accuracy_score(y_true, pred)
for pred in y_pred
])
if 'kappa' in measures:
return_values['kappa'] = np.array([
sklearn.metrics.cohen_kappa_score(y_true, pred)
for pred in y_pred
])
return return_values
def activation_functions(self, nodes, x=None):
funcs = self.nodes['func'][nodes - self.offset]
return apply_act_function(self.enabled_act_functions, funcs, x)
def calc_act(self, x, active_nodes, base_weights, add_to_sum=0):
"""Apply updates for active nodes (active nodes can't share edges).
"""
addend_nodes = active_nodes[0]
M = self.weight_matrix[:addend_nodes, active_nodes - self.offset]
# x3d: weights, samples, source nodes
# M3d: weights, source, target
# multiply relevant weight matrix with base weights
M3d = M[None, :, :] * base_weights[:, None, None]
x3d = x[..., :addend_nodes]
act_sums = np.matmul(x3d, M3d) + add_to_sum
# apply activation function for active nodes
return self.activation_functions(active_nodes, act_sums)
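    # Shape sketch for calc_act (sizes are illustrative; W = number of shared
    # base weights, S = samples, A = addend/source nodes, T = active target nodes):
    #   x3d : (W, S, A)  activations of every potential source node
    #   M3d : (W, A, T)  topology weights scaled by each base weight
    #   np.matmul(x3d, M3d) -> (W, S, T), the weighted sums passed to the
    #   selected activation functions of the target nodes.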
| <filename>src/wann_genetic/individual/numpy/ffnn.py
import numpy as np
import sklearn
import logging
from wann_genetic.individual.network_base import BaseFFNN
def softmax(x, axis=-1):
    """Compute softmax values for each set of scores in x.
Returns:
softmax - softmax normalized in dim axis
"""
e_x = np.exp(x - np.expand_dims(np.max(x,axis=axis), axis=axis))
s = (e_x / np.expand_dims(e_x.sum(axis=-1), axis=axis))
return s
def apply_act_function(available_funcs, selected_funcs, x=None):
"""Apply the activation function of the selected nodes to their sums.
    This fulfils the same function as the
:class:`wann_genetic.individual.torch.ffn.MultiActivationModule`.
"""
if x is not None:
result = np.empty(x.shape)
for i, func in enumerate(selected_funcs):
assert func < len(available_funcs)
result[..., i] = available_funcs[func][1](x[..., i])
return result
else:
return np.array([ # return function names
available_funcs[func][0] for func in selected_funcs
])
class Network(BaseFFNN):
    """Numpy implementation of a Feed Forward Neural Network
For an explanation of how propagation works, see :doc:`numpy_network`.
"""
# Definition of the activations functions
available_act_functions = [
('relu', lambda x: np.maximum(0, x)),
('sigmoid', lambda x: (np.tanh(x/2.0) + 1.0)/2.0),
('tanh', lambda x: np.tanh(x)),
('gaussian (standard)', lambda x: np.exp(-np.multiply(x, x) / 2.0)),
('step', lambda x: 1.0*(x>0.0)),
('identity', lambda x: x),
('inverse', lambda x: -x),
('squared', lambda x: x**2), # unstable if applied multiple times
('abs', lambda x: np.abs(x)),
('cos', lambda x: np.cos(np.pi*x)),
('sin ', lambda x: np.sin(np.pi*x)),
]
enabled_act_functions = available_act_functions
def get_measurements(self, weights, x, y_true=None, measures=['predictions']):
assert len(x.shape) == 2 # multiple one dimensional input arrays
assert isinstance(weights, np.ndarray)
# initial activations
act_vec = np.empty((weights.shape[0], x.shape[0], self.n_nodes), dtype=float)
act_vec[..., :self.n_in] = x[...]
act_vec[..., self.n_in] = 1 # bias
# propagate signal through all layers
for active_nodes in self.layers():
act_vec[..., active_nodes] = self.calc_act(act_vec, active_nodes, weights)
        # if any node is NaN, we can't rely on the result
valid = np.all(~np.isnan(act_vec), axis=-1)
act_vec[~valid, :] = np.nan
y_raw = act_vec[..., -self.n_out:]
return self.measurements_from_output(y_raw, y_true, measures)
def measurements_from_output(self, y_raw, y_true, measures):
return_values = dict()
if 'raw' in measures:
return_values['raw'] = y_raw
y_pred = np.argmax(y_raw, axis=-1)
y_prob = softmax(y_raw, axis=-1)
if 'probabilities' in measures:
return_values['probabilities'] = y_prob
if 'predictions' in measures:
return_values['predictions'] = y_pred
y_raw = y_raw.reshape(y_raw.shape[0], -1, self.n_out)
y_prob = y_prob.reshape(y_raw.shape[0], -1, self.n_out)
y_pred = y_pred.reshape(y_raw.shape[0], -1)
if y_true is not None:
y_true = y_true.reshape(-1)
if 'log_loss' in measures:
# nan is same as maximally falsely predicted
y_prob[~np.isfinite(y_prob)] = 0
return_values['log_loss'] = np.array([
sklearn.metrics.log_loss(y_true, prob, labels=np.arange(self.n_out))
for prob in y_prob
])
if 'mse_loss' in measures:
return_values['mse_loss'] = np.array([
sklearn.metrics.mean_squared_error(y_true, raw)
for raw in y_raw
])
if 'accuracy' in measures:
return_values['accuracy'] = np.array([
sklearn.metrics.accuracy_score(y_true, pred)
for pred in y_pred
])
if 'kappa' in measures:
return_values['kappa'] = np.array([
sklearn.metrics.cohen_kappa_score(y_true, pred)
for pred in y_pred
])
return return_values
def activation_functions(self, nodes, x=None):
funcs = self.nodes['func'][nodes - self.offset]
return apply_act_function(self.enabled_act_functions, funcs, x)
def calc_act(self, x, active_nodes, base_weights, add_to_sum=0):
"""Apply updates for active nodes (active nodes can't share edges).
"""
addend_nodes = active_nodes[0]
M = self.weight_matrix[:addend_nodes, active_nodes - self.offset]
# x3d: weights, samples, source nodes
# M3d: weights, source, target
# multiply relevant weight matrix with base weights
M3d = M[None, :, :] * base_weights[:, None, None]
x3d = x[..., :addend_nodes]
act_sums = np.matmul(x3d, M3d) + add_to_sum
# apply activation function for active nodes
return self.activation_functions(active_nodes, act_sums)
| en | 0.739237 | Compute softmax values for each sets of scores in x. Returns: softmax - softmax normalized in dim axis Apply the activation function of the selected nodes to their sums. This fullfils the same function as the :class:`wann_genetic.individual.torch.ffn.MultiActivationModule`. # return function names Numpy implmentation of a Feed Forward Neural Network For an explanation of how propagation works, see :doc:`numpy_network`. # Definition of the activations functions # unstable if applied multiple times # multiple one dimensional input arrays # initial activations # bias # propagate signal through all layers # if any node is nan, we cant rely on the result # nan is same as maximally falsely predicted Apply updates for active nodes (active nodes can't share edges). # x3d: weights, samples, source nodes # M3d: weights, source, target # multiply relevant weight matrix with base weights # apply activation function for active nodes | 2.981031 | 3 |
common/tests/util.py | uktrade/tamato | 14 | 605 | import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
@contextlib.contextmanager
def raises_if(exception, expected):
try:
yield
except exception:
if not expected:
raise
else:
if expected:
pytest.fail(f"Did not raise {exception}")
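# Usage sketch (the model and `expect_error` flag are illustrative; the flag
# typically comes from pytest.mark.parametrize):
#
# with raises_if(ValidationError, expect_error):
#     model.full_clean()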
def check_validator(validate, value, expected_valid):
try:
validate(value)
except ValidationError:
if expected_valid:
pytest.fail(f'Unexpected validation error for value "{value}"')
except Exception:
raise
else:
if not expected_valid:
pytest.fail(f'Expected validation error for value "{value}"')
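# Usage sketch (hypothetical validator and values):
#
# @pytest.mark.parametrize("value, expected_valid", [("AB", True), ("ab!", False)])
# def test_my_code_validator(value, expected_valid):
#     check_validator(validate_my_code, value, expected_valid)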
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def generate_test_import_xml(obj: dict) -> BytesIO:
xml = render_to_string(
template_name="workbaskets/taric/transaction_detail.xml",
context={
"envelope_id": next(_transaction_counter),
"tracked_models": [obj],
"transaction_id": next(_transaction_counter),
"message_counter": counter_generator(),
"counter_generator": counter_generator,
},
)
return BytesIO(xml.encode())
def taric_xml_record_codes(xml):
    """Return a list of (record_code, subrecord_code) tuples."""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
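# Usage sketch (assumes `obj` is a serializer-rendered dict for a single
# tracked model, as expected by the transaction_detail template):
#
# xml_file = generate_test_import_xml(obj)
# xml = etree.parse(xml_file)
# codes = taric_xml_record_codes(xml)  # [(record_code, subrecord_code), ...]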
def validate_taric_xml(
factory=None,
instance=None,
factory_kwargs=None,
check_order=True,
):
def decorator(func):
def wraps(
api_client,
taric_schema,
approved_transaction,
valid_user,
*args,
**kwargs,
):
if not factory and not instance:
raise AssertionError(
"Either a factory or an object instance need to be provided",
)
if factory and instance:
raise AssertionError(
"Either a factory or an object instance need to be provided - not both.",
)
current_instance = instance or factory.create(
transaction=approved_transaction, **factory_kwargs or {}
)
api_client.force_login(user=valid_user)
response = api_client.get(
reverse(
"workbaskets:workbasket-detail",
kwargs={"pk": approved_transaction.workbasket.pk},
),
{"format": "xml"},
)
assert response.status_code == 200
content = response.content
xml = etree.XML(content)
taric_schema.validate(xml)
assert not taric_schema.error_log, f"XML errors: {taric_schema.error_log}"
if check_order:
validate_taric_xml_record_order(xml)
kwargs = {"xml": xml, **kwargs}
func(
*args,
**kwargs,
)
return wraps
return decorator
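# Usage sketch (hypothetical test; the factory name is illustrative):
#
# @validate_taric_xml(factory=FootnoteFactory)
# def test_footnote_xml(xml):
#     assert taric_xml_record_codes(xml)  # inspect (record, subrecord) codes
#
# The decorator creates the object in the approved transaction, fetches the
# workbasket as TARIC XML through the API client, validates it against the
# schema (and, by default, the record order), then passes the parsed tree to
# the wrapped test as the `xml` keyword argument.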
class Dates:
deltas = {
"normal": (relativedelta(), relativedelta(months=+1)),
"earlier": (relativedelta(years=-1), relativedelta(years=-1, months=+1)),
"later": (
relativedelta(years=+1, months=+1, days=+1),
relativedelta(years=+1, months=+2),
),
"big": (relativedelta(years=-2), relativedelta(years=+2, days=+1)),
"adjacent": (relativedelta(days=+1), relativedelta(months=+1)),
"adjacent_earlier": (relativedelta(months=-1), relativedelta(days=-1)),
"adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
"adjacent_no_end": (relativedelta(months=+1, days=+1), None),
"adjacent_even_later": (
relativedelta(months=+2, days=+1),
relativedelta(months=+3),
),
"adjacent_earlier_big": (
relativedelta(years=-2, months=-2),
relativedelta(years=-2),
),
"adjacent_later_big": (
relativedelta(months=+1, days=+1),
relativedelta(years=+2, months=+2),
),
"overlap_normal": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1, years=+1),
),
"overlap_normal_earlier": (
relativedelta(months=-1, days=+14),
relativedelta(days=+14),
),
"overlap_normal_same_year": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1),
),
"overlap_big": (relativedelta(years=+1), relativedelta(years=+3, days=+2)),
"after_big": (
relativedelta(years=+3, months=+1),
relativedelta(years=+3, months=+2),
),
"backwards": (relativedelta(months=+1), relativedelta(days=+1)),
"starts_with_normal": (relativedelta(), relativedelta(days=+14)),
"ends_with_normal": (relativedelta(days=+14), relativedelta(months=+1)),
"current": (relativedelta(weeks=-4), relativedelta(weeks=+4)),
"future": (relativedelta(weeks=+10), relativedelta(weeks=+20)),
"no_end": (relativedelta(), None),
"normal_first_half": (relativedelta(), relativedelta(days=+14)),
}
@property
def now(self):
return self.datetime_now.date()
@property
def datetime_now(self):
return datetime.now(tz=UTC).replace(hour=0, minute=0, second=0, microsecond=0)
def __getattr__(self, name):
if name in self.deltas:
start, end = self.deltas[name]
start = self.now + start
if end is not None:
end = self.now + end
return TaricDateRange(start, end)
raise AttributeError(name)
@classmethod
def short_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-14),
)
@classmethod
def medium_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-1),
)
@classmethod
def short_after(cls, dt):
return TaricDateRange(
dt + relativedelta(days=+14),
dt + relativedelta(months=+1),
)
@classmethod
def short_overlap(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(months=+1),
)
@classmethod
def no_end_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
None,
)
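# Usage sketch: attribute access on a Dates instance builds TaricDateRange
# objects relative to today, while the classmethods build ranges around an
# explicit date:
#
# dates = Dates()
# dates.normal                   # today .. one month from today
# dates.overlap_normal           # starts 15 days in, overlapping `normal`
# Dates.short_before(dates.now)  # a short range ending two weeks before today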
def only_applicable_after(cutoff):
"""
Decorator which asserts that a test fails after a specified cutoff date.
:param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
def decorator(fn):
@wraps(fn)
def do_test(*args, **kwargs):
# test should pass normally
fn(*args, **kwargs)
# test should fail before cutoff
with freeze_time(cutoff + relativedelta(days=-1)):
try:
fn(*args, **kwargs)
except pytest.fail.Exception:
pass
except Exception:
raise
else:
pytest.fail(f"Rule applied before {cutoff:%Y-%m-%d}")
return True
return do_test
return decorator
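# Usage sketch (hypothetical business-rule test; the cutoff date is illustrative):
#
# @only_applicable_after("2021-01-01")
# def test_rule_enforced_after_cutoff():
#     ...  # assertions that only hold on or after the cutoff date
#
# The wrapped test runs once at the real current date and once with time frozen
# to the day before the cutoff, where it is expected to fail.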
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
>>> datetime.date(2021, 1, 2),
>>> datetime.date(2022, 3, 4),
>>> )
{
"start_date_0": 1,
"start_date_1": 2,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
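# Editor's sketch (hypothetical helper, not part of the original module): the two
# helpers above are usually combined to drive a form submission in a view test.
# It assumes a project form exposing `rendered_fields` (as the ValidityPeriodForm
# style forms here do) plus a Django test `client`; the URL and dates are
# placeholders.
def example_post_validity_form(client, form, url):
    data = get_form_data(form)
    data.update(validity_period_post_data(date(2021, 1, 1), date(2021, 12, 31)))
    return client.post(url, data)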
| import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
@contextlib.contextmanager
def raises_if(exception, expected):
try:
yield
except exception:
if not expected:
raise
else:
if expected:
pytest.fail(f"Did not raise {exception}")
def check_validator(validate, value, expected_valid):
try:
validate(value)
except ValidationError:
if expected_valid:
pytest.fail(f'Unexpected validation error for value "{value}"')
except Exception:
raise
else:
if not expected_valid:
pytest.fail(f'Expected validation error for value "{value}"')
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def generate_test_import_xml(obj: dict) -> BytesIO:
xml = render_to_string(
template_name="workbaskets/taric/transaction_detail.xml",
context={
"envelope_id": next(_transaction_counter),
"tracked_models": [obj],
"transaction_id": next(_transaction_counter),
"message_counter": counter_generator(),
"counter_generator": counter_generator,
},
)
return BytesIO(xml.encode())
def taric_xml_record_codes(xml):
"""Yields tuples of (record_code, subrecord_code)"""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
def validate_taric_xml(
factory=None,
instance=None,
factory_kwargs=None,
check_order=True,
):
def decorator(func):
def wraps(
api_client,
taric_schema,
approved_transaction,
valid_user,
*args,
**kwargs,
):
if not factory and not instance:
raise AssertionError(
"Either a factory or an object instance need to be provided",
)
if factory and instance:
raise AssertionError(
"Either a factory or an object instance need to be provided - not both.",
)
current_instance = instance or factory.create(
transaction=approved_transaction, **factory_kwargs or {}
)
api_client.force_login(user=valid_user)
response = api_client.get(
reverse(
"workbaskets:workbasket-detail",
kwargs={"pk": approved_transaction.workbasket.pk},
),
{"format": "xml"},
)
assert response.status_code == 200
content = response.content
xml = etree.XML(content)
taric_schema.validate(xml)
assert not taric_schema.error_log, f"XML errors: {taric_schema.error_log}"
if check_order:
validate_taric_xml_record_order(xml)
kwargs = {"xml": xml, **kwargs}
func(
*args,
**kwargs,
)
return wraps
return decorator
class Dates:
deltas = {
"normal": (relativedelta(), relativedelta(months=+1)),
"earlier": (relativedelta(years=-1), relativedelta(years=-1, months=+1)),
"later": (
relativedelta(years=+1, months=+1, days=+1),
relativedelta(years=+1, months=+2),
),
"big": (relativedelta(years=-2), relativedelta(years=+2, days=+1)),
"adjacent": (relativedelta(days=+1), relativedelta(months=+1)),
"adjacent_earlier": (relativedelta(months=-1), relativedelta(days=-1)),
"adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
"adjacent_no_end": (relativedelta(months=+1, days=+1), None),
"adjacent_even_later": (
relativedelta(months=+2, days=+1),
relativedelta(months=+3),
),
"adjacent_earlier_big": (
relativedelta(years=-2, months=-2),
relativedelta(years=-2),
),
"adjacent_later_big": (
relativedelta(months=+1, days=+1),
relativedelta(years=+2, months=+2),
),
"overlap_normal": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1, years=+1),
),
"overlap_normal_earlier": (
relativedelta(months=-1, days=+14),
relativedelta(days=+14),
),
"overlap_normal_same_year": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1),
),
"overlap_big": (relativedelta(years=+1), relativedelta(years=+3, days=+2)),
"after_big": (
relativedelta(years=+3, months=+1),
relativedelta(years=+3, months=+2),
),
"backwards": (relativedelta(months=+1), relativedelta(days=+1)),
"starts_with_normal": (relativedelta(), relativedelta(days=+14)),
"ends_with_normal": (relativedelta(days=+14), relativedelta(months=+1)),
"current": (relativedelta(weeks=-4), relativedelta(weeks=+4)),
"future": (relativedelta(weeks=+10), relativedelta(weeks=+20)),
"no_end": (relativedelta(), None),
"normal_first_half": (relativedelta(), relativedelta(days=+14)),
}
@property
def now(self):
return self.datetime_now.date()
@property
def datetime_now(self):
return datetime.now(tz=UTC).replace(hour=0, minute=0, second=0, microsecond=0)
def __getattr__(self, name):
if name in self.deltas:
start, end = self.deltas[name]
start = self.now + start
if end is not None:
end = self.now + end
return TaricDateRange(start, end)
raise AttributeError(name)
@classmethod
def short_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-14),
)
@classmethod
def medium_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-1),
)
@classmethod
def short_after(cls, dt):
return TaricDateRange(
dt + relativedelta(days=+14),
dt + relativedelta(months=+1),
)
@classmethod
def short_overlap(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(months=+1),
)
@classmethod
def no_end_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
None,
)
def only_applicable_after(cutoff):
"""
Decorator which asserts that a test fails before a specified cutoff date.
:param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
def decorator(fn):
@wraps(fn)
def do_test(*args, **kwargs):
# test should pass normally
fn(*args, **kwargs)
# test should fail before cutoff
with freeze_time(cutoff + relativedelta(days=-1)):
try:
fn(*args, **kwargs)
except pytest.fail.Exception:
pass
except Exception:
raise
else:
pytest.fail(f"Rule applied before {cutoff:%Y-%m-%d}")
return True
return do_test
return decorator
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
>>> datetime.date(2021, 1, 2),
>>> datetime.date(2022, 3, 4),
>>> )
{
"start_date_0": 1,
"start_date_1": 2,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
| en | 0.801294 | Creates two records using the passed factory that are duplicates of each other and returns the record created last. # allow overriding identifying_fields Creates two records using the passed factory that are not duplicates of each other and returns the record created last. Returns a dict representing the model's data ignoring any automatically set fields and fields with names passed to `ignore`. The returned data will contain the identifying fields for any linked models rather than internal PKs. For example: get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"}) # { # "description": "My sample footnote text", # "described_footnote": { # "footnote_type__footnote_type_id": "FN" # "footnote_id": "123", # }, # } Asserts that every value for every field in the imported model is the same as the data in the expected model. System fields that will change from model to model are not checked. Any field names given to `ignore` will also not be checked. Asserts that every value for every field in the imported models is the same as the data in the expected models, and that the count of both is equal. System fields that will change from model to model are not checked. Any field names given to `ignore` will also not be checked. Yields tuples of (record_code, subrecord_code) Decorator which asserts that a test fails after a specified cutoff date. :param cutoff: A date string, or datetime object before which the test should fail. # test should pass normally # test should fail before cutoff Construct a POST data fragment for the validity period start and end dates of a ValidityPeriodForm from the given date objects, eg: >>> validity_period_post_data( >>> datetime.date(2021, 1, 2), >>> datetime.date(2022, 3, 4), >>> ) { "start_date_0": 1, "start_date_1": 2, "start_date_2": 2021, "end_date_0": 4, "end_date_1": 3, "end_date_2": 2022, } Returns a dictionary of the fields that the form will put onto a page and their current values, taking account of any fields that have sub-fields and hence result in multiple HTML <input> objects. # If the widget can be decompressed, then it is not just a simple # value and has some internal structure. So we need to generate one # form item per decompressed value and append the name with _0, _1, # etc. This mirrors the MultiValueWidget in django/forms/widgets.py. | 1.832593 | 2 |
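A quick aside on the Dates.deltas table above: each entry is simply a pair of
relativedelta offsets that get added to today's date to build a TaricDateRange.
A self-contained sketch of those mechanics using only dateutil (the names below
are illustrative and not part of the test suite):

from datetime import date
from dateutil.relativedelta import relativedelta

today = date.today()
offsets = {
    "normal": (relativedelta(), relativedelta(months=+1)),
    "adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
}
for name, (start, end) in offsets.items():
    # Mirrors Dates.__getattr__: shift "now" by each offset to get a date window.
    print(name, today + start, "->", today + end)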
src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 0 | 606 | <gh_stars>0
'''
Created on 2016-08-10
@author: Administrator
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
from_addr = '<EMAIL>'#input('From: ')
password = input('Password: ')
to_addr = '<EMAIL>'#input('To: ')
smtp_server = 'smtp.163.com'#input('SMTP server: ')
# Send a plain-text email
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# Send an HTML email
# msg = MIMEText('<html><body><h1>Hello</h1>' +
# '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
# '</body></html>', 'html', 'utf-8')
# Send an email with an attachment
# The email message object:
msg = MIMEMultipart()
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
# The email body is a MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# Adding an attachment means attaching a MIMEBase; read an image from the local disk:
with open('D:/pythonWorkspace/pthonDemo/src/com/python/email/test.jpg', 'rb') as f:
# Set the attachment's MIME type and filename (png here):
mime = MIMEBase('image', 'png', filename='test.png')
# Add the required headers:
mime.add_header('Content-Disposition', 'attachment', filename='test.png')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
# Read in the attachment's content:
mime.set_payload(f.read())
# Encode with Base64:
encoders.encode_base64(mime)
# Attach it to the MIMEMultipart:
msg.attach(mime)
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit() | '''
Created on 2016年8月10日
@author: Administrator
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
from_addr = '<EMAIL>'#input('From: ')
password = input('Password: ')
to_addr = '<EMAIL>'#input('To: ')
smtp_server = 'smtp.163.com'#input('SMTP server: ')
# Send a plain-text email
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# Send an HTML email
# msg = MIMEText('<html><body><h1>Hello</h1>' +
# '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
# '</body></html>', 'html', 'utf-8')
# Send an email with an attachment
# The email message object:
msg = MIMEMultipart()
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
# The email body is a MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# Adding an attachment means attaching a MIMEBase; read an image from the local disk:
with open('D:/pythonWorkspace/pthonDemo/src/com/python/email/test.jpg', 'rb') as f:
# Set the attachment's MIME type and filename (png here):
mime = MIMEBase('image', 'png', filename='test.png')
# Add the required headers:
mime.add_header('Content-Disposition', 'attachment', filename='test.png')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
# Read in the attachment's content:
mime.set_payload(f.read())
# Encode with Base64:
encoders.encode_base64(mime)
# Attach it to the MIMEMultipart:
msg.attach(mime)
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit() | zh | 0.611837 | Created on 2016-08-10 @author: Administrator # Send a plain-text email # msg = MIMEText('hello, send by Python...', 'plain', 'utf-8') # Send an HTML email # msg = MIMEText('<html><body><h1>Hello</h1>' + # '<p>send by <a href="http://www.python.org">Python</a>...</p>' + # '</body></html>', 'html', 'utf-8') # Send an email with an attachment # The email message object: # The email body is a MIMEText: # Adding an attachment means attaching a MIMEBase; read an image from the local disk: # Set the attachment's MIME type and filename (png here): # Add the required headers: # Read in the attachment's content: # Encode with Base64: # Attach it to the MIMEMultipart: | 3.05878 | 3
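For comparison, the same "send an image attachment over SMTP" flow can be written
with the modern email.message.EmailMessage API, which builds the MIME structure
and base64 transfer encoding automatically. This is a hedged sketch rather than a
drop-in replacement for the script above; the addresses and file path are
placeholders.

import smtplib
from email.message import EmailMessage

msg = EmailMessage()
msg["From"] = "sender@example.com"        # placeholder address
msg["To"] = "recipient@example.com"       # placeholder address
msg["Subject"] = "Greetings from SMTP"
msg.set_content("send with file...")
with open("test.jpg", "rb") as f:         # placeholder path
    msg.add_attachment(f.read(), maintype="image", subtype="jpeg", filename="test.jpg")
with smtplib.SMTP("smtp.163.com", 25) as server:
    server.set_debuglevel(1)
    server.login("sender@example.com", input("Password: "))
    server.send_message(msg)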
studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py | openforcefield/nistdataselection | 3 | 607 | <filename>studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py<gh_stars>1-10
from evaluator import unit
from evaluator.backends import QueueWorkerResources
from evaluator.backends.dask import DaskLSFBackend
from evaluator.client import ConnectionOptions, EvaluatorClient
from evaluator.datasets import PhysicalPropertyDataSet
from evaluator.forcefield import SmirnoffForceFieldSource
from evaluator.server import EvaluatorServer
from evaluator.utils import setup_timestamp_logging
def main():
setup_timestamp_logging()
# Load in the force field
force_field_path = "openff-1.0.0.offxml"
force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)
# Load in the test set.
data_set = PhysicalPropertyDataSet.from_json("full_set.json")
# Set up a server object to run the calculations using.
working_directory = "working_directory"
# Set up a backend to run the calculations on. This assumes running
# on an HPC resource with the LSF queue system installed.
queue_resources = QueueWorkerResources(
number_of_threads=1,
number_of_gpus=1,
preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
per_thread_memory_limit=5 * unit.gigabyte,
wallclock_time_limit="05:59",
)
worker_script_commands = ["conda activate forcebalance", "module load cuda/10.1"]
calculation_backend = DaskLSFBackend(
minimum_number_of_workers=1,
maximum_number_of_workers=50,
resources_per_worker=queue_resources,
queue_name="gpuqueue",
setup_script_commands=worker_script_commands,
adaptive_interval="1000ms",
)
with calculation_backend:
server = EvaluatorServer(
calculation_backend=calculation_backend,
working_directory=working_directory,
port=8004,
)
with server:
# Request the estimates.
client = EvaluatorClient(ConnectionOptions(server_port=8004))
request, _ = client.request_estimate(
property_set=data_set, force_field_source=force_field_source,
)
# Wait for the results.
results, _ = request.results(True, 5)
results.json(f"results.json")
if __name__ == "__main__":
main()
| <filename>studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py<gh_stars>1-10
from evaluator import unit
from evaluator.backends import QueueWorkerResources
from evaluator.backends.dask import DaskLSFBackend
from evaluator.client import ConnectionOptions, EvaluatorClient
from evaluator.datasets import PhysicalPropertyDataSet
from evaluator.forcefield import SmirnoffForceFieldSource
from evaluator.server import EvaluatorServer
from evaluator.utils import setup_timestamp_logging
def main():
setup_timestamp_logging()
# Load in the force field
force_field_path = "openff-1.0.0.offxml"
force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)
# Load in the test set.
data_set = PhysicalPropertyDataSet.from_json("full_set.json")
# Set up a server object to run the calculations using.
working_directory = "working_directory"
# Set up a backend to run the calculations on. This assumes running
# on an HPC resource with the LSF queue system installed.
queue_resources = QueueWorkerResources(
number_of_threads=1,
number_of_gpus=1,
preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
per_thread_memory_limit=5 * unit.gigabyte,
wallclock_time_limit="05:59",
)
worker_script_commands = ["conda activate forcebalance", "module load cuda/10.1"]
calculation_backend = DaskLSFBackend(
minimum_number_of_workers=1,
maximum_number_of_workers=50,
resources_per_worker=queue_resources,
queue_name="gpuqueue",
setup_script_commands=worker_script_commands,
adaptive_interval="1000ms",
)
with calculation_backend:
server = EvaluatorServer(
calculation_backend=calculation_backend,
working_directory=working_directory,
port=8004,
)
with server:
# Request the estimates.
client = EvaluatorClient(ConnectionOptions(server_port=8004))
request, _ = client.request_estimate(
property_set=data_set, force_field_source=force_field_source,
)
# Wait for the results.
results, _ = request.results(True, 5)
results.json(f"results.json")
if __name__ == "__main__":
main()
| en | 0.817711 | # Load in the force field # Load in the test set. # Set up a server object to run the calculations using. # Set up a backend to run the calculations on. This assume running # on a HPC resources with the LSF queue system installed. # Request the estimates. # Wait for the results. | 2.094242 | 2 |
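One robustness note for scripts like the one above: request.results(...) returns a
pair, and the second element (bound to `_` in the script) is assumed here to carry
any server-side error. That contract is an assumption based only on how the call
is used in this file, so treat the sketch below as illustrative.

def collect_results(request, path="results.json", polling_interval=5):
    # `request` is the object returned by client.request_estimate(...) above.
    results, error = request.results(True, polling_interval)
    if error is not None:
        # Surface the failure instead of silently writing an empty results file.
        raise RuntimeError(f"estimation failed: {error}")
    results.json(path)
    return results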
nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py | motional/nuplan-devkit | 128 | 608 | import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestProfileIDM(unittest.TestCase):
"""
Profiling test for IDM agents.
"""
def setUp(self) -> None:
"""
Inherited, see super class.
"""
self.n_repeat_trials = 1
self.display_results = True
self.scenario = get_test_nuplan_scenario()
def test_profile_idm_agent_observation(self) -> None:
"""Profile IDMAgents."""
profiler = Profiler(interval=0.0001)
profiler.start()
# How many times to repeat runtime test
for _ in range(self.n_repeat_trials):
observation = IDMAgents(
target_velocity=10,
min_gap_to_lead_agent=0.5,
headway_time=1.5,
accel_max=1.0,
decel_max=2.0,
scenario=self.scenario,
)
for step in range(self.scenario.get_number_of_iterations() - 1):
iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step)
next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1)
buffer = SimulationHistoryBuffer.initialize_from_list(
1,
[self.scenario.get_ego_state_at_iteration(step)],
[self.scenario.get_tracked_objects_at_iteration(step)],
next_iteration.time_point.time_s - iteration.time_point.time_s,
)
observation.update_observation(iteration, next_iteration, buffer)
profiler.stop()
if self.display_results:
logger.info(profiler.output_text(unicode=True, color=True))
if __name__ == "__main__":
unittest.main()
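# Editor's sketch (not part of the original test): the start/stop pattern used in
# test_profile_idm_agent_observation can be factored into a small context manager
# so other tests can profile a block the same way. It relies only on names already
# imported in this module plus contextlib.
import contextlib

@contextlib.contextmanager
def profile_block(interval: float = 0.0001, display: bool = True):
    profiler = Profiler(interval=interval)
    profiler.start()
    try:
        yield profiler
    finally:
        profiler.stop()
        if display:
            logger.info(profiler.output_text(unicode=True, color=True))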
| import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestProfileIDM(unittest.TestCase):
"""
Profiling test for IDM agents.
"""
def setUp(self) -> None:
"""
Inherited, see super class.
"""
self.n_repeat_trials = 1
self.display_results = True
self.scenario = get_test_nuplan_scenario()
def test_profile_idm_agent_observation(self) -> None:
"""Profile IDMAgents."""
profiler = Profiler(interval=0.0001)
profiler.start()
# How many times to repeat runtime test
for _ in range(self.n_repeat_trials):
observation = IDMAgents(
target_velocity=10,
min_gap_to_lead_agent=0.5,
headway_time=1.5,
accel_max=1.0,
decel_max=2.0,
scenario=self.scenario,
)
for step in range(self.scenario.get_number_of_iterations() - 1):
iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step)
next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1)
buffer = SimulationHistoryBuffer.initialize_from_list(
1,
[self.scenario.get_ego_state_at_iteration(step)],
[self.scenario.get_tracked_objects_at_iteration(step)],
next_iteration.time_point.time_s - iteration.time_point.time_s,
)
observation.update_observation(iteration, next_iteration, buffer)
profiler.stop()
if self.display_results:
logger.info(profiler.output_text(unicode=True, color=True))
if __name__ == "__main__":
unittest.main()
| en | 0.812625 | Profiling test for IDM agents. Inherited, see super class. Profile IDMAgents. # How many times to repeat runtime test | 2.352455 | 2 |
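The loop above rebuilds a one-element SimulationHistoryBuffer at every step. As a
generic illustration of the rolling-history idea (independent of the nuPlan API,
so the class below is purely hypothetical), a bounded deque gives the same
"keep only the most recent entries" behaviour:

from collections import deque

class RollingHistory:
    """Keep the most recent `maxlen` (state, observation) pairs."""

    def __init__(self, maxlen: int = 10):
        self._buffer = deque(maxlen=maxlen)

    def append(self, state, observation):
        self._buffer.append((state, observation))

    def latest(self):
        return self._buffer[-1]

history = RollingHistory(maxlen=2)
history.append("ego_state_0", "tracks_0")
history.append("ego_state_1", "tracks_1")
history.append("ego_state_2", "tracks_2")  # evicts the oldest entry
print(history.latest())                    # ('ego_state_2', 'tracks_2')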
baselines/ddpg/ddpg.py | RDaneelOlivav/baselines | 11 | 609 | <filename>baselines/ddpg/ddpg.py
import os
import os.path as osp
import time
from collections import deque
import pickle
from baselines.ddpg.ddpg_learner import DDPG
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from baselines.common import set_global_seeds
from baselines import logger
import tensorflow as tf
import numpy as np
try:
from mpi4py import MPI
except ImportError:
MPI = None
def learn(network, env,
seed=None,
total_timesteps=None,
nb_epochs=None, # with default settings, perform 1M steps total
nb_epoch_cycles=20,
nb_rollout_steps=100,
reward_scale=1.0,
render=False,
render_eval=False,
noise_type='adaptive-param_0.2',
normalize_returns=False,
normalize_observations=True,
critic_l2_reg=1e-2,
actor_lr=1e-4,
critic_lr=1e-3,
popart=False,
gamma=0.99,
clip_norm=None,
nb_train_steps=50, # per epoch cycle and MPI worker,
nb_eval_steps=100,
batch_size=64, # per MPI worker
tau=0.01,
eval_env=None,
param_noise_adaption_interval=50,
load_path=None,
**network_kwargs):
set_global_seeds(seed)
if total_timesteps is not None:
assert nb_epochs is None
nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)
else:
nb_epochs = 500
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
nb_actions = env.action_space.shape[-1]
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
critic = Critic(nb_actions, ob_shape=env.observation_space.shape, network=network, **network_kwargs)
actor = Actor(nb_actions, ob_shape=env.observation_space.shape, network=network, **network_kwargs)
action_noise = None
param_noise = None
if noise_type is not None:
for current_noise_type in noise_type.split(','):
current_noise_type = current_noise_type.strip()
if current_noise_type == 'none':
pass
elif 'adaptive-param' in current_noise_type:
_, stddev = current_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
elif 'normal' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
elif 'ou' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
else:
raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
max_action = env.action_space.high
logger.info('scaling actions by {} before executing in env'.format(max_action))
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
if load_path is not None:
load_path = osp.expanduser(load_path)
ckpt = tf.train.Checkpoint(model=agent)
manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
ckpt.restore(manager.latest_checkpoint)
print("Restoring from {}".format(manager.latest_checkpoint))
eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
# Prepare everything.
agent.initialize()
agent.reset()
obs = env.reset()
if eval_env is not None:
eval_obs = eval_env.reset()
nenvs = obs.shape[0]
episode_reward = np.zeros(nenvs, dtype = np.float32) #vector
episode_step = np.zeros(nenvs, dtype = int) # vector
episodes = 0 #scalar
t = 0 # scalar
epoch = 0
start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
if nenvs > 1:
# if simulating multiple envs in parallel, impossible to reset agent at the end of the episode in each
# of the environments, so resetting here instead
agent.reset()
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q, _, _ = agent.step(tf.constant(obs), apply_noise=True, compute_Q=True)
action, q = action.numpy(), q.numpy()
# Execute next action.
if rank == 0 and render:
env.render()
# max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch
new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
# note these outputs are batched from vecenv
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done) #the batched data will be unrolled in memory.py's append.
obs = new_obs
for d in range(len(done)):
if done[d]:
# Episode done.
epoch_episode_rewards.append(episode_reward[d])
episode_rewards_history.append(episode_reward[d])
epoch_episode_steps.append(episode_step[d])
episode_reward[d] = 0.
episode_step[d] = 0
epoch_episodes += 1
episodes += 1
if nenvs == 1:
agent.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
batch = agent.memory.sample(batch_size=batch_size)
obs0 = tf.constant(batch['obs0'])
distance = agent.adapt_param_noise(obs0)
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
# Evaluate.
eval_episode_rewards = []
eval_qs = []
if eval_env is not None:
nenvs_eval = eval_obs.shape[0]
eval_episode_reward = np.zeros(nenvs_eval, dtype = np.float32)
for t_rollout in range(nb_eval_steps):
eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)
eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
if render_eval:
eval_env.render()
eval_episode_reward += eval_r
eval_qs.append(eval_q)
for d in range(len(eval_done)):
if eval_done[d]:
eval_episode_rewards.append(eval_episode_reward[d])
eval_episode_rewards_history.append(eval_episode_reward[d])
eval_episode_reward[d] = 0.0
if MPI is not None:
mpi_size = MPI.COMM_WORLD.Get_size()
else:
mpi_size = 1
# Log stats.
# XXX shouldn't call np.mean on variable length lists
duration = time.time() - start_time
stats = agent.get_stats()
combined_stats = stats.copy()
combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
combined_stats['rollout/return_std'] = np.std(epoch_episode_rewards)
combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
combined_stats['rollout/return_history_std'] = np.std(episode_rewards_history)
combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
combined_stats['total/duration'] = duration
combined_stats['total/steps_per_second'] = float(t) / float(duration)
combined_stats['total/episodes'] = episodes
combined_stats['rollout/episodes'] = epoch_episodes
combined_stats['rollout/actions_std'] = np.std(epoch_actions)
# Evaluation statistics.
if eval_env is not None:
combined_stats['eval/return'] = eval_episode_rewards
combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
combined_stats['eval/Q'] = eval_qs
combined_stats['eval/episodes'] = len(eval_episode_rewards)
def as_scalar(x):
if isinstance(x, np.ndarray):
assert x.size == 1
return x[0]
elif np.isscalar(x):
return x
else:
raise ValueError('expected scalar, got %s'%x)
combined_stats_sums = np.array([ np.array(x).flatten()[0] for x in combined_stats.values()])
if MPI is not None:
combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums)
combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)}
# Total statistics.
combined_stats['total/epochs'] = epoch + 1
combined_stats['total/steps'] = t
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
if rank == 0:
logger.dump_tabular()
logger.info('')
logdir = logger.get_dir()
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
return agent
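# Editor's sketch (illustrative, not the baselines implementation): the
# 'ou_<stddev>' noise_type option above selects an Ornstein-Uhlenbeck process for
# action exploration. A minimal version of that process, assuming the common
# discretisation x_{t+1} = x_t + theta * (mu - x_t) * dt + sigma * sqrt(dt) * N(0, 1):
class SimpleOUNoise:
    def __init__(self, mu, sigma, theta=0.15, dt=1e-2):
        self.mu, self.sigma, self.theta, self.dt = mu, sigma, theta, dt
        self.x = np.zeros_like(mu)

    def reset(self):
        self.x = np.zeros_like(self.mu)

    def __call__(self):
        # Mean-reverting random walk: drifts toward mu, jitters by sigma.
        self.x = (self.x
                  + self.theta * (self.mu - self.x) * self.dt
                  + self.sigma * np.sqrt(self.dt) * np.random.standard_normal(np.shape(self.mu)))
        return self.x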
| <filename>baselines/ddpg/ddpg.py
import os
import os.path as osp
import time
from collections import deque
import pickle
from baselines.ddpg.ddpg_learner import DDPG
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from baselines.common import set_global_seeds
from baselines import logger
import tensorflow as tf
import numpy as np
try:
from mpi4py import MPI
except ImportError:
MPI = None
def learn(network, env,
seed=None,
total_timesteps=None,
nb_epochs=None, # with default settings, perform 1M steps total
nb_epoch_cycles=20,
nb_rollout_steps=100,
reward_scale=1.0,
render=False,
render_eval=False,
noise_type='adaptive-param_0.2',
normalize_returns=False,
normalize_observations=True,
critic_l2_reg=1e-2,
actor_lr=1e-4,
critic_lr=1e-3,
popart=False,
gamma=0.99,
clip_norm=None,
nb_train_steps=50, # per epoch cycle and MPI worker,
nb_eval_steps=100,
batch_size=64, # per MPI worker
tau=0.01,
eval_env=None,
param_noise_adaption_interval=50,
load_path=None,
**network_kwargs):
set_global_seeds(seed)
if total_timesteps is not None:
assert nb_epochs is None
nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)
else:
nb_epochs = 500
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
nb_actions = env.action_space.shape[-1]
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
critic = Critic(nb_actions, ob_shape=env.observation_space.shape, network=network, **network_kwargs)
actor = Actor(nb_actions, ob_shape=env.observation_space.shape, network=network, **network_kwargs)
action_noise = None
param_noise = None
if noise_type is not None:
for current_noise_type in noise_type.split(','):
current_noise_type = current_noise_type.strip()
if current_noise_type == 'none':
pass
elif 'adaptive-param' in current_noise_type:
_, stddev = current_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
elif 'normal' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
elif 'ou' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
else:
raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
max_action = env.action_space.high
logger.info('scaling actions by {} before executing in env'.format(max_action))
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
if load_path is not None:
load_path = osp.expanduser(load_path)
ckpt = tf.train.Checkpoint(model=agent)
manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
ckpt.restore(manager.latest_checkpoint)
print("Restoring from {}".format(manager.latest_checkpoint))
eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
# Prepare everything.
agent.initialize()
agent.reset()
obs = env.reset()
if eval_env is not None:
eval_obs = eval_env.reset()
nenvs = obs.shape[0]
episode_reward = np.zeros(nenvs, dtype = np.float32) #vector
episode_step = np.zeros(nenvs, dtype = int) # vector
episodes = 0 #scalar
t = 0 # scalar
epoch = 0
start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
if nenvs > 1:
# if simulating multiple envs in parallel, impossible to reset agent at the end of the episode in each
# of the environments, so resetting here instead
agent.reset()
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q, _, _ = agent.step(tf.constant(obs), apply_noise=True, compute_Q=True)
action, q = action.numpy(), q.numpy()
# Execute next action.
if rank == 0 and render:
env.render()
# max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch
new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
# note these outputs are batched from vecenv
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done) #the batched data will be unrolled in memory.py's append.
obs = new_obs
for d in range(len(done)):
if done[d]:
# Episode done.
epoch_episode_rewards.append(episode_reward[d])
episode_rewards_history.append(episode_reward[d])
epoch_episode_steps.append(episode_step[d])
episode_reward[d] = 0.
episode_step[d] = 0
epoch_episodes += 1
episodes += 1
if nenvs == 1:
agent.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
batch = agent.memory.sample(batch_size=batch_size)
obs0 = tf.constant(batch['obs0'])
distance = agent.adapt_param_noise(obs0)
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
# Evaluate.
eval_episode_rewards = []
eval_qs = []
if eval_env is not None:
nenvs_eval = eval_obs.shape[0]
eval_episode_reward = np.zeros(nenvs_eval, dtype = np.float32)
for t_rollout in range(nb_eval_steps):
eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)
eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
if render_eval:
eval_env.render()
eval_episode_reward += eval_r
eval_qs.append(eval_q)
for d in range(len(eval_done)):
if eval_done[d]:
eval_episode_rewards.append(eval_episode_reward[d])
eval_episode_rewards_history.append(eval_episode_reward[d])
eval_episode_reward[d] = 0.0
if MPI is not None:
mpi_size = MPI.COMM_WORLD.Get_size()
else:
mpi_size = 1
# Log stats.
# XXX shouldn't call np.mean on variable length lists
duration = time.time() - start_time
stats = agent.get_stats()
combined_stats = stats.copy()
combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
combined_stats['rollout/return_std'] = np.std(epoch_episode_rewards)
combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
combined_stats['rollout/return_history_std'] = np.std(episode_rewards_history)
combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
combined_stats['total/duration'] = duration
combined_stats['total/steps_per_second'] = float(t) / float(duration)
combined_stats['total/episodes'] = episodes
combined_stats['rollout/episodes'] = epoch_episodes
combined_stats['rollout/actions_std'] = np.std(epoch_actions)
# Evaluation statistics.
if eval_env is not None:
combined_stats['eval/return'] = eval_episode_rewards
combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
combined_stats['eval/Q'] = eval_qs
combined_stats['eval/episodes'] = len(eval_episode_rewards)
def as_scalar(x):
if isinstance(x, np.ndarray):
assert x.size == 1
return x[0]
elif np.isscalar(x):
return x
else:
raise ValueError('expected scalar, got %s'%x)
combined_stats_sums = np.array([ np.array(x).flatten()[0] for x in combined_stats.values()])
if MPI is not None:
combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums)
combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)}
# Total statistics.
combined_stats['total/epochs'] = epoch + 1
combined_stats['total/steps'] = t
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
if rank == 0:
logger.dump_tabular()
logger.info('')
logdir = logger.get_dir()
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
return agent
| en | 0.891972 | # with default settings, perform 1M steps total # per epoch cycle and MPI worker, # per MPI worker # we assume symmetric actions. # Prepare everything. #vector # vector #scalar # scalar # Perform rollouts. # if simulating multiple envs in parallel, impossible to reset agent at the end of the episode in each # of the environments, so resetting here instead # Predict next action. # Execute next action. # max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) # note these outputs are batched from vecenv # Book-keeping. #the batched data will be unrolled in memory.py's append. # Episode done. # Train. # Adapt param noise, if necessary. # Evaluate. # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) # Log stats. # XXX shouldn't call np.mean on variable length lists # Evaluation statistics. # Total statistics. | 1.829489 | 2 |
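A note on the tau parameter threaded through learn() above: DDPG maintains target
networks that track the online networks through a soft update, conventionally
target = tau * online + (1 - tau) * target. A tiny standalone NumPy illustration
(not baselines code) of why a small tau such as 0.01 makes the target weights lag
smoothly behind the online weights:

import numpy as np

tau = 0.01
online = np.array([1.0, -2.0, 0.5])   # stand-in for online network weights
target = np.zeros(3)                  # stand-in for target network weights
for _ in range(200):
    target = tau * online + (1.0 - tau) * target
print(target)  # approaches `online` geometrically; roughly 87% of the way after 200 steps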
footprints/transaction_details.py | enwawerueli/footprints | 1 | 610 | <filename>footprints/transaction_details.py
import os
from datetime import datetime
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtPrintSupport import QPrinter, QPrintDialog
from jinja2 import TemplateNotFound
from .ui.ui_transaction_details import Ui_TransactionDetails
from .ui import images_rc
from . import jinja_env
from .exceptions import PrinterError
class TransactionDetails(QDialog, Ui_TransactionDetails):
def __init__(self, transaction, parent=None, *args, **kwargs):
QDialog.__init__(self, parent, *args, **kwargs)
self._transaction = transaction
self.setupUi(self)
self.setWindowTitle(QApplication.applicationName())
self.print_pb.setIcon(QIcon.fromTheme('document-print-symbolic', QIcon(':/icons/print')))
try:
trans = jinja_env.get_template('trans.jinja2.html')
except TemplateNotFound:
pass
else:
html = trans.render(transaction=self._transaction, standalone=True)
self.statement_tb.setHtml(html)
self.print_pb.clicked.connect(self.print_statement)
def print_statement(self):
printer = QPrinter()
printer.setOutputFileName(os.path.join(
os.environ.get('HOME'), '%s_%s.pdf' %
(self._transaction.created_at.strftime('%Y%m%d'), self._transaction.transaction_code)))
if QPrintDialog(printer, self.parentWidget()).exec_() != QDialog.Accepted:
return None
try:
trans = jinja_env.get_template('trans.jinja2.html')
except TemplateNotFound as e:
raise PrinterError('Printer data source unavailable') from e
html = trans.render(transaction=self._transaction, printed_at=datetime.now().strftime('%d/%m/%Y, %I:%M:%S %p'))
doc = QTextDocument(self)
doc.setHtml(html)
doc.print_(printer)
return None
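# Editor's sketch (hypothetical helper, not part of the original dialog): the same
# render-and-print pipeline can write a PDF without showing a QPrintDialog, which
# is handy for automated exports. It assumes the stock Qt QPrinter.PdfFormat enum
# and that a QApplication is already running, as it is in this app.
def export_statement_pdf(html: str, path: str) -> None:
    printer = QPrinter()
    printer.setOutputFormat(QPrinter.PdfFormat)
    printer.setOutputFileName(path)
    doc = QTextDocument()
    doc.setHtml(html)
    doc.print_(printer)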
| <filename>footprints/transaction_details.py
import os
from datetime import datetime
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtPrintSupport import QPrinter, QPrintDialog
from jinja2 import TemplateNotFound
from .ui.ui_transaction_details import Ui_TransactionDetails
from .ui import images_rc
from . import jinja_env
from .exceptions import PrinterError
class TransactionDetails(QDialog, Ui_TransactionDetails):
def __init__(self, transaction, parent=None, *args, **kwargs):
QDialog.__init__(self, parent, *args, **kwargs)
self._transaction = transaction
self.setupUi(self)
self.setWindowTitle(QApplication.applicationName())
self.print_pb.setIcon(QIcon.fromTheme('document-print-symbolic', QIcon(':/icons/print')))
try:
trans = jinja_env.get_template('trans.jinja2.html')
except TemplateNotFound:
pass
else:
html = trans.render(transaction=self._transaction, standalone=True)
self.statement_tb.setHtml(html)
self.print_pb.clicked.connect(self.print_statement)
def print_statement(self):
printer = QPrinter()
printer.setOutputFileName(os.path.join(
os.environ.get('HOME'), '%s_%s.pdf' %
(self._transaction.created_at.strftime('%Y%m%d'), self._transaction.transaction_code)))
if QPrintDialog(printer, self.parentWidget()).exec_() != QDialog.Accepted:
return None
try:
trans = jinja_env.get_template('trans.jinja2.html')
except TemplateNotFound as e:
raise PrinterError('Printer data source unavailable') from e
html = trans.render(transaction=self._transaction, printed_at=datetime.now().strftime('%d/%m/%Y, %I:%M:%S %p'))
doc = QTextDocument(self)
doc.setHtml(html)
doc.print_(printer)
return None
| none | 1 | 1.964325 | 2 |
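The dialog above pulls its templates from a shared jinja_env and tolerates
TemplateNotFound. That environment is not shown in this file; a conventional way
to build one for a package of HTML templates looks like the sketch below. The
package and directory names are placeholders, so treat this as an assumption
about the project layout rather than its actual configuration.

from jinja2 import Environment, PackageLoader, TemplateNotFound, select_autoescape

jinja_env = Environment(
    loader=PackageLoader("footprints", "templates"),   # placeholder package/dir
    autoescape=select_autoescape(["html", "xml"]),
)

try:
    template = jinja_env.get_template("trans.jinja2.html")
except TemplateNotFound:
    template = None  # degrade gracefully, as the dialog above does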
|
yt/units/yt_array.py | FeiLi5/git-github.com-yt-project-yt | 0 | 611 | """
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}
# redefine this here to avoid a circular import from yt.funcs
def iterable(obj):
try: len(obj)
except: return False
return True
def return_arr(func):
@wraps(func)
def wrapped(*args, **kwargs):
ret, units = func(*args, **kwargs)
if ret.shape == ():
return YTQuantity(ret, units)
else:
# This could be a subclass, so don't call YTArray directly.
return type(args[0])(ret, units)
return wrapped
@lru_cache(maxsize=128, typed=False)
def sqrt_unit(unit):
return unit**0.5
@lru_cache(maxsize=128, typed=False)
def multiply_units(unit1, unit2):
return unit1 * unit2
def preserve_units(unit1, unit2=None):
return unit1
@lru_cache(maxsize=128, typed=False)
def power_unit(unit, power):
return unit**power
@lru_cache(maxsize=128, typed=False)
def square_unit(unit):
return unit*unit
@lru_cache(maxsize=128, typed=False)
def divide_units(unit1, unit2):
return unit1/unit2
@lru_cache(maxsize=128, typed=False)
def reciprocal_unit(unit):
return unit**-1
def passthrough_unit(unit, unit2=None):
return unit
def return_without_unit(unit, unit2=None):
return None
def arctan2_unit(unit1, unit2):
return NULL_UNIT
def comparison_unit(unit1, unit2=None):
return None
def invert_units(unit):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
inp = inputs[0]
u = getattr(inp, 'units', None)
if u is None:
u = NULL_UNIT
if u.dimensions is angle and ufunc in trigonometric_operators:
inp = inp.in_units('radian').v
if out_arr is not None:
out_arr = ufunc(inp).view(np.ndarray)
return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
inp1 = coerce_iterable_units(inputs[0])
inp2 = coerce_iterable_units(inputs[1])
unit1 = getattr(inp1, 'units', None)
unit2 = getattr(inp2, 'units', None)
ret_class = get_binary_op_return_class(type(inp1), type(inp2))
if unit1 is None:
unit1 = Unit(registry=getattr(unit2, 'registry', None))
if unit2 is None and ufunc is not power:
unit2 = Unit(registry=getattr(unit1, 'registry', None))
elif ufunc is power:
unit2 = inp2
if isinstance(unit2, np.ndarray):
if isinstance(unit2, YTArray):
if unit2.units.is_dimensionless:
pass
else:
raise YTUnitOperationError(ufunc, unit1, unit2)
unit2 = 1.0
return (inp1, inp2), (unit1, unit2), ret_class
def handle_preserve_units(inps, units, ufunc, ret_class):
if units[0] != units[1]:
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
else:
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
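# If a multiply/divide produced a dimensionless unit with a non-unity scale
# factor (e.g. km/m), fold that factor into the array values and return a
# clean dimensionless unit.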
def handle_multiply_divide_units(unit, units, out, out_arr):
if unit.is_dimensionless and unit.base_value != 1.0:
if not units[0].is_dimensionless:
if units[0].dimensions == units[1].dimensions:
out_arr = np.multiply(out_arr.view(np.ndarray),
unit.base_value, out=out)
unit = Unit(registry=unit.registry)
return out, out_arr, unit
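# Turn a plain iterable containing YTArrays into a single YTArray (all
# elements must share the same units); anything else passes through unchanged.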
def coerce_iterable_units(input_object):
if isinstance(input_object, np.ndarray):
return input_object
if iterable(input_object):
if any([isinstance(o, YTArray) for o in input_object]):
            ff = getattr(input_object[0], 'units', NULL_UNIT)
if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
raise YTIterableUnitCoercionError(input_object)
# This will create a copy of the data in the iterable.
return YTArray(input_object)
return input_object
else:
return input_object
def sanitize_units_mul(this_object, other_object):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# If the other object is a YTArray and has the same dimensions as the object
# under consideration, convert so we don't mix units with the same
# dimensions.
if isinstance(ret, YTArray):
if inp.units.same_dimensions_as(ret.units):
            ret = ret.in_units(inp.units)
return ret
def sanitize_units_add(this_object, other_object, op_string):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# Make sure the other object is a YTArray before we use the `units`
# attribute.
if isinstance(ret, YTArray):
if not inp.units.same_dimensions_as(ret.units):
# handle special case of adding or subtracting with zero or
# array filled with zero
if not np.any(other_object):
return ret.view(np.ndarray)
elif not np.any(this_object):
return ret
raise YTUnitOperationError(op_string, inp.units, ret.units)
ret = ret.in_units(inp.units)
else:
# If the other object is not a YTArray, then one of the arrays must be
# dimensionless or filled with zeros
if not inp.units.is_dimensionless and np.any(ret):
raise YTUnitOperationError(op_string, inp.units, dimensionless)
return ret
def validate_comparison_units(this, other, op_string):
# Check that other is a YTArray.
if hasattr(other, 'units'):
if this.units.expr is other.units.expr:
if this.units.base_value == other.units.base_value:
return other
if not this.units.same_dimensions_as(other.units):
raise YTUnitOperationError(op_string, this.units, other.units)
return other.in_units(this.units)
return other
@lru_cache(maxsize=128, typed=False)
def _unit_repr_check_same(my_units, other_units):
"""
Takes a Unit object, or string of known unit symbol, and check that it
is compatible with this quantity. Returns Unit object.
"""
# let Unit() handle units arg if it's not already a Unit obj.
if not isinstance(other_units, Unit):
other_units = Unit(other_units, registry=my_units.registry)
equiv_dims = em_dimensions.get(my_units.dimensions, None)
if equiv_dims == other_units.dimensions:
if current_mks in equiv_dims.free_symbols:
base = "SI"
else:
base = "CGS"
raise YTEquivalentDimsError(my_units, other_units, base)
if not my_units.same_dimensions_as(other_units):
raise YTUnitConversionError(
my_units, my_units.dimensions, other_units, other_units.dimensions)
return other_units
unary_operators = (
negative, absolute, rint, sign, conj, exp, exp2, log, log2,
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
binary_operators = (
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
left_shift, right_shift, greater, greater_equal, less, less_equal,
not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
trigonometric_operators = (
sin, cos, tan,
)
class YTArray(np.ndarray):
"""
An ndarray subclass that attaches a symbolic unit object to the array data.
Parameters
----------
input_array : :obj:`!iterable`
A tuple, list, or array to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the array. Powers must be specified using python
syntax (cm**3, not cm^3).
registry : ~yt.units.unit_registry.UnitRegistry
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data. Defaults to the dtype of the input data,
or, if none is found, uses np.float64
    bypass_validation : boolean
        If True, all input validation is skipped. Using this option may produce
        corrupted, invalid units or array data, but can lead to significant
        speedups in cases where the input validation logic adds significant
        overhead. If set, input_units *must* be a valid unit object. Defaults
        to False.
Examples
--------
>>> from yt import YTArray
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = YTArray([4, 5, 6], 'm')
>>> a + b
YTArray([ 401., 502., 603.]) cm
>>> b + a
YTArray([ 4.01, 5.02, 6.03]) m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTArray(np.arange(8) - 4, 'g/cm**3')
>>> np.abs(a)
YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3
and strip them when it would be annoying to deal with them.
>>> np.log10(a)
array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999,
0.69897 , 0.77815125, 0.84509804])
YTArray is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.arr(np.ones(5), 'code_length')
>>> a.in_cgs()
YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24,
3.08600000e+24, 3.08600000e+24]) cm
This is equivalent to:
>>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
_ufunc_registry = {
add: preserve_units,
subtract: preserve_units,
multiply: multiply_units,
divide: divide_units,
logaddexp: return_without_unit,
logaddexp2: return_without_unit,
true_divide: divide_units,
floor_divide: divide_units,
negative: passthrough_unit,
power: power_unit,
remainder: preserve_units,
mod: preserve_units,
fmod: preserve_units,
absolute: passthrough_unit,
fabs: passthrough_unit,
rint: return_without_unit,
sign: return_without_unit,
conj: passthrough_unit,
exp: return_without_unit,
exp2: return_without_unit,
log: return_without_unit,
log2: return_without_unit,
log10: return_without_unit,
expm1: return_without_unit,
log1p: return_without_unit,
sqrt: sqrt_unit,
square: square_unit,
reciprocal: reciprocal_unit,
sin: return_without_unit,
cos: return_without_unit,
tan: return_without_unit,
sinh: return_without_unit,
cosh: return_without_unit,
tanh: return_without_unit,
arcsin: return_without_unit,
arccos: return_without_unit,
arctan: return_without_unit,
arctan2: arctan2_unit,
arcsinh: return_without_unit,
arccosh: return_without_unit,
arctanh: return_without_unit,
hypot: preserve_units,
deg2rad: return_without_unit,
rad2deg: return_without_unit,
bitwise_and: bitop_units,
bitwise_or: bitop_units,
bitwise_xor: bitop_units,
invert: invert_units,
left_shift: bitop_units,
right_shift: bitop_units,
greater: comparison_unit,
greater_equal: comparison_unit,
less: comparison_unit,
less_equal: comparison_unit,
not_equal: comparison_unit,
equal: comparison_unit,
logical_and: comparison_unit,
logical_or: comparison_unit,
logical_xor: comparison_unit,
logical_not: return_without_unit,
maximum: preserve_units,
minimum: preserve_units,
fmax: preserve_units,
fmin: preserve_units,
isreal: return_without_unit,
iscomplex: return_without_unit,
isfinite: return_without_unit,
isinf: return_without_unit,
isnan: return_without_unit,
signbit: return_without_unit,
copysign: passthrough_unit,
nextafter: preserve_units,
modf: passthrough_unit,
ldexp: bitop_units,
frexp: return_without_unit,
floor: passthrough_unit,
ceil: passthrough_unit,
trunc: passthrough_unit,
spacing: passthrough_unit,
positive: passthrough_unit,
divmod_: passthrough_unit,
isnat: return_without_unit,
heaviside: preserve_units,
}
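    # Ensure numpy prefers YTArray's operator implementations over plain
    # ndarray's in mixed binary operations.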
__array_priority__ = 2.0
def __new__(cls, input_array, input_units=None, registry=None, dtype=None,
bypass_validation=False):
if dtype is None:
dtype = getattr(input_array, 'dtype', np.float64)
if bypass_validation is True:
obj = np.asarray(input_array, dtype=dtype).view(cls)
obj.units = input_units
if registry is not None:
obj.units.registry = registry
return obj
if input_array is NotImplemented:
return input_array.view(cls)
if registry is None and isinstance(input_units, (str, bytes)):
if input_units.startswith('code_'):
raise UnitParseError(
"Code units used without referring to a dataset. \n"
"Perhaps you meant to do something like this instead: \n"
"ds.arr(%s, \"%s\")" % (input_array, input_units)
)
if isinstance(input_array, YTArray):
ret = input_array.view(cls)
if input_units is None:
if registry is None:
ret.units = input_array.units
else:
units = Unit(str(input_array.units), registry=registry)
ret.units = units
elif isinstance(input_units, Unit):
ret.units = input_units
else:
ret.units = Unit(input_units, registry=registry)
return ret
elif isinstance(input_array, np.ndarray):
pass
elif iterable(input_array) and input_array:
if isinstance(input_array[0], YTArray):
return YTArray(np.array(input_array, dtype=dtype),
input_array[0].units, registry=registry)
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array, dtype=dtype).view(cls)
# Check units type
if input_units is None:
# Nothing provided. Make dimensionless...
units = Unit()
elif isinstance(input_units, Unit):
if registry and registry is not input_units.registry:
units = Unit(str(input_units), registry=registry)
else:
units = input_units
else:
# units kwarg set, but it's not a Unit object.
# don't handle all the cases here, let the Unit class handle if
# it's a str.
units = Unit(input_units, registry=registry)
# Attach the units
obj.units = units
return obj
    def __repr__(self):
        """
        Return the array repr with the units appended.
        """
        return super(YTArray, self).__repr__() + ' ' + self.units.__repr__()
    def __str__(self):
        """
        Return the array values followed by the units as a string.
        """
        return str(self.view(np.ndarray)) + ' ' + str(self.units)
#
# Start unit conversion methods
#
def convert_to_units(self, units):
"""
Convert the array and units to the given units.
Parameters
----------
units : Unit object or str
The units you want to convert to.
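        Examples
        --------
        >>> a = YTArray([1, 2, 3], 'cm')
        >>> a = a.convert_to_units('m')  # converts in place and returns self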
"""
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
self.units = new_units
values = self.d
values *= conversion_factor
if offset:
np.subtract(self, offset*self.uq, self)
return self
def convert_to_base(self, unit_system="cgs"):
"""
Convert the array and units to the equivalent base units in
the specified unit system.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E.convert_to_base(unit_system="galactic")
"""
return self.convert_to_units(self.units.get_base_equivalent(unit_system))
def convert_to_cgs(self):
"""
Convert the array and units to the equivalent cgs units.
"""
return self.convert_to_units(self.units.get_cgs_equivalent())
def convert_to_mks(self):
"""
Convert the array and units to the equivalent mks units.
"""
return self.convert_to_units(self.units.get_mks_equivalent())
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
YTArray
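        Examples
        --------
        >>> a = YTArray([1, 2, 3], 'cm')
        >>> b = a.in_units('m')  # `a` itself is left unchanged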
"""
if equivalence is None:
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
new_array = type(self)(self.ndview * conversion_factor, new_units)
if offset:
np.subtract(new_array, offset*new_array.uq, new_array)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs)
def to(self, units, equivalence=None, **kwargs):
"""
An alias for YTArray.in_units().
See the docstrings of that function for details.
"""
return self.in_units(units, equivalence=equivalence, **kwargs)
def to_value(self, units=None, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it without units. Output is therefore a
bare NumPy array.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string, optional
The units you want to get the bare quantity in. If not
specified, the value will be returned in the current units.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
NumPy array
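        Examples
        --------
        >>> a = YTArray([1, 2, 3], 'cm')
        >>> vals = a.to_value('m')  # plain ndarray holding the values in m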
"""
if units is None:
v = self.value
else:
v = self.in_units(units, equivalence=equivalence, **kwargs).value
if isinstance(self, YTQuantity):
return float(v)
else:
return v
def in_base(self, unit_system="cgs"):
"""
Creates a copy of this array with the data in the specified unit system,
and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E_new = E.in_base(unit_system="galactic")
"""
return self.in_units(self.units.get_base_equivalent(unit_system))
def in_cgs(self):
"""
Creates a copy of this array with the data in the equivalent cgs units,
and returns it.
Returns
-------
Quantity object with data converted to cgs units.
"""
return self.in_units(self.units.get_cgs_equivalent())
def in_mks(self):
"""
Creates a copy of this array with the data in the equivalent mks units,
and returns it.
Returns
-------
Quantity object with data converted to mks units.
"""
return self.in_units(self.units.get_mks_equivalent())
def to_equivalent(self, unit, equiv, **kwargs):
"""
Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
related by only a constant factor but not in the same units.
Parameters
----------
unit : string
The unit that you wish to convert to.
equiv : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> a = yt.YTArray(1.0e7,"K")
>>> a.to_equivalent("keV", "thermal")
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equiv]()
oneway_or_equivalent = (
conv_unit.has_equivalent(equiv) or this_equiv._one_way)
if self.has_equivalent(equiv) and oneway_or_equivalent:
new_arr = this_equiv.convert(
self, conv_unit.dimensions, **kwargs)
if isinstance(new_arr, tuple):
try:
return type(self)(new_arr[0], new_arr[1]).in_units(unit)
except YTUnitConversionError:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
else:
return new_arr.in_units(unit)
else:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
def list_equivalencies(self):
"""
Lists the possible equivalencies associated with this YTArray or
YTQuantity.
"""
self.units.list_equivalencies()
def has_equivalent(self, equiv):
"""
Check to see if this YTArray or YTQuantity has an equivalent unit in
*equiv*.
"""
return self.units.has_equivalent(equiv)
def ndarray_view(self):
"""
Returns a view into the array, but as an ndarray rather than ytarray.
Returns
-------
View of this array's data.
"""
return self.view(np.ndarray)
def to_ndarray(self):
"""
Creates a copy of this array with the unit information stripped
"""
return np.array(self)
@classmethod
def from_astropy(cls, arr, unit_registry=None):
"""
Convert an AstroPy "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : AstroPy Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
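        Examples
        --------
        >>> a = YTArray([1, 2, 3], 'cm')
        >>> q = a.to_astropy()  # astropy Quantity carrying the same data in cm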
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
        Convert this YTArray or YTQuantity to a Pint Quantity.
        Parameters
        ----------
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
        f = h5py.File(filename, 'a')
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
                del g[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
            The filename of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
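        Examples
        --------
        >>> a = YTArray([1, 2, 3], 'cm')
        >>> a.write_hdf5('test_array_data.h5', dataset_name='a')
        >>> b = YTArray.from_hdf5('test_array_data.h5', dataset_name='a')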
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
        f = h5py.File(filename, 'r')
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
#
# Start convenience methods
#
@property
def value(self):
"""Get a copy of the array data as a numpy ndarray"""
return np.array(self)
v = value
@property
def ndview(self):
"""Get a view of the array data."""
return self.ndarray_view()
d = ndview
@property
def unit_quantity(self):
"""Get a YTQuantity with the same unit as this array and a value of
1.0"""
return YTQuantity(1.0, self.units)
uq = unit_quantity
@property
def unit_array(self):
"""Get a YTArray filled with ones with the same unit and shape as this
array"""
return np.ones_like(self)
ua = unit_array
def __getitem__(self, item):
ret = super(YTArray, self).__getitem__(item)
if ret.shape == ():
return YTQuantity(ret, self.units, bypass_validation=True)
else:
if hasattr(self, 'units'):
ret.units = self.units
return ret
#
# Start operation methods
#
if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
def __add__(self, right_object):
"""
Add this ytarray to the object on the right of the `+` operator.
Must check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "addition")
return super(YTArray, self).__add__(ro)
def __radd__(self, left_object):
""" See __add__. """
lo = sanitize_units_add(self, left_object, "addition")
return super(YTArray, self).__radd__(lo)
def __iadd__(self, other):
""" See __add__. """
oth = sanitize_units_add(self, other, "addition")
np.add(self, oth, out=self)
return self
def __sub__(self, right_object):
"""
Subtract the object on the right of the `-` from this ytarray. Must
check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "subtraction")
return super(YTArray, self).__sub__(ro)
def __rsub__(self, left_object):
""" See __sub__. """
lo = sanitize_units_add(self, left_object, "subtraction")
return super(YTArray, self).__rsub__(lo)
def __isub__(self, other):
""" See __sub__. """
oth = sanitize_units_add(self, other, "subtraction")
np.subtract(self, oth, out=self)
return self
def __neg__(self):
""" Negate the data. """
return super(YTArray, self).__neg__()
def __mul__(self, right_object):
"""
Multiply this YTArray by the object on the right of the `*`
operator. The unit objects handle being multiplied.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__mul__(ro)
def __rmul__(self, left_object):
""" See __mul__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rmul__(lo)
def __imul__(self, other):
""" See __mul__. """
oth = sanitize_units_mul(self, other)
np.multiply(self, oth, out=self)
return self
def __div__(self, right_object):
"""
Divide this YTArray by the object on the right of the `/` operator.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__div__(ro)
def __rdiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rdiv__(lo)
def __idiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.divide(self, oth, out=self)
return self
def __truediv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__truediv__(ro)
def __rtruediv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rtruediv__(lo)
def __itruediv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.true_divide(self, oth, out=self)
return self
def __floordiv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__floordiv__(ro)
def __rfloordiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rfloordiv__(lo)
def __ifloordiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.floor_divide(self, oth, out=self)
return self
def __or__(self, right_object):
return super(YTArray, self).__or__(right_object)
def __ror__(self, left_object):
return super(YTArray, self).__ror__(left_object)
def __ior__(self, other):
np.bitwise_or(self, other, out=self)
return self
def __xor__(self, right_object):
return super(YTArray, self).__xor__(right_object)
def __rxor__(self, left_object):
return super(YTArray, self).__rxor__(left_object)
def __ixor__(self, other):
np.bitwise_xor(self, other, out=self)
return self
def __and__(self, right_object):
return super(YTArray, self).__and__(right_object)
def __rand__(self, left_object):
return super(YTArray, self).__rand__(left_object)
def __iand__(self, other):
np.bitwise_and(self, other, out=self)
return self
def __pow__(self, power):
"""
Raise this YTArray to some power.
Parameters
----------
power : float or dimensionless YTArray.
                The power to raise this array to.
"""
if isinstance(power, YTArray):
if not power.units.is_dimensionless:
                    raise YTUnitOperationError('power', power.units)
# Work around a sympy issue (I think?)
#
# If I don't do this, super(YTArray, self).__pow__ returns a YTArray
# with a unit attribute set to the sympy expression 1/1 rather than
# a dimensionless Unit object.
if self.units.is_dimensionless and power == -1:
ret = super(YTArray, self).__pow__(power)
return type(self)(ret, input_units='')
return super(YTArray, self).__pow__(power)
def __abs__(self):
""" Return a YTArray with the abs of the data. """
return super(YTArray, self).__abs__()
#
# Start comparison operators.
#
def __lt__(self, other):
""" Test if this is less than the object on the right. """
# converts if possible
oth = validate_comparison_units(self, other, 'less_than')
return super(YTArray, self).__lt__(oth)
def __le__(self, other):
"""Test if this is less than or equal to the object on the right.
"""
oth = validate_comparison_units(self, other, 'less_than or equal')
return super(YTArray, self).__le__(oth)
def __eq__(self, other):
""" Test if this is equal to the object on the right. """
# Check that other is a YTArray.
if other is None:
# self is a YTArray, so it can't be None.
return False
oth = validate_comparison_units(self, other, 'equal')
return super(YTArray, self).__eq__(oth)
def __ne__(self, other):
""" Test if this is not equal to the object on the right. """
# Check that the other is a YTArray.
if other is None:
return True
oth = validate_comparison_units(self, other, 'not equal')
return super(YTArray, self).__ne__(oth)
def __ge__(self, other):
""" Test if this is greater than or equal to other. """
# Check that the other is a YTArray.
oth = validate_comparison_units(
self, other, 'greater than or equal')
return super(YTArray, self).__ge__(oth)
def __gt__(self, other):
""" Test if this is greater than the object on the right. """
# Check that the other is a YTArray.
oth = validate_comparison_units(self, other, 'greater than')
return super(YTArray, self).__gt__(oth)
#
# End comparison operators
#
#
# Begin reduction operators
#
@return_arr
def prod(self, axis=None, dtype=None, out=None):
if axis is not None:
units = self.units**self.shape[axis]
else:
units = self.units**self.size
return super(YTArray, self).prod(axis, dtype, out), units
@return_arr
def mean(self, axis=None, dtype=None, out=None):
return super(YTArray, self).mean(axis, dtype, out), self.units
@return_arr
def sum(self, axis=None, dtype=None, out=None):
return super(YTArray, self).sum(axis, dtype, out), self.units
@return_arr
def std(self, axis=None, dtype=None, out=None, ddof=0):
return super(YTArray, self).std(axis, dtype, out, ddof), self.units
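        # Pre-numpy-1.13 ufunc hook: numpy hands back the raw result plus a
        # (ufunc, inputs) context; look up the output unit in _ufunc_registry
        # and rewrap the result in the appropriate class.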
def __array_wrap__(self, out_arr, context=None):
ret = super(YTArray, self).__array_wrap__(out_arr, context)
if isinstance(ret, YTQuantity) and ret.shape != ():
ret = ret.view(YTArray)
if context is None:
if ret.shape == ():
return ret[()]
else:
return ret
ufunc = context[0]
inputs = context[1]
if ufunc in unary_operators:
out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr)
unit = self._ufunc_registry[context[0]](u)
ret_class = type(self)
elif ufunc in binary_operators:
unit_operator = self._ufunc_registry[context[0]]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (preserve_units, comparison_unit,
arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class, raise_error=True)
unit = unit_operator(*units)
if unit_operator in (multiply_units, divide_units):
out_arr, out_arr, unit = handle_multiply_divide_units(
unit, units, out_arr, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc has not been added "
"to YTArray." % str(context[0]))
if unit is None:
out_arr = np.array(out_arr, copy=False)
return out_arr
out_arr.units = unit
if out_arr.size == 1:
return YTQuantity(np.array(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
return YTArray(np.array(out_arr), unit)
return ret_class(np.array(out_arr, copy=False), unit)
else: # numpy version equal to or newer than 1.13
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
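            # numpy >= 1.13 protocol: unwrap the inputs to plain ndarrays,
            # apply the raw ufunc, determine the output unit from
            # _ufunc_registry (or the reduction rules below), then rewrap the
            # result and propagate units to any `out` argument.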
func = getattr(ufunc, method)
if 'out' in kwargs:
out_orig = kwargs.pop('out')
out = np.asarray(out_orig[0])
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == 'reduce':
power_sign = POWER_SIGN_MAPPING[ufunc]
if 'axis' in kwargs and kwargs['axis'] is not None:
unit = u**(power_sign*inp.shape[kwargs['axis']])
else:
unit = u**(power_sign*inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(
inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]),
out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(
unit, units, out, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been"
"added to YTArray." % (str(ufunc), len(inputs)))
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
def copy(self, order='C'):
return type(self)(np.copy(np.asarray(self)), self.units)
def __array_finalize__(self, obj):
if obj is None and hasattr(self, 'units'):
return
self.units = getattr(obj, 'units', NULL_UNIT)
def __pos__(self):
""" Posify the data. """
# this needs to be defined for all numpy versions, see
# numpy issue #9081
return type(self)(super(YTArray, self).__pos__(), self.units)
@return_arr
def dot(self, b, out=None):
return super(YTArray, self).dot(b), self.units*b.units
def __reduce__(self):
"""Pickle reduction method
See the documentation for the standard library pickle module:
http://docs.python.org/2/library/pickle.html
        Unit metadata is encoded in the zeroth element of the third element of
        the returned tuple, itself a tuple used to restore the state of the
        ndarray.
This is always defined for numpy arrays.
"""
np_ret = super(YTArray, self).__reduce__()
obj_state = np_ret[2]
unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],)
new_ret = np_ret[:2] + unit_state + np_ret[3:]
return new_ret
def __setstate__(self, state):
"""Pickle setstate method
This is called inside pickle.read() and restores the unit data from the
metadata extracted in __reduce__ and then serialized by pickle.
"""
super(YTArray, self).__setstate__(state[1:])
try:
unit, lut = state[0]
except TypeError:
# this case happens when we try to load an old pickle file
# created before we serialized the unit symbol lookup table
# into the pickle file
unit, lut = str(state[0]), default_unit_symbol_lut.copy()
# need to fix up the lut if the pickle was saved prior to PR #1728
# when the pickle format changed
if len(lut['m']) == 2:
lut.update(default_unit_symbol_lut)
for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]:
            lut[k] = v + (0.0, r'\rm{' + k.replace('_', r'\ ') + '}')
registry = UnitRegistry(lut=lut, add_default_symbols=False)
self.units = Unit(unit, registry=registry)
def __deepcopy__(self, memodict=None):
"""copy.deepcopy implementation
This is necessary for stdlib deepcopy of arrays and quantities.
"""
if memodict is None:
memodict = {}
ret = super(YTArray, self).__deepcopy__(memodict)
return type(self)(ret, copy.deepcopy(self.units))
class YTQuantity(YTArray):
"""
A scalar associated with a unit.
Parameters
----------
input_scalar : an integer or floating point scalar
The scalar to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the quantity. Powers must be specified using python syntax
(cm**3, not cm^3).
registry : A UnitRegistry object
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data.
Examples
--------
>>> from yt import YTQuantity
>>> a = YTQuantity(1, 'cm')
>>> b = YTQuantity(2, 'm')
>>> a + b
201.0 cm
>>> b + a
2.01 m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTQuantity(12, 'g/cm**3')
>>> np.abs(a)
12 g/cm**3
and strip them when it would be annoying to deal with them.
>>> print(np.log10(a))
1.07918124605
YTQuantity is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.quan(5, 'code_length')
>>> a.in_cgs()
1.543e+25 cm
This is equivalent to:
>>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
def __new__(cls, input_scalar, input_units=None, registry=None,
dtype=np.float64, bypass_validation=False):
if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)):
raise RuntimeError("YTQuantity values must be numeric")
ret = YTArray.__new__(cls, input_scalar, input_units, registry,
dtype=dtype, bypass_validation=bypass_validation)
if ret.size > 1:
raise RuntimeError("YTQuantity instances must be scalars")
return ret
def __repr__(self):
return str(self)
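# Attach the common units of the YTArrays in `arrs` to the bare ndarray `v`
# returned by a numpy wrapper; all inputs must share identical units.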
def validate_numpy_wrapper_units(v, arrs):
if not any(isinstance(a, YTArray) for a in arrs):
return v
if not all(isinstance(a, YTArray) for a in arrs):
raise RuntimeError("Not all of your arrays are YTArrays.")
a1 = arrs[0]
if not all(a.units == a1.units for a in arrs[1:]):
raise RuntimeError("Your arrays must have identical units.")
v.units = a1.units
return v
def uconcatenate(arrs, axis=0):
"""Concatenate a sequence of arrays.
This wrapper around numpy.concatenate preserves units. All input arrays must
have the same units. See the documentation of numpy.concatenate for full
details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uconcatenate((A, B))
YTArray([ 1., 2., 3., 2., 3., 4.]) cm
"""
v = np.concatenate(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Applies the cross product to two YT arrays.
This wrapper around numpy.cross preserves units.
See the documentation of numpy.cross for full
details.
"""
v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
units = arr1.units * arr2.units
arr = YTArray(v, units, registry=registry)
return arr
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uintersect1d(A, B)
YTArray([ 2., 3.]) cm
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def uunion1d(arr1, arr2):
"""Find the union of two arrays.
    A wrapper around numpy.union1d that preserves units. All input arrays
    must have the same units. See the documentation of numpy.union1d for
    full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uunion1d(A, B)
YTArray([ 1., 2., 3., 4.]) cm
"""
v = np.union1d(arr1, arr2)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def unorm(data, ord=None, axis=None, keepdims=False):
"""Matrix or vector norm that preserves units
This is a wrapper around np.linalg.norm that preserves units. See
the documentation for that function for descriptions of the keyword
arguments.
The keepdims argument is ignored if the version of numpy installed is
older than numpy 1.10.0.
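    Examples
    --------
    >>> a = yt.YTArray([3.0, 4.0], 'cm')
    >>> n = unorm(a)  # a YTQuantity equal to 5.0 cm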
"""
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
norm = np.linalg.norm(data, ord=ord, axis=axis)
else:
norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
if norm.shape == ():
return YTQuantity(norm, data.units)
return YTArray(norm, data.units)
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
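    Examples
    --------
    >>> a = yt.YTArray([1., 2., 3.], 'cm')
    >>> b = yt.YTArray([1., 2., 3.], 'g')
    >>> c = udot(a, b)  # a YTQuantity equal to 14.0 cm*g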
"""
dot = np.dot(op1.d, op2.d)
units = op1.units*op2.units
if dot.shape == ():
return YTQuantity(dot, units)
return YTArray(dot, units)
def uvstack(arrs):
"""Stack arrays in sequence vertically (row wise) while preserving units
This is a wrapper around np.vstack that preserves units.
"""
v = np.vstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def uhstack(arrs):
"""Stack arrays in sequence horizontally (column wise) while preserving units
This is a wrapper around np.hstack that preserves units.
"""
v = np.hstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ustack(arrs, axis=0):
"""Join a sequence of arrays along a new axis while preserving units
The axis parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the
first dimension and if ``axis=-1`` it will be the last dimension.
This is a wrapper around np.stack that preserves units.
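    Examples
    --------
    >>> A = yt.YTArray([1, 2, 3], 'cm')
    >>> B = yt.YTArray([2, 3, 4], 'cm')
    >>> C = ustack([A, B])  # a (2, 3) YTArray with units of cm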
"""
    v = np.stack(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0],field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == 'particle':
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
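# Decide which class wraps the result of a binary operation: plain ndarrays
# and python scalars defer to the YT class, YTQuantity defers to array
# subclasses, and a subclass wins over its parent class.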
def get_binary_op_return_class(cls1, cls2):
if cls1 is cls2:
return cls1
if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)):
return cls2
if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)):
return cls1
if issubclass(cls1, YTQuantity):
return cls2
if issubclass(cls2, YTQuantity):
return cls1
if issubclass(cls1, cls2):
return cls1
if issubclass(cls2, cls1):
return cls2
else:
raise RuntimeError("Undefined operation for a YTArray subclass. "
"Received operand types (%s) and (%s)" % (cls1, cls2))
def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
r"""
Load YTArrays with unit information from a text file. Each row in the
text file must have the same number of values.
Parameters
----------
fname : str
Filename to read.
dtype : data-type, optional
Data-type of the resulting array; default: float.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
Examples
--------
>>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
"""
f = open(fname, 'r')
next_one = False
units = []
num_cols = -1
for line in f.readlines():
words = line.strip().split()
if len(words) == 0:
continue
        if line.startswith(comments):
if next_one:
units = words[1:]
if len(words) == 2 and words[1] == "Units":
next_one = True
else:
# Here we catch the first line of numbers
try:
col_words = line.strip().split(delimiter)
for word in col_words:
float(word)
num_cols = len(col_words)
break
except ValueError:
mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
f.close()
if len(units) != num_cols:
mylog.warning("Malformed or incomplete units header. Arrays will be "
"dimensionless!")
units = ["dimensionless"]*num_cols
arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
delimiter=delimiter, converters=None,
unpack=True, usecols=usecols, ndmin=0)
if usecols is not None:
units = [units[col] for col in usecols]
mylog.info("Array units: %s" % ", ".join(units))
return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
footer='', comments='#'):
r"""
Write YTArrays with unit information to a text file.
Parameters
----------
fname : str
The file to write the YTArrays to.
arrays : list of YTArrays or single YTArray
The array(s) to write to the file.
fmt : str or sequence of strs, optional
A single format (%10.5f), or a sequence of formats.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file, before the
unit header.
footer : str, optional
String that will be written at the end of the file.
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '#', as expected by e.g.
``yt.loadtxt``.
Examples
--------
>>> sp = ds.sphere("c", (100,"kpc"))
>>> a = sp["density"]
>>> b = sp["temperature"]
>>> c = sp["velocity_x"]
>>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
"""
if not isinstance(arrays, list):
arrays = [arrays]
units = []
for array in arrays:
if hasattr(array, "units"):
units.append(str(array.units))
else:
units.append("dimensionless")
if header != '':
header += '\n'
header += " Units\n " + '\t'.join(units)
np.savetxt(fname, np.transpose(arrays), header=header,
fmt=fmt, delimiter=delimiter, footer=footer,
newline='\n', comments=comments)
| """
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
# Check units type
if input_units is None:
# Nothing provided. Make dimensionless...
units = Unit()
elif isinstance(input_units, Unit):
if registry and registry is not input_units.registry:
units = Unit(str(input_units), registry=registry)
else:
units = input_units
else:
# units kwarg set, but it's not a Unit object.
# don't handle all the cases here, let the Unit class handle if
# it's a str.
units = Unit(input_units, registry=registry)
# Attach the units
obj.units = units
return obj
def __repr__(self):
"""
"""
return super(YTArray, self).__repr__()+' '+self.units.__repr__()
def __str__(self):
"""
"""
return str(self.view(np.ndarray)) + ' ' + str(self.units)
#
# Start unit conversion methods
#
def convert_to_units(self, units):
"""
Convert the array and units to the given units.
Parameters
----------
units : Unit object or str
The units you want to convert to.
"""
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
self.units = new_units
values = self.d
values *= conversion_factor
if offset:
np.subtract(self, offset*self.uq, self)
return self
def convert_to_base(self, unit_system="cgs"):
"""
Convert the array and units to the equivalent base units in
the specified unit system.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E.convert_to_base(unit_system="galactic")
"""
return self.convert_to_units(self.units.get_base_equivalent(unit_system))
def convert_to_cgs(self):
"""
Convert the array and units to the equivalent cgs units.
"""
return self.convert_to_units(self.units.get_cgs_equivalent())
def convert_to_mks(self):
"""
Convert the array and units to the equivalent mks units.
"""
return self.convert_to_units(self.units.get_mks_equivalent())
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
YTArray
"""
if equivalence is None:
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
new_array = type(self)(self.ndview * conversion_factor, new_units)
if offset:
np.subtract(new_array, offset*new_array.uq, new_array)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs)
def to(self, units, equivalence=None, **kwargs):
"""
An alias for YTArray.in_units().
See the docstrings of that function for details.
"""
return self.in_units(units, equivalence=equivalence, **kwargs)
def to_value(self, units=None, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it without units. Output is therefore a
bare NumPy array.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string, optional
The units you want to get the bare quantity in. If not
specified, the value will be returned in the current units.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
NumPy array
"""
if units is None:
v = self.value
else:
v = self.in_units(units, equivalence=equivalence, **kwargs).value
if isinstance(self, YTQuantity):
return float(v)
else:
return v
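    # Editor's note: illustrative doctest-style sketch, not in the original source.
    #   >>> a = YTArray([1.0, 2.0], 'km')
    #   >>> a.to_value('m')
    #   array([ 1000.,  2000.])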
def in_base(self, unit_system="cgs"):
"""
Creates a copy of this array with the data in the specified unit system,
and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E_new = E.in_base(unit_system="galactic")
"""
return self.in_units(self.units.get_base_equivalent(unit_system))
def in_cgs(self):
"""
Creates a copy of this array with the data in the equivalent cgs units,
and returns it.
Returns
-------
Quantity object with data converted to cgs units.
"""
return self.in_units(self.units.get_cgs_equivalent())
def in_mks(self):
"""
Creates a copy of this array with the data in the equivalent mks units,
and returns it.
Returns
-------
Quantity object with data converted to mks units.
"""
return self.in_units(self.units.get_mks_equivalent())
def to_equivalent(self, unit, equiv, **kwargs):
"""
Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
related by only a constant factor but not in the same units.
Parameters
----------
unit : string
The unit that you wish to convert to.
equiv : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> a = yt.YTArray(1.0e7,"K")
>>> a.to_equivalent("keV", "thermal")
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equiv]()
oneway_or_equivalent = (
conv_unit.has_equivalent(equiv) or this_equiv._one_way)
if self.has_equivalent(equiv) and oneway_or_equivalent:
new_arr = this_equiv.convert(
self, conv_unit.dimensions, **kwargs)
if isinstance(new_arr, tuple):
try:
return type(self)(new_arr[0], new_arr[1]).in_units(unit)
except YTUnitConversionError:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
else:
return new_arr.in_units(unit)
else:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
def list_equivalencies(self):
"""
Lists the possible equivalencies associated with this YTArray or
YTQuantity.
"""
self.units.list_equivalencies()
def has_equivalent(self, equiv):
"""
Check to see if this YTArray or YTQuantity has an equivalent unit in
*equiv*.
"""
return self.units.has_equivalent(equiv)
def ndarray_view(self):
"""
Returns a view into the array, but as an ndarray rather than ytarray.
Returns
-------
View of this array's data.
"""
return self.view(np.ndarray)
def to_ndarray(self):
"""
Creates a copy of this array with the unit information stripped
"""
return np.array(self)
@classmethod
def from_astropy(cls, arr, unit_registry=None):
"""
Convert an AstroPy "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : AstroPy Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
Convert a YTArray or YTQuantity to a Pint Quantity.
Parameters
----------
arr : YTArray or YTQuantity
The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
        f = h5py.File(filename, 'a')
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
                del g[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
        The filename of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
        f = h5py.File(filename, 'r')
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
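    # Editor's note: illustrative round-trip sketch, not in the original source;
    # the file name 'test.h5' is hypothetical.
    #   >>> a = YTArray([1.0, 2.0, 3.0], 'cm')
    #   >>> a.write_hdf5('test.h5', dataset_name='my_data')
    #   >>> YTArray.from_hdf5('test.h5', dataset_name='my_data')
    #   YTArray([ 1.,  2.,  3.]) cm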
#
# Start convenience methods
#
@property
def value(self):
"""Get a copy of the array data as a numpy ndarray"""
return np.array(self)
v = value
@property
def ndview(self):
"""Get a view of the array data."""
return self.ndarray_view()
d = ndview
@property
def unit_quantity(self):
"""Get a YTQuantity with the same unit as this array and a value of
1.0"""
return YTQuantity(1.0, self.units)
uq = unit_quantity
@property
def unit_array(self):
"""Get a YTArray filled with ones with the same unit and shape as this
array"""
return np.ones_like(self)
ua = unit_array
def __getitem__(self, item):
ret = super(YTArray, self).__getitem__(item)
if ret.shape == ():
return YTQuantity(ret, self.units, bypass_validation=True)
else:
if hasattr(self, 'units'):
ret.units = self.units
return ret
#
# Start operation methods
#
if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
def __add__(self, right_object):
"""
Add this ytarray to the object on the right of the `+` operator.
Must check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "addition")
return super(YTArray, self).__add__(ro)
def __radd__(self, left_object):
""" See __add__. """
lo = sanitize_units_add(self, left_object, "addition")
return super(YTArray, self).__radd__(lo)
def __iadd__(self, other):
""" See __add__. """
oth = sanitize_units_add(self, other, "addition")
np.add(self, oth, out=self)
return self
def __sub__(self, right_object):
"""
Subtract the object on the right of the `-` from this ytarray. Must
check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "subtraction")
return super(YTArray, self).__sub__(ro)
def __rsub__(self, left_object):
""" See __sub__. """
lo = sanitize_units_add(self, left_object, "subtraction")
return super(YTArray, self).__rsub__(lo)
def __isub__(self, other):
""" See __sub__. """
oth = sanitize_units_add(self, other, "subtraction")
np.subtract(self, oth, out=self)
return self
def __neg__(self):
""" Negate the data. """
return super(YTArray, self).__neg__()
def __mul__(self, right_object):
"""
Multiply this YTArray by the object on the right of the `*`
operator. The unit objects handle being multiplied.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__mul__(ro)
def __rmul__(self, left_object):
""" See __mul__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rmul__(lo)
def __imul__(self, other):
""" See __mul__. """
oth = sanitize_units_mul(self, other)
np.multiply(self, oth, out=self)
return self
def __div__(self, right_object):
"""
Divide this YTArray by the object on the right of the `/` operator.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__div__(ro)
def __rdiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rdiv__(lo)
def __idiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.divide(self, oth, out=self)
return self
def __truediv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__truediv__(ro)
def __rtruediv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rtruediv__(lo)
def __itruediv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.true_divide(self, oth, out=self)
return self
def __floordiv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__floordiv__(ro)
def __rfloordiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rfloordiv__(lo)
def __ifloordiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.floor_divide(self, oth, out=self)
return self
def __or__(self, right_object):
return super(YTArray, self).__or__(right_object)
def __ror__(self, left_object):
return super(YTArray, self).__ror__(left_object)
def __ior__(self, other):
np.bitwise_or(self, other, out=self)
return self
def __xor__(self, right_object):
return super(YTArray, self).__xor__(right_object)
def __rxor__(self, left_object):
return super(YTArray, self).__rxor__(left_object)
def __ixor__(self, other):
np.bitwise_xor(self, other, out=self)
return self
def __and__(self, right_object):
return super(YTArray, self).__and__(right_object)
def __rand__(self, left_object):
return super(YTArray, self).__rand__(left_object)
def __iand__(self, other):
np.bitwise_and(self, other, out=self)
return self
def __pow__(self, power):
"""
Raise this YTArray to some power.
Parameters
----------
power : float or dimensionless YTArray.
The pow value.
"""
if isinstance(power, YTArray):
if not power.units.is_dimensionless:
                    raise YTUnitOperationError('power', power.units)
# Work around a sympy issue (I think?)
#
# If I don't do this, super(YTArray, self).__pow__ returns a YTArray
# with a unit attribute set to the sympy expression 1/1 rather than
# a dimensionless Unit object.
if self.units.is_dimensionless and power == -1:
ret = super(YTArray, self).__pow__(power)
return type(self)(ret, input_units='')
return super(YTArray, self).__pow__(power)
def __abs__(self):
""" Return a YTArray with the abs of the data. """
return super(YTArray, self).__abs__()
#
# Start comparison operators.
#
def __lt__(self, other):
""" Test if this is less than the object on the right. """
# converts if possible
oth = validate_comparison_units(self, other, 'less_than')
return super(YTArray, self).__lt__(oth)
def __le__(self, other):
"""Test if this is less than or equal to the object on the right.
"""
oth = validate_comparison_units(self, other, 'less_than or equal')
return super(YTArray, self).__le__(oth)
def __eq__(self, other):
""" Test if this is equal to the object on the right. """
# Check that other is a YTArray.
if other is None:
# self is a YTArray, so it can't be None.
return False
oth = validate_comparison_units(self, other, 'equal')
return super(YTArray, self).__eq__(oth)
def __ne__(self, other):
""" Test if this is not equal to the object on the right. """
# Check that the other is a YTArray.
if other is None:
return True
oth = validate_comparison_units(self, other, 'not equal')
return super(YTArray, self).__ne__(oth)
def __ge__(self, other):
""" Test if this is greater than or equal to other. """
# Check that the other is a YTArray.
oth = validate_comparison_units(
self, other, 'greater than or equal')
return super(YTArray, self).__ge__(oth)
def __gt__(self, other):
""" Test if this is greater than the object on the right. """
# Check that the other is a YTArray.
oth = validate_comparison_units(self, other, 'greater than')
return super(YTArray, self).__gt__(oth)
#
# End comparison operators
#
#
# Begin reduction operators
#
@return_arr
def prod(self, axis=None, dtype=None, out=None):
if axis is not None:
units = self.units**self.shape[axis]
else:
units = self.units**self.size
return super(YTArray, self).prod(axis, dtype, out), units
@return_arr
def mean(self, axis=None, dtype=None, out=None):
return super(YTArray, self).mean(axis, dtype, out), self.units
@return_arr
def sum(self, axis=None, dtype=None, out=None):
return super(YTArray, self).sum(axis, dtype, out), self.units
@return_arr
def std(self, axis=None, dtype=None, out=None, ddof=0):
return super(YTArray, self).std(axis, dtype, out, ddof), self.units
def __array_wrap__(self, out_arr, context=None):
ret = super(YTArray, self).__array_wrap__(out_arr, context)
if isinstance(ret, YTQuantity) and ret.shape != ():
ret = ret.view(YTArray)
if context is None:
if ret.shape == ():
return ret[()]
else:
return ret
ufunc = context[0]
inputs = context[1]
if ufunc in unary_operators:
out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr)
unit = self._ufunc_registry[context[0]](u)
ret_class = type(self)
elif ufunc in binary_operators:
unit_operator = self._ufunc_registry[context[0]]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (preserve_units, comparison_unit,
arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class, raise_error=True)
unit = unit_operator(*units)
if unit_operator in (multiply_units, divide_units):
out_arr, out_arr, unit = handle_multiply_divide_units(
unit, units, out_arr, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc has not been added "
"to YTArray." % str(context[0]))
if unit is None:
out_arr = np.array(out_arr, copy=False)
return out_arr
out_arr.units = unit
if out_arr.size == 1:
return YTQuantity(np.array(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
return YTArray(np.array(out_arr), unit)
return ret_class(np.array(out_arr, copy=False), unit)
else: # numpy version equal to or newer than 1.13
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if 'out' in kwargs:
out_orig = kwargs.pop('out')
out = np.asarray(out_orig[0])
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == 'reduce':
power_sign = POWER_SIGN_MAPPING[ufunc]
if 'axis' in kwargs and kwargs['axis'] is not None:
unit = u**(power_sign*inp.shape[kwargs['axis']])
else:
unit = u**(power_sign*inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(
inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]),
out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(
unit, units, out, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been"
"added to YTArray." % (str(ufunc), len(inputs)))
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
def copy(self, order='C'):
return type(self)(np.copy(np.asarray(self)), self.units)
def __array_finalize__(self, obj):
if obj is None and hasattr(self, 'units'):
return
self.units = getattr(obj, 'units', NULL_UNIT)
def __pos__(self):
""" Posify the data. """
# this needs to be defined for all numpy versions, see
# numpy issue #9081
return type(self)(super(YTArray, self).__pos__(), self.units)
@return_arr
def dot(self, b, out=None):
return super(YTArray, self).dot(b), self.units*b.units
def __reduce__(self):
"""Pickle reduction method
See the documentation for the standard library pickle module:
http://docs.python.org/2/library/pickle.html
        Unit metadata is encoded in the zeroth element of the third element of the
returned tuple, itself a tuple used to restore the state of the ndarray.
This is always defined for numpy arrays.
"""
np_ret = super(YTArray, self).__reduce__()
obj_state = np_ret[2]
unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],)
new_ret = np_ret[:2] + unit_state + np_ret[3:]
return new_ret
def __setstate__(self, state):
"""Pickle setstate method
        This is called inside pickle.load() and restores the unit data from the
metadata extracted in __reduce__ and then serialized by pickle.
"""
super(YTArray, self).__setstate__(state[1:])
try:
unit, lut = state[0]
except TypeError:
# this case happens when we try to load an old pickle file
# created before we serialized the unit symbol lookup table
# into the pickle file
unit, lut = str(state[0]), default_unit_symbol_lut.copy()
# need to fix up the lut if the pickle was saved prior to PR #1728
# when the pickle format changed
if len(lut['m']) == 2:
lut.update(default_unit_symbol_lut)
for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]:
            lut[k] = v + (0.0, r'\rm{' + k.replace('_', r'\ ') + '}')
registry = UnitRegistry(lut=lut, add_default_symbols=False)
self.units = Unit(unit, registry=registry)
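    # Editor's note: illustrative pickling sketch, not in the original source.
    #   >>> import pickle
    #   >>> a = YTArray([1.0, 2.0], 'g')
    #   >>> b = pickle.loads(pickle.dumps(a))
    #   >>> bool(np.all(a == b)), b.units == a.units
    #   (True, True)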
def __deepcopy__(self, memodict=None):
"""copy.deepcopy implementation
This is necessary for stdlib deepcopy of arrays and quantities.
"""
if memodict is None:
memodict = {}
ret = super(YTArray, self).__deepcopy__(memodict)
return type(self)(ret, copy.deepcopy(self.units))
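# Editor's note: an illustrative sketch (not in the original module) exercising
# the public conversion API defined above; the values mirror the class docstring.
def _example_ytarray_conversions():
    a = YTArray([1.0, 2.0, 3.0], 'km')
    b = a.in_units('m')     # new YTArray: [1000., 2000., 3000.] m
    c = a.to_value('cm')    # bare ndarray: [1.e5, 2.e5, 3.e5]
    d = (a * a).units       # multiply_units yields km**2
    return b, c, d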
class YTQuantity(YTArray):
"""
A scalar associated with a unit.
Parameters
----------
input_scalar : an integer or floating point scalar
The scalar to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the quantity. Powers must be specified using python syntax
(cm**3, not cm^3).
registry : A UnitRegistry object
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data.
Examples
--------
>>> from yt import YTQuantity
>>> a = YTQuantity(1, 'cm')
>>> b = YTQuantity(2, 'm')
>>> a + b
201.0 cm
>>> b + a
2.01 m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTQuantity(12, 'g/cm**3')
>>> np.abs(a)
12 g/cm**3
and strip them when it would be annoying to deal with them.
>>> print(np.log10(a))
1.07918124605
YTQuantity is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.quan(5, 'code_length')
>>> a.in_cgs()
1.543e+25 cm
This is equivalent to:
>>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
def __new__(cls, input_scalar, input_units=None, registry=None,
dtype=np.float64, bypass_validation=False):
if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)):
raise RuntimeError("YTQuantity values must be numeric")
ret = YTArray.__new__(cls, input_scalar, input_units, registry,
dtype=dtype, bypass_validation=bypass_validation)
if ret.size > 1:
raise RuntimeError("YTQuantity instances must be scalars")
return ret
def __repr__(self):
return str(self)
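# Editor's note: illustrative sketch, not part of the original module. It shows
# the scalar-only contract enforced in YTQuantity.__new__ above.
def _example_ytquantity_scalar_contract():
    q = YTQuantity(5.0, 'g')
    v = q.to_value('kg')    # plain float: 0.005
    try:
        YTQuantity(np.array([1.0, 2.0]), 'g')   # size > 1 -> RuntimeError
    except RuntimeError:
        pass
    return v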
def validate_numpy_wrapper_units(v, arrs):
if not any(isinstance(a, YTArray) for a in arrs):
return v
if not all(isinstance(a, YTArray) for a in arrs):
raise RuntimeError("Not all of your arrays are YTArrays.")
a1 = arrs[0]
if not all(a.units == a1.units for a in arrs[1:]):
raise RuntimeError("Your arrays must have identical units.")
v.units = a1.units
return v
def uconcatenate(arrs, axis=0):
"""Concatenate a sequence of arrays.
This wrapper around numpy.concatenate preserves units. All input arrays must
have the same units. See the documentation of numpy.concatenate for full
details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uconcatenate((A, B))
YTArray([ 1., 2., 3., 2., 3., 4.]) cm
"""
v = np.concatenate(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Applies the cross product to two YT arrays.
This wrapper around numpy.cross preserves units.
See the documentation of numpy.cross for full
details.
"""
v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
units = arr1.units * arr2.units
arr = YTArray(v, units, registry=registry)
return arr
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uintersect1d(A, B)
YTArray([ 2., 3.]) cm
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def uunion1d(arr1, arr2):
"""Find the union of two arrays.
    A wrapper around numpy.union1d that preserves units. All input arrays
    must have the same units. See the documentation of numpy.union1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uunion1d(A, B)
YTArray([ 1., 2., 3., 4.]) cm
"""
v = np.union1d(arr1, arr2)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def unorm(data, ord=None, axis=None, keepdims=False):
"""Matrix or vector norm that preserves units
This is a wrapper around np.linalg.norm that preserves units. See
the documentation for that function for descriptions of the keyword
arguments.
The keepdims argument is ignored if the version of numpy installed is
older than numpy 1.10.0.
"""
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
norm = np.linalg.norm(data, ord=ord, axis=axis)
else:
norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
if norm.shape == ():
return YTQuantity(norm, data.units)
return YTArray(norm, data.units)
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
"""
dot = np.dot(op1.d, op2.d)
units = op1.units*op2.units
if dot.shape == ():
return YTQuantity(dot, units)
return YTArray(dot, units)
def uvstack(arrs):
"""Stack arrays in sequence vertically (row wise) while preserving units
This is a wrapper around np.vstack that preserves units.
"""
v = np.vstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def uhstack(arrs):
"""Stack arrays in sequence horizontally (column wise) while preserving units
This is a wrapper around np.hstack that preserves units.
"""
v = np.hstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ustack(arrs, axis=0):
"""Join a sequence of arrays along a new axis while preserving units
The axis parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the
first dimension and if ``axis=-1`` it will be the last dimension.
This is a wrapper around np.stack that preserves units.
"""
    v = np.stack(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
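# Editor's note: a minimal sketch (not in the original module) of the wrappers
# above that ship without doctests of their own (unorm, udot, ustack).
def _example_unit_preserving_wrappers():
    a = YTArray([3.0, 4.0], 'cm')
    b = YTArray([1.0, 2.0], 'cm')
    n = unorm(a)        # YTQuantity(5.0, 'cm')
    d = udot(a, b)      # YTQuantity(11.0, 'cm**2')
    s = ustack([a, b])  # 2x2 YTArray carrying 'cm'
    return n, d, s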
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0],field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == 'particle':
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
def get_binary_op_return_class(cls1, cls2):
if cls1 is cls2:
return cls1
if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)):
return cls2
if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)):
return cls1
if issubclass(cls1, YTQuantity):
return cls2
if issubclass(cls2, YTQuantity):
return cls1
if issubclass(cls1, cls2):
return cls1
if issubclass(cls2, cls1):
return cls2
else:
raise RuntimeError("Undefined operation for a YTArray subclass. "
"Received operand types (%s) and (%s)" % (cls1, cls2))
def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
r"""
Load YTArrays with unit information from a text file. Each row in the
text file must have the same number of values.
Parameters
----------
fname : str
Filename to read.
dtype : data-type, optional
Data-type of the resulting array; default: float.
delimiter : str, optional
        The string used to separate values. Default: '\t' (tab).
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
Examples
--------
>>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
"""
f = open(fname, 'r')
next_one = False
units = []
num_cols = -1
for line in f.readlines():
words = line.strip().split()
if len(words) == 0:
continue
if line[0] == comments:
if next_one:
units = words[1:]
if len(words) == 2 and words[1] == "Units":
next_one = True
else:
# Here we catch the first line of numbers
try:
col_words = line.strip().split(delimiter)
for word in col_words:
float(word)
num_cols = len(col_words)
break
except ValueError:
mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
f.close()
if len(units) != num_cols:
mylog.warning("Malformed or incomplete units header. Arrays will be "
"dimensionless!")
units = ["dimensionless"]*num_cols
arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
delimiter=delimiter, converters=None,
unpack=True, usecols=usecols, ndmin=0)
if usecols is not None:
units = [units[col] for col in usecols]
mylog.info("Array units: %s" % ", ".join(units))
return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
footer='', comments='#'):
r"""
Write YTArrays with unit information to a text file.
Parameters
----------
fname : str
The file to write the YTArrays to.
arrays : list of YTArrays or single YTArray
The array(s) to write to the file.
fmt : str or sequence of strs, optional
A single format (%10.5f), or a sequence of formats.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file, before the
unit header.
footer : str, optional
String that will be written at the end of the file.
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '#', as expected by e.g.
``yt.loadtxt``.
Examples
--------
>>> sp = ds.sphere("c", (100,"kpc"))
>>> a = sp["density"]
>>> b = sp["temperature"]
>>> c = sp["velocity_x"]
>>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
"""
if not isinstance(arrays, list):
arrays = [arrays]
units = []
for array in arrays:
if hasattr(array, "units"):
units.append(str(array.units))
else:
units.append("dimensionless")
if header != '':
header += '\n'
header += " Units\n " + '\t'.join(units)
np.savetxt(fname, np.transpose(arrays), header=header,
fmt=fmt, delimiter=delimiter, footer=footer,
newline='\n', comments=comments)
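# Editor's note: a small save/load round trip (not in the original module); the
# file name 'sphere.dat' is hypothetical and mirrors the docstring examples.
def _example_savetxt_loadtxt_roundtrip():
    dens = YTArray([1.0e-26, 2.0e-26], 'g/cm**3')
    temp = YTArray([1.0e4, 2.0e4], 'K')
    savetxt('sphere.dat', [dens, temp], header='demo columns')
    d, t = loadtxt('sphere.dat')
    return d.units, t.units   # 'g/cm**3' and 'K' recovered from the unit header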
| en | 0.695476 | YTArray class. #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- # numpy 1.13 or newer # redefine this here to avoid a circular import from yt.funcs # This could be a subclass, so don't call YTArray directly. # This will create a copy of the data in the iterable. # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. # Make sure the other object is a YTArray before we use the `units` # attribute. # handle special case of adding or subtracting with zero or # array filled with zero # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros # Check that other is a YTArray. Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. # let Unit() handle units arg if it's not already a Unit obj. An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True # Input array is an already formed ndarray instance # We first cast to be our class type # Check units type # Nothing provided. Make dimensionless... # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. # Attach the units # # Start unit conversion methods # Convert the array and units to the given units. 
Parameters ---------- units : Unit object or str The units you want to convert to. Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") Convert the array and units to the equivalent cgs units. Convert the array and units to the equivalent mks units. Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- YTArray An alias for YTArray.in_units(). See the docstrings of that function for details. Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") Lists the possible equivalencies associated with this YTArray or YTQuantity. Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. 
Creates a copy of this array with the unit information stripped Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. # Converting from AstroPy Quantity # we have to do this because AstroPy is silly and defines # hour as "h" Creates a new AstroPy quantity with the same unit information. Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() # we have to do this because Pint doesn't recognize # "yr" as "year" # # End unit conversion methods # Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) # Overwrite without deleting if we can get away with it. Attempts read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename to of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. # # Start convenience methods # Get a copy of the array data as a numpy ndarray Get a view of the array data. Get a YTQuantity with the same unit as this array and a value of 1.0 Get a YTArray filled with ones with the same unit and shape as this array # # Start operation methods # Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. See __add__. See __add__. Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. See __sub__. See __sub__. Negate the data. Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. See __mul__. See __mul__. Divide this YTArray by the object on the right of the `/` operator. See __div__. See __div__. See __div__. See __div__. See __div__. See __div__. Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. 
# Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. Return a YTArray with the abs of the data. # # Start comparison operators. # Test if this is less than the object on the right. # converts if possible Test if this is less than or equal to the object on the right. Test if this is equal to the object on the right. # Check that other is a YTArray. # self is a YTArray, so it can't be None. Test if this is not equal to the object on the right. # Check that the other is a YTArray. Test if this is greater than or equal to other. # Check that the other is a YTArray. Test if this is greater than the object on the right. # Check that the other is a YTArray. # # End comparison operators # # # Begin reduction operators # # This happens if you do ndarray * YTQuantity. Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 # numpy version equal to or newer than 1.13 # This happens if you do ndarray * YTQuantity. Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 Posify the data. # this needs to be defined for all numpy versions, see # numpy issue #9081 Pickle reduction method See the documentation for the standard library pickle module: http://docs.python.org/2/library/pickle.html Unit metadata is encoded in the zeroth element of third element of the returned tuple, itself a tuple used to restore the state of the ndarray. This is always defined for numpy arrays. Pickle setstate method This is called inside pickle.read() and restores the unit data from the metadata extracted in __reduce__ and then serialized by pickle. # this case happens when we try to load an old pickle file # created before we serialized the unit symbol lookup table # into the pickle file # need to fix up the lut if the pickle was saved prior to PR #1728 # when the pickle format changed copy.deepcopy implementation This is necessary for stdlib deepcopy of arrays and quantities. A scalar associated with a unit. Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). registry : A UnitRegistry object The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Examples -------- >>> from yt import YTQuantity >>> a = YTQuantity(1, 'cm') >>> b = YTQuantity(2, 'm') >>> a + b 201.0 cm >>> b + a 2.01 m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTQuantity(12, 'g/cm**3') >>> np.abs(a) 12 g/cm**3 and strip them when it would be annoying to deal with them. >>> print(np.log10(a)) 1.07918124605 YTQuantity is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.quan(5, 'code_length') >>> a.in_cgs() 1.543e+25 cm This is equivalent to: >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True Concatenate a sequence of arrays. This wrapper around numpy.concatenate preserves units. All input arrays must have the same units. See the documentation of numpy.concatenate for full details. 
Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uconcatenate((A, B)) YTArray([ 1., 2., 3., 2., 3., 4.]) cm Applies the cross product to two YT arrays. This wrapper around numpy.cross preserves units. See the documentation of numpy.cross for full details. Find the sorted unique elements of the two input arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uintersect1d(A, B) YTArray([ 2., 3.]) cm Find the union of two arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uunion1d(A, B) YTArray([ 1., 2., 3., 4.]) cm Matrix or vector norm that preserves units This is a wrapper around np.linalg.norm that preserves units. See the documentation for that function for descriptions of the keyword arguments. The keepdims argument is ignored if the version of numpy installed is older than numpy 1.10.0. Matrix or vector dot product that preserves units This is a wrapper around np.dot that preserves units. Stack arrays in sequence vertically (row wise) while preserving units This is a wrapper around np.vstack that preserves units. Stack arrays in sequence horizontally (column wise) while preserving units This is a wrapper around np.hstack that preserves units. Join a sequence of arrays along a new axis while preserving units The axis parameter specifies the index of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. This is a wrapper around np.stack that preserves units. Load YTArrays with unit information from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : str Filename to read. dtype : data-type, optional Data-type of the resulting array; default: float. delimiter : str, optional The string used to separate values. By default, this is any whitespace. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. comments : str, optional The character used to indicate the start of a comment; default: '#'. Examples -------- >>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t") # Here we catch the first line of numbers Write YTArrays with unit information to a text file. Parameters ---------- fname : str The file to write the YTArrays to. arrays : list of YTArrays or single YTArray The array(s) to write to the file. fmt : str or sequence of strs, optional A single format (%10.5f), or a sequence of formats. delimiter : str, optional String or character separating columns. header : str, optional String that will be written at the beginning of the file, before the unit header. footer : str, optional String that will be written at the end of the file. comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``yt.loadtxt``. 
Examples -------- >>> sp = ds.sphere("c", (100,"kpc")) >>> a = sp["density"] >>> b = sp["temperature"] >>> c = sp["velocity_x"] >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t") | 1.773464 | 2 |
src/posts/api/serializers.py | MahmoudMagdi20/django_rest_blog_api | 0 | 612 | <gh_stars>0
from rest_framework import serializers
from posts.models import Post
class PostCreateUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = [
#'id',
'title',
#'slug',
'content',
'publish',
]
post_detail_url = serializers.HyperlinkedIdentityField(
view_name='posts-api:detail',
lookup_field='slug',
)
class PostDetailSerializer(serializers.ModelSerializer):
url = post_detail_url
user = serializers.SerializerMethodField()
image = serializers.SerializerMethodField()
html = serializers.SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'id',
'title',
'slug',
'content',
'publish',
'user',
'image',
'html',
]
def get_html(self, obj):
return obj.get_markdown()
def get_user(self, obj):
return str(obj.user.username)
def get_image(self, obj):
try:
image = obj.image.url
        except Exception:
image = None
return image
class PostListSerializer(serializers.ModelSerializer):
url = post_detail_url
user = serializers.SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'user',
'title',
'content',
'publish',
]
def get_user(self, obj):
return str(obj.user.username)
| from rest_framework import serializers
from posts.models import Post
class PostCreateUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = [
#'id',
'title',
#'slug',
'content',
'publish',
]
post_detail_url = serializers.HyperlinkedIdentityField(
view_name='posts-api:detail',
lookup_field='slug',
)
class PostDetailSerializer(serializers.ModelSerializer):
url = post_detail_url
user = serializers.SerializerMethodField()
image = serializers.SerializerMethodField()
html = serializers.SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'id',
'title',
'slug',
'content',
'publish',
'user',
'image',
'html',
]
def get_html(self, obj):
return obj.get_markdown()
def get_user(self, obj):
return str(obj.user.username)
def get_image(self, obj):
try:
image = obj.image.url
        except Exception:
image = None
return image
class PostListSerializer(serializers.ModelSerializer):
url = post_detail_url
user = serializers.SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'user',
'title',
'content',
'publish',
]
def get_user(self, obj):
return str(obj.user.username) | it | 0.463807 | #'id', #'slug', | 2.238478 | 2 |
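The HyperlinkedIdentityField in the serializers above only resolves if a route named detail exists in a posts-api namespace and accepts a slug keyword. A hedged sketch of a matching URL configuration follows; the view class names are assumptions, and only the namespace, route name and slug lookup come from the serializer definitions.

# urls.py sketch -- PostListAPIView / PostDetailAPIView are placeholder names.
from django.urls import path
from .views import PostListAPIView, PostDetailAPIView

app_name = "posts-api"

urlpatterns = [
    path("", PostListAPIView.as_view(), name="list"),
    path("<slug:slug>/", PostDetailAPIView.as_view(), name="detail"),  # matches lookup_field='slug'
]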
Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py | 98llm/tir-script-samples | 17 | 613 | <reponame>98llm/tir-script-samples
from tir import Webapp
import unittest
class GTPA036E(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAGTP", "05/08/2020", "T1", "D MG 01 ")
inst.oHelper.Program('GTPA036')
def test_GTPA036E_CT001(self):
self.oHelper.SetButton('Avançar')
self.oHelper.ClickLabel("Arquivo não formatado")
self.oHelper.SetButton('Avançar')
self.oHelper.SetValue('XXX_DATADE', '02/08/2020')
self.oHelper.SetValue('XXX_DATATE', '07/08/2020')
self.oHelper.ScrollGrid(column='Agência', match_value='000048', grid_number=1)
'''self.oHelper.ClickGridCell("", row=2, grid_number=1)'''
self.oHelper.ClickBox("", contents_list='', select_all=False, grid_number=1)
self.oHelper.SetButton('Concluir')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| from tir import Webapp
import unittest
class GTPA036E(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAGTP", "05/08/2020", "T1", "D MG 01 ")
inst.oHelper.Program('GTPA036')
def test_GTPA036E_CT001(self):
self.oHelper.SetButton('Avançar')
self.oHelper.ClickLabel("Arquivo não formatado")
self.oHelper.SetButton('Avançar')
self.oHelper.SetValue('XXX_DATADE', '02/08/2020')
self.oHelper.SetValue('XXX_DATATE', '07/08/2020')
self.oHelper.ScrollGrid(column='Agência', match_value='000048', grid_number=1)
'''self.oHelper.ClickGridCell("", row=2, grid_number=1)'''
self.oHelper.ClickBox("", contents_list='', select_all=False, grid_number=1)
self.oHelper.SetButton('Concluir')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main() | en | 0.482473 | self.oHelper.ClickGridCell("", row=2, grid_number=1) | 2.607031 | 3 |
code_tmpl/views.py | lovebirdegg/nnms-server | 0 | 614 | <filename>code_tmpl/views.py<gh_stars>0
# @Time : {time}
# @Author : code_generator
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import ListAPIView
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.response import Response
from rest_framework.decorators import api_view,authentication_classes,permission_classes,action
from common.custom import CommonPagination, RbacPermission
from django_filters.rest_framework import DjangoFilterBackend
from django.http import HttpResponse,FileResponse,JsonResponse
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_xops.basic import XopsResponse
from rest_xops.code import *
from django.db.models import Q
from django.apps import apps
from ..models import {model_camel_case_name}
from django.contrib.contenttypes.models import ContentType
from ..serializers.{model_name}_serializers import *
class {model_camel_case_name}View(ModelViewSet):
queryset = {model_camel_case_name}.objects.all()
serializer_class = {model_camel_case_name}Serializer
filter_backends = (DjangoFilterBackend, SearchFilter,OrderingFilter)
pagination_class = CommonPagination
ordering_fields = ('id',)
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
filter_fields = ({filter_fields})
search_fields = ({search_fields}) | <filename>code_tmpl/views.py<gh_stars>0
# @Time : {time}
# @Author : code_generator
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import ListAPIView
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.response import Response
from rest_framework.decorators import api_view,authentication_classes,permission_classes,action
from common.custom import CommonPagination, RbacPermission
from django_filters.rest_framework import DjangoFilterBackend
from django.http import HttpResponse,FileResponse,JsonResponse
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_xops.basic import XopsResponse
from rest_xops.code import *
from django.db.models import Q
from django.apps import apps
from ..models import {model_camel_case_name}
from django.contrib.contenttypes.models import ContentType
from ..serializers.{model_name}_serializers import *
class {model_camel_case_name}View(ModelViewSet):
queryset = {model_camel_case_name}.objects.all()
serializer_class = {model_camel_case_name}Serializer
filter_backends = (DjangoFilterBackend, SearchFilter,OrderingFilter)
pagination_class = CommonPagination
ordering_fields = ('id',)
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
filter_fields = ({filter_fields})
search_fields = ({search_fields}) | fr | 0.250751 | # @Time : {time} # @Author : code_generator | 1.812592 | 2 |
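The file above is a code-generation template rather than importable Python: every {...} placeholder is meant to be substituted before the module is written out. A minimal sketch of how a generator might render it with str.format; the example model name, field lists and output path are invented, only the placeholder names come from the template itself.

from datetime import datetime

with open("code_tmpl/views.py") as fh:          # the template shown above
    template = fh.read()

rendered = template.format(
    time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    model_name="book",                          # lower-case name used in the serializer import
    model_camel_case_name="Book",               # class name used for the model and viewset
    filter_fields="'name', 'author',",
    search_fields="'name',",
)

with open("apps/book/views.py", "w") as fh:     # hypothetical output location
    fh.write(rendered)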
src/collectors/heartbeat/heartbeat.py | art19/netuitive-diamond | 2 | 615 | <reponame>art19/netuitive-diamond<filename>src/collectors/heartbeat/heartbeat.py
# coding=utf-8
"""
Send a value of 1 as a heartbeat every time this collector is invoked.
#### Dependencies
None
#### Usage
Add the collector config as :
enabled = True
path = netuitive
Metrics are collected as :
- metrics.heartbeat
Netuitive Change History
========================
DVG 2016/11/14 Initial version.
"""
import diamond.collector
from diamond.utils.config import load_config as load_server_config
try:
import netuitive
except ImportError:
netuitive = None
class HeartbeatCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(HeartbeatCollector, self).__init__(*args, **kwargs)
self.hostname = self.get_hostname()
self.ttl = self.config['ttl']
self.connection_timeout = 5
if not netuitive:
self.log.error('netuitive import failed. Heartbeat collector disabled')
self.enabled = False
return
try:
self.version = self._get_version()
if 'netuitive_connection_timeout' in self.config:
self.connection_timeout = int(self.config['netuitive_connection_timeout'])
self.api = netuitive.Client(url=self.config['netuitive_url'],
api_key=self.config['netuitive_api_key'],
agent=self.version,
connection_timeout=self.connection_timeout)
except Exception as e:
self.log.debug(e)
def collect(self):
check = netuitive.Check('heartbeat', self.hostname, self.ttl)
self.api.post_check(check)
| # coding=utf-8
"""
Send a value of 1 as a heartbeat every time this collector is invoked.
#### Dependencies
None
#### Usage
Add the collector config as :
enabled = True
path = netuitive
Metrics are collected as :
- metrics.heartbeat
Netuitive Change History
========================
DVG 2016/11/14 Initial version.
"""
import diamond.collector
from diamond.utils.config import load_config as load_server_config
try:
import netuitive
except ImportError:
netuitive = None
class HeartbeatCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(HeartbeatCollector, self).__init__(*args, **kwargs)
self.hostname = self.get_hostname()
self.ttl = self.config['ttl']
self.connection_timeout = 5
if not netuitive:
self.log.error('netuitive import failed. Heartbeat collector disabled')
self.enabled = False
return
try:
self.version = self._get_version()
if 'netuitive_connection_timeout' in self.config:
self.connection_timeout = int(self.config['netuitive_connection_timeout'])
self.api = netuitive.Client(url=self.config['netuitive_url'],
api_key=self.config['netuitive_api_key'],
agent=self.version,
connection_timeout=self.connection_timeout)
except Exception as e:
self.log.debug(e)
def collect(self):
check = netuitive.Check('heartbeat', self.hostname, self.ttl)
self.api.post_check(check) | en | 0.760611 | # coding=utf-8 Send a value of 1 as a heartbeat every time this collector is invoked. #### Dependencies None #### Usage Add the collector config as : enabled = True path = netuitive Metrics are collected as : - metrics.heartbeat Netuitive Change History ======================== DVG 2016/11/14 Initial version. | 2.101159 | 2 |
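Outside of Diamond, the only external calls the collector above makes are the three netuitive ones it already uses. A minimal sketch of that sequence on its own; the URL, API key, hostname and TTL values are placeholders, while the call signatures are taken directly from the collector code.

import netuitive

client = netuitive.Client(url="https://api.example.invalid/ingest",
                          api_key="REPLACE_ME",
                          agent="Diamond/own-build",
                          connection_timeout=5)

check = netuitive.Check("heartbeat", "my-host.example.com", 300)  # check name, hostname, ttl
client.post_check(check)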
process_script/stat.py | vitorebatista/AVEMH | 2 | 616 | <reponame>vitorebatista/AVEMH
import numpy as np
import pandas as pd
import sys
markets = ["hangseng", "dax", "ftse", "sp", "nikkei"]
market = markets[int(sys.argv[1])-1]
# read GD data file
dat = pd.read_csv("./num_res/{}.GD.csv".format(market))
# split into two experiments
exp1_GD = dat[dat.columns[:5]]
exp2_GD = dat[dat.columns[5:]]
# calculate statistics
stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()])
stat1_GD.index = ["Best", "Median", "Std."]
stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()])
stat2_GD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_GD = stat1_GD.loc["Median"].sort_values()
best1_GD = list(meds1_GD.index[:2])
meds2_GD = stat2_GD.loc["Median"].sort_values()
best2_GD = list(meds2_GD.index[:2])
print("{}.GD:".format(market), best1_GD[0], best1_GD[1])
# print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error
# read Spacing data file
dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market))
# split into two experiments
exp1_Spacing = dat[dat.columns[:5]]
exp2_Spacing = dat[dat.columns[5:]]
# calculate statistics
stat1_Spacing = pd.DataFrame(
[exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()])
stat1_Spacing.index = ["Best", "Median", "Std."]
stat2_Spacing = pd.DataFrame(
[exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()])
stat2_Spacing.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Spacing = stat1_Spacing.loc["Median"].sort_values()
best1_Spacing = list(meds1_Spacing.index[:2])
meds2_Spacing = stat2_Spacing.loc["Median"].sort_values()
best2_Spacing = list(meds2_Spacing.index[:2])
print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1])
# print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error
# read MaxSpread data file
dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market))
# split into two experiments
exp1_MaxSpread = dat[dat.columns[:5]]
exp2_MaxSpread = dat[dat.columns[5:]]
# calculate statistics
stat1_MaxSpread = pd.DataFrame(
[exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()])
stat1_MaxSpread.index = ["Best", "Median", "Std."]
stat2_MaxSpread = pd.DataFrame(
[exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()])
stat2_MaxSpread.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False)
best1_MaxSpread = list(meds1_MaxSpread.index[:2])
meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False)
best2_MaxSpread = list(meds2_MaxSpread.index[:2])
print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1])
# print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error
# read Delta data file
dat = pd.read_csv("./num_res/{}.Delta.csv".format(market))
# split into two experiments
exp1_Delta = dat[dat.columns[:5]]
exp2_Delta = dat[dat.columns[5:]]
# calculate statistics
stat1_Delta = pd.DataFrame(
[exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()])
stat1_Delta.index = ["Best", "Median", "Std."]
stat2_Delta = pd.DataFrame(
[exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()])
stat2_Delta.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Delta = stat1_Delta.loc["Median"].sort_values()
best1_Delta = list(meds1_Delta.index[:2])
meds2_Delta = stat2_Delta.loc["Median"].sort_values()
best2_Delta = list(meds2_Delta.index[:2])
print("{}.Delta:".format(market), best1_Delta[0], best1_Delta[1])
# print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error
# read IGD data file
dat = pd.read_csv("./num_res/{}.IGD.csv".format(market))
# split into two experiments
exp1_IGD = dat[dat.columns[:5]]
exp2_IGD = dat[dat.columns[5:]]
# calculate statistics
stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()])
stat1_IGD.index = ["Best", "Median", "Std."]
stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()])
stat2_IGD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_IGD = stat1_IGD.loc["Median"].sort_values()
best1_IGD = list(meds1_IGD.index[:2])
meds2_IGD = stat2_IGD.loc["Median"].sort_values()
best2_IGD = list(meds2_IGD.index[:2])
print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1])
# print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error
# read Hypervolume data file
dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market))
# split into two experiments
exp1_Hypervolume = dat[dat.columns[:5]]
exp2_Hypervolume = dat[dat.columns[5:]]
# calculate statistics
stat1_Hypervolume = pd.DataFrame(
[exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()])
stat1_Hypervolume.index = ["Best", "Median", "Std."]
stat2_Hypervolume = pd.DataFrame(
[exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()])
stat2_Hypervolume.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values(
ascending=False)
best1_Hypervolume = list(meds1_Hypervolume.index[:2])
meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values(
ascending=False)
best2_Hypervolume = list(meds2_Hypervolume.index[:2])
print("{}.Hypervolume:".format(market),
best1_Hypervolume[0], best1_Hypervolume[1])
# print("{}.Hypervolume:".format(market),
# best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error
print("{}\n----------------------------------------------".format(market))
pd.options.display.float_format = '{:.2e}'.format
stat1_overall = pd.concat(
[stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume])
stat2_overall = pd.concat(
[stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume])
arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread",
"Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"],
stat1_overall.index
]
index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""])
stat1_overall.index = index
stat2_overall.index = index
print(stat1_overall)
print("----------------------------------------------")
print(stat2_overall)
| import numpy as np
import pandas as pd
import sys
markets = ["hangseng", "dax", "ftse", "sp", "nikkei"]
market = markets[int(sys.argv[1])-1]
# read GD data file
dat = pd.read_csv("./num_res/{}.GD.csv".format(market))
# split into two experiments
exp1_GD = dat[dat.columns[:5]]
exp2_GD = dat[dat.columns[5:]]
# calculate statistics
stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()])
stat1_GD.index = ["Best", "Median", "Std."]
stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()])
stat2_GD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_GD = stat1_GD.loc["Median"].sort_values()
best1_GD = list(meds1_GD.index[:2])
meds2_GD = stat2_GD.loc["Median"].sort_values()
best2_GD = list(meds2_GD.index[:2])
print("{}.GD:".format(market), best1_GD[0], best1_GD[1])
# print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error
# read Spacing data file
dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market))
# split into two experiments
exp1_Spacing = dat[dat.columns[:5]]
exp2_Spacing = dat[dat.columns[5:]]
# calculate statistics
stat1_Spacing = pd.DataFrame(
[exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()])
stat1_Spacing.index = ["Best", "Median", "Std."]
stat2_Spacing = pd.DataFrame(
[exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()])
stat2_Spacing.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Spacing = stat1_Spacing.loc["Median"].sort_values()
best1_Spacing = list(meds1_Spacing.index[:2])
meds2_Spacing = stat2_Spacing.loc["Median"].sort_values()
best2_Spacing = list(meds2_Spacing.index[:2])
print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1])
# print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error
# read MaxSpread data file
dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market))
# split into two experiments
exp1_MaxSpread = dat[dat.columns[:5]]
exp2_MaxSpread = dat[dat.columns[5:]]
# calculate statistics
stat1_MaxSpread = pd.DataFrame(
[exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()])
stat1_MaxSpread.index = ["Best", "Median", "Std."]
stat2_MaxSpread = pd.DataFrame(
[exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()])
stat2_MaxSpread.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False)
best1_MaxSpread = list(meds1_MaxSpread.index[:2])
meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False)
best2_MaxSpread = list(meds2_MaxSpread.index[:2])
print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1])
# print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error
# read Delta data file
dat = pd.read_csv("./num_res/{}.Delta.csv".format(market))
# split into two experiments
exp1_Delta = dat[dat.columns[:5]]
exp2_Delta = dat[dat.columns[5:]]
# calculate statistics
stat1_Delta = pd.DataFrame(
[exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()])
stat1_Delta.index = ["Best", "Median", "Std."]
stat2_Delta = pd.DataFrame(
[exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()])
stat2_Delta.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Delta = stat1_Delta.loc["Median"].sort_values()
best1_Delta = list(meds1_Delta.index[:2])
meds2_Delta = stat2_Delta.loc["Median"].sort_values()
best2_Delta = list(meds2_Delta.index[:2])
print("{}.Delta:".format(market), best1_Delta[0], best1_Delta[1])
# print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error
# read IGD data file
dat = pd.read_csv("./num_res/{}.IGD.csv".format(market))
# split into two experiments
exp1_IGD = dat[dat.columns[:5]]
exp2_IGD = dat[dat.columns[5:]]
# calculate statistics
stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()])
stat1_IGD.index = ["Best", "Median", "Std."]
stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()])
stat2_IGD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_IGD = stat1_IGD.loc["Median"].sort_values()
best1_IGD = list(meds1_IGD.index[:2])
meds2_IGD = stat2_IGD.loc["Median"].sort_values()
best2_IGD = list(meds2_IGD.index[:2])
print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1])
# print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error
# read Hypervolume data file
dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market))
# split into two experiments
exp1_Hypervolume = dat[dat.columns[:5]]
exp2_Hypervolume = dat[dat.columns[5:]]
# calculate statistics
stat1_Hypervolume = pd.DataFrame(
[exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()])
stat1_Hypervolume.index = ["Best", "Median", "Std."]
stat2_Hypervolume = pd.DataFrame(
[exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()])
stat2_Hypervolume.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values(
ascending=False)
best1_Hypervolume = list(meds1_Hypervolume.index[:2])
meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values(
ascending=False)
best2_Hypervolume = list(meds2_Hypervolume.index[:2])
print("{}.Hypervolume:".format(market),
best1_Hypervolume[0], best1_Hypervolume[1])
# print("{}.Hypervolume:".format(market),
# best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error
print("{}\n----------------------------------------------".format(market))
pd.options.display.float_format = '{:.2e}'.format
stat1_overall = pd.concat(
[stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume])
stat2_overall = pd.concat(
[stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume])
arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread",
"Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"],
stat1_overall.index
]
index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""])
stat1_overall.index = index
stat2_overall.index = index
print(stat1_overall)
print("----------------------------------------------")
print(stat2_overall) | en | 0.617272 | # read GD data file # split into two experiments # calculate statistics # find best and second best algorithm # print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error # read Spacing data file # split into two experiments # calculate statistics # find best and second best algorithm # print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error # read MaxSpread data file # split into two experiments # calculate statistics # find best and second best algorithm # print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error # read Delta data file # split into two experiments # calculate statistics # find best and second best algorithm # print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error # read IGD data file # split into two experiments # calculate statistics # find best and second best algorithm # print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error # read Hypervolume data file # split into two experiments # calculate statistics # find best and second best algorithm # print("{}.Hypervolume:".format(market), # best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error | 3.203329 | 3 |
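The script above repeats the same read / split / rank block once per metric, using min() as "Best" for metrics to be minimised (GD, Spacing, Delta, IGD) and max() for those to be maximised (MaxSpread, Hypervolume). A compact sketch of that pattern as a helper; the function name and return shape are new, while the five-column experiment split and the median-based ranking mirror the script.

import pandas as pd

def best_two(csv_path, maximise=False):
    """Per experiment, return the Best/Median/Std. table and the two best algorithms by median."""
    dat = pd.read_csv(csv_path)
    results = []
    for cols in (dat.columns[:5], dat.columns[5:]):          # experiment 1, experiment 2
        exp = dat[cols]
        best_row = exp.max() if maximise else exp.min()
        stats = pd.DataFrame([best_row, exp.median(), exp.std()],
                             index=["Best", "Median", "Std."])
        meds = stats.loc["Median"].sort_values(ascending=not maximise)
        results.append((stats, list(meds.index[:2])))
    return results

# e.g. (stats1, top1), (stats2, top2) = best_two("./num_res/hangseng.Hypervolume.csv", maximise=True)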
Scripts/calc_Utilities.py | zmlabe/ThicknessSensitivity | 1 | 617 | <gh_stars>1-10
"""
Functions are useful utilities for SITperturb experiments
Notes
-----
Author : <NAME>
Date : 13 August 2017
Usage
-----
[1] calcDecJan(varx,vary,lat,lon,level,levsq)
[2] calcDecJanFeb(varx,vary,lat,lon,level,levsq)
[3] calc_indttest(varx,vary)
[4] calc_weightedAve(var,lats)
[5] calc_spatialCorr(varx,vary,lats,lons,weight)
[6] calc_RMSE(varx,vary,lats,lons,weight)
[7] calc_spatialCorrHeight(varx,vary,lats,lons,weight)
[8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq)
"""
def calcDecJan(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_dj : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_dj : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_dj = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djappendf = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
    print('Completed: Organized data by months (DJ)!')
print('*Completed: Finished calcDecJan function!')
return varx_dj,vary_dj
###############################################################################
###############################################################################
###############################################################################
def calcDecJanFeb(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January-February
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_djf : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_djf : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq)
"""
    print('\n>>> Using calcDecJanFeb function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
djfappendh = np.append(djfappendh1,varxravel[13+i,:,:])
djfappendf = np.append(djfappendf1,varyravel[13+i,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_djf = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
djfappendh = np.append(djfappendh1,
varxravel[13+i,:,:,:])
djfappendf = np.append(djfappendf1,
varyravel[13+i,:,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
print('Completed: Organized data by months (DJF)!')
print('*Completed: Finished calcDecJanFeb function!')
return varx_djf,vary_djf
###############################################################################
###############################################################################
###############################################################################
def calc_indttest(varx,vary):
"""
Function calculates statistical difference for 2 independent
sample t-test
Parameters
----------
varx : 3d array
vary : 3d array
Returns
-------
stat = calculated t-statistic
pvalue = two-tailed p-value
Usage
-----
stat,pvalue = calc_ttest(varx,vary)
"""
print('\n>>> Using calc_ttest function!')
### Import modules
import numpy as np
import scipy.stats as sts
### 2-independent sample t-test
stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')
### Significant at 95% confidence level
pvalue[np.where(pvalue >= 0.05)] = np.nan
pvalue[np.where(pvalue < 0.05)] = 1.
print('*Completed: Finished calc_ttest function!')
return stat,pvalue
###############################################################################
###############################################################################
###############################################################################
def calc_weightedAve(var,lats):
"""
Area weights sit array 5d [ens,year,month,lat,lon] into [ens,year,month]
Parameters
----------
var : 5d,4d,3d array of a gridded variable
lats : 2d array of latitudes
Returns
-------
meanvar : weighted average for 3d,2d,1d array
Usage
-----
meanvar = calc_weightedAve(var,lats)
"""
print('\n>>> Using calc_weightedAve function!')
### Import modules
import numpy as np
### Calculate weighted average for various dimensional arrays
if var.ndim == 5:
meanvar = np.empty((var.shape[0],var.shape[1],var.shape[2]))
for ens in range(var.shape[0]):
for i in range(var.shape[1]):
for j in range(var.shape[2]):
varq = var[ens,i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[ens,i,j] = np.nansum(varmask*areamask) \
/np.sum(areamask)
elif var.ndim == 4:
meanvar = np.empty((var.shape[0],var.shape[1]))
for i in range(var.shape[0]):
for j in range(var.shape[1]):
varq = var[i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 3:
meanvar = np.empty((var.shape[0]))
for i in range(var.shape[0]):
varq = var[i,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 2:
meanvar = np.empty((var.shape[0]))
varq = var[:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar = np.nansum(varmask*areamask)/np.sum(areamask)
else:
print(ValueError('Variable has the wrong dimensions!'))
print('Completed: Weighted variable average!')
print('*Completed: Finished calc_weightedAve function!')
return meanvar
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorr(varx,vary,lats,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient
Parameters
----------
varx : 2d array
vary : 2d array
lats : 1d array
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorr(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorr function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorr function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_RMSE(varx,vary,lats,lons,weight):
"""
Calculates root mean square weighted average
Parameters
----------
varx : 2d array
vary : 2d array
    lats : 1d array of latitudes
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
rmse : 1d array
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
import numpy as np
from sklearn.metrics import mean_squared_error
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
elif weight == 'no':
### Root mean square error from sklearn (not weighted)
rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel()))
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeight(varx,vary,levs,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels)
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorrHeight function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeight function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeightLev(varx,vary,levs,lons,weight,levelq):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels). Change the
weighting for different level correlations
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
    lons : 1d array of longitudes
weight : string (yes or no)
levelq : string (all, tropo, strato)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels)
"""
print('\n>>> Using calc_spatialCorrHeightLev function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
if levelq == 'all':
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'tropo':
gwq = np.array([1.0,1.0,1.0,1.0,0.5,0.5,0.5,0.2,0.2,0.,0.,0.,
0.,0.,0.,0.,0.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'strato':
gwq = np.array([0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.5,1.,1.,1.,1.
,1.,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeightLev function!')
return corrcoef | """
Functions are useful utilities for SITperturb experiments
Notes
-----
Author : <NAME>
Date : 13 August 2017
Usage
-----
[1] calcDecJan(varx,vary,lat,lon,level,levsq)
[2] calcDecJanFeb(varx,vary,lat,lon,level,levsq)
[3] calc_indttest(varx,vary)
[4] calc_weightedAve(var,lats)
[5] calc_spatialCorr(varx,vary,lats,lons,weight)
[6] calc_RMSE(varx,vary,lats,lons,weight)
[7] calc_spatialCorrHeight(varx,vary,lats,lons,weight)
[8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq)
"""
def calcDecJan(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_dj : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_dj : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_dj = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djappendf = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
    print('Completed: Organized data by months (DJ)!')
print('*Completed: Finished calcDecJan function!')
return varx_dj,vary_dj
###############################################################################
###############################################################################
###############################################################################
def calcDecJanFeb(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January-February
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_djf : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_djf : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq)
"""
    print('\n>>> Using calcDecJanFeb function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
djfappendh = np.append(djfappendh1,varxravel[13+i,:,:])
djfappendf = np.append(djfappendf1,varyravel[13+i,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_djf = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
djfappendh = np.append(djfappendh1,
varxravel[13+i,:,:,:])
djfappendf = np.append(djfappendf1,
varyravel[13+i,:,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
print('Completed: Organized data by months (DJF)!')
print('*Completed: Finished calcDecJanFeb function!')
return varx_djf,vary_djf
###############################################################################
###############################################################################
###############################################################################
def calc_indttest(varx,vary):
"""
Function calculates statistical difference for 2 independent
sample t-test
Parameters
----------
varx : 3d array
vary : 3d array
Returns
-------
stat = calculated t-statistic
pvalue = two-tailed p-value
Usage
-----
stat,pvalue = calc_ttest(varx,vary)
"""
print('\n>>> Using calc_ttest function!')
### Import modules
import numpy as np
import scipy.stats as sts
### 2-independent sample t-test
stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')
### Significant at 95% confidence level
pvalue[np.where(pvalue >= 0.05)] = np.nan
pvalue[np.where(pvalue < 0.05)] = 1.
print('*Completed: Finished calc_ttest function!')
return stat,pvalue
###############################################################################
###############################################################################
###############################################################################
def calc_weightedAve(var,lats):
"""
Area weights sit array 5d [ens,year,month,lat,lon] into [ens,year,month]
Parameters
----------
var : 5d,4d,3d array of a gridded variable
lats : 2d array of latitudes
Returns
-------
meanvar : weighted average for 3d,2d,1d array
Usage
-----
meanvar = calc_weightedAve(var,lats)
"""
print('\n>>> Using calc_weightedAve function!')
### Import modules
import numpy as np
### Calculate weighted average for various dimensional arrays
if var.ndim == 5:
meanvar = np.empty((var.shape[0],var.shape[1],var.shape[2]))
for ens in range(var.shape[0]):
for i in range(var.shape[1]):
for j in range(var.shape[2]):
varq = var[ens,i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[ens,i,j] = np.nansum(varmask*areamask) \
/np.sum(areamask)
elif var.ndim == 4:
meanvar = np.empty((var.shape[0],var.shape[1]))
for i in range(var.shape[0]):
for j in range(var.shape[1]):
varq = var[i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 3:
meanvar = np.empty((var.shape[0]))
for i in range(var.shape[0]):
varq = var[i,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 2:
meanvar = np.empty((var.shape[0]))
varq = var[:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar = np.nansum(varmask*areamask)/np.sum(areamask)
else:
print(ValueError('Variable has the wrong dimensions!'))
print('Completed: Weighted variable average!')
print('*Completed: Finished calc_weightedAve function!')
return meanvar
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorr(varx,vary,lats,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient
Parameters
----------
varx : 2d array
vary : 2d array
lats : 1d array
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorr(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorr function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorr function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_RMSE(varx,vary,lats,lons,weight):
"""
Calculates root mean square weighted average
Parameters
----------
varx : 2d array
vary : 2d array
    lats : 1d array of latitudes
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
rmse : 1d array
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
import numpy as np
from sklearn.metrics import mean_squared_error
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
elif weight == 'no':
### Root mean square error from sklearn (not weighted)
rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel()))
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeight(varx,vary,levs,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels)
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorrHeight function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeight function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeightLev(varx,vary,levs,lons,weight,levelq):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels). Change the
weighting for different level correlations
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
    lons : 1d array of longitudes
weight : string (yes or no)
levelq : string (all, tropo, strato)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels)
"""
print('\n>>> Using calc_spatialCorrHeightLev function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
if levelq == 'all':
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'tropo':
gwq = np.array([1.0,1.0,1.0,1.0,0.5,0.5,0.5,0.2,0.2,0.,0.,0.,
0.,0.,0.,0.,0.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'strato':
gwq = np.array([0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.5,1.,1.,1.,1.
,1.,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
    print('*Completed: Finished calc_spatialCorrHeightLev function!')
return corrcoef | en | 0.224294 | Functions are useful untilities for SITperturb experiments Notes ----- Author : <NAME> Date : 13 August 2017 Usage ----- [1] calcDecJan(varx,vary,lat,lon,level,levsq) [2] calcDecJanFeb(varx,vary,lat,lon,level,levsq) [3] calc_indttest(varx,vary) [4] calc_weightedAve(var,lats) [5] calc_spatialCorr(varx,vary,lats,lons,weight) [6] calc_RMSE(varx,vary,lats,lons,weight) [7] calc_spatialCorrHeight(varx,vary,lats,lons,weight) [8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq) Function calculates average for December-January Parameters ---------- varx : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] vary : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] lat : 1d numpy array latitudes lon : 1d numpy array longitudes level : string Height of variable (surface or profile) levsq : integer number of levels Returns ------- varx_dj : 3d array or 4d array [year,lat,lon] or [year,lev,lat,lon] vary_dj : 3d array [year,lat,lon] or [year,lev,lat,lon] Usage ----- varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq) ### Import modules ### Reshape for 3d variables ### Reshape for 4d variables ############################################################################### ############################################################################### ############################################################################### Function calculates average for December-January-February Parameters ---------- varx : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] vary : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] lat : 1d numpy array latitudes lon : 1d numpy array longitudes level : string Height of variable (surface or profile) levsq : integer number of levels Returns ------- varx_djf : 3d array or 4d array [year,lat,lon] or [year,lev,lat,lon] vary_djf : 3d array [year,lat,lon] or [year,lev,lat,lon] Usage ----- varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq) ### Import modules ### Reshape for 3d variables ### Reshape for 4d variables ############################################################################### ############################################################################### ############################################################################### Function calculates statistical difference for 2 independent sample t-test Parameters ---------- varx : 3d array vary : 3d array Returns ------- stat = calculated t-statistic pvalue = two-tailed p-value Usage ----- stat,pvalue = calc_ttest(varx,vary) ### Import modules ### 2-independent sample t-test ### Significant at 95% confidence level ############################################################################### ############################################################################### ############################################################################### Area weights sit array 5d [ens,year,month,lat,lon] into [ens,year,month] Parameters ---------- var : 5d,4d,3d array of a gridded variable lats : 2d array of latitudes Returns ------- meanvar : weighted average for 3d,2d,1d array Usage ----- meanvar = calc_weightedAve(var,lats) ### Import modules ### Calculate weighted average for various dimensional arrays ############################################################################### ############################################################################### ############################################################################### Calculates spatial 
correlation from pearson correlation coefficient Parameters ---------- varx : 2d array vary : 2d array lats : 1d array lons : 1d array of latitude weight : string (yes or no) Returns ------- corrcoef : 1d array of correlation coefficient (pearson r) Usage ----- corrcoef = calc_spatialCorr(varx,vary,lats,lons) ### Import modules # Computed weighted correlation coefficient ### mask ### Create 2d meshgrid for weights ### Create 2d array of weights based on latitude Weighted Mean Weighted Covariance Weighted Correlation ### Correlation coefficient from numpy function (not weighted) ############################################################################### ############################################################################### ############################################################################### Calculates root mean square weighted average Parameters ---------- varx : 2d array vary : 2d array lons : 1d array of latitude weight : string (yes or no) Returns ------- rmse : 1d array Usage ----- rmse = calc_RMSE(varx,vary,lats,lons) ### Import modules # Computed weighted correlation coefficient ### mask ### Create 2d meshgrid for weights ### Create 2d array of weights based on latitude ### Calculate rmse ### Root mean square error from sklearn (not weighted) ############################################################################### ############################################################################### ############################################################################### Calculates spatial correlation from pearson correlation coefficient for grids over vertical height (17 pressure coordinate levels) Parameters ---------- varx : 2d array vary : 2d array levs : 1d array of levels lons : 1d array of latitude weight : string (yes or no) Returns ------- corrcoef : 1d array of correlation coefficient (pearson r) Usage ----- corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons) ### Import modules # Computed weighted correlation coefficient ### Create 2d meshgrid for weights ### Create 2d array of weights based on latitude Weighted Mean Weighted Covariance Weighted Correlation ### Correlation coefficient from numpy function (not weighted) ############################################################################### ############################################################################### ############################################################################### Calculates spatial correlation from pearson correlation coefficient for grids over vertical height (17 pressure coordinate levels). Change the weighting for different level correlations Parameters ---------- varx : 2d array vary : 2d array levs : 1d array of levels lons : 1d array of latitude weight : string (yes or no) levelq : string (all, tropo, strato) Returns ------- corrcoef : 1d array of correlation coefficient (pearson r) Usage ----- corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels) ### Import modules # Computed weighted correlation coefficient ### Create 2d meshgrid for weights ### Create 2d array of weights based on latitude Weighted Mean Weighted Covariance Weighted Correlation ### Correlation coefficient from numpy function (not weighted) | 3.155408 | 3 |
tests/python/unittest/test_tir_schedule_compute_inline.py | xiebaiyuan/tvm | 0 | 618 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
from tvm import tir
from tvm.script import ty
# pylint: disable=no-member,invalid-name,unused-variable
@tvm.script.tir
def elementwise(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_multi_producer_consumer(a: ty.handle, c: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0 # B has two consumers
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + 1.0
with tir.block([128, 128], "D") as [vi, vj]:
D[vi, vj] = B[vi, vj] + 2.0 + C[vi, vj] # D has two producers
@tvm.script.tir
def elementwise_multi_consumer_inlined(a: ty.handle, c: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
with tir.block([128, 128], "D") as [vi, vj]:
D[vi, vj] = A[vi, vj] * 2.0 + 2.0 + C[vi, vj]
@tvm.script.tir
def elementwise_standalone(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] + 1.0
@tvm.script.tir
def elementwise_standalone_dce(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] + 1.0
@tvm.script.tir
def elementwise_under_loop(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
B = tir.alloc_buffer((128, 128))
for i in tir.serial(0, 128):
for j in tir.serial(0, 128):
with tir.block([128, 128], "B") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in tir.serial(0, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@tvm.script.tir
def fail_multi_reader_writer(a: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.alloc_buffer((128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
C[vi, vj] = A[vi, vj] + 2.0
with tir.block([128, 128], "C") as [vi, vj]:
D[vi, vj] = B[vi, vj] + C[vi, vj]
@tvm.script.tir
def elementwise_multi_reverse_loads(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = (B[vi, vj] + 1.0) * (B[vi, vj] * 2.0) + 3.0
@tvm.script.tir
def elementwise_multi_reverse_loads_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
C[vi, vj] = (A[vi, vj] * 2.0 + 1.0) * (A[vi, vj] * 2.0 * 2.0) + 3.0
@tvm.script.tir
def opaque_access_load(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
tir.reads(B[0:128, 0:128])
tir.writes(C[0:128, 0:128])
C[vi, vj] = tir.load("float32", B.data, vi * 128 + vj) + 1.0
@tvm.script.tir
def opaque_access_store(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
tir.reads(B[0:128, 0:128])
tir.writes(C[0:128, 0:128])
tir.store(C.data, vi * 128 + vj, B[vi, vj] + 1.0)
C[vi, vj] = tir.load("float32", B.data, vi * 16 + vj) + 1.0
@tvm.script.tir
def buffer_matched(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
Bb = tir.match_buffer(B[vi : vi + 1, vj], (1, 1))
C[vi, vj] = Bb[0, 0] + 1.0
@tvm.script.tir
def elementwise_predicate(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.where(B[i, j] < 10.0)
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_predicate_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.where(A[i, j] * 2.0 < 10.0)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@tvm.script.tir
def elementwise_multi_loads(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 126], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + B[vi, vj + 1] + B[vi, vj + 2]
@tvm.script.tir
def elementwise_multi_loads_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 126], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + A[vi, vj + 1] * 2.0 + A[vi, vj + 2] * 2.0
# pylint: enable=no-member,invalid-name,unused-variable
def test_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_multi_consumer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
block_d = sch.get_block("D")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
assert sch.get(block_d).name_hint == "D"
def test_compute_inline_fail_multi_writer():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True, error_render_level="detail")
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_reverse_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_fail_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_b)
def test_reverse_compute_inline_fail_multi_producer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_d = sch.get_block("D")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_d)
def test_reverse_compute_inline_fail_multi_reader():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mode=True)
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])
def test_reverse_compute_fail_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_opaque_access_load():
sch = tir.Schedule(opaque_access_load, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_opaque_access_store():
sch = tir.Schedule(opaque_access_store, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_buffer_matched():
sch = tir.Schedule(buffer_matched, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_compute_inline_predicate():
sch = tir.Schedule(elementwise_predicate, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])
def test_compute_inline_multi_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])
if __name__ == "__main__":
test_compute_inline_elementwise()
test_compute_inline_under_loop()
test_compute_inline_as_dce()
test_compute_inline_multi_consumer()
test_compute_inline_fail_multi_writer()
test_reverse_compute_inline_elementwise()
test_reverse_compute_inline_under_loop()
test_reverse_compute_inline_fail_as_dce()
test_reverse_compute_inline_fail_multi_producer()
test_reverse_compute_inline_fail_multi_reader()
test_reverse_compute_multi_reverse_loads()
test_reverse_compute_fail_multi_reverse_loads()
test_opaque_access_load()
test_opaque_access_store()
test_buffer_matched()
test_compute_inline_predicate()
test_compute_inline_multi_loads()
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
from tvm import tir
from tvm.script import ty
# pylint: disable=no-member,invalid-name,unused-variable
@tvm.script.tir
def elementwise(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_multi_producer_consumer(a: ty.handle, c: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0 # B has two consumers
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + 1.0
with tir.block([128, 128], "D") as [vi, vj]:
D[vi, vj] = B[vi, vj] + 2.0 + C[vi, vj] # D has two producers
@tvm.script.tir
def elementwise_multi_consumer_inlined(a: ty.handle, c: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
with tir.block([128, 128], "D") as [vi, vj]:
D[vi, vj] = A[vi, vj] * 2.0 + 2.0 + C[vi, vj]
@tvm.script.tir
def elementwise_standalone(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] + 1.0
@tvm.script.tir
def elementwise_standalone_dce(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] + 1.0
@tvm.script.tir
def elementwise_under_loop(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
B = tir.alloc_buffer((128, 128))
for i in tir.serial(0, 128):
for j in tir.serial(0, 128):
with tir.block([128, 128], "B") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in tir.serial(0, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, j)
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@tvm.script.tir
def fail_multi_reader_writer(a: ty.handle, d: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.alloc_buffer((128, 128))
D = tir.match_buffer(d, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
C[vi, vj] = A[vi, vj] + 2.0
with tir.block([128, 128], "C") as [vi, vj]:
D[vi, vj] = B[vi, vj] + C[vi, vj]
@tvm.script.tir
def elementwise_multi_reverse_loads(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = (B[vi, vj] + 1.0) * (B[vi, vj] * 2.0) + 3.0
@tvm.script.tir
def elementwise_multi_reverse_loads_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
C[vi, vj] = (A[vi, vj] * 2.0 + 1.0) * (A[vi, vj] * 2.0 * 2.0) + 3.0
@tvm.script.tir
def opaque_access_load(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
tir.reads(B[0:128, 0:128])
tir.writes(C[0:128, 0:128])
C[vi, vj] = tir.load("float32", B.data, vi * 128 + vj) + 1.0
@tvm.script.tir
def opaque_access_store(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
tir.reads(B[0:128, 0:128])
tir.writes(C[0:128, 0:128])
tir.store(C.data, vi * 128 + vj, B[vi, vj] + 1.0)
C[vi, vj] = tir.load("float32", B.data, vi * 16 + vj) + 1.0
@tvm.script.tir
def buffer_matched(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 128], "C") as [vi, vj]:
Bb = tir.match_buffer(B[vi : vi + 1, vj], (1, 1))
C[vi, vj] = Bb[0, 0] + 1.0
@tvm.script.tir
def elementwise_predicate(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.where(B[i, j] < 10.0)
C[vi, vj] = B[vi, vj] + 1.0
@tvm.script.tir
def elementwise_predicate_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "C") as [vi, vj]:
tir.where(A[i, j] * 2.0 < 10.0)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@tvm.script.tir
def elementwise_multi_loads(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
B = tir.alloc_buffer((128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * 2.0
with tir.block([128, 126], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + B[vi, vj + 1] + B[vi, vj + 2]
@tvm.script.tir
def elementwise_multi_loads_inlined(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128))
C = tir.match_buffer(c, (128, 128))
with tir.block([128, 126], "C") as [vi, vj]:
C[vi, vj] = A[vi, vj] * 2.0 + A[vi, vj + 1] * 2.0 + A[vi, vj + 2] * 2.0
# pylint: enable=no-member,invalid-name,unused-variable
def test_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_multi_consumer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
block_d = sch.get_block("D")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
assert sch.get(block_d).name_hint == "D"
def test_compute_inline_fail_multi_writer():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True, error_render_level="detail")
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_reverse_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_fail_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_b)
def test_reverse_compute_inline_fail_multi_producer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_d = sch.get_block("D")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_d)
def test_reverse_compute_inline_fail_multi_reader():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mode=True)
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])
def test_reverse_compute_fail_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_opaque_access_load():
sch = tir.Schedule(opaque_access_load, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_opaque_access_store():
sch = tir.Schedule(opaque_access_store, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_buffer_matched():
sch = tir.Schedule(buffer_matched, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_compute_inline_predicate():
sch = tir.Schedule(elementwise_predicate, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])
def test_compute_inline_multi_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])
if __name__ == "__main__":
test_compute_inline_elementwise()
test_compute_inline_under_loop()
test_compute_inline_as_dce()
test_compute_inline_multi_consumer()
test_compute_inline_fail_multi_writer()
test_reverse_compute_inline_elementwise()
test_reverse_compute_inline_under_loop()
test_reverse_compute_inline_fail_as_dce()
test_reverse_compute_inline_fail_multi_producer()
test_reverse_compute_inline_fail_multi_reader()
test_reverse_compute_multi_reverse_loads()
test_reverse_compute_fail_multi_reverse_loads()
test_opaque_access_load()
test_opaque_access_store()
test_buffer_matched()
test_compute_inline_predicate()
test_compute_inline_multi_loads()
| en | 0.834231 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring # pylint: disable=no-member,invalid-name,unused-variable # B has two consumers # D has two producers # pylint: enable=no-member,invalid-name,unused-variable | 1.82175 | 2 |
cwl_flask.py | Sage-Bionetworks/workflow-service | 1 | 619 | from flask import Flask, Response, request, redirect
import subprocess
import tempfile
import json
import yaml
import signal
import threading
import time
import copy
app = Flask(__name__)
jobs_lock = threading.Lock()
jobs = []
class Job(threading.Thread):
def __init__(self, jobid, path, inputobj):
super(Job, self).__init__()
self.jobid = jobid
self.path = path
self.inputobj = inputobj
self.updatelock = threading.Lock()
self.begin()
def begin(self):
loghandle, self.logname = tempfile.mkstemp()
with self.updatelock:
self.outdir = tempfile.mkdtemp()
self.proc = subprocess.Popen(["cwl-runner", self.path, "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=loghandle,
close_fds=True,
cwd=self.outdir)
self.status = {
"id": "%sjobs/%i" % (request.url_root, self.jobid),
"log": "%sjobs/%i/log" % (request.url_root, self.jobid),
"run": self.path,
"state": "Running",
"input": json.loads(self.inputobj),
"output": None}
def run(self):
self.stdoutdata, self.stderrdata = self.proc.communicate(self.inputobj)
if self.proc.returncode == 0:
outobj = yaml.load(self.stdoutdata, Loader=yaml.FullLoader)
with self.updatelock:
self.status["state"] = "Success"
self.status["output"] = outobj
else:
with self.updatelock:
self.status["state"] = "Failed"
def getstatus(self):
with self.updatelock:
return self.status.copy()
def cancel(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGQUIT)
with self.updatelock:
self.status["state"] = "Canceled"
def pause(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGTSTP)
with self.updatelock:
self.status["state"] = "Paused"
def resume(self):
if self.status["state"] == "Paused":
self.proc.send_signal(signal.SIGCONT)
with self.updatelock:
self.status["state"] = "Running"
@app.route("/run", methods=['POST'])
def runworkflow():
path = request.args["wf"]
with jobs_lock:
jobid = len(jobs)
job = Job(jobid, path, request.stream.read())
job.start()
jobs.append(job)
return redirect("/jobs/%i" % jobid, code=303)
@app.route("/jobs/<int:jobid>", methods=['GET', 'POST'])
def jobcontrol(jobid):
with jobs_lock:
job = jobs[jobid]
if request.method == 'POST':
action = request.args.get("action")
if action:
if action == "cancel":
job.cancel()
elif action == "pause":
job.pause()
elif action == "resume":
job.resume()
status = job.getstatus()
return json.dumps(status, indent=4), 200, ""
def logspooler(job):
with open(job.logname, "r") as f:
while True:
r = f.read(4096)
if r:
yield r
else:
with job.updatelock:
if job.status["state"] != "Running":
break
time.sleep(1)
@app.route("/jobs/<int:jobid>/log", methods=['GET'])
def getlog(jobid):
with jobs_lock:
job = jobs[jobid]
return Response(logspooler(job))
@app.route("/jobs", methods=['GET'])
def getjobs():
with jobs_lock:
jobscopy = copy.copy(jobs)
def spool(jc):
yield "["
first = True
for j in jc:
if first:
yield json.dumps(j.getstatus(), indent=4)
first = False
else:
yield ", " + json.dumps(j.getstatus(), indent=4)
yield "]"
return Response(spool(jobscopy))
if __name__ == "__main__":
# app.debug = True
app.run()
| from flask import Flask, Response, request, redirect
import subprocess
import tempfile
import json
import yaml
import signal
import threading
import time
import copy
app = Flask(__name__)
jobs_lock = threading.Lock()
jobs = []
class Job(threading.Thread):
def __init__(self, jobid, path, inputobj):
super(Job, self).__init__()
self.jobid = jobid
self.path = path
self.inputobj = inputobj
self.updatelock = threading.Lock()
self.begin()
def begin(self):
loghandle, self.logname = tempfile.mkstemp()
with self.updatelock:
self.outdir = tempfile.mkdtemp()
self.proc = subprocess.Popen(["cwl-runner", self.path, "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=loghandle,
close_fds=True,
cwd=self.outdir)
self.status = {
"id": "%sjobs/%i" % (request.url_root, self.jobid),
"log": "%sjobs/%i/log" % (request.url_root, self.jobid),
"run": self.path,
"state": "Running",
"input": json.loads(self.inputobj),
"output": None}
def run(self):
self.stdoutdata, self.stderrdata = self.proc.communicate(self.inputobj)
if self.proc.returncode == 0:
outobj = yaml.load(self.stdoutdata, Loader=yaml.FullLoader)
with self.updatelock:
self.status["state"] = "Success"
self.status["output"] = outobj
else:
with self.updatelock:
self.status["state"] = "Failed"
def getstatus(self):
with self.updatelock:
return self.status.copy()
def cancel(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGQUIT)
with self.updatelock:
self.status["state"] = "Canceled"
def pause(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGTSTP)
with self.updatelock:
self.status["state"] = "Paused"
def resume(self):
if self.status["state"] == "Paused":
self.proc.send_signal(signal.SIGCONT)
with self.updatelock:
self.status["state"] = "Running"
@app.route("/run", methods=['POST'])
def runworkflow():
path = request.args["wf"]
with jobs_lock:
jobid = len(jobs)
job = Job(jobid, path, request.stream.read())
job.start()
jobs.append(job)
return redirect("/jobs/%i" % jobid, code=303)
@app.route("/jobs/<int:jobid>", methods=['GET', 'POST'])
def jobcontrol(jobid):
with jobs_lock:
job = jobs[jobid]
if request.method == 'POST':
action = request.args.get("action")
if action:
if action == "cancel":
job.cancel()
elif action == "pause":
job.pause()
elif action == "resume":
job.resume()
status = job.getstatus()
return json.dumps(status, indent=4), 200, ""
def logspooler(job):
with open(job.logname, "r") as f:
while True:
r = f.read(4096)
if r:
yield r
else:
with job.updatelock:
if job.status["state"] != "Running":
break
time.sleep(1)
@app.route("/jobs/<int:jobid>/log", methods=['GET'])
def getlog(jobid):
with jobs_lock:
job = jobs[jobid]
return Response(logspooler(job))
@app.route("/jobs", methods=['GET'])
def getjobs():
with jobs_lock:
jobscopy = copy.copy(jobs)
def spool(jc):
yield "["
first = True
for j in jc:
if first:
yield json.dumps(j.getstatus(), indent=4)
first = False
else:
yield ", " + json.dumps(j.getstatus(), indent=4)
yield "]"
return Response(spool(jobscopy))
if __name__ == "__main__":
# app.debug = True
app.run()
| en | 0.418136 | # app.debug = True | 2.087993 | 2 |
cs15211/ReverseBits.py | JulyKikuAkita/PythonPrac | 1 | 620 | <reponame>JulyKikuAkita/PythonPrac
__source__ = 'https://leetcode.com/problems/reverse-bits/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-bits.py
# Time : O(n)
# Space: O(1)
# Bit Manipulation
#
# Description: Leetcode # 190. Reverse Bits
#
# Reverse bits of a given 32 bits unsigned integer.
#
# For example, given input 43261596 (represented in binary as 00000010100101000001111010011100),
# return 964176192 (represented in binary as 00111001011110000010100101000000).
#
# Follow up:
# If this function is called many times, how would you optimize it?
#
# Companies
# Apple Airbnb
# Related Topics
# Bit Manipulation
# Similar Questions
# Number of 1 Bits
#
import unittest
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
result = 0
for i in xrange(32):
result <<= 1
result |= n & 1
n >>= 1
return result
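# Follow-up sketch (added for illustration; not part of the original solution):
# if reverseBits is called many times, one common optimization is to precompute
# an 8-bit reversal table once and reverse the integer one byte at a time. The
# names _REV8 and reverse_bits_bytewise below are hypothetical additions.
_REV8 = [int('{:08b}'.format(i)[::-1], 2) for i in range(256)]
def reverse_bits_bytewise(n):
    """Reverse a 32-bit unsigned integer using the byte lookup table."""
    return (_REV8[n & 0xFF] << 24 |
            _REV8[(n >> 8) & 0xFF] << 16 |
            _REV8[(n >> 16) & 0xFF] << 8 |
            _REV8[(n >> 24) & 0xFF])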
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
print Solution().reverseBits(1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
int ret = 0;
for (int i = 0; i < 32; i++) {
if ((n & 1) != 0) {
ret |= 1; //same as // res += n & 1
}
n >>>= 1; // padding 0 on the left side
if (i < 31) { // CATCH: for last digit, don't shift!
ret <<= 1;
}
}
return ret;
}
}
We first initialize result to 0. We then iterate from 0 to 31 (an integer has 32 bits).
In each iteration: We first shift result to the left by 1 bit.
Then, if the last digit of input n is 1, we add 1 to result.
To find the last digit of n, we just do: (n & 1)
Example, if n=5 (101), n&1 = 101 & 001 = 001 = 1; however, if n = 2 (10), n&1 = 10 & 01 = 0).
Finally, we update n by shifting it to the right by 1 (n >>= 1)
At the end of the iteration, we return result.
Example, if input n = 13 (represented in binary as
0000_0000_0000_0000_0000_0000_0000_1101, the "_" is for readability),
calling reverseBits(13) should return:
1011_0000_0000_0000_0000_0000_0000_0000
Here is how our algorithm would work for input n = 13:
Initially, result = 0 = 0000_0000_0000_0000_0000_0000_0000_0000,
n = 13 = 0000_0000_0000_0000_0000_0000_0000_1101
Starting for loop:
i = 0:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0000.
n&1 = 0000_0000_0000_0000_0000_0000_0000_1101 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0000 +
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0110.
We then go to the next iteration.
i = 1:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0010;
n&1 = 0000_0000_0000_0000_0000_0000_0000_0110 &
0000_0000_0000_0000_0000_0000_0000_0001
= 0000_0000_0000_0000_0000_0000_0000_0000 = 0;
therefore we don't increment result.
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0011.
We then go to the next iteration.
i = 2:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0100.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0011 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0100 +
0000_0000_0000_0000_0000_0000_0000_0001 =
result = 0000_0000_0000_0000_0000_0000_0000_0101
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0001.
We then go to the next iteration.
i = 3:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_1010.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0001 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
= 0000_0000_0000_0000_0000_0000_0000_1011
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0000 = 0.
Now, from here to the end of the iteration, n is 0,
so (n&1) will always be 0 and n >>=1 will not change n.
The only change will be for result <<=1, i.e. shifting result to the left by 1 digit.
Since iterations i=4 through i=31 remain, this will result
in padding 28 0's to the right of result. i.e at the end,
we get result = 1011_0000_0000_0000_0000_0000_0000_0000
This is exactly what we expected to get
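The worked result can be double-checked quickly (this one-line check is an
illustrative addition, not part of the original write-up), e.g. in Python:
    assert Solution().reverseBits(13) == 0xB0000000  # 2952790016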
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
if (n == 0) return 0;
int result = 0;
for (int i = 0; i < 32; i++) {
result <<= 1;
if ((n & 1) == 1) result++;
n >>= 1;
}
return result;
}
}
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
n = ((n & 0x55555555) << 1) | ((n & 0xAAAAAAAA) >>> 1);
n = ((n & 0x33333333) << 2) | ((n & 0xCCCCCCCC) >>> 2);
n = ((n & 0x0F0F0F0F) << 4) | ((n & 0xF0F0F0F0) >>> 4);
n = ((n & 0x00FF00FF) << 8) | ((n & 0xFF00FF00) >>> 8);
return (n >>> 16) | (n << 16);
}
}
'''
| __source__ = 'https://leetcode.com/problems/reverse-bits/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-bits.py
# Time : O(n)
# Space: O(1)
# Bit Manipulation
#
# Description: Leetcode # 190. Reverse Bits
#
# Reverse bits of a given 32 bits unsigned integer.
#
# For example, given input 43261596 (represented in binary as 00000010100101000001111010011100),
# return 964176192 (represented in binary as 00111001011110000010100101000000).
#
# Follow up:
# If this function is called many times, how would you optimize it?
#
# Companies
# Apple Airbnb
# Related Topics
# Bit Manipulation
# Similar Questions
# Number of 1 Bits
#
import unittest
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
result = 0
for i in xrange(32):
result <<= 1
result |= n & 1
n >>= 1
return result
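# Follow-up sketch (added for illustration; not part of the original solution):
# if reverseBits is called many times, one common optimization is to precompute
# an 8-bit reversal table once and reverse the integer one byte at a time. The
# names _REV8 and reverse_bits_bytewise below are hypothetical additions.
_REV8 = [int('{:08b}'.format(i)[::-1], 2) for i in range(256)]
def reverse_bits_bytewise(n):
    """Reverse a 32-bit unsigned integer using the byte lookup table."""
    return (_REV8[n & 0xFF] << 24 |
            _REV8[(n >> 8) & 0xFF] << 16 |
            _REV8[(n >> 16) & 0xFF] << 8 |
            _REV8[(n >> 24) & 0xFF])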
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
print Solution().reverseBits(1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
int ret = 0;
for (int i = 0; i < 32; i++) {
if ((n & 1) != 0) {
ret |= 1; //same as // res += n & 1
}
n >>>= 1; // padding 0 on the left side
if (i < 31) { // CATCH: for last digit, don't shift!
ret <<= 1;
}
}
return ret;
}
}
We first initialize result to 0. We then iterate from 0 to 31 (an integer has 32 bits).
In each iteration: We first shift result to the left by 1 bit.
Then, if the last digit of input n is 1, we add 1 to result.
To find the last digit of n, we just do: (n & 1)
Example, if n=5 (101), n&1 = 101 & 001 = 001 = 1; however, if n = 2 (10), n&1 = 10 & 01 = 0).
Finally, we update n by shifting it to the right by 1 (n >>= 1)
At the end of the iteration, we return result.
Example, if input n = 13 (represented in binary as
0000_0000_0000_0000_0000_0000_0000_1101, the "_" is for readability),
calling reverseBits(13) should return:
1011_0000_0000_0000_0000_0000_0000_0000
Here is how our algorithm would work for input n = 13:
Initially, result = 0 = 0000_0000_0000_0000_0000_0000_0000_0000,
n = 13 = 0000_0000_0000_0000_0000_0000_0000_1101
Starting for loop:
i = 0:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0000.
n&1 = 0000_0000_0000_0000_0000_0000_0000_1101 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0000 +
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0110.
We then go to the next iteration.
i = 1:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0010;
n&1 = 0000_0000_0000_0000_0000_0000_0000_0110 &
0000_0000_0000_0000_0000_0000_0000_0001
= 0000_0000_0000_0000_0000_0000_0000_0000 = 0;
therefore we don't increment result.
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0011.
We then go to the next iteration.
i = 2:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0100.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0011 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0100 +
0000_0000_0000_0000_0000_0000_0000_0001 =
result = 0000_0000_0000_0000_0000_0000_0000_0101
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0001.
We then go to the next iteration.
i = 3:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_1010.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0001 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
= 0000_0000_0000_0000_0000_0000_0000_1011
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0000 = 0.
Now, from here to the end of the iteration, n is 0,
so (n&1) will always be 0 and n >>=1 will not change n.
The only change will be for result <<=1, i.e. shifting result to the left by 1 digit.
Since iterations i=4 through i=31 remain, this will result
in padding 28 0's to the right of result. i.e at the end,
we get result = 1011_0000_0000_0000_0000_0000_0000_0000
This is exactly what we expected to get
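The worked result can be double-checked quickly (this one-line check is an
illustrative addition, not part of the original write-up), e.g. in Python:
    assert Solution().reverseBits(13) == 0xB0000000  # 2952790016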
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
if (n == 0) return 0;
int result = 0;
for (int i = 0; i < 32; i++) {
result <<= 1;
if ((n & 1) == 1) result++;
n >>= 1;
}
return result;
}
}
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
n = ((n & 0x55555555) << 1) | ((n & 0xAAAAAAAA) >>> 1);
n = ((n & 0x33333333) << 2) | ((n & 0xCCCCCCCC) >>> 2);
n = ((n & 0x0F0F0F0F) << 4) | ((n & 0xF0F0F0F0) >>> 4);
n = ((n & 0x00FF00FF) << 8) | ((n & 0xFF00FF00) >>> 8);
return (n >>> 16) | (n << 16);
}
}
''' | en | 0.478955 | # https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-bits.py # Time : O(n) # Space: O(1) # Bit Manipulation # # Description: Leetcode # 190. Reverse Bits # # Reverse bits of a given 32 bits unsigned integer. # # For example, given input 43261596 (represented in binary as 00000010100101000001111010011100), # return 964176192 (represented in binary as 00111001011110000010100101000000). # # Follow up: # If this function is called many times, how would you optimize it? # # Companies # Apple Airbnb # Related Topics # Bit Manipulation # Similar Questions # Number of 1 Bits # # @param n, an integer # @return an integer # Thought: # 1ms 100% class Solution { // you need treat n as an unsigned value public int reverseBits(int n) { int ret = 0; for (int i = 0; i < 32; i++) { if ((n & 1) != 0) { ret |= 1; //same as // res += n & 1 } n >>>= 1; // padding 0 on the left side if (i < 31) { // CATCH: for last digit, don't shift! ret <<= 1; } } return ret; } } We first intitialize result to 0. We then iterate from 0 to 31 (an integer has 32 bits). In each iteration: We first shift result to the left by 1 bit. Then, if the last digit of input n is 1, we add 1 to result. To find the last digit of n, we just do: (n & 1) Example, if n=5 (101), n&1 = 101 & 001 = 001 = 1; however, if n = 2 (10), n&1 = 10 & 01 = 0). Finally, we update n by shifting it to the right by 1 (n >>= 1) At the end of the iteration, we return result. Example, if input n = 13 (represented in binary as 0000_0000_0000_0000_0000_0000_0000_1101, the "_" is for readability), calling reverseBits(13) should return: 1011_0000_0000_0000_0000_0000_0000_0000 Here is how our algorithm would work for input n = 13: Initially, result = 0 = 0000_0000_0000_0000_0000_0000_0000_0000, n = 13 = 0000_0000_0000_0000_0000_0000_0000_1101 Starting for loop: i = 0: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0000. n&1 = 0000_0000_0000_0000_0000_0000_0000_1101 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 therefore result = result + 1 = 0000_0000_0000_0000_0000_0000_0000_0000 + 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 We right shift n by 1 (n >>= 1) to get: n = 0000_0000_0000_0000_0000_0000_0000_0110. We then go to the next iteration. i = 1: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0010; n&1 = 0000_0000_0000_0000_0000_0000_0000_0110 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0000 = 0; therefore we don't increment result. We right shift n by 1 (n >>= 1) to get: n = 0000_0000_0000_0000_0000_0000_0000_0011. We then go to the next iteration. i = 2: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0100. n&1 = 0000_0000_0000_0000_0000_0000_0000_0011 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 therefore result = result + 1 = 0000_0000_0000_0000_0000_0000_0000_0100 + 0000_0000_0000_0000_0000_0000_0000_0001 = result = 0000_0000_0000_0000_0000_0000_0000_0101 We right shift n by 1 to get: n = 0000_0000_0000_0000_0000_0000_0000_0001. We then go to the next iteration. i = 3: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_1010. n&1 = 0000_0000_0000_0000_0000_0000_0000_0001 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 therefore result = result + 1 = = 0000_0000_0000_0000_0000_0000_0000_1011 We right shift n by 1 to get: n = 0000_0000_0000_0000_0000_0000_0000_0000 = 0. 
Now, from here to the end of the iteration, n is 0, so (n&1) will always be 0 and n >>=1 will not change n. The only change will be for result <<=1, i.e. shifting result to the left by 1 digit. Since there we have i=4 to i = 31 iterations left, this will result in padding 28 0's to the right of result. i.e at the end, we get result = 1011_0000_0000_0000_0000_0000_0000_0000 This is exactly what we expected to get # 1ms 100% class Solution { // you need treat n as an unsigned value public int reverseBits(int n) { if (n == 0) return 0; int result = 0; for (int i = 0; i < 32; i++) { result <<= 1; if ((n & 1) == 1) result++; n >>= 1; } return result; } } # 1ms 100% class Solution { // you need treat n as an unsigned value public int reverseBits(int n) { n = ((n & 0x55555555) << 1) | ((n & 0xAAAAAAAA) >>> 1); n = ((n & 0x33333333) << 2) | ((n & 0xCCCCCCCC) >>> 2); n = ((n & 0x0F0F0F0F) << 4) | ((n & 0xF0F0F0F0) >>> 4); n = ((n & 0x00FF00FF) << 8) | ((n & 0xFF00FF00) >>> 8); return (n >>> 16) | (n << 16); } } | 3.725356 | 4 |
tibanna/_version.py | freezecoder/tibanna | 0 | 621 | <gh_stars>0
"""Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.16.1"
| """Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.16.1" | en | 0.861531 | Version information. # The following line *must* be the last in the module, exactly as formatted: | 1.120316 | 1 |
selective_merge_pdf.py | vs-slavchev/selective_merge_pdf | 0 | 622 | from sys import argv
from PyPDF2 import PdfFileReader, PdfFileWriter
import re
range_pattern = re.compile(r'(\d+)(\.\.|-)(\d+)')
comma_pattern = re.compile('\d+(,\d+)*')
def pages_args_to_array(pages_str):
groups = range_pattern.search(pages_str)
if groups:
start = int(groups.group(1))
end = int(groups.group(3))
return list(range(start, end + 1))
elif comma_pattern.search(pages_str):
return [int(d) for d in pages_str.split(',')]
else:
raise Exception('pages should be like 1,2,3 or 1-3, but was {}'
.format(pages_str))
if __name__ == '__main__':
    assert(len(argv) > 1), "usage example:\npython3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50"
assert(len(argv) % 2 == 1), "invalid arguments; supply page numbers after each pdf name"
files_names = argv[1::2]
pages_args = argv[2::2]
pdf_writer = PdfFileWriter()
for file_name, pages in zip(files_names, pages_args):
pdf_reader = PdfFileReader(file_name)
last_page_index = pdf_reader.getNumPages()
pages = pages_args_to_array(pages)
pages_to_add = list(filter(lambda i: i >= 0 and i <= last_page_index, pages))
for page in pages_to_add:
pdf_writer.addPage(pdf_reader.getPage(page - 1))
with open("merged.pdf", 'wb') as out:
pdf_writer.write(out)
| from sys import argv
from PyPDF2 import PdfFileReader, PdfFileWriter
import re
range_pattern = re.compile(r'(\d+)(\.\.|-)(\d+)')
comma_pattern = re.compile('\d+(,\d+)*')
def pages_args_to_array(pages_str):
groups = range_pattern.search(pages_str)
if groups:
start = int(groups.group(1))
end = int(groups.group(3))
return list(range(start, end + 1))
elif comma_pattern.search(pages_str):
return [int(d) for d in pages_str.split(',')]
else:
raise Exception('pages should be like 1,2,3 or 1-3, but was {}'
.format(pages_str))
if __name__ == '__main__':
    assert(len(argv) > 1), "usage example:\npython3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50"
assert(len(argv) % 2 == 1), "invalid arguments; supply page numbers after each pdf name"
files_names = argv[1::2]
pages_args = argv[2::2]
pdf_writer = PdfFileWriter()
for file_name, pages in zip(files_names, pages_args):
pdf_reader = PdfFileReader(file_name)
last_page_index = pdf_reader.getNumPages()
pages = pages_args_to_array(pages)
pages_to_add = list(filter(lambda i: i >= 0 and i <= last_page_index, pages))
for page in pages_to_add:
pdf_writer.addPage(pdf_reader.getPage(page - 1))
with open("merged.pdf", 'wb') as out:
pdf_writer.write(out)
| none | 1 | 3.25886 | 3 |
|
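As a quick illustration of how the page arguments in the selective_merge_pdf record above are parsed (a hypothetical session that assumes the script is importable as selective_merge_pdf):

from selective_merge_pdf import pages_args_to_array

# Range syntax ("1-3" or "1..3") expands to consecutive pages; comma syntax is taken literally.
assert pages_args_to_array('1-3') == [1, 2, 3]
assert pages_args_to_array('1..3') == [1, 2, 3]
assert pages_args_to_array('3,4,10') == [3, 4, 10]

The command line shown in its usage message then merges pages 1-3 of file1.pdf, pages 3, 4 and 10 of file2.pdf, and page 50 of file1.pdf into merged.pdf.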
vp/scoring.py | romack77/vp-toolbox | 10 | 623 | import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
the ground truth horizon line, within the image's x-axis, and
normalized by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
if ground_truth_horizon is None or detected_horizon is None:
return None
def gt(x):
return ground_truth_horizon[0] * x + ground_truth_horizon[1]
def dt(x):
return detected_horizon[0] * x + detected_horizon[1]
width, height = image_dims
return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
| import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
the ground truth horizon line, within the image's x-axis, and
normalized by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
if ground_truth_horizon is None or detected_horizon is None:
return None
def gt(x):
return ground_truth_horizon[0] * x + ground_truth_horizon[1]
def dt(x):
return detected_horizon[0] * x + detected_horizon[1]
width, height = image_dims
return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
| en | 0.87305 | Calculates error in a detected horizon. This measures the max distance between the detected horizon line and the ground truth horizon line, within the image's x-axis, and normalized by image height. Args: ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line. detected_horizon: Tuple with (slope, intercept) for the detected horizon line. image_dims: Tuple of integers, (width, height) of the image, in pixels. Returns: Float, or None if a horizon is missing altogether. Measures error in direction from center of detected vanishing points. Each detected VP is matched with its closest unclaimed ground truth VP. Args: ground_truth_vps: List of ground truth VP point tuples. detected_vps: List of detected VP point tuples. image_dims: Tuple of integers, (width, height) of the image, in pixels. Returns: List with float degrees of error for each ground truth VP. Error is None for missing VPs. Measures average error in the location of detected vanishing points. "Missed" or "extra" VPs do not count against the score. Based on log distance of detected vp from ground truth vp. Args: ground_truth_vps: List of ground truth VP point tuples. detected_vps: List of detected VP point tuples. Returns: Float, error. Measures error in the number of detected vanishing points. Returns: Integer, positive when there are too many VPs, negative when there are too few. | 3.063778 | 3 |
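A small worked example of the horizon_error definition from the vp/scoring.py record above; the formula is recomputed inline here so the snippet runs without the vp package installed.

# Ground-truth horizon y = 100 and detected horizon y = 120 (slope 0) in a 640x480 image.
gt_horizon, dt_horizon, (width, height) = (0.0, 100.0), (0.0, 120.0), (640, 480)
gt = lambda x: gt_horizon[0] * x + gt_horizon[1]
dt = lambda x: dt_horizon[0] * x + dt_horizon[1]
# The endpoint distance is 20 px at both x=0 and x=width, normalized by the image height.
error = max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
assert abs(error - 20 / 480) < 1e-12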
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py | collassubmission91/CompoSuite-Code | 0 | 624 | import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='spinningup_training/logs')
parser.add_argument('--load-dir', default=None)
parser.add_argument('--gridsearch-id', type=int, default=-1)
parser.add_argument('--task-id', type=int, default=-1)
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=4)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=16000)
parser.add_argument('--epochs', type=int, default=625)
parser.add_argument('--exp-name', type=str, default='ppo')
parser.add_argument('--clip', type=float, default=0.2)
parser.add_argument('--pi-lr', type=float, default=1e-4)
parser.add_argument('--vf-lr', type=float, default=1e-4)
parser.add_argument('--pi-iters', type=int, default=128)
parser.add_argument('--vf-iters', type=int, default=128)
parser.add_argument('--target-kl', type=float, default=0.02)
parser.add_argument('--ent-coef', type=float, default=0.02)
parser.add_argument('--log-std-init', type=float, default=0.)
parser.add_argument('--controller', type=str, default="joint")
parser.add_argument('--robot', type=str, default="IIWA")
parser.add_argument('--object', type=str, default="Hollowbox")
parser.add_argument('--obstacle', type=str, default=None)
parser.add_argument('--task', type=str, default="PickPlace")
parser.add_argument('--horizon', type=int, default=500)
args = parser.parse_args()
np.random.seed(args.seed)
task_list = np.random.choice(256, num_procs(), replace=False)
args.task_id = int(task_list[proc_id()])
_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
args.robot = _robots[idx[0]]
args.object = _objects[idx[1]]
args.task = _objectives[idx[2]]
args.obstacle = _obstacles[idx[3]]
# args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle)
args.exp_name = 'MTL_{}'.format(len(task_list))
return args
def main():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_num_threads(1)
args = parse_args()
os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
json.dump(args.__dict__, f, indent=2)
logger_kwargs = setup_logger_kwargs(
args.exp_name, data_dir=args.data_dir)
checkpoint = None
if args.load_dir is not None:
checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
ppo(lambda: composition.make(
args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, log_std_init=args.log_std_init), seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)
if __name__ == '__main__':
main()
| import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='spinningup_training/logs')
parser.add_argument('--load-dir', default=None)
parser.add_argument('--gridsearch-id', type=int, default=-1)
parser.add_argument('--task-id', type=int, default=-1)
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=4)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=16000)
parser.add_argument('--epochs', type=int, default=625)
parser.add_argument('--exp-name', type=str, default='ppo')
parser.add_argument('--clip', type=float, default=0.2)
parser.add_argument('--pi-lr', type=float, default=1e-4)
parser.add_argument('--vf-lr', type=float, default=1e-4)
parser.add_argument('--pi-iters', type=int, default=128)
parser.add_argument('--vf-iters', type=int, default=128)
parser.add_argument('--target-kl', type=float, default=0.02)
parser.add_argument('--ent-coef', type=float, default=0.02)
parser.add_argument('--log-std-init', type=float, default=0.)
parser.add_argument('--controller', type=str, default="joint")
parser.add_argument('--robot', type=str, default="IIWA")
parser.add_argument('--object', type=str, default="Hollowbox")
parser.add_argument('--obstacle', type=str, default=None)
parser.add_argument('--task', type=str, default="PickPlace")
parser.add_argument('--horizon', type=int, default=500)
args = parser.parse_args()
np.random.seed(args.seed)
task_list = np.random.choice(256, num_procs(), replace=False)
args.task_id = int(task_list[proc_id()])
_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
args.robot = _robots[idx[0]]
args.object = _objects[idx[1]]
args.task = _objectives[idx[2]]
args.obstacle = _obstacles[idx[3]]
# args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle)
args.exp_name = 'MTL_{}'.format(len(task_list))
return args
def main():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_num_threads(1)
args = parse_args()
os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
json.dump(args.__dict__, f, indent=2)
logger_kwargs = setup_logger_kwargs(
args.exp_name, data_dir=args.data_dir)
checkpoint = None
if args.load_dir is not None:
checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
ppo(lambda: composition.make(
args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, log_std_init=args.log_std_init), seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)
if __name__ == '__main__':
main()
| en | 0.167474 | # args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle) | 1.884084 | 2 |
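As a side note on the task-id scheme in the train_mtl_ppo.py record above, the sketch below (illustrative only) shows how a flat id in [0, 256) decodes into a robot/object/objective/obstacle combination via np.unravel_index.

import numpy as np

_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]

# 4 * 4 * 4 * 4 = 256 combinations; unravel_index treats the flat id as an index into that grid.
idx = np.unravel_index(37, (4, 4, 4, 4))
assert [axis[i] for axis, i in zip((_robots, _objects, _objectives, _obstacles), idx)] == [
    "IIWA", "Plate", "Push", "GoalWall"]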
pypy/interpreter/test/test_generator.py | m4sterchain/mesapy | 381 | 625 | <filename>pypy/interpreter/test/test_generator.py
class AppTestGenerator:
def test_generator(self):
def f():
yield 1
assert f().next() == 1
def test_generator2(self):
def f():
yield 1
g = f()
assert g.next() == 1
raises(StopIteration, g.next)
def test_attributes(self):
def f():
yield 1
assert g.gi_running
g = f()
assert g.gi_code is f.__code__
assert g.__name__ == 'f'
assert g.gi_frame is not None
assert not g.gi_running
g.next()
assert not g.gi_running
raises(StopIteration, g.next)
assert not g.gi_running
assert g.gi_frame is None
assert g.gi_code is f.__code__
assert g.__name__ == 'f'
def test_generator3(self):
def f():
yield 1
g = f()
assert list(g) == [1]
def test_generator4(self):
def f():
yield 1
g = f()
assert [x for x in g] == [1]
def test_generator5(self):
d = {}
exec """if 1:
def f():
v = (yield )
yield v
g = f()
g.next()
""" in d
g = d['g']
assert g.send(42) == 42
def test_throw1(self):
def f():
yield 2
g = f()
# two arguments version
raises(NameError, g.throw, NameError, "Error")
def test_throw2(self):
def f():
yield 2
g = f()
# single argument version
raises(NameError, g.throw, NameError("Error"))
def test_throw3(self):
def f():
try:
yield 1
yield 2
except:
yield 3
g = f()
assert g.next() == 1
assert g.throw(NameError("Error")) == 3
raises(StopIteration, g.next)
def test_throw4(self):
d = {}
exec """if 1:
def f():
try:
yield 1
v = (yield 2)
except:
yield 3
g = f()
""" in d
g = d['g']
assert g.next() == 1
assert g.next() == 2
assert g.throw(NameError("Error")) == 3
raises(StopIteration, g.next)
def test_throw5(self):
def f():
try:
yield 1
except:
x = 3
try:
yield x
except:
pass
g = f()
g.next()
# String exceptions are not allowed anymore
raises(TypeError, g.throw, "Error")
assert g.throw(Exception) == 3
raises(StopIteration, g.throw, Exception)
def test_throw6(self):
def f():
yield 2
g = f()
raises(NameError, g.throw, NameError, "Error", None)
def test_throw_fail(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, NameError("Error"), "error")
def test_throw_fail2(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, list())
def test_throw_fail3(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, NameError("Error"), None, "not tb object")
def test_throw_finishes_generator(self):
def f():
yield 1
g = f()
assert g.gi_frame is not None
raises(ValueError, g.throw, ValueError)
assert g.gi_frame is None
def test_throw_bug(self):
def f():
try:
x.throw(IndexError) # => "generator already executing"
except ValueError:
yield 1
x = f()
res = list(x)
assert res == [1]
def test_throw_on_finished_generator(self):
def f():
yield 1
g = f()
res = g.next()
assert res == 1
raises(StopIteration, g.next)
raises(NameError, g.throw, NameError)
def test_close(self):
def f():
yield 1
g = f()
assert g.close() is None
def test_close2(self):
def f():
try:
yield 1
except GeneratorExit:
raise StopIteration
g = f()
g.next()
assert g.close() is None
def test_close3(self):
def f():
try:
yield 1
except GeneratorExit:
raise NameError
g = f()
g.next()
raises(NameError, g.close)
def test_close_fail(self):
def f():
try:
yield 1
except GeneratorExit:
yield 2
g = f()
g.next()
raises(RuntimeError, g.close)
def test_close_on_collect(self):
## we need to exec it, else it won't run on python2.4
d = {}
exec """
def f():
try:
yield
finally:
f.x = 42
""".strip() in d
g = d['f']()
g.next()
del g
import gc
gc.collect()
assert d['f'].x == 42
def test_generator_raises_typeerror(self):
def f():
yield 1
g = f()
raises(TypeError, g.send) # one argument required
raises(TypeError, g.send, 1) # not started, must send None
def test_generator_explicit_stopiteration(self):
def f():
yield 1
raise StopIteration
g = f()
assert [x for x in g] == [1]
def test_generator_propagate_stopiteration(self):
def f():
it = iter([1])
while 1: yield it.next()
g = f()
assert [x for x in g] == [1]
def test_generator_restart(self):
def g():
i = me.next()
yield i
me = g()
raises(ValueError, me.next)
def test_generator_expression(self):
exec "res = sum(i*i for i in range(5))"
assert res == 30
def test_generator_expression_2(self):
d = {}
exec """
def f():
total = sum(i for i in [x for x in z])
return total, x
z = [1, 2, 7]
res = f()
""" in d
assert d['res'] == (10, 7)
def test_repr(self):
def myFunc():
yield 1
g = myFunc()
r = repr(g)
assert r.startswith("<generator object myFunc at 0x")
assert list(g) == [1]
assert repr(g) == r
def test_unpackiterable_gen(self):
g = (i*i for i in range(-5, 3))
assert set(g) == set([0, 1, 4, 9, 16, 25])
assert set(g) == set()
assert set(i for i in range(0)) == set()
def test_explicit_stop_iteration_unpackiterable(self):
def f():
yield 1
raise StopIteration
assert tuple(f()) == (1,)
def test_exception_is_cleared_by_yield(self):
def f():
try:
foobar
except NameError:
yield 5
raise # should raise "no active exception to re-raise"
gen = f()
next(gen) # --> 5
try:
next(gen)
except TypeError:
pass
def test_multiple_invalid_sends(self):
def mygen():
yield 42
g = mygen()
raises(TypeError, g.send, 2)
raises(TypeError, g.send, 2)
def test_should_not_inline(space):
from pypy.interpreter.generator import should_not_inline
w_co = space.appexec([], '''():
def g(x):
yield x + 5
return g.__code__
''')
assert should_not_inline(w_co) == False
w_co = space.appexec([], '''():
def g(x):
yield x + 5
yield x + 6
return g.__code__
''')
assert should_not_inline(w_co) == True
| <filename>pypy/interpreter/test/test_generator.py
class AppTestGenerator:
def test_generator(self):
def f():
yield 1
assert f().next() == 1
def test_generator2(self):
def f():
yield 1
g = f()
assert g.next() == 1
raises(StopIteration, g.next)
def test_attributes(self):
def f():
yield 1
assert g.gi_running
g = f()
assert g.gi_code is f.__code__
assert g.__name__ == 'f'
assert g.gi_frame is not None
assert not g.gi_running
g.next()
assert not g.gi_running
raises(StopIteration, g.next)
assert not g.gi_running
assert g.gi_frame is None
assert g.gi_code is f.__code__
assert g.__name__ == 'f'
def test_generator3(self):
def f():
yield 1
g = f()
assert list(g) == [1]
def test_generator4(self):
def f():
yield 1
g = f()
assert [x for x in g] == [1]
def test_generator5(self):
d = {}
exec """if 1:
def f():
v = (yield )
yield v
g = f()
g.next()
""" in d
g = d['g']
assert g.send(42) == 42
def test_throw1(self):
def f():
yield 2
g = f()
# two arguments version
raises(NameError, g.throw, NameError, "Error")
def test_throw2(self):
def f():
yield 2
g = f()
# single argument version
raises(NameError, g.throw, NameError("Error"))
def test_throw3(self):
def f():
try:
yield 1
yield 2
except:
yield 3
g = f()
assert g.next() == 1
assert g.throw(NameError("Error")) == 3
raises(StopIteration, g.next)
def test_throw4(self):
d = {}
exec """if 1:
def f():
try:
yield 1
v = (yield 2)
except:
yield 3
g = f()
""" in d
g = d['g']
assert g.next() == 1
assert g.next() == 2
assert g.throw(NameError("Error")) == 3
raises(StopIteration, g.next)
def test_throw5(self):
def f():
try:
yield 1
except:
x = 3
try:
yield x
except:
pass
g = f()
g.next()
# String exceptions are not allowed anymore
raises(TypeError, g.throw, "Error")
assert g.throw(Exception) == 3
raises(StopIteration, g.throw, Exception)
def test_throw6(self):
def f():
yield 2
g = f()
raises(NameError, g.throw, NameError, "Error", None)
def test_throw_fail(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, NameError("Error"), "error")
def test_throw_fail2(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, list())
def test_throw_fail3(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, NameError("Error"), None, "not tb object")
def test_throw_finishes_generator(self):
def f():
yield 1
g = f()
assert g.gi_frame is not None
raises(ValueError, g.throw, ValueError)
assert g.gi_frame is None
def test_throw_bug(self):
def f():
try:
x.throw(IndexError) # => "generator already executing"
except ValueError:
yield 1
x = f()
res = list(x)
assert res == [1]
def test_throw_on_finished_generator(self):
def f():
yield 1
g = f()
res = g.next()
assert res == 1
raises(StopIteration, g.next)
raises(NameError, g.throw, NameError)
def test_close(self):
def f():
yield 1
g = f()
assert g.close() is None
def test_close2(self):
def f():
try:
yield 1
except GeneratorExit:
raise StopIteration
g = f()
g.next()
assert g.close() is None
def test_close3(self):
def f():
try:
yield 1
except GeneratorExit:
raise NameError
g = f()
g.next()
raises(NameError, g.close)
def test_close_fail(self):
def f():
try:
yield 1
except GeneratorExit:
yield 2
g = f()
g.next()
raises(RuntimeError, g.close)
def test_close_on_collect(self):
## we need to exec it, else it won't run on python2.4
d = {}
exec """
def f():
try:
yield
finally:
f.x = 42
""".strip() in d
g = d['f']()
g.next()
del g
import gc
gc.collect()
assert d['f'].x == 42
def test_generator_raises_typeerror(self):
def f():
yield 1
g = f()
raises(TypeError, g.send) # one argument required
raises(TypeError, g.send, 1) # not started, must send None
def test_generator_explicit_stopiteration(self):
def f():
yield 1
raise StopIteration
g = f()
assert [x for x in g] == [1]
def test_generator_propagate_stopiteration(self):
def f():
it = iter([1])
while 1: yield it.next()
g = f()
assert [x for x in g] == [1]
def test_generator_restart(self):
def g():
i = me.next()
yield i
me = g()
raises(ValueError, me.next)
def test_generator_expression(self):
exec "res = sum(i*i for i in range(5))"
assert res == 30
def test_generator_expression_2(self):
d = {}
exec """
def f():
total = sum(i for i in [x for x in z])
return total, x
z = [1, 2, 7]
res = f()
""" in d
assert d['res'] == (10, 7)
def test_repr(self):
def myFunc():
yield 1
g = myFunc()
r = repr(g)
assert r.startswith("<generator object myFunc at 0x")
assert list(g) == [1]
assert repr(g) == r
def test_unpackiterable_gen(self):
g = (i*i for i in range(-5, 3))
assert set(g) == set([0, 1, 4, 9, 16, 25])
assert set(g) == set()
assert set(i for i in range(0)) == set()
def test_explicit_stop_iteration_unpackiterable(self):
def f():
yield 1
raise StopIteration
assert tuple(f()) == (1,)
def test_exception_is_cleared_by_yield(self):
def f():
try:
foobar
except NameError:
yield 5
raise # should raise "no active exception to re-raise"
gen = f()
next(gen) # --> 5
try:
next(gen)
except TypeError:
pass
def test_multiple_invalid_sends(self):
def mygen():
yield 42
g = mygen()
raises(TypeError, g.send, 2)
raises(TypeError, g.send, 2)
def test_should_not_inline(space):
from pypy.interpreter.generator import should_not_inline
w_co = space.appexec([], '''():
def g(x):
yield x + 5
return g.__code__
''')
assert should_not_inline(w_co) == False
w_co = space.appexec([], '''():
def g(x):
yield x + 5
yield x + 6
return g.__code__
''')
assert should_not_inline(w_co) == True
| en | 0.79193 | if 1: def f(): v = (yield ) yield v g = f() g.next() # two arguments version # single argument version if 1: def f(): try: yield 1 v = (yield 2) except: yield 3 g = f() # String exceptions are not allowed anymore # => "generator already executing" ## we need to exec it, else it won't run on python2.4 def f(): try: yield finally: f.x = 42 # one argument required # not started, must send None def f(): total = sum(i for i in [x for x in z]) return total, x z = [1, 2, 7] res = f() # should raise "no active exception to re-raise" # --> 5 (): def g(x): yield x + 5 return g.__code__ (): def g(x): yield x + 5 yield x + 6 return g.__code__ | 2.480357 | 2 |
igvm/cli.py | innogames/igvm | 14 | 626 | """igvm - The command line interface
Copyright (c) 2017 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser, _SubParsersAction
from logging import StreamHandler, root as root_logger
import time
from fabric.network import disconnect_all
from igvm.commands import (
change_address,
disk_set,
evacuate,
host_info,
mem_set,
vcpu_set,
vm_build,
vm_delete,
vm_migrate,
vm_rename,
vm_restart,
vm_start,
vm_stop,
vm_sync, vm_define,
)
from igvm.libvirt import close_virtconns
class ColorFormatters():
BOLD = '\033[1m{}\033[0m'
WARNING = '\033[1;33m{}\033[0m'
ERROR = '\033[1;31m{}\033[0m'
CRITICAL = '\033[1;41m{}\033[0m'
class IGVMArgumentParser(ArgumentParser):
def format_help(self):
if not any(isinstance(a, _SubParsersAction) for a in self._actions):
return super(IGVMArgumentParser, self).format_help()
out = []
out.append(ColorFormatters.BOLD.format(__doc__))
out.append('Available commands:\n')
subparsers_actions = [
action for action in self._actions
if isinstance(action, _SubParsersAction)
]
# There will probably only be one subparser_action, but better safe
# than sorry.
for subparsers_action in subparsers_actions:
# Get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
out.append(ColorFormatters.BOLD.format(choice))
if subparser.get_default('func').__doc__:
out.append('\n'.join(
'\t{}'.format(l.strip()) for l in subparser
.get_default('func').__doc__.strip().splitlines()
))
out.append('\n\t{}'.format(subparser.format_usage()))
return '\n'.join(out)
class IGVMLogHandler(StreamHandler):
"""Extend StreamHandler to format messages short-cutting Formatters"""
def __init__(self, *args, **kwargs):
super(IGVMLogHandler, self).__init__(*args, **kwargs)
self.isatty = self.stream.isatty()
def format(self, record):
level = record.levelname
msg = '{}: {}: {}'.format(level, record.name, record.getMessage())
if self.isatty and level in vars(ColorFormatters):
msg = getattr(ColorFormatters, level).format(msg)
return msg
def parse_args():
top_parser = IGVMArgumentParser('igvm')
top_parser.add_argument('--silent', '-s', action='count', default=0)
top_parser.add_argument('--verbose', '-v', action='count', default=0)
subparsers = top_parser.add_subparsers(help='Actions')
subparser = subparsers.add_parser(
'build',
description=vm_build.__doc__,
)
subparser.set_defaults(func=vm_build)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--postboot',
metavar='postboot_script',
help='Run postboot_script on the guest after first boot',
)
subparser.add_argument(
'--skip-puppet',
action='store_false',
dest='run_puppet',
help='Skip running puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow building on a Host which has the state online_reserved',
)
subparser.add_argument(
'--rebuild',
dest='rebuild',
action='store_true',
help='Rebuild already defined VM or build it if not defined',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'migrate',
description=vm_migrate.__doc__,
)
subparser.set_defaults(func=vm_migrate)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'hypervisor_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--run-puppet',
action='store_true',
help='Run puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Force offline migration',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
choices=('drbd', 'netcat', 'xfs'),
help=(
'Specify drbd (default), netcat or xfs transport to migrate '
'disk image'
),
)
subparser.add_argument(
'--no-shutdown',
action='store_true',
help=(
'Don\'t shutdown VM during offline migration, igvm will wait for'
' operator to shut down VM for 24h.'
),
)
subparser.add_argument(
'--enforce-vm-env',
dest='enforce_vm_env',
action='store_true',
help='Build or migrate VM only to a HV with the same environment of VM'
)
subparser.add_argument(
'--disk-size',
dest='disk_size',
type=int,
help='Resize disk of migrated VM. Expects new size in GiB. '
'Works only with --offline --offline-transport=xfs',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'change-address',
        description=change_address.__doc__,
)
subparser.set_defaults(func=change_address)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_address',
help=(
'New IPv4 address of VM'
)
)
subparser.add_argument(
'--offline',
action='store_true',
help='Perform IP address change offline',
)
subparser.add_argument(
'--migrate',
action='store_true',
help='Migrate VM to new HV while changing IP address',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
help=(
'Specify drbd (default) or netcat transport to migrate disk image'
),
)
subparser = subparsers.add_parser(
'disk-set',
description=disk_set.__doc__,
)
subparser.set_defaults(func=disk_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New disk size with an optional unit (default GiB). '
'Can be specified relative with "+". Only integers are allowed'
)
)
subparser = subparsers.add_parser(
'mem-set',
description=mem_set.__doc__,
)
subparser.set_defaults(func=mem_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New memory size with optional unit (default is MiB).'
'Only integers are allowed.'
),
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change memory, and restart VM',
)
subparser = subparsers.add_parser(
'vcpu-set',
description=vcpu_set.__doc__,
)
subparser.set_defaults(func=vcpu_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'count',
type=int,
help='New number of CPUs',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change CPUs, and restart VM',
)
subparser = subparsers.add_parser(
'start',
description=vm_start.__doc__,
)
subparser.set_defaults(func=vm_start)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--unretire',
nargs='?',
const='maintenance',
help='Unretire a VM, set it to given state, maintenance by default',
)
subparser = subparsers.add_parser(
'stop',
description=vm_stop.__doc__,
)
subparser.set_defaults(func=vm_stop)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Retire VM after stopping it',
)
subparser = subparsers.add_parser(
'restart',
description=vm_restart.__doc__,
)
subparser.set_defaults(func=vm_restart)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--no-redefine',
action='store_true',
help='Do not redefine the domain to use latest hypervisor settings',
)
subparser = subparsers.add_parser(
'delete',
description=vm_delete.__doc__,
)
subparser.set_defaults(func=vm_delete)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Set VM state to "retired" on Serveradmin instead of deleting',
)
subparser = subparsers.add_parser(
'info',
description=host_info.__doc__,
)
subparser.set_defaults(func=host_info)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'sync',
description=vm_sync.__doc__,
)
subparser.set_defaults(func=vm_sync)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'rename',
description=vm_rename.__doc__,
)
subparser.set_defaults(func=vm_rename)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_hostname',
help='New hostname',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, if running',
)
subparser = subparsers.add_parser(
'evacuate',
description=evacuate.__doc__,
)
subparser.set_defaults(func=evacuate)
subparser.add_argument(
'hv_hostname',
help='Hostname of the hypervisor',
)
subparser.add_argument(
'dst_hv_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--dry-run',
action='store_true',
help='Do not migrate but just print what would be done'
)
subparser.add_argument(
'--offline',
nargs='*',
help='Migrate VMs matching the given serveradmin function offline',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migrating to a host which has the state online_reserved',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'define',
description=vm_define.__doc__,
)
subparser.set_defaults(func=vm_define)
subparser.add_argument('vm_hostname', help='Hostname of the guest system')
return vars(top_parser.parse_args())
def main():
args = parse_args()
configure_root_logger(args.pop('silent'), args.pop('verbose'))
try:
args.pop('func')(**args)
finally:
# Fabric requires the disconnect function to be called after every
# use. We are also taking our chance to disconnect from
# the hypervisors.
disconnect_all()
close_virtconns()
# The underlying library of Fabric, Paramiko, raises an error, on
# destruction right after the disconnect function is called. We are
# sleeping for a little while to avoid this.
time.sleep(0.1)
def configure_root_logger(silent, verbose):
root_logger.addHandler(IGVMLogHandler())
# We are summing up the silent and verbose arguments in here. It
# is not really meaningful to use them both, but giving an error is not
# better. See Python logging library documentation [1] for the levels.
# Paramiko is overly verbose. We configure it for one level higher.
#
# [1] https://docs.python.org/library/logging.html#logging-levels
level = 20 + (silent - verbose) * 10
root_logger.setLevel(level)
root_logger.getChild('paramiko').setLevel(level + 10)
| """igvm - The command line interface
Copyright (c) 2017 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser, _SubParsersAction
from logging import StreamHandler, root as root_logger
import time
from fabric.network import disconnect_all
from igvm.commands import (
change_address,
disk_set,
evacuate,
host_info,
mem_set,
vcpu_set,
vm_build,
vm_delete,
vm_migrate,
vm_rename,
vm_restart,
vm_start,
vm_stop,
vm_sync, vm_define,
)
from igvm.libvirt import close_virtconns
class ColorFormatters():
BOLD = '\033[1m{}\033[0m'
WARNING = '\033[1;33m{}\033[0m'
ERROR = '\033[1;31m{}\033[0m'
CRITICAL = '\033[1;41m{}\033[0m'
class IGVMArgumentParser(ArgumentParser):
def format_help(self):
if not any(isinstance(a, _SubParsersAction) for a in self._actions):
return super(IGVMArgumentParser, self).format_help()
out = []
out.append(ColorFormatters.BOLD.format(__doc__))
out.append('Available commands:\n')
subparsers_actions = [
action for action in self._actions
if isinstance(action, _SubParsersAction)
]
# There will probably only be one subparser_action, but better safe
# than sorry.
for subparsers_action in subparsers_actions:
# Get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
out.append(ColorFormatters.BOLD.format(choice))
if subparser.get_default('func').__doc__:
out.append('\n'.join(
'\t{}'.format(l.strip()) for l in subparser
.get_default('func').__doc__.strip().splitlines()
))
out.append('\n\t{}'.format(subparser.format_usage()))
return '\n'.join(out)
class IGVMLogHandler(StreamHandler):
"""Extend StreamHandler to format messages short-cutting Formatters"""
def __init__(self, *args, **kwargs):
super(IGVMLogHandler, self).__init__(*args, **kwargs)
self.isatty = self.stream.isatty()
def format(self, record):
level = record.levelname
msg = '{}: {}: {}'.format(level, record.name, record.getMessage())
if self.isatty and level in vars(ColorFormatters):
msg = getattr(ColorFormatters, level).format(msg)
return msg
def parse_args():
top_parser = IGVMArgumentParser('igvm')
top_parser.add_argument('--silent', '-s', action='count', default=0)
top_parser.add_argument('--verbose', '-v', action='count', default=0)
subparsers = top_parser.add_subparsers(help='Actions')
subparser = subparsers.add_parser(
'build',
description=vm_build.__doc__,
)
subparser.set_defaults(func=vm_build)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--postboot',
metavar='postboot_script',
help='Run postboot_script on the guest after first boot',
)
subparser.add_argument(
'--skip-puppet',
action='store_false',
dest='run_puppet',
help='Skip running puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow building on a Host which has the state online_reserved',
)
subparser.add_argument(
'--rebuild',
dest='rebuild',
action='store_true',
help='Rebuild already defined VM or build it if not defined',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'migrate',
description=vm_migrate.__doc__,
)
subparser.set_defaults(func=vm_migrate)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'hypervisor_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--run-puppet',
action='store_true',
help='Run puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Force offline migration',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
choices=('drbd', 'netcat', 'xfs'),
help=(
'Specify drbd (default), netcat or xfs transport to migrate '
'disk image'
),
)
subparser.add_argument(
'--no-shutdown',
action='store_true',
help=(
'Don\'t shutdown VM during offline migration, igvm will wait for'
' operator to shut down VM for 24h.'
),
)
subparser.add_argument(
'--enforce-vm-env',
dest='enforce_vm_env',
action='store_true',
help='Build or migrate VM only to a HV with the same environment of VM'
)
subparser.add_argument(
'--disk-size',
dest='disk_size',
type=int,
help='Resize disk of migrated VM. Expects new size in GiB. '
'Works only with --offline --offline-transport=xfs',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'change-address',
        description=change_address.__doc__,
)
subparser.set_defaults(func=change_address)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_address',
help=(
'New IPv4 address of VM'
)
)
subparser.add_argument(
'--offline',
action='store_true',
help='Perform IP address change offline',
)
subparser.add_argument(
'--migrate',
action='store_true',
help='Migrate VM to new HV while changing IP address',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
help=(
'Specify drbd (default) or netcat transport to migrate disk image'
),
)
subparser = subparsers.add_parser(
'disk-set',
description=disk_set.__doc__,
)
subparser.set_defaults(func=disk_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New disk size with an optional unit (default GiB). '
'Can be specified relative with "+". Only integers are allowed'
)
)
subparser = subparsers.add_parser(
'mem-set',
description=mem_set.__doc__,
)
subparser.set_defaults(func=mem_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New memory size with optional unit (default is MiB).'
'Only integers are allowed.'
),
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change memory, and restart VM',
)
subparser = subparsers.add_parser(
'vcpu-set',
description=vcpu_set.__doc__,
)
subparser.set_defaults(func=vcpu_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'count',
type=int,
help='New number of CPUs',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change CPUs, and restart VM',
)
subparser = subparsers.add_parser(
'start',
description=vm_start.__doc__,
)
subparser.set_defaults(func=vm_start)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--unretire',
nargs='?',
const='maintenance',
help='Unretire a VM, set it to given state, maintenance by default',
)
subparser = subparsers.add_parser(
'stop',
description=vm_stop.__doc__,
)
subparser.set_defaults(func=vm_stop)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Retire VM after stopping it',
)
subparser = subparsers.add_parser(
'restart',
description=vm_restart.__doc__,
)
subparser.set_defaults(func=vm_restart)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--no-redefine',
action='store_true',
help='Do not redefine the domain to use latest hypervisor settings',
)
subparser = subparsers.add_parser(
'delete',
description=vm_delete.__doc__,
)
subparser.set_defaults(func=vm_delete)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Set VM state to "retired" on Serveradmin instead of deleting',
)
subparser = subparsers.add_parser(
'info',
description=host_info.__doc__,
)
subparser.set_defaults(func=host_info)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'sync',
description=vm_sync.__doc__,
)
subparser.set_defaults(func=vm_sync)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'rename',
description=vm_rename.__doc__,
)
subparser.set_defaults(func=vm_rename)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_hostname',
help='New hostname',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, if running',
)
subparser = subparsers.add_parser(
'evacuate',
description=evacuate.__doc__,
)
subparser.set_defaults(func=evacuate)
subparser.add_argument(
'hv_hostname',
help='Hostname of the hypervisor',
)
subparser.add_argument(
'dst_hv_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--dry-run',
action='store_true',
help='Do not migrate but just print what would be done'
)
subparser.add_argument(
'--offline',
nargs='*',
help='Migrate VMs matching the given serveradmin function offline',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migrating to a host which has the state online_reserved',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'define',
description=vm_define.__doc__,
)
subparser.set_defaults(func=vm_define)
subparser.add_argument('vm_hostname', help='Hostname of the guest system')
return vars(top_parser.parse_args())
def main():
args = parse_args()
configure_root_logger(args.pop('silent'), args.pop('verbose'))
try:
args.pop('func')(**args)
finally:
# Fabric requires the disconnect function to be called after every
# use. We are also taking our chance to disconnect from
# the hypervisors.
disconnect_all()
close_virtconns()
# The underlying library of Fabric, Paramiko, raises an error, on
# destruction right after the disconnect function is called. We are
# sleeping for a little while to avoid this.
time.sleep(0.1)
def configure_root_logger(silent, verbose):
root_logger.addHandler(IGVMLogHandler())
# We are summing up the silent and verbose arguments in here. It
# is not really meaningful to use them both, but giving an error is not
# better. See Python logging library documentation [1] for the levels.
# Paramiko is overly verbose. We configure it for one level higher.
#
# [1] https://docs.python.org/library/logging.html#logging-levels
level = 20 + (silent - verbose) * 10
root_logger.setLevel(level)
root_logger.getChild('paramiko').setLevel(level + 10)
| en | 0.876876 | igvm - The command line interface Copyright (c) 2017 InnoGames GmbH # There will probably only be one subparser_action, but better safe # than sorry. # Get all subparsers and print help Extend StreamHandler to format messages short-cutting Formatters # Fabric requires the disconnect function to be called after every # use. We are also taking our chance to disconnect from # the hypervisors. # The underlying library of Fabric, Paramiko, raises an error, on # destruction right after the disconnect function is called. We are # sleeping for a little while to avoid this. # We are summing up the silent and verbose arguments in here. It # is not really meaningful to use them both, but giving an error is not # better. See Python logging library documentation [1] for the levels. # Paramiko is overly verbose. We configure it for one level higher. # # [1] https://docs.python.org/library/logging.html#logging-levels | 2.409978 | 2 |
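To illustrate the verbosity arithmetic used by configure_root_logger in the igvm/cli.py record above, here is a small standalone sketch (not part of igvm itself):

import logging

def igvm_log_level(silent, verbose):
    # Mirrors level = 20 + (silent - verbose) * 10: each -v lowers the threshold by 10,
    # each -s raises it by 10, starting from INFO (20).
    return 20 + (silent - verbose) * 10

assert igvm_log_level(0, 0) == logging.INFO     # default
assert igvm_log_level(0, 1) == logging.DEBUG    # igvm -v
assert igvm_log_level(1, 0) == logging.WARNING  # igvm -s
# Paramiko is then kept one step quieter at level + 10.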
test/test_data_processor/test_condition_generation_dataset.py | puraminy/OpenPrompt | 979 | 627 | <reponame>puraminy/OpenPrompt
import os, sys
from os.path import dirname as d
from os.path import abspath, join
root_dir = d(d(d(abspath(__file__))))
sys.path.append(root_dir)
from openprompt.data_utils.conditional_generation_dataset import PROCESSORS
base_path = os.path.join(root_dir, "datasets/CondGen")
def test_WebNLGProcessor():
dataset_name = "webnlg_2017"
dataset_path = os.path.join(base_path, dataset_name)
processor = PROCESSORS[dataset_name.lower()]()
train_dataset = processor.get_train_examples(dataset_path)
valid_dataset = processor.get_train_examples(dataset_path)
test_dataset = processor.get_test_examples(dataset_path)
assert len(train_dataset) == 18025
assert len(valid_dataset) == 18025
assert len(test_dataset) == 4928
assert test_dataset[0].text_a == " | Abilene_Regional_Airport : cityServed : Abilene,_Texas"
assert test_dataset[0].text_b == ""
assert test_dataset[0].tgt_text == "Abilene, Texas is served by the Abilene regional airport."
| import os, sys
from os.path import dirname as d
from os.path import abspath, join
root_dir = d(d(d(abspath(__file__))))
sys.path.append(root_dir)
from openprompt.data_utils.conditional_generation_dataset import PROCESSORS
base_path = os.path.join(root_dir, "datasets/CondGen")
def test_WebNLGProcessor():
dataset_name = "webnlg_2017"
dataset_path = os.path.join(base_path, dataset_name)
processor = PROCESSORS[dataset_name.lower()]()
train_dataset = processor.get_train_examples(dataset_path)
valid_dataset = processor.get_train_examples(dataset_path)
test_dataset = processor.get_test_examples(dataset_path)
assert len(train_dataset) == 18025
assert len(valid_dataset) == 18025
assert len(test_dataset) == 4928
assert test_dataset[0].text_a == " | Abilene_Regional_Airport : cityServed : Abilene,_Texas"
assert test_dataset[0].text_b == ""
assert test_dataset[0].tgt_text == "Abilene, Texas is served by the Abilene regional airport." | none | 1 | 2.305458 | 2 |
|
BaseTools/Source/Python/Common/BuildToolError.py | JayLeeCompal/EDKII_Git | 1 | 628 | ## @file
# Standardized Error Handling infrastructures.
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
FILE_OPEN_FAILURE = 1
FILE_WRITE_FAILURE = 2
FILE_PARSE_FAILURE = 3
FILE_READ_FAILURE = 4
FILE_CREATE_FAILURE = 5
FILE_CHECKSUM_FAILURE = 6
FILE_COMPRESS_FAILURE = 7
FILE_DECOMPRESS_FAILURE = 8
FILE_MOVE_FAILURE = 9
FILE_DELETE_FAILURE = 10
FILE_COPY_FAILURE = 11
FILE_POSITIONING_FAILURE = 12
FILE_ALREADY_EXIST = 13
FILE_NOT_FOUND = 14
FILE_TYPE_MISMATCH = 15
FILE_CASE_MISMATCH = 16
FILE_DUPLICATED = 17
FILE_UNKNOWN_ERROR = 0x0FFF
OPTION_UNKNOWN = 0x1000
OPTION_MISSING = 0x1001
OPTION_CONFLICT = 0x1002
OPTION_VALUE_INVALID = 0x1003
OPTION_DEPRECATED = 0x1004
OPTION_NOT_SUPPORTED = 0x1005
OPTION_UNKNOWN_ERROR = 0x1FFF
PARAMETER_INVALID = 0x2000
PARAMETER_MISSING = 0x2001
PARAMETER_UNKNOWN_ERROR = 0x2FFF
FORMAT_INVALID = 0x3000
FORMAT_NOT_SUPPORTED = 0x3001
FORMAT_UNKNOWN = 0x3002
FORMAT_UNKNOWN_ERROR = 0x3FFF
RESOURCE_NOT_AVAILABLE = 0x4000
RESOURCE_ALLOCATE_FAILURE = 0x4001
RESOURCE_FULL = 0x4002
RESOURCE_OVERFLOW = 0x4003
RESOURCE_UNDERRUN = 0x4004
RESOURCE_UNKNOWN_ERROR = 0x4FFF
ATTRIBUTE_NOT_AVAILABLE = 0x5000
ATTRIBUTE_GET_FAILURE = 0x5001
ATTRIBUTE_SET_FAILURE = 0x5002
ATTRIBUTE_UPDATE_FAILURE = 0x5003
ATTRIBUTE_ACCESS_DENIED = 0x5004
ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF
IO_NOT_READY = 0x6000
IO_BUSY = 0x6001
IO_TIMEOUT = 0x6002
IO_UNKNOWN_ERROR = 0x6FFF
COMMAND_FAILURE = 0x7000
PERMISSION_FAILURE = 0x8000
CODE_ERROR = 0xC0DE
AUTOGEN_ERROR = 0xF000
PARSER_ERROR = 0xF001
BUILD_ERROR = 0xF002
GENFDS_ERROR = 0xF003
ECC_ERROR = 0xF004
EOT_ERROR = 0xF005
DDC_ERROR = 0xF009
WARNING_AS_ERROR = 0xF006
MIGRATION_ERROR = 0xF010
PCD_VALIDATION_INFO_ERROR = 0xF011
PCD_VARIABLE_ATTRIBUTES_ERROR = 0xF012
PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR = 0xF013
ABORT_ERROR = 0xFFFE
UNKNOWN_ERROR = 0xFFFF
## Error message of each error code
gErrorMessage = {
FILE_NOT_FOUND : "File/directory not found in workspace",
FILE_OPEN_FAILURE : "File open failure",
FILE_WRITE_FAILURE : "File write failure",
FILE_PARSE_FAILURE : "File parse failure",
FILE_READ_FAILURE : "File read failure",
FILE_CREATE_FAILURE : "File create failure",
FILE_CHECKSUM_FAILURE : "Invalid checksum of file",
FILE_COMPRESS_FAILURE : "File compress failure",
FILE_DECOMPRESS_FAILURE : "File decompress failure",
FILE_MOVE_FAILURE : "File move failure",
FILE_DELETE_FAILURE : "File delete failure",
FILE_COPY_FAILURE : "File copy failure",
FILE_POSITIONING_FAILURE: "Failed to seeking position",
FILE_ALREADY_EXIST : "File or directory already exists",
FILE_TYPE_MISMATCH : "Incorrect file type",
FILE_CASE_MISMATCH : "File name case mismatch",
FILE_DUPLICATED : "Duplicated file found",
FILE_UNKNOWN_ERROR : "Unknown error encountered on file",
OPTION_UNKNOWN : "Unknown option",
OPTION_MISSING : "Missing option",
OPTION_CONFLICT : "Conflict options",
OPTION_VALUE_INVALID : "Invalid value of option",
OPTION_DEPRECATED : "Deprecated option",
OPTION_NOT_SUPPORTED : "Unsupported option",
OPTION_UNKNOWN_ERROR : "Unknown error when processing options",
PARAMETER_INVALID : "Invalid parameter",
PARAMETER_MISSING : "Missing parameter",
PARAMETER_UNKNOWN_ERROR : "Unknown error in parameters",
FORMAT_INVALID : "Invalid syntax/format",
FORMAT_NOT_SUPPORTED : "Not supported syntax/format",
FORMAT_UNKNOWN : "Unknown format",
FORMAT_UNKNOWN_ERROR : "Unknown error in syntax/format ",
RESOURCE_NOT_AVAILABLE : "Not available",
RESOURCE_ALLOCATE_FAILURE : "Allocate failure",
RESOURCE_FULL : "Full",
RESOURCE_OVERFLOW : "Overflow",
RESOURCE_UNDERRUN : "Underrun",
RESOURCE_UNKNOWN_ERROR : "Unknown error",
ATTRIBUTE_NOT_AVAILABLE : "Not available",
ATTRIBUTE_GET_FAILURE : "Failed to retrieve",
ATTRIBUTE_SET_FAILURE : "Failed to set",
ATTRIBUTE_UPDATE_FAILURE: "Failed to update",
ATTRIBUTE_ACCESS_DENIED : "Access denied",
ATTRIBUTE_UNKNOWN_ERROR : "Unknown error when accessing",
COMMAND_FAILURE : "Failed to execute command",
IO_NOT_READY : "Not ready",
IO_BUSY : "Busy",
IO_TIMEOUT : "Timeout",
IO_UNKNOWN_ERROR : "Unknown error in IO operation",
UNKNOWN_ERROR : "Unknown error",
}
## Exception indicating a fatal error
class FatalError(Exception):
pass
if __name__ == "__main__":
pass
| ## @file
# Standardized Error Handling infrastructures.
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
FILE_OPEN_FAILURE = 1
FILE_WRITE_FAILURE = 2
FILE_PARSE_FAILURE = 3
FILE_READ_FAILURE = 4
FILE_CREATE_FAILURE = 5
FILE_CHECKSUM_FAILURE = 6
FILE_COMPRESS_FAILURE = 7
FILE_DECOMPRESS_FAILURE = 8
FILE_MOVE_FAILURE = 9
FILE_DELETE_FAILURE = 10
FILE_COPY_FAILURE = 11
FILE_POSITIONING_FAILURE = 12
FILE_ALREADY_EXIST = 13
FILE_NOT_FOUND = 14
FILE_TYPE_MISMATCH = 15
FILE_CASE_MISMATCH = 16
FILE_DUPLICATED = 17
FILE_UNKNOWN_ERROR = 0x0FFF
OPTION_UNKNOWN = 0x1000
OPTION_MISSING = 0x1001
OPTION_CONFLICT = 0x1002
OPTION_VALUE_INVALID = 0x1003
OPTION_DEPRECATED = 0x1004
OPTION_NOT_SUPPORTED = 0x1005
OPTION_UNKNOWN_ERROR = 0x1FFF
PARAMETER_INVALID = 0x2000
PARAMETER_MISSING = 0x2001
PARAMETER_UNKNOWN_ERROR =0x2FFF
FORMAT_INVALID = 0x3000
FORMAT_NOT_SUPPORTED = 0x3001
FORMAT_UNKNOWN = 0x3002
FORMAT_UNKNOWN_ERROR = 0x3FFF
RESOURCE_NOT_AVAILABLE = 0x4000
RESOURCE_ALLOCATE_FAILURE = 0x4001
RESOURCE_FULL = 0x4002
RESOURCE_OVERFLOW = 0x4003
RESOURCE_UNDERRUN = 0x4004
RESOURCE_UNKNOWN_ERROR = 0x4FFF
ATTRIBUTE_NOT_AVAILABLE = 0x5000
ATTRIBUTE_GET_FAILURE = 0x5001
ATTRIBUTE_SET_FAILURE = 0x5002
ATTRIBUTE_UPDATE_FAILURE = 0x5003
ATTRIBUTE_ACCESS_DENIED = 0x5004
ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF
IO_NOT_READY = 0x6000
IO_BUSY = 0x6001
IO_TIMEOUT = 0x6002
IO_UNKNOWN_ERROR = 0x6FFF
COMMAND_FAILURE = 0x7000
PERMISSION_FAILURE = 0x8000
CODE_ERROR = 0xC0DE
AUTOGEN_ERROR = 0xF000
PARSER_ERROR = 0xF001
BUILD_ERROR = 0xF002
GENFDS_ERROR = 0xF003
ECC_ERROR = 0xF004
EOT_ERROR = 0xF005
DDC_ERROR = 0xF009
WARNING_AS_ERROR = 0xF006
MIGRATION_ERROR = 0xF010
PCD_VALIDATION_INFO_ERROR = 0xF011
PCD_VARIABLE_ATTRIBUTES_ERROR = 0xF012
PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR = 0xF013
ABORT_ERROR = 0xFFFE
UNKNOWN_ERROR = 0xFFFF
## Error message of each error code
gErrorMessage = {
FILE_NOT_FOUND : "File/directory not found in workspace",
FILE_OPEN_FAILURE : "File open failure",
FILE_WRITE_FAILURE : "File write failure",
FILE_PARSE_FAILURE : "File parse failure",
FILE_READ_FAILURE : "File read failure",
FILE_CREATE_FAILURE : "File create failure",
FILE_CHECKSUM_FAILURE : "Invalid checksum of file",
FILE_COMPRESS_FAILURE : "File compress failure",
FILE_DECOMPRESS_FAILURE : "File decompress failure",
FILE_MOVE_FAILURE : "File move failure",
FILE_DELETE_FAILURE : "File delete failure",
FILE_COPY_FAILURE : "File copy failure",
FILE_POSITIONING_FAILURE: "Failed to seeking position",
FILE_ALREADY_EXIST : "File or directory already exists",
FILE_TYPE_MISMATCH : "Incorrect file type",
FILE_CASE_MISMATCH : "File name case mismatch",
FILE_DUPLICATED : "Duplicated file found",
FILE_UNKNOWN_ERROR : "Unknown error encountered on file",
OPTION_UNKNOWN : "Unknown option",
OPTION_MISSING : "Missing option",
OPTION_CONFLICT : "Conflict options",
OPTION_VALUE_INVALID : "Invalid value of option",
OPTION_DEPRECATED : "Deprecated option",
OPTION_NOT_SUPPORTED : "Unsupported option",
OPTION_UNKNOWN_ERROR : "Unknown error when processing options",
PARAMETER_INVALID : "Invalid parameter",
PARAMETER_MISSING : "Missing parameter",
PARAMETER_UNKNOWN_ERROR : "Unknown error in parameters",
FORMAT_INVALID : "Invalid syntax/format",
FORMAT_NOT_SUPPORTED : "Not supported syntax/format",
FORMAT_UNKNOWN : "Unknown format",
FORMAT_UNKNOWN_ERROR : "Unknown error in syntax/format ",
RESOURCE_NOT_AVAILABLE : "Not available",
RESOURCE_ALLOCATE_FAILURE : "Allocate failure",
RESOURCE_FULL : "Full",
RESOURCE_OVERFLOW : "Overflow",
RESOURCE_UNDERRUN : "Underrun",
RESOURCE_UNKNOWN_ERROR : "Unknown error",
ATTRIBUTE_NOT_AVAILABLE : "Not available",
ATTRIBUTE_GET_FAILURE : "Failed to retrieve",
ATTRIBUTE_SET_FAILURE : "Failed to set",
ATTRIBUTE_UPDATE_FAILURE: "Failed to update",
ATTRIBUTE_ACCESS_DENIED : "Access denied",
ATTRIBUTE_UNKNOWN_ERROR : "Unknown error when accessing",
COMMAND_FAILURE : "Failed to execute command",
IO_NOT_READY : "Not ready",
IO_BUSY : "Busy",
IO_TIMEOUT : "Timeout",
IO_UNKNOWN_ERROR : "Unknown error in IO operation",
UNKNOWN_ERROR : "Unknown error",
}
## Exception indicating a fatal error
class FatalError(Exception):
pass
if __name__ == "__main__":
pass
 | en | 0.731907 | ## @file # Standardized Error Handling infrastructures. # # Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR> # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## Error message of each error code ## Exception indicating a fatal error | 1.198117 | 1
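A short sketch of how these codes and messages are typically consumed (illustrative only; the import path Common.BuildToolError is an assumption based on the file's location under BaseTools/Source/Python):

from Common.BuildToolError import FILE_NOT_FOUND, UNKNOWN_ERROR, gErrorMessage, FatalError

def describe(error_code):
    # Fall back to the generic message when a code has no dedicated entry.
    return gErrorMessage.get(error_code, gErrorMessage[UNKNOWN_ERROR])

try:
    raise FatalError(describe(FILE_NOT_FOUND))
except FatalError as err:
    print("build aborted:", err)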
datasets/SUN397EncodbleDataset.py | allenai/ViRB | 26 | 629 | import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
class SUN397EncodableDataset(Dataset):
"""SUN397 encodable dataset class"""
def __init__(self, train=True):
super().__init__()
path = 'data/SUN397/train/*/*.jpg' if train else 'data/SUN397/test/*/*.jpg'
self.data = list(glob.glob(path))
random.shuffle(self.data)
cats = list(set([path.split("/")[3] for path in self.data]))
cats.sort()
self.labels = torch.LongTensor([cats.index(path.split("/")[3]) for path in self.data])
self.preprocessor = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
        self.encoded_data = []  # precomputed features may be assigned here; while empty, __getitem__ returns raw images
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if len(self.encoded_data) == 0:
return self.preprocessor(Image.open(self.data[idx]).convert('RGB')), self.labels[idx]
return self.encoded_data[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
def num_classes(self):
return int(max(self.labels) + 1)
| import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
class SUN397EncodableDataset(Dataset):
"""SUN397 encodable dataset class"""
def __init__(self, train=True):
super().__init__()
path = 'data/SUN397/train/*/*.jpg' if train else 'data/SUN397/test/*/*.jpg'
self.data = list(glob.glob(path))
random.shuffle(self.data)
cats = list(set([path.split("/")[3] for path in self.data]))
cats.sort()
self.labels = torch.LongTensor([cats.index(path.split("/")[3]) for path in self.data])
self.preprocessor = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
        self.encoded_data = []  # precomputed features may be assigned here; while empty, __getitem__ returns raw images
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if len(self.encoded_data) == 0:
return self.preprocessor(Image.open(self.data[idx]).convert('RGB')), self.labels[idx]
return self.encoded_data[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
def num_classes(self):
return int(max(self.labels) + 1)
| en | 0.335955 | SUN397 encodable dataset class | 2.532932 | 3 |
cybox/common/location.py | tirkarthi/python-cybox | 40 | 630 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox
import cybox.bindings.cybox_common as common_binding
class LocationFactory(entities.EntityFactory):
@classmethod
def entity_class(cls, key):
return cybox.lookup_extension(key, default=Location)
class Location(entities.Entity):
_binding = common_binding
_binding_class = common_binding.LocationType
_namespace = 'http://cybox.mitre.org/common-2'
_XSI_TYPE = None # overridden by subclasses
id_ = fields.IdrefField("id")
idref = fields.IdrefField("idref")
name = fields.TypedField("Name")
def to_dict(self):
d = super(Location, self).to_dict()
if self._XSI_TYPE:
d["xsi:type"] = self._XSI_TYPE
return d
@staticmethod
def lookup_class(xsi_type):
return cybox.lookup_extension(xsi_type, default=Location)
| # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox
import cybox.bindings.cybox_common as common_binding
class LocationFactory(entities.EntityFactory):
@classmethod
def entity_class(cls, key):
return cybox.lookup_extension(key, default=Location)
class Location(entities.Entity):
_binding = common_binding
_binding_class = common_binding.LocationType
_namespace = 'http://cybox.mitre.org/common-2'
_XSI_TYPE = None # overridden by subclasses
id_ = fields.IdrefField("id")
idref = fields.IdrefField("idref")
name = fields.TypedField("Name")
def to_dict(self):
d = super(Location, self).to_dict()
if self._XSI_TYPE:
d["xsi:type"] = self._XSI_TYPE
return d
@staticmethod
def lookup_class(xsi_type):
return cybox.lookup_extension(xsi_type, default=Location) | en | 0.871963 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. # overridden by subclasses | 2.278958 | 2 |
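A rough usage sketch for the class above (illustrative only; the name and idref values are made up, and a real extension subclass would also set _XSI_TYPE so that to_dict() emits an xsi:type entry):

loc = Location()
loc.name = "Springfield"            # hypothetical place name
loc.idref = "example:Location-1"    # hypothetical identifier
print(loc.to_dict())                # roughly {'name': 'Springfield', 'idref': 'example:Location-1'}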
scripts/bam-stats.py | varlociraptor/prosic-evaluation | 2 | 631 | #!/usr/bin/env python
import sys
import numpy as np
import pandas as pd
import pysam
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import seaborn as sns
from functools import partial
tumor = pysam.AlignmentFile(snakemake.input[0], "rb")
normal = pysam.AlignmentFile(snakemake.input[1], "rb")
softclips = []
for i, rec in enumerate(normal):
if rec.is_supplementary or rec.is_unmapped:
continue
is_first_read = rec.pos < rec.mpos
get_clip = lambda c: c[1] if c[0] == 4 else None
clip_left = get_clip(rec.cigartuples[0])
if clip_left is not None:
softclips.append([clip_left, True, is_first_read])
clip_right = get_clip(rec.cigartuples[-1])
if clip_right is not None:
softclips.append([clip_right, False, is_first_read])
if i == 10000000:
break
softclips = pd.DataFrame(softclips, columns=["len", "left", "first_in_pair"])
def plot(*args, **kwargs):
softclips = args[0]
plt.hist(softclips, normed=True)
q95 = np.percentile(softclips, 99)
plt.plot([q95, q95], [0, 1.0], "--k")
m = max(softclips)
plt.plot([m, m], [0, 1.0], ":k")
plt.text(m, 1, "max={}".format(m), horizontalalignment="right", verticalalignment="top")
g = sns.FacetGrid(softclips, col="left", row="first_in_pair")
g = g.map(plot, "len")
plt.savefig(snakemake.output[0])
| #!/usr/bin/env python
import sys
import numpy as np
import pandas as pd
import pysam
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import seaborn as sns
from functools import partial
tumor = pysam.AlignmentFile(snakemake.input[0], "rb")
normal = pysam.AlignmentFile(snakemake.input[1], "rb")
softclips = []
for i, rec in enumerate(normal):
if rec.is_supplementary or rec.is_unmapped:
continue
is_first_read = rec.pos < rec.mpos
get_clip = lambda c: c[1] if c[0] == 4 else None
clip_left = get_clip(rec.cigartuples[0])
if clip_left is not None:
softclips.append([clip_left, True, is_first_read])
clip_right = get_clip(rec.cigartuples[-1])
if clip_right is not None:
softclips.append([clip_right, False, is_first_read])
if i == 10000000:
break
softclips = pd.DataFrame(softclips, columns=["len", "left", "first_in_pair"])
def plot(*args, **kwargs):
softclips = args[0]
plt.hist(softclips, normed=True)
q95 = np.percentile(softclips, 99)
plt.plot([q95, q95], [0, 1.0], "--k")
m = max(softclips)
plt.plot([m, m], [0, 1.0], ":k")
plt.text(m, 1, "max={}".format(m), horizontalalignment="right", verticalalignment="top")
g = sns.FacetGrid(softclips, col="left", row="first_in_pair")
g = g.map(plot, "len")
plt.savefig(snakemake.output[0]) | ru | 0.26433 | #!/usr/bin/env python | 1.982844 | 2 |
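A self-contained sketch of the soft-clip extraction rule used above, without pysam or snakemake (cigartuples are modelled as (operation, length) pairs, where operation 4 denotes a soft clip):

def softclip_lengths(cigartuples):
    # Return (left_clip, right_clip); None means no soft clip on that side.
    get_clip = lambda c: c[1] if c[0] == 4 else None
    return get_clip(cigartuples[0]), get_clip(cigartuples[-1])

# A 5S20M3S alignment has a 5 bp clip on the left and a 3 bp clip on the right.
print(softclip_lengths([(4, 5), (0, 20), (4, 3)]))  # (5, 3)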
app/rss_feeder_api/migrations/0003_auto_20200813_1623.py | RSaab/rss-scraper | 0 | 632 | # Generated by Django 3.1 on 2020-08-13 16:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('rss_feeder_api', '0002_feed_subtitle'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ('-updated_at',), 'verbose_name_plural': 'entries'},
),
migrations.AlterModelOptions(
name='feed',
options={'ordering': ('-updated_at',), 'verbose_name': 'Feed', 'verbose_name_plural': 'Feeds'},
),
migrations.AddField(
model_name='entry',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterUniqueTogether(
name='entry',
unique_together={('guid',)},
),
]
| # Generated by Django 3.1 on 2020-08-13 16:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('rss_feeder_api', '0002_feed_subtitle'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ('-updated_at',), 'verbose_name_plural': 'entries'},
),
migrations.AlterModelOptions(
name='feed',
options={'ordering': ('-updated_at',), 'verbose_name': 'Feed', 'verbose_name_plural': 'Feeds'},
),
migrations.AddField(
model_name='entry',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterUniqueTogether(
name='entry',
unique_together={('guid',)},
),
] | en | 0.783605 | # Generated by Django 3.1 on 2020-08-13 16:23 | 1.790674 | 2 |
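For context, a sketch of roughly what the Entry model looks like once this migration is applied (illustrative only; the guid field type and max_length are assumptions, since the migration itself only shows the model options, the unique constraint and the two timestamp columns):

from django.db import models

class Entry(models.Model):
    guid = models.CharField(max_length=255)               # assumed field type
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ("-updated_at",)
        verbose_name_plural = "entries"
        unique_together = (("guid",),)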
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py | MetaMain/BewareAdvML | 1 | 633 | import tensorflow
from tensorflow import keras
Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization= keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input=keras.layers.Input
l2=keras.regularizers.l2
from tensorflow.keras import backend
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v2(input, complexityParameter, num_classes=10, dataset='cifar10'):
depth = complexityParameter * 9 + 2
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = input
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = tensorflow.keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
final_features = Flatten()(x)
logits = Dense(num_classes, kernel_initializer='he_normal')(final_features)
outputs = Activation('softmax')(logits)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model, inputs, outputs, logits, final_features | import tensorflow
from tensorflow import keras
Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization= keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input=keras.layers.Input
l2=keras.regularizers.l2
from tensorflow.keras import backend
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v2(input, complexityParameter, num_classes=10, dataset='cifar10'):
depth = complexityParameter * 9 + 2
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = input
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = tensorflow.keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
final_features = Flatten()(x)
logits = Dense(num_classes, kernel_initializer='he_normal')(final_features)
outputs = Activation('softmax')(logits)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model, inputs, outputs, logits, final_features | en | 0.70138 | 2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer # Start model definition. # Instantiate the stack of residual units # first layer and first stage # first layer but not first stage # downsample # bottleneck residual unit # linear projection residual shortcut connection to match # changed dims # Add classifier on top. # v2 has BN-ReLU before Pooling # Instantiate model. | 3.098358 | 3 |
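An illustrative call of the builder above (a sketch; the CIFAR-10 input shape, complexity parameter and compile settings are assumptions rather than part of the original file):

from tensorflow import keras

inputs = keras.layers.Input(shape=(32, 32, 3))
# complexityParameter=2 gives a depth of 2 * 9 + 2 = 20 layers.
model, _, _, logits, features = resnet_v2(inputs, complexityParameter=2, num_classes=10)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()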
ttl2json.py | the-norman-sicily-project/genealogical-trees | 1 | 634 | #!/usr/bin/env python3
import sys
import json
import rdflib
import rdflib.plugins.sparql as sparql
RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
'isGreatAuntOf', 'hasGreatAunt',
'isBrotherOf', 'isSisterOf', 'isSiblingOf',
'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER
try:
workpath = sys.argv[1]
except IndexError:
sys.exit("No path defined!")
try:
recursion_limit = int(sys.argv[2])
except IndexError:
recursion_limit = 0
if recursion_limit > 0:
sys.setrecursionlimit(recursion_limit)
g = rdflib.Graph()
g.parse(workpath, format="turtle")
fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)
def dump(uriref):
if uriref.__contains__('#'):
return uriref.split('#')[-1]
return uriref.split('/')[-1]
graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}
q = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
}
ORDER BY ?person""")
for rel in RELS_OF_INTEREST:
pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
relation_query_results = g.query(q, initBindings={'pred': pred})
for (subj, pred, obj) in relation_query_results:
graph['edges'].append(
{
'data': {
'group': 'edges',
'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
'source': dump(subj),
'target': dump(obj),
'type': dump(pred)
}
})
q_details = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
FILTER NOT EXISTS {
?person ?testPred ?obj .
VALUES ?testPred {
fhkb:isWifeOf
fhkb:isMotherOf
fhkb:isFatherOf
fhkb:isHusbandOf
fhkb:isSpouseOf
fhkb:hasGrandParent
fhkb:isGrandParentOf
fhkb:hasGreatGrandParent
fhkb:isGreatGrandParentOf
fhkb:isUncleOf
fhkb:hasUncle
fhkb:isGreatUncleOf
fhkb:hasGreatUncle
fhkb:isAuntOf
fhkb:hasAunt
fhkb:isGreatAuntOf
fhkb:hasGreatAunt
fhkb:isBrotherOf
fhkb:isSisterOf
fhkb:isSiblingOf
fhkb:isFirstCousinOf
fhkb:isSecondCousinOf
fhkb:isThirdCousinOf
fhkb:hasRelation
fhkb:isPartnerIn
fhkb:isMalePartnerIn
fhkb:isFemalePartnerIn
fhkb:isBloodrelationOf
}
}
}
ORDER BY ?person"""
)
person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
node = nodes.get(dump(subj), {
'data': {
'label': '',
'degree': 0,
'size': 10,
'alternateNames': [],
'honorificPrefixes': [],
'honorificSuffixes': [],
'images': [],
'id': dump(subj),
}})
if pred == FHKB.Sex:
node['data'][dump(pred)] = dump(obj)
elif pred.startswith(SCHEMA_ORG):
if dump(pred) == 'honorificSuffix':
node['data']['honorificSuffixes'].append(obj)
elif dump(pred) == 'honorificPrefix':
node['data']['honorificPrefixes'].append(obj)
elif dump(pred) == 'alternateName':
node['data']['alternateNames'].append(obj)
elif dump(pred) == 'image':
node['data']['images'].append(obj)
else:
node['data'][dump(pred)] = obj
elif pred == rdflib.RDFS.label:
node['data']['label'] = obj
else:
continue
nodes[dump(subj)] = node
graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
| #!/usr/bin/env python3
import sys
import json
import rdflib
import rdflib.plugins.sparql as sparql
RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
'isGreatAuntOf', 'hasGreatAunt',
'isBrotherOf', 'isSisterOf', 'isSiblingOf',
'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER
try:
workpath = sys.argv[1]
except IndexError:
sys.exit("No path defined!")
try:
recursion_limit = int(sys.argv[2])
except IndexError:
recursion_limit = 0
if recursion_limit > 0:
sys.setrecursionlimit(recursion_limit)
g = rdflib.Graph()
g.parse(workpath, format="turtle")
fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)
def dump(uriref):
if uriref.__contains__('#'):
return uriref.split('#')[-1]
return uriref.split('/')[-1]
graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}
q = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
}
ORDER BY ?person""")
for rel in RELS_OF_INTEREST:
pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
relation_query_results = g.query(q, initBindings={'pred': pred})
for (subj, pred, obj) in relation_query_results:
graph['edges'].append(
{
'data': {
'group': 'edges',
'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
'source': dump(subj),
'target': dump(obj),
'type': dump(pred)
}
})
q_details = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
FILTER NOT EXISTS {
?person ?testPred ?obj .
VALUES ?testPred {
fhkb:isWifeOf
fhkb:isMotherOf
fhkb:isFatherOf
fhkb:isHusbandOf
fhkb:isSpouseOf
fhkb:hasGrandParent
fhkb:isGrandParentOf
fhkb:hasGreatGrandParent
fhkb:isGreatGrandParentOf
fhkb:isUncleOf
fhkb:hasUncle
fhkb:isGreatUncleOf
fhkb:hasGreatUncle
fhkb:isAuntOf
fhkb:hasAunt
fhkb:isGreatAuntOf
fhkb:hasGreatAunt
fhkb:isBrotherOf
fhkb:isSisterOf
fhkb:isSiblingOf
fhkb:isFirstCousinOf
fhkb:isSecondCousinOf
fhkb:isThirdCousinOf
fhkb:hasRelation
fhkb:isPartnerIn
fhkb:isMalePartnerIn
fhkb:isFemalePartnerIn
fhkb:isBloodrelationOf
}
}
}
ORDER BY ?person"""
)
person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
node = nodes.get(dump(subj), {
'data': {
'label': '',
'degree': 0,
'size': 10,
'alternateNames': [],
'honorificPrefixes': [],
'honorificSuffixes': [],
'images': [],
'id': dump(subj),
}})
if pred == FHKB.Sex:
node['data'][dump(pred)] = dump(obj)
elif pred.startswith(SCHEMA_ORG):
if dump(pred) == 'honorificSuffix':
node['data']['honorificSuffixes'].append(obj)
elif dump(pred) == 'honorificPrefix':
node['data']['honorificPrefixes'].append(obj)
elif dump(pred) == 'alternateName':
node['data']['alternateNames'].append(obj)
elif dump(pred) == 'image':
node['data']['images'].append(obj)
else:
node['data'][dump(pred)] = obj
elif pred == rdflib.RDFS.label:
node['data']['label'] = obj
else:
continue
nodes[dump(subj)] = node
graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
| ru | 0.112824 | #!/usr/bin/env python3 #" PREFIX fhkb:<http://www.example.com/genealogy.owl#> SELECT ?person ?pred ?obj WHERE { ?person a fhkb:Person ; ?pred ?obj . } ORDER BY ?person PREFIX fhkb:<http://www.example.com/genealogy.owl#> SELECT ?person ?pred ?obj WHERE { ?person a fhkb:Person ; ?pred ?obj . FILTER NOT EXISTS { ?person ?testPred ?obj . VALUES ?testPred { fhkb:isWifeOf fhkb:isMotherOf fhkb:isFatherOf fhkb:isHusbandOf fhkb:isSpouseOf fhkb:hasGrandParent fhkb:isGrandParentOf fhkb:hasGreatGrandParent fhkb:isGreatGrandParentOf fhkb:isUncleOf fhkb:hasUncle fhkb:isGreatUncleOf fhkb:hasGreatUncle fhkb:isAuntOf fhkb:hasAunt fhkb:isGreatAuntOf fhkb:hasGreatAunt fhkb:isBrotherOf fhkb:isSisterOf fhkb:isSiblingOf fhkb:isFirstCousinOf fhkb:isSecondCousinOf fhkb:isThirdCousinOf fhkb:hasRelation fhkb:isPartnerIn fhkb:isMalePartnerIn fhkb:isFemalePartnerIn fhkb:isBloodrelationOf } } } ORDER BY ?person | 2.335846 | 2 |
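An illustrative way to consume the script's output (a sketch; the family.ttl input file name is an assumption):

import json
import subprocess

# The script prints a Cytoscape-style graph ({"nodes": [...], "edges": [...]}) to stdout.
result = subprocess.run(
    ["python3", "ttl2json.py", "family.ttl"],
    capture_output=True, text=True, check=True,
)
graph = json.loads(result.stdout)
print(len(graph["nodes"]), "people and", len(graph["edges"]), "relationships")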
tests/rest/client/test_login.py | BearerPipelineTest/synapse-1 | 0 | 635 | # Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock
from urllib.parse import urlencode
import pymacaroons
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
from synapse.appservice import ApplicationService
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.types import create_requester
from synapse.util import Clock
from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
from tests.handlers.test_saml import has_saml2
from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless
try:
import jwt
HAS_JWT = True
except ImportError:
HAS_JWT = False
# synapse server name: used to populate public_baseurl in some tests
SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"
# public_baseurl for some tests. It uses an http:// scheme because
# FakeChannel.isSecure() returns False, so synapse will see the requested uri as
# http://..., so using http in the public_baseurl stops Synapse trying to redirect to
# https://....
BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
# CAS server used in some tests
CAS_SERVER = "https://fake.test"
# just enough to tell pysaml2 where to redirect to
SAML_SERVER = "https://test.saml.server/idp/sso"
TEST_SAML_METADATA = """
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata">
<md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
<md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/>
</md:IDPSSODescriptor>
</md:EntityDescriptor>
""" % {
"SAML_SERVER": SAML_SERVER,
}
LOGIN_URL = b"/_matrix/client/r0/login"
TEST_URL = b"/_matrix/client/r0/account/whoami"
# a (valid) url with some annoying characters in. %3D is =, %26 is &, %2B is +
TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="fö%26=o"'
# the query params in TEST_CLIENT_REDIRECT_URL
EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("<ab c>", ""), ('q" =+"', '"fö&=o"')]
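# Illustrative check of the escapes noted above, using urllib.parse.unquote_plus:
#   urllib.parse.unquote_plus('q"+%3D%2B"') == 'q" =+"'
#   urllib.parse.unquote_plus('"fö%26=o"') == '"fö&=o"'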
# (possibly experimental) login flows we expect to appear in the list after the normal
# ones
ADDITIONAL_LOGIN_FLOWS = [
{"type": "m.login.application_service"},
{"type": "uk.half-shot.msc2778.login.application_service"},
]
class LoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
logout.register_servlets,
devices.register_servlets,
lambda hs, http_server: WhoamiRestServlet(hs).register(http_server),
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.hs = self.setup_test_homeserver()
self.hs.config.registration.enable_registration = True
self.hs.config.registration.registrations_require_3pid = []
self.hs.config.registration.auto_join_rooms = []
self.hs.config.captcha.enable_registration_captcha = False
return self.hs
@override_config(
{
"rc_login": {
"address": {"per_second": 0.17, "burst_count": 5},
# Prevent the account login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"account": {"per_second": 10000, "burst_count": 10000},
}
}
)
def test_POST_ratelimiting_per_address(self) -> None:
# Create different users so we're sure not to be bothered by the per-user
# ratelimiter.
for i in range(0, 6):
self.register_user("kermit" + str(i), "monkey")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"200", channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config(
{
"rc_login": {
"account": {"per_second": 0.17, "burst_count": 5},
# Prevent the address login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
}
}
)
def test_POST_ratelimiting_per_account(self) -> None:
self.register_user("kermit", "monkey")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"200", channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config(
{
"rc_login": {
# Prevent the address login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
"failed_attempts": {"per_second": 0.17, "burst_count": 5},
}
}
)
def test_POST_ratelimiting_per_account_failed_attempts(self) -> None:
self.register_user("kermit", "<PASSWORD>")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"403", channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"403", channel.result)
@override_config({"session_lifetime": "24h"})
def test_soft_logout(self) -> None:
self.register_user("kermit", "monkey")
# we shouldn't be able to make requests without an access token
channel = self.make_request(b"GET", TEST_URL)
self.assertEqual(channel.result["code"], b"401", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN")
# log in as normal
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.code, 200, channel.result)
access_token = channel.json_body["access_token"]
device_id = channel.json_body["device_id"]
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logouted
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
#
# test behaviour after deleting the expired device
#
# we now log in as a different device
access_token_2 = self.login("kermit", "monkey")
# more requests with the expired token should still return a soft-logout
self.reactor.advance(3600)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# ... but if we delete that device, it will be a proper logout
self._delete_device(access_token_2, "kermit", "monkey", device_id)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], False)
def _delete_device(
self, access_token: str, user_id: str, password: str, device_id: str
) -> None:
"""Perform the UI-Auth to delete a device"""
channel = self.make_request(
b"DELETE", "devices/" + device_id, access_token=access_token
)
self.assertEqual(channel.code, 401, channel.result)
# check it's a UI-Auth fail
self.assertEqual(
set(channel.json_body.keys()),
{"flows", "params", "session"},
channel.result,
)
auth = {
"type": "m.login.password",
# https://github.com/matrix-org/synapse/issues/5665
# "identifier": {"type": "m.id.user", "user": user_id},
"user": user_id,
"password": password,
"session": channel.json_body["session"],
}
channel = self.make_request(
b"DELETE",
"devices/" + device_id,
access_token=access_token,
content={"auth": auth},
)
self.assertEqual(channel.code, 200, channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None:
self.register_user("kermit", "monkey")
# log in as normal
access_token = self.login("kermit", "monkey")
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logouted
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard logout this session
channel = self.make_request(b"POST", "/logout", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out(
self,
) -> None:
self.register_user("kermit", "monkey")
# log in as normal
access_token = self.login("kermit", "monkey")
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logouted
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard log out all of the user's sessions
channel = self.make_request(b"POST", "/logout/all", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_with_overly_long_device_id_fails(self) -> None:
self.register_user("mickey", "<PASSWORD>")
# create a device_id longer than 512 characters
device_id = "yolo" * 512
body = {
"type": "m.login.password",
"user": "mickey",
"password": "<PASSWORD>",
"device_id": device_id,
}
# make a login request with the bad device_id
channel = self.make_request(
"POST",
"/_matrix/client/v3/login",
json.dumps(body).encode("utf8"),
custom_headers=None,
)
# test that the login fails with the correct error code
self.assertEqual(channel.code, 400)
self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):
"""Tests for homeservers with multiple SSO providers enabled"""
servlets = [
login.register_servlets,
]
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["public_baseurl"] = BASE_URL
config["cas_config"] = {
"enabled": True,
"server_url": CAS_SERVER,
"service_url": "https://matrix.goodserver.com:8448",
}
config["saml2_config"] = {
"sp_config": {
"metadata": {"inline": [TEST_SAML_METADATA]},
# use the XMLSecurity backend to avoid relying on xmlsec1
"crypto_backend": "XMLSecurity",
},
}
# default OIDC provider
config["oidc_config"] = TEST_OIDC_CONFIG
# additional OIDC providers
config["oidc_providers"] = [
{
"idp_id": "idp1",
"idp_name": "IDP1",
"discover": False,
"issuer": "https://issuer1",
"client_id": "test-client-id",
"client_secret": "test-client-secret",
"scopes": ["profile"],
"authorization_endpoint": "https://issuer1/auth",
"token_endpoint": "https://issuer1/token",
"userinfo_endpoint": "https://issuer1/userinfo",
"user_mapping_provider": {
"config": {"localpart_template": "{{ user.sub }}"}
},
}
]
return config
def create_resource_dict(self) -> Dict[str, Resource]:
d = super().create_resource_dict()
d.update(build_synapse_client_resource_tree(self.hs))
return d
def test_get_login_flows(self) -> None:
"""GET /login should return password and SSO flows"""
channel = self.make_request("GET", "/_matrix/client/r0/login")
self.assertEqual(channel.code, 200, channel.result)
expected_flow_types = [
"m.login.cas",
"m.login.sso",
"m.login.token",
"m.login.password",
] + [f["type"] for f in ADDITIONAL_LOGIN_FLOWS]
self.assertCountEqual(
[f["type"] for f in channel.json_body["flows"]], expected_flow_types
)
flows = {flow["type"]: flow for flow in channel.json_body["flows"]}
self.assertCountEqual(
flows["m.login.sso"]["identity_providers"],
[
{"id": "cas", "name": "CAS"},
{"id": "saml", "name": "SAML"},
{"id": "oidc-idp1", "name": "IDP1"},
{"id": "oidc", "name": "OIDC"},
],
)
def test_multi_sso_redirect(self) -> None:
"""/login/sso/redirect should redirect to an identity picker"""
# first hit the redirect url, which should redirect to our idp picker
channel = self._make_sso_redirect_request(None)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
uri = location_headers[0]
# hitting that picker should give us some HTML
channel = self.make_request("GET", uri)
self.assertEqual(channel.code, 200, channel.result)
# parse the form to check it has fields assumed elsewhere in this class
html = channel.result["body"].decode("utf-8")
p = TestHtmlParser()
p.feed(html)
p.close()
# there should be a link for each href
returned_idps: List[str] = []
for link in p.links:
path, query = link.split("?", 1)
self.assertEqual(path, "pick_idp")
params = urllib.parse.parse_qs(query)
self.assertEqual(params["redirectUrl"], [TEST_CLIENT_REDIRECT_URL])
returned_idps.append(params["idp"][0])
self.assertCountEqual(returned_idps, ["cas", "oidc", "oidc-idp1", "saml"])
def test_multi_sso_redirect_to_cas(self) -> None:
"""If CAS is chosen, should redirect to the CAS server"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=cas",
shorthand=False,
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
cas_uri = location_headers[0]
cas_uri_path, cas_uri_query = cas_uri.split("?", 1)
# it should redirect us to the login page of the cas server
self.assertEqual(cas_uri_path, CAS_SERVER + "/login")
# check that the redirectUrl is correctly encoded in the service param - ie, the
# place that CAS will redirect to
cas_uri_params = urllib.parse.parse_qs(cas_uri_query)
service_uri = cas_uri_params["service"][0]
_, service_uri_query = service_uri.split("?", 1)
service_uri_params = urllib.parse.parse_qs(service_uri_query)
self.assertEqual(service_uri_params["redirectUrl"][0], TEST_CLIENT_REDIRECT_URL)
def test_multi_sso_redirect_to_saml(self) -> None:
"""If SAML is chosen, should redirect to the SAML server"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=saml",
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
saml_uri = location_headers[0]
saml_uri_path, saml_uri_query = saml_uri.split("?", 1)
# it should redirect us to the login page of the SAML server
self.assertEqual(saml_uri_path, SAML_SERVER)
# the RelayState is used to carry the client redirect url
saml_uri_params = urllib.parse.parse_qs(saml_uri_query)
relay_state_param = saml_uri_params["RelayState"][0]
self.assertEqual(relay_state_param, TEST_CLIENT_REDIRECT_URL)
def test_login_via_oidc(self) -> None:
"""If OIDC is chosen, should redirect to the OIDC auth endpoint"""
# pick the default OIDC provider
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=oidc",
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
# ... and should have set a cookie including the redirect url
cookie_headers = channel.headers.getRawHeaders("Set-Cookie")
assert cookie_headers
cookies: Dict[str, str] = {}
for h in cookie_headers:
key, value = h.split(";")[0].split("=", maxsplit=1)
cookies[key] = value
oidc_session_cookie = cookies["oidc_session"]
macaroon = pymacaroons.Macaroon.deserialize(oidc_session_cookie)
self.assertEqual(
self._get_value_from_macaroon(macaroon, "client_redirect_url"),
TEST_CLIENT_REDIRECT_URL,
)
channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"})
# that should serve a confirmation page
self.assertEqual(channel.code, 200, channel.result)
content_type_headers = channel.headers.getRawHeaders("Content-Type")
assert content_type_headers
self.assertTrue(content_type_headers[-1].startswith("text/html"))
p = TestHtmlParser()
p.feed(channel.text_body)
p.close()
# ... which should contain our redirect link
self.assertEqual(len(p.links), 1)
path, query = p.links[0].split("?", 1)
self.assertEqual(path, "https://x")
# it will have url-encoded the params properly, so we'll have to parse them
params = urllib.parse.parse_qsl(
query, keep_blank_values=True, strict_parsing=True, errors="strict"
)
self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
self.assertEqual(params[2][0], "loginToken")
# finally, submit the matrix login token to the login API, which gives us our
# matrix access token, mxid, and device id.
login_token = params[2][1]
chan = self.make_request(
"POST",
"/login",
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@user1:test")
def test_multi_sso_redirect_to_unknown(self) -> None:
"""An unknown IdP should cause a 400"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz",
)
self.assertEqual(channel.code, 400, channel.result)
def test_client_idp_redirect_to_unknown(self) -> None:
"""If the client tries to pick an unknown IdP, return a 404"""
channel = self._make_sso_redirect_request("xxx")
self.assertEqual(channel.code, 404, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
def test_client_idp_redirect_to_oidc(self) -> None:
"""If the client pick a known IdP, redirect to it"""
channel = self._make_sso_redirect_request("oidc")
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel:
"""Send a request to /_matrix/client/r0/login/sso/redirect
... possibly specifying an IDP provider
"""
endpoint = "/_matrix/client/r0/login/sso/redirect"
if idp_prov is not None:
endpoint += "/" + idp_prov
endpoint += "?redirectUrl=" + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
return self.make_request(
"GET",
endpoint,
custom_headers=[("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME)],
)
@staticmethod
def _get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
prefix = key + " = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(prefix):
return caveat.caveat_id[len(prefix) :]
raise ValueError("No %s caveat in macaroon" % (key,))
class CASTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.base_url = "https://matrix.goodserver.com/"
self.redirect_path = "_synapse/client/login/sso/redirect/confirm"
config = self.default_config()
config["public_baseurl"] = (
config.get("public_baseurl") or "https://matrix.goodserver.com:8448"
)
config["cas_config"] = {
"enabled": True,
"server_url": CAS_SERVER,
}
cas_user_id = "username"
self.user_id = "@%s:test" % cas_user_id
async def get_raw(uri: str, args: Any) -> bytes:
"""Return an example response payload from a call to the `/proxyValidate`
endpoint of a CAS server, copied from
https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20
This needs to be returned by an async function (as opposed to set as the
mock's return value) because the corresponding Synapse code awaits on it.
"""
return (
"""
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>%s</cas:user>
<cas:proxyGrantingTicket>PGTIOU-84678-8a9d...</cas:proxyGrantingTicket>
<cas:proxies>
<cas:proxy>https://proxy2/pgtUrl</cas:proxy>
<cas:proxy>https://proxy1/pgtUrl</cas:proxy>
</cas:proxies>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
% cas_user_id
).encode("utf-8")
mocked_http_client = Mock(spec=["get_raw"])
mocked_http_client.get_raw.side_effect = get_raw
self.hs = self.setup_test_homeserver(
config=config,
proxied_http_client=mocked_http_client,
)
return self.hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.deactivate_account_handler = hs.get_deactivate_account_handler()
def test_cas_redirect_confirm(self) -> None:
"""Tests that the SSO login flow serves a confirmation page before redirecting a
user to the redirect URL.
"""
base_url = "/_matrix/client/r0/login/cas/ticket?redirectUrl"
redirect_url = "https://dodgy-site.com/"
url_parts = list(urllib.parse.urlparse(base_url))
query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update({"redirectUrl": redirect_url})
query.update({"ticket": "ticket"})
url_parts[4] = urllib.parse.urlencode(query)
cas_ticket_url = urllib.parse.urlunparse(url_parts)
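        # i.e. roughly /_matrix/client/r0/login/cas/ticket?redirectUrl=https%3A%2F%2Fdodgy-site.com%2F&ticket=ticket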
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
# Test that the response is HTML.
self.assertEqual(channel.code, 200, channel.result)
content_type_header_value = ""
for header in channel.result.get("headers", []):
if header[0] == b"Content-Type":
content_type_header_value = header[1].decode("utf8")
self.assertTrue(content_type_header_value.startswith("text/html"))
# Test that the body isn't empty.
self.assertTrue(len(channel.result["body"]) > 0)
# And that it contains our redirect link
self.assertIn(redirect_url, channel.result["body"].decode("UTF-8"))
@override_config(
{
"sso": {
"client_whitelist": [
"https://legit-site.com/",
"https://other-site.com/",
]
}
}
)
def test_cas_redirect_whitelisted(self) -> None:
"""Tests that the SSO login flow serves a redirect to a whitelisted url"""
self._test_redirect("https://legit-site.com/")
@override_config({"public_baseurl": "https://example.com"})
def test_cas_redirect_login_fallback(self) -> None:
self._test_redirect("https://example.com/_matrix/static/client/login")
def _test_redirect(self, redirect_url: str) -> None:
"""Tests that the SSO login flow serves a redirect for the given redirect URL."""
cas_ticket_url = (
"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
% (urllib.parse.quote(redirect_url))
)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
self.assertEqual(channel.code, 302)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
self.assertEqual(location_headers[0][: len(redirect_url)], redirect_url)
@override_config({"sso": {"client_whitelist": ["https://legit-site.com/"]}})
def test_deactivated_user(self) -> None:
"""Logging in as a deactivated account should error."""
redirect_url = "https://legit-site.com/"
# First login (to create the user).
self._test_redirect(redirect_url)
# Deactivate the account.
self.get_success(
self.deactivate_account_handler.deactivate_account(
self.user_id, False, create_requester(self.user_id)
)
)
# Request the CAS ticket.
cas_ticket_url = (
"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
% (urllib.parse.quote(redirect_url))
)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
# Because the user is deactivated they are served an error template.
self.assertEqual(channel.code, 403)
self.assertIn(b"SSO account deactivated", channel.result["body"])
@skip_unless(HAS_JWT, "requires jwt")
class JWTTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
]
jwt_secret = "secret"
jwt_algorithm = "HS256"
base_config = {
"enabled": True,
"secret": jwt_secret,
"algorithm": jwt_algorithm,
}
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
# If jwt_config has been defined (eg via @override_config), don't replace it.
if config.get("jwt_config") is None:
config["jwt_config"] = self.base_config
return config
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
result: Union[str, bytes] = jwt.encode(payload, secret, self.jwt_algorithm)
if isinstance(result, bytes):
return result.decode("ascii")
return result
def jwt_login(self, *args: Any) -> FakeChannel:
params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
channel = self.make_request(b"POST", LOGIN_URL, params)
return channel
def test_login_jwt_valid_registered(self) -> None:
self.register_user("kermit", "monkey")
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_valid_unregistered(self) -> None:
channel = self.jwt_login({"sub": "frog"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, "notsecret")
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: Signature verification failed",
)
def test_login_jwt_expired(self) -> None:
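        # "exp" is seconds since the Unix epoch, so 864000 (ten days into 1970)
        # is long in the past and the token must be rejected as expired.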
channel = self.jwt_login({"sub": "frog", "exp": 864000})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Signature has expired"
)
def test_login_jwt_not_before(self) -> None:
now = int(time.time())
channel = self.jwt_login({"sub": "frog", "nbf": now + 3600})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: The token is not yet valid (nbf)",
)
def test_login_no_sub(self) -> None:
channel = self.jwt_login({"username": "root"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Invalid JWT")
@override_config({"jwt_config": {**base_config, "issuer": "test-issuer"}})
def test_login_iss(self) -> None:
"""Test validating the issuer claim."""
# A valid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "test-issuer"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid issuer"
)
# Not providing an issuer.
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
'JWT validation failed: Token is missing the "iss" claim',
)
def test_login_iss_no_config(self) -> None:
"""Test providing an issuer claim without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "audiences": ["test-audience"]}})
def test_login_aud(self) -> None:
"""Test validating the audience claim."""
# A valid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "test-audience"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid audience"
)
# Not providing an audience.
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
'JWT validation failed: Token is missing the "aud" claim',
)
def test_login_aud_no_config(self) -> None:
"""Test providing an audience without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid audience"
)
def test_login_default_sub(self) -> None:
"""Test reading user ID from the default subject claim."""
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "subject_claim": "username"}})
def test_login_custom_sub(self) -> None:
"""Test reading user ID from a custom subject claim."""
channel = self.jwt_login({"username": "frog"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_no_token(self) -> None:
params = {"type": "org.matrix.login.jwt"}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Token field for JWT is missing")
# The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
# RS256, with a public key configured in synapse as "jwt_secret", and tokens
# signed by the private key.
@skip_unless(HAS_JWT, "requires jwt")
class JWTPubKeyTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
]
# This key's pubkey is used as the jwt_secret setting of synapse. Valid
# tokens are signed by this and validated using the pubkey. It is generated
# with `openssl genrsa 512` (not a secure way to generate real keys, but
# good enough for tests!)
jwt_privatekey = "\n".join(
[
"-----BEGIN RSA PRIVATE KEY-----",
"<KEY>",
"<KEY>",
"<KEY>KVaZ/gTOM9+9MwlmhidrUOweKfB/",
"kQIhAPZwHazbjo7dYlJs7wPQz1vd+aHSEH+3uQKIysebkmm3AiEA1nc6mDdmgiUq",
"TpIN8A4MBKmfZMWTLq6z05y/qjKyxb0CIQDYJxCwTEenIaEa4PdoJl+qmXFasVDN",
"ZU0+XtNV7yul0wIhAMI9IhiStIjS2EppBa6RSlk+t1oxh2gUWlIh+YVQfZGRAiEA",
"tqBR7qLZGJ5CVKxWmNhJZGt1QHoUtOch8t9C4IdOZ2g=",
"-----END RSA PRIVATE KEY-----",
]
)
    # Generated with `openssl rsa -in foo.key -pubout`, with the above
# private key placed in foo.key (jwt_privatekey).
    jwt_pubkey = "\n".join(
        [
            "-----BEGIN PUBLIC KEY-----",
            "<KEY>",
            "<KEY>",
            "-----END PUBLIC KEY-----",
        ]
    )
# This key is used to sign tokens that shouldn't be accepted by synapse.
# Generated just like jwt_privatekey.
bad_privatekey = "\n".join(
[
"-----BEGIN RSA PRIVATE KEY-----",
"<KEY>",
"gLjmQD3jBUTz+/FndLSBvr3F4OHtGL9O/osCAwEAAQJAJqH0jZJW7Smzo9ShP02L",
"R6HRZcLExZuUrWI+5ZSP7TaZ1uwJzGFspDrunqaVoPobndw/8VsP8HFyKtceC7vY",
"uQIhAPdYInDDSJ8rFKGiy3Ajv5KWISBicjevWHF9dbotmNO9AiEAxrdRJVU+EI9I",
"eB4qRZpY6n4pnwyP0p8f/A3NBaQPG+cCIFlj08aW/PbxNdqYoBdeBA0xDrXKfmbb",
"iwYxBkwL0JCtAiBYmsi94sJn09u2Y4zpuCbJeDPKzWkbuwQh+W1fhIWQJQIhAKR0",
"KydN6cRLvphNQ9c/vBTdlzWxzcSxREpguC7F1J1m",
"-----END RSA PRIVATE KEY-----",
]
)
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["jwt_config"] = {
"enabled": True,
"secret": self.jwt_pubkey,
"algorithm": "RS256",
}
return config
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
result: Union[bytes, str] = jwt.encode(payload, secret, "RS256")
if isinstance(result, bytes):
return result.decode("ascii")
return result
def jwt_login(self, *args: Any) -> FakeChannel:
params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
channel = self.make_request(b"POST", LOGIN_URL, params)
return channel
def test_login_jwt_valid(self) -> None:
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, self.bad_privatekey)
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: Signature verification failed",
)
AS_USER = "as_user_alice"
class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
register.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.hs = self.setup_test_homeserver()
self.service = ApplicationService(
id="unique_identifier",
token="some_token",
hostname="example.com",
sender="@asbot:example.com",
namespaces={
ApplicationService.NS_USERS: [
{"regex": r"@as_user.*", "exclusive": False}
],
ApplicationService.NS_ROOMS: [],
ApplicationService.NS_ALIASES: [],
},
)
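        # The non-exclusive "@as_user.*" namespace is what allows this service to
        # register and log in AS_USER; the second service below owns a different
        # namespace so that cross-service logins can be rejected.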
self.another_service = ApplicationService(
id="another__identifier",
token="another_token",
hostname="example.com",
sender="@as2bot:example.com",
namespaces={
ApplicationService.NS_USERS: [
{"regex": r"@as2_user.*", "exclusive": False}
],
ApplicationService.NS_ROOMS: [],
ApplicationService.NS_ALIASES: [],
},
)
self.hs.get_datastores().main.services_cache.append(self.service)
self.hs.get_datastores().main.services_cache.append(self.another_service)
return self.hs
def test_login_appservice_user(self) -> None:
"""Test that an appservice user can use /login"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_appservice_user_bot(self) -> None:
"""Test that the appservice bot can use /login"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": self.service.sender},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_appservice_wrong_user(self) -> None:
"""Test that non-as users cannot login with the as token"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": "fibble_wibble"},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"403", channel.result)
def test_login_appservice_wrong_as(self) -> None:
"""Test that as users cannot login with wrong as token"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.another_service.token
)
self.assertEqual(channel.result["code"], b"403", channel.result)
def test_login_appservice_no_token(self) -> None:
"""Test that users must provide a token when using the appservice
login method
"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"401", channel.result)
@skip_unless(HAS_OIDC, "requires OIDC")
class UsernamePickerTestCase(HomeserverTestCase):
"""Tests for the username picker flow of SSO login"""
servlets = [login.register_servlets]
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["public_baseurl"] = BASE_URL
config["oidc_config"] = {}
config["oidc_config"].update(TEST_OIDC_CONFIG)
config["oidc_config"]["user_mapping_provider"] = {
"config": {"display_name_template": "{{ user.displayname }}"}
}
# whitelist this client URI so we redirect straight to it rather than
# serving a confirmation page
config["sso"] = {"client_whitelist": ["https://x"]}
return config
def create_resource_dict(self) -> Dict[str, Resource]:
d = super().create_resource_dict()
d.update(build_synapse_client_resource_tree(self.hs))
return d
def test_username_picker(self) -> None:
"""Test the happy path of a username picker flow."""
# do the start of the login flow
channel = self.helper.auth_via_oidc(
{"sub": "tester", "displayname": "Jonny"}, TEST_CLIENT_REDIRECT_URL
)
# that should redirect to the username picker
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
picker_url = location_headers[0]
self.assertEqual(picker_url, "/_synapse/client/pick_username/account_details")
# ... with a username_mapping_session cookie
cookies: Dict[str, str] = {}
channel.extract_cookies(cookies)
self.assertIn("username_mapping_session", cookies)
session_id = cookies["username_mapping_session"]
# introspect the sso handler a bit to check that the username mapping session
# looks ok.
username_mapping_sessions = self.hs.get_sso_handler()._username_mapping_sessions
self.assertIn(
session_id,
username_mapping_sessions,
"session id not found in map",
)
session = username_mapping_sessions[session_id]
self.assertEqual(session.remote_user_id, "tester")
self.assertEqual(session.display_name, "Jonny")
self.assertEqual(session.client_redirect_url, TEST_CLIENT_REDIRECT_URL)
# the expiry time should be about 15 minutes away
expected_expiry = self.clock.time_msec() + (15 * 60 * 1000)
self.assertApproximates(session.expiry_time_ms, expected_expiry, tolerance=1000)
# Now, submit a username to the username picker, which should serve a redirect
# to the completion page
content = urlencode({b"username": b"bobby"}).encode("utf8")
chan = self.make_request(
"POST",
path=picker_url,
content=content,
content_is_form=True,
custom_headers=[
("Cookie", "username_mapping_session=" + session_id),
# old versions of twisted don't do form-parsing without a valid
# content-length header.
("Content-Length", str(len(content))),
],
)
self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
# send a request to the completion page, which should 302 to the client redirectUrl
chan = self.make_request(
"GET",
path=location_headers[0],
custom_headers=[("Cookie", "username_mapping_session=" + session_id)],
)
self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
# ensure that the returned location matches the requested redirect URL
path, query = location_headers[0].split("?", 1)
self.assertEqual(path, "https://x")
# it will have url-encoded the params properly, so we'll have to parse them
params = urllib.parse.parse_qsl(
query, keep_blank_values=True, strict_parsing=True, errors="strict"
)
self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
self.assertEqual(params[2][0], "loginToken")
# fish the login token out of the returned redirect uri
login_token = params[2][1]
# finally, submit the matrix login token to the login API, which gives us our
# matrix access token, mxid, and device id.
chan = self.make_request(
"POST",
"/login",
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@bobby:test")
| # Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock
from urllib.parse import urlencode
import pymacaroons
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
from synapse.appservice import ApplicationService
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.types import create_requester
from synapse.util import Clock
from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
from tests.handlers.test_saml import has_saml2
from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless
try:
import jwt
HAS_JWT = True
except ImportError:
HAS_JWT = False
# synapse server name: used to populate public_baseurl in some tests
SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"
# public_baseurl for some tests. It uses an http:// scheme because
# FakeChannel.isSecure() returns False, so Synapse sees the requested URI as
# http://...; using http in the public_baseurl therefore stops Synapse from
# trying to redirect to https://....
BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
# CAS server used in some tests
CAS_SERVER = "https://fake.test"
# just enough to tell pysaml2 where to redirect to
SAML_SERVER = "https://test.saml.server/idp/sso"
TEST_SAML_METADATA = """
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata">
<md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
<md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/>
</md:IDPSSODescriptor>
</md:EntityDescriptor>
""" % {
"SAML_SERVER": SAML_SERVER,
}
LOGIN_URL = b"/_matrix/client/r0/login"
TEST_URL = b"/_matrix/client/r0/account/whoami"
# a (valid) url with some annoying characters in it. %3D is =, %26 is &, %2B is +
TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="fö%26=o"'
# the query params in TEST_CLIENT_REDIRECT_URL
EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("<ab c>", ""), ('q" =+"', '"fö&=o"')]
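# i.e. parse_qsl on that query yields ("<ab c>", "") and ('q" =+"', '"fö&=o"'),
# which the SSO code is expected to round-trip exactly.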
# (possibly experimental) login flows we expect to appear in the list after the normal
# ones
ADDITIONAL_LOGIN_FLOWS = [
{"type": "m.login.application_service"},
{"type": "uk.half-shot.msc2778.login.application_service"},
]
class LoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
logout.register_servlets,
devices.register_servlets,
lambda hs, http_server: WhoamiRestServlet(hs).register(http_server),
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.hs = self.setup_test_homeserver()
self.hs.config.registration.enable_registration = True
self.hs.config.registration.registrations_require_3pid = []
self.hs.config.registration.auto_join_rooms = []
self.hs.config.captcha.enable_registration_captcha = False
return self.hs
@override_config(
{
"rc_login": {
"address": {"per_second": 0.17, "burst_count": 5},
# Prevent the account login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"account": {"per_second": 10000, "burst_count": 10000},
}
}
)
def test_POST_ratelimiting_per_address(self) -> None:
# Create different users so we're sure not to be bothered by the per-user
# ratelimiter.
for i in range(0, 6):
self.register_user("kermit" + str(i), "monkey")
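        # With address.burst_count=5, the first five logins succeed and the sixth
        # is rejected with a 429 carrying retry_after_ms, which tells us how long
        # to back off before retrying.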
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"200", channel.result)
        # We're ratelimiting at roughly one request every 6 seconds (0.17/s), so
        # retry_after_ms should be lower than 6s.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config(
{
"rc_login": {
"account": {"per_second": 0.17, "burst_count": 5},
# Prevent the address login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
}
}
)
def test_POST_ratelimiting_per_account(self) -> None:
self.register_user("kermit", "monkey")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"200", channel.result)
        # We're ratelimiting at roughly one request every 6 seconds (0.17/s), so
        # retry_after_ms should be lower than 6s.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config(
{
"rc_login": {
# Prevent the address login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
"failed_attempts": {"per_second": 0.17, "burst_count": 5},
}
}
)
def test_POST_ratelimiting_per_account_failed_attempts(self) -> None:
self.register_user("kermit", "<PASSWORD>")
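        # Every attempt below is expected to fail (the non-ratelimited responses
        # are 403s), so it is the failed_attempts bucket that eventually trips.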
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"403", channel.result)
        # We're ratelimiting at roughly one request every 6 seconds (0.17/s), so
        # retry_after_ms should be lower than 6s.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"403", channel.result)
@override_config({"session_lifetime": "24h"})
def test_soft_logout(self) -> None:
self.register_user("kermit", "monkey")
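        # With session_lifetime=24h the access token expires after a day: requests
        # made with it then get a 401 M_UNKNOWN_TOKEN with soft_logout=True while
        # the device still exists, and soft_logout=False once it has been deleted.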
# we shouldn't be able to make requests without an access token
channel = self.make_request(b"GET", TEST_URL)
self.assertEqual(channel.result["code"], b"401", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN")
# log in as normal
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "<PASSWORD>",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.code, 200, channel.result)
access_token = channel.json_body["access_token"]
device_id = channel.json_body["device_id"]
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
        # ... and we should be soft logged out
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
#
# test behaviour after deleting the expired device
#
# we now log in as a different device
access_token_2 = self.login("kermit", "monkey")
# more requests with the expired token should still return a soft-logout
self.reactor.advance(3600)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# ... but if we delete that device, it will be a proper logout
self._delete_device(access_token_2, "kermit", "monkey", device_id)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], False)
def _delete_device(
self, access_token: str, user_id: str, password: str, device_id: str
) -> None:
"""Perform the UI-Auth to delete a device"""
channel = self.make_request(
b"DELETE", "devices/" + device_id, access_token=access_token
)
self.assertEqual(channel.code, 401, channel.result)
# check it's a UI-Auth fail
self.assertEqual(
set(channel.json_body.keys()),
{"flows", "params", "session"},
channel.result,
)
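        # Retry the delete with an "auth" dict that references the session we were
        # given and completes the m.login.password flow.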
auth = {
"type": "m.login.password",
# https://github.com/matrix-org/synapse/issues/5665
# "identifier": {"type": "m.id.user", "user": user_id},
"user": user_id,
"password": password,
"session": channel.json_body["session"],
}
channel = self.make_request(
b"DELETE",
"devices/" + device_id,
access_token=access_token,
content={"auth": auth},
)
self.assertEqual(channel.code, 200, channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None:
self.register_user("kermit", "monkey")
# log in as normal
access_token = self.login("kermit", "monkey")
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
        # ... and we should be soft logged out
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard logout this session
channel = self.make_request(b"POST", "/logout", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out(
self,
) -> None:
self.register_user("kermit", "monkey")
# log in as normal
access_token = self.login("kermit", "monkey")
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
        # ... and we should be soft logged out
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard log out all of the user's sessions
channel = self.make_request(b"POST", "/logout/all", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_with_overly_long_device_id_fails(self) -> None:
self.register_user("mickey", "<PASSWORD>")
# create a device_id longer than 512 characters
device_id = "yolo" * 512
body = {
"type": "m.login.password",
"user": "mickey",
"password": "<PASSWORD>",
"device_id": device_id,
}
# make a login request with the bad device_id
channel = self.make_request(
"POST",
"/_matrix/client/v3/login",
json.dumps(body).encode("utf8"),
custom_headers=None,
)
# test that the login fails with the correct error code
self.assertEqual(channel.code, 400)
self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):
"""Tests for homeservers with multiple SSO providers enabled"""
servlets = [
login.register_servlets,
]
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["public_baseurl"] = BASE_URL
config["cas_config"] = {
"enabled": True,
"server_url": CAS_SERVER,
"service_url": "https://matrix.goodserver.com:8448",
}
config["saml2_config"] = {
"sp_config": {
"metadata": {"inline": [TEST_SAML_METADATA]},
# use the XMLSecurity backend to avoid relying on xmlsec1
"crypto_backend": "XMLSecurity",
},
}
# default OIDC provider
config["oidc_config"] = TEST_OIDC_CONFIG
# additional OIDC providers
config["oidc_providers"] = [
{
"idp_id": "idp1",
"idp_name": "IDP1",
"discover": False,
"issuer": "https://issuer1",
"client_id": "test-client-id",
"client_secret": "test-client-secret",
"scopes": ["profile"],
"authorization_endpoint": "https://issuer1/auth",
"token_endpoint": "https://issuer1/token",
"userinfo_endpoint": "https://issuer1/userinfo",
"user_mapping_provider": {
"config": {"localpart_template": "{{ user.sub }}"}
},
}
]
return config
def create_resource_dict(self) -> Dict[str, Resource]:
d = super().create_resource_dict()
d.update(build_synapse_client_resource_tree(self.hs))
return d
def test_get_login_flows(self) -> None:
"""GET /login should return password and SSO flows"""
channel = self.make_request("GET", "/_matrix/client/r0/login")
self.assertEqual(channel.code, 200, channel.result)
expected_flow_types = [
"m.login.cas",
"m.login.sso",
"m.login.token",
"m.login.password",
] + [f["type"] for f in ADDITIONAL_LOGIN_FLOWS]
self.assertCountEqual(
[f["type"] for f in channel.json_body["flows"]], expected_flow_types
)
flows = {flow["type"]: flow for flow in channel.json_body["flows"]}
self.assertCountEqual(
flows["m.login.sso"]["identity_providers"],
[
{"id": "cas", "name": "CAS"},
{"id": "saml", "name": "SAML"},
{"id": "oidc-idp1", "name": "IDP1"},
{"id": "oidc", "name": "OIDC"},
],
)
def test_multi_sso_redirect(self) -> None:
"""/login/sso/redirect should redirect to an identity picker"""
# first hit the redirect url, which should redirect to our idp picker
channel = self._make_sso_redirect_request(None)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
uri = location_headers[0]
# hitting that picker should give us some HTML
channel = self.make_request("GET", uri)
self.assertEqual(channel.code, 200, channel.result)
# parse the form to check it has fields assumed elsewhere in this class
html = channel.result["body"].decode("utf-8")
p = TestHtmlParser()
p.feed(html)
p.close()
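        # Each link on the picker page should point back at pick_idp, preserving
        # our redirectUrl and naming one of the configured providers via `idp`.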
# there should be a link for each href
returned_idps: List[str] = []
for link in p.links:
path, query = link.split("?", 1)
self.assertEqual(path, "pick_idp")
params = urllib.parse.parse_qs(query)
self.assertEqual(params["redirectUrl"], [TEST_CLIENT_REDIRECT_URL])
returned_idps.append(params["idp"][0])
self.assertCountEqual(returned_idps, ["cas", "oidc", "oidc-idp1", "saml"])
def test_multi_sso_redirect_to_cas(self) -> None:
"""If CAS is chosen, should redirect to the CAS server"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=cas",
shorthand=False,
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
cas_uri = location_headers[0]
cas_uri_path, cas_uri_query = cas_uri.split("?", 1)
# it should redirect us to the login page of the cas server
self.assertEqual(cas_uri_path, CAS_SERVER + "/login")
# check that the redirectUrl is correctly encoded in the service param - ie, the
# place that CAS will redirect to
cas_uri_params = urllib.parse.parse_qs(cas_uri_query)
service_uri = cas_uri_params["service"][0]
_, service_uri_query = service_uri.split("?", 1)
service_uri_params = urllib.parse.parse_qs(service_uri_query)
self.assertEqual(service_uri_params["redirectUrl"][0], TEST_CLIENT_REDIRECT_URL)
def test_multi_sso_redirect_to_saml(self) -> None:
"""If SAML is chosen, should redirect to the SAML server"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=saml",
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
saml_uri = location_headers[0]
saml_uri_path, saml_uri_query = saml_uri.split("?", 1)
# it should redirect us to the login page of the SAML server
self.assertEqual(saml_uri_path, SAML_SERVER)
# the RelayState is used to carry the client redirect url
saml_uri_params = urllib.parse.parse_qs(saml_uri_query)
relay_state_param = saml_uri_params["RelayState"][0]
self.assertEqual(relay_state_param, TEST_CLIENT_REDIRECT_URL)
def test_login_via_oidc(self) -> None:
"""If OIDC is chosen, should redirect to the OIDC auth endpoint"""
# pick the default OIDC provider
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=oidc",
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
# ... and should have set a cookie including the redirect url
cookie_headers = channel.headers.getRawHeaders("Set-Cookie")
assert cookie_headers
cookies: Dict[str, str] = {}
for h in cookie_headers:
key, value = h.split(";")[0].split("=", maxsplit=1)
cookies[key] = value
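        # The oidc_session cookie is a serialized macaroon; one of its caveats
        # records the client_redirect_url we asked for, which we verify below.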
oidc_session_cookie = cookies["oidc_session"]
macaroon = pymacaroons.Macaroon.deserialize(oidc_session_cookie)
self.assertEqual(
self._get_value_from_macaroon(macaroon, "client_redirect_url"),
TEST_CLIENT_REDIRECT_URL,
)
channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"})
# that should serve a confirmation page
self.assertEqual(channel.code, 200, channel.result)
content_type_headers = channel.headers.getRawHeaders("Content-Type")
assert content_type_headers
self.assertTrue(content_type_headers[-1].startswith("text/html"))
p = TestHtmlParser()
p.feed(channel.text_body)
p.close()
# ... which should contain our redirect link
self.assertEqual(len(p.links), 1)
path, query = p.links[0].split("?", 1)
self.assertEqual(path, "https://x")
# it will have url-encoded the params properly, so we'll have to parse them
params = urllib.parse.parse_qsl(
query, keep_blank_values=True, strict_parsing=True, errors="strict"
)
self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
self.assertEqual(params[2][0], "loginToken")
# finally, submit the matrix login token to the login API, which gives us our
# matrix access token, mxid, and device id.
login_token = params[2][1]
chan = self.make_request(
"POST",
"/login",
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@user1:test")
def test_multi_sso_redirect_to_unknown(self) -> None:
"""An unknown IdP should cause a 400"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz",
)
self.assertEqual(channel.code, 400, channel.result)
def test_client_idp_redirect_to_unknown(self) -> None:
"""If the client tries to pick an unknown IdP, return a 404"""
channel = self._make_sso_redirect_request("xxx")
self.assertEqual(channel.code, 404, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
    def test_client_idp_redirect_to_oidc(self) -> None:
        """If the client picks a known IdP, redirect to it"""
channel = self._make_sso_redirect_request("oidc")
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel:
"""Send a request to /_matrix/client/r0/login/sso/redirect
... possibly specifying an IDP provider
"""
endpoint = "/_matrix/client/r0/login/sso/redirect"
if idp_prov is not None:
endpoint += "/" + idp_prov
endpoint += "?redirectUrl=" + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
return self.make_request(
"GET",
endpoint,
custom_headers=[("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME)],
)
@staticmethod
def _get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
prefix = key + " = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(prefix):
return caveat.caveat_id[len(prefix) :]
raise ValueError("No %s caveat in macaroon" % (key,))
class CASTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.base_url = "https://matrix.goodserver.com/"
self.redirect_path = "_synapse/client/login/sso/redirect/confirm"
config = self.default_config()
config["public_baseurl"] = (
config.get("public_baseurl") or "https://matrix.goodserver.com:8448"
)
config["cas_config"] = {
"enabled": True,
"server_url": CAS_SERVER,
}
cas_user_id = "username"
self.user_id = "@%s:test" % cas_user_id
async def get_raw(uri: str, args: Any) -> bytes:
"""Return an example response payload from a call to the `/proxyValidate`
endpoint of a CAS server, copied from
https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20
This needs to be returned by an async function (as opposed to set as the
mock's return value) because the corresponding Synapse code awaits on it.
"""
return (
"""
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>%s</cas:user>
<cas:proxyGrantingTicket>PGTIOU-84678-8a9d...</cas:proxyGrantingTicket>
<cas:proxies>
<cas:proxy>https://proxy2/pgtUrl</cas:proxy>
<cas:proxy>https://proxy1/pgtUrl</cas:proxy>
</cas:proxies>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
% cas_user_id
).encode("utf-8")
mocked_http_client = Mock(spec=["get_raw"])
mocked_http_client.get_raw.side_effect = get_raw
self.hs = self.setup_test_homeserver(
config=config,
proxied_http_client=mocked_http_client,
)
return self.hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.deactivate_account_handler = hs.get_deactivate_account_handler()
def test_cas_redirect_confirm(self) -> None:
"""Tests that the SSO login flow serves a confirmation page before redirecting a
user to the redirect URL.
"""
base_url = "/_matrix/client/r0/login/cas/ticket?redirectUrl"
redirect_url = "https://dodgy-site.com/"
url_parts = list(urllib.parse.urlparse(base_url))
query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update({"redirectUrl": redirect_url})
query.update({"ticket": "ticket"})
url_parts[4] = urllib.parse.urlencode(query)
cas_ticket_url = urllib.parse.urlunparse(url_parts)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
# Test that the response is HTML.
self.assertEqual(channel.code, 200, channel.result)
content_type_header_value = ""
for header in channel.result.get("headers", []):
if header[0] == b"Content-Type":
content_type_header_value = header[1].decode("utf8")
self.assertTrue(content_type_header_value.startswith("text/html"))
# Test that the body isn't empty.
self.assertTrue(len(channel.result["body"]) > 0)
# And that it contains our redirect link
self.assertIn(redirect_url, channel.result["body"].decode("UTF-8"))
@override_config(
{
"sso": {
"client_whitelist": [
"https://legit-site.com/",
"https://other-site.com/",
]
}
}
)
def test_cas_redirect_whitelisted(self) -> None:
"""Tests that the SSO login flow serves a redirect to a whitelisted url"""
self._test_redirect("https://legit-site.com/")
@override_config({"public_baseurl": "https://example.com"})
def test_cas_redirect_login_fallback(self) -> None:
self._test_redirect("https://example.com/_matrix/static/client/login")
def _test_redirect(self, redirect_url: str) -> None:
"""Tests that the SSO login flow serves a redirect for the given redirect URL."""
cas_ticket_url = (
"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
% (urllib.parse.quote(redirect_url))
)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
self.assertEqual(channel.code, 302)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
self.assertEqual(location_headers[0][: len(redirect_url)], redirect_url)
@override_config({"sso": {"client_whitelist": ["https://legit-site.com/"]}})
def test_deactivated_user(self) -> None:
"""Logging in as a deactivated account should error."""
redirect_url = "https://legit-site.com/"
# First login (to create the user).
self._test_redirect(redirect_url)
# Deactivate the account.
self.get_success(
self.deactivate_account_handler.deactivate_account(
self.user_id, False, create_requester(self.user_id)
)
)
# Request the CAS ticket.
cas_ticket_url = (
"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
% (urllib.parse.quote(redirect_url))
)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
# Because the user is deactivated they are served an error template.
self.assertEqual(channel.code, 403)
self.assertIn(b"SSO account deactivated", channel.result["body"])
@skip_unless(HAS_JWT, "requires jwt")
class JWTTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
]
jwt_secret = "secret"
jwt_algorithm = "HS256"
base_config = {
"enabled": True,
"secret": jwt_secret,
"algorithm": jwt_algorithm,
}
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
# If jwt_config has been defined (eg via @override_config), don't replace it.
if config.get("jwt_config") is None:
config["jwt_config"] = self.base_config
return config
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
result: Union[str, bytes] = jwt.encode(payload, secret, self.jwt_algorithm)
if isinstance(result, bytes):
return result.decode("ascii")
return result
def jwt_login(self, *args: Any) -> FakeChannel:
params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
channel = self.make_request(b"POST", LOGIN_URL, params)
return channel
def test_login_jwt_valid_registered(self) -> None:
self.register_user("kermit", "monkey")
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_valid_unregistered(self) -> None:
channel = self.jwt_login({"sub": "frog"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, "notsecret")
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: Signature verification failed",
)
def test_login_jwt_expired(self) -> None:
channel = self.jwt_login({"sub": "frog", "exp": 864000})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Signature has expired"
)
def test_login_jwt_not_before(self) -> None:
now = int(time.time())
channel = self.jwt_login({"sub": "frog", "nbf": now + 3600})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: The token is not yet valid (nbf)",
)
def test_login_no_sub(self) -> None:
channel = self.jwt_login({"username": "root"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Invalid JWT")
@override_config({"jwt_config": {**base_config, "issuer": "test-issuer"}})
def test_login_iss(self) -> None:
"""Test validating the issuer claim."""
# A valid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "test-issuer"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid issuer"
)
# Not providing an issuer.
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
'JWT validation failed: Token is missing the "iss" claim',
)
def test_login_iss_no_config(self) -> None:
"""Test providing an issuer claim without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "audiences": ["test-audience"]}})
def test_login_aud(self) -> None:
"""Test validating the audience claim."""
# A valid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "test-audience"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid audience"
)
# Not providing an audience.
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
'JWT validation failed: Token is missing the "aud" claim',
)
def test_login_aud_no_config(self) -> None:
"""Test providing an audience without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid audience"
)
def test_login_default_sub(self) -> None:
"""Test reading user ID from the default subject claim."""
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "subject_claim": "username"}})
def test_login_custom_sub(self) -> None:
"""Test reading user ID from a custom subject claim."""
channel = self.jwt_login({"username": "frog"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_no_token(self) -> None:
params = {"type": "org.matrix.login.jwt"}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Token field for JWT is missing")
# The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
# RS256, with a public key configured in synapse as "jwt_secret", and tokens
# signed by the private key.
@skip_unless(HAS_JWT, "requires jwt")
class JWTPubKeyTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
]
# This key's pubkey is used as the jwt_secret setting of synapse. Valid
# tokens are signed by this and validated using the pubkey. It is generated
# with `openssl genrsa 512` (not a secure way to generate real keys, but
# good enough for tests!)
jwt_privatekey = "\n".join(
[
"-----BEGIN RSA PRIVATE KEY-----",
"<KEY>",
"<KEY>",
"<KEY>KVaZ/gTOM9+9MwlmhidrUOweKfB/",
"kQIhAPZwHazbjo7dYlJs7wPQz1vd+aHSEH+3uQKIysebkmm3AiEA1nc6mDdmgiUq",
"TpIN8A4MBKmfZMWTLq6z05y/qjKyxb0CIQDYJxCwTEenIaEa4PdoJl+qmXFasVDN",
"ZU0+XtNV7yul0wIhAMI9IhiStIjS2EppBa6RSlk+t1oxh2gUWlIh+YVQfZGRAiEA",
"tqBR7qLZGJ5CVKxWmNhJZGt1QHoUtOch8t9C4IdOZ2g=",
"-----END RSA PRIVATE KEY-----",
]
)
    # Generated with `openssl rsa -in foo.key -pubout`, with the above
# private key placed in foo.key (jwt_privatekey).
    jwt_pubkey = "\n".join(
        [
            "-----BEGIN PUBLIC KEY-----",
            "<KEY>",
            "<KEY>",
            "-----END PUBLIC KEY-----",
        ]
    )
# This key is used to sign tokens that shouldn't be accepted by synapse.
# Generated just like jwt_privatekey.
bad_privatekey = "\n".join(
[
"-----BEGIN RSA PRIVATE KEY-----",
"<KEY>",
"gLjmQD3jBUTz+/FndLSBvr3F4OHtGL9O/osCAwEAAQJAJqH0jZJW7Smzo9ShP02L",
"R6HRZcLExZuUrWI+5ZSP7TaZ1uwJzGFspDrunqaVoPobndw/8VsP8HFyKtceC7vY",
"uQIhAPdYInDDSJ8rFKGiy3Ajv5KWISBicjevWHF9dbotmNO9AiEAxrdRJVU+EI9I",
"eB4qRZpY6n4pnwyP0p8f/A3NBaQPG+cCIFlj08aW/PbxNdqYoBdeBA0xDrXKfmbb",
"iwYxBkwL0JCtAiBYmsi94sJn09u2Y4zpuCbJeDPKzWkbuwQh+W1fhIWQJQIhAKR0",
"KydN6cRLvphNQ9c/vBTdlzWxzcSxREpguC7F1J1m",
"-----END RSA PRIVATE KEY-----",
]
)
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["jwt_config"] = {
"enabled": True,
"secret": self.jwt_pubkey,
"algorithm": "RS256",
}
return config
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
result: Union[bytes, str] = jwt.encode(payload, secret, "RS256")
if isinstance(result, bytes):
return result.decode("ascii")
return result
def jwt_login(self, *args: Any) -> FakeChannel:
params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
channel = self.make_request(b"POST", LOGIN_URL, params)
return channel
def test_login_jwt_valid(self) -> None:
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, self.bad_privatekey)
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: Signature verification failed",
)
AS_USER = "as_user_alice"
class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
register.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.hs = self.setup_test_homeserver()
self.service = ApplicationService(
id="unique_identifier",
token="some_token",
hostname="example.com",
sender="@asbot:example.com",
namespaces={
ApplicationService.NS_USERS: [
{"regex": r"@as_user.*", "exclusive": False}
],
ApplicationService.NS_ROOMS: [],
ApplicationService.NS_ALIASES: [],
},
)
self.another_service = ApplicationService(
id="another__identifier",
token="another_token",
hostname="example.com",
sender="@as2bot:example.com",
namespaces={
ApplicationService.NS_USERS: [
{"regex": r"@as2_user.*", "exclusive": False}
],
ApplicationService.NS_ROOMS: [],
ApplicationService.NS_ALIASES: [],
},
)
self.hs.get_datastores().main.services_cache.append(self.service)
self.hs.get_datastores().main.services_cache.append(self.another_service)
return self.hs
def test_login_appservice_user(self) -> None:
"""Test that an appservice user can use /login"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_appservice_user_bot(self) -> None:
"""Test that the appservice bot can use /login"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": self.service.sender},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_appservice_wrong_user(self) -> None:
"""Test that non-as users cannot login with the as token"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": "fibble_wibble"},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"403", channel.result)
def test_login_appservice_wrong_as(self) -> None:
"""Test that as users cannot login with wrong as token"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.another_service.token
)
self.assertEqual(channel.result["code"], b"403", channel.result)
def test_login_appservice_no_token(self) -> None:
"""Test that users must provide a token when using the appservice
login method
"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"401", channel.result)
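# --- Hedged summary sketch (illustration only, not used by the tests) --------
# The four tests above all build the same request shape: a normal /login POST
# whose access token is the appservice token and whose identifier is either a
# user inside the AS namespace or the AS sender itself. The helper name and the
# default localpart are assumptions of this sketch.
def _example_appservice_login_body(user: str = AS_USER) -> Dict[str, Any]:
    return {
        "type": login.LoginRestServlet.APPSERVICE_TYPE,
        "identifier": {"type": "m.id.user", "user": user},
    }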
@skip_unless(HAS_OIDC, "requires OIDC")
class UsernamePickerTestCase(HomeserverTestCase):
"""Tests for the username picker flow of SSO login"""
servlets = [login.register_servlets]
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["public_baseurl"] = BASE_URL
config["oidc_config"] = {}
config["oidc_config"].update(TEST_OIDC_CONFIG)
config["oidc_config"]["user_mapping_provider"] = {
"config": {"display_name_template": "{{ user.displayname }}"}
}
# whitelist this client URI so we redirect straight to it rather than
# serving a confirmation page
config["sso"] = {"client_whitelist": ["https://x"]}
return config
def create_resource_dict(self) -> Dict[str, Resource]:
d = super().create_resource_dict()
d.update(build_synapse_client_resource_tree(self.hs))
return d
def test_username_picker(self) -> None:
"""Test the happy path of a username picker flow."""
# do the start of the login flow
channel = self.helper.auth_via_oidc(
{"sub": "tester", "displayname": "Jonny"}, TEST_CLIENT_REDIRECT_URL
)
# that should redirect to the username picker
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
picker_url = location_headers[0]
self.assertEqual(picker_url, "/_synapse/client/pick_username/account_details")
# ... with a username_mapping_session cookie
cookies: Dict[str, str] = {}
channel.extract_cookies(cookies)
self.assertIn("username_mapping_session", cookies)
session_id = cookies["username_mapping_session"]
# introspect the sso handler a bit to check that the username mapping session
# looks ok.
username_mapping_sessions = self.hs.get_sso_handler()._username_mapping_sessions
self.assertIn(
session_id,
username_mapping_sessions,
"session id not found in map",
)
session = username_mapping_sessions[session_id]
self.assertEqual(session.remote_user_id, "tester")
self.assertEqual(session.display_name, "Jonny")
self.assertEqual(session.client_redirect_url, TEST_CLIENT_REDIRECT_URL)
# the expiry time should be about 15 minutes away
expected_expiry = self.clock.time_msec() + (15 * 60 * 1000)
self.assertApproximates(session.expiry_time_ms, expected_expiry, tolerance=1000)
# Now, submit a username to the username picker, which should serve a redirect
# to the completion page
content = urlencode({b"username": b"bobby"}).encode("utf8")
chan = self.make_request(
"POST",
path=picker_url,
content=content,
content_is_form=True,
custom_headers=[
("Cookie", "username_mapping_session=" + session_id),
# old versions of twisted don't do form-parsing without a valid
# content-length header.
("Content-Length", str(len(content))),
],
)
self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
# send a request to the completion page, which should 302 to the client redirectUrl
chan = self.make_request(
"GET",
path=location_headers[0],
custom_headers=[("Cookie", "username_mapping_session=" + session_id)],
)
self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
# ensure that the returned location matches the requested redirect URL
path, query = location_headers[0].split("?", 1)
self.assertEqual(path, "https://x")
# it will have url-encoded the params properly, so we'll have to parse them
params = urllib.parse.parse_qsl(
query, keep_blank_values=True, strict_parsing=True, errors="strict"
)
self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
self.assertEqual(params[2][0], "loginToken")
# fish the login token out of the returned redirect uri
login_token = params[2][1]
# finally, submit the matrix login token to the login API, which gives us our
# matrix access token, mxid, and device id.
chan = self.make_request(
"POST",
"/login",
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@bobby:test")
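# --- Hedged illustration (not part of the test) ------------------------------
# Outside the test harness, the final step above is a plain Client-Server API
# call: POST the login token to /login. `requests`, the helper name and the
# homeserver URL are assumptions of this sketch; the JSON body mirrors the one
# used above.
def _redeem_login_token(homeserver_url: str, login_token: str) -> dict:
    import requests

    resp = requests.post(
        homeserver_url + "/_matrix/client/r0/login",
        json={"type": "m.login.token", "token": login_token},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()  # contains user_id, access_token and device_id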
| en | 0.833902 | # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # synapse server name: used to populate public_baseurl in some tests # public_baseurl for some tests. It uses an http:// scheme because # FakeChannel.isSecure() returns False, so synapse will see the requested uri as # http://..., so using http in the public_baseurl stops Synapse trying to redirect to # https://.... # CAS server used in some tests # just enough to tell pysaml2 where to redirect to <md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"> <md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol"> <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/> </md:IDPSSODescriptor> </md:EntityDescriptor> # a (valid) url with some annoying characters in. %3D is =, %26 is &, %2B is + # the query params in TEST_CLIENT_REDIRECT_URL # (possibly experimental) login flows we expect to appear in the list after the normal # ones # Prevent the account login ratelimiter from raising first # # This is normally covered by the default test homeserver config # which sets these values to 10000, but as we're overriding the entire # rc_login dict here, we need to set this manually as well # Create different users so we're sure not to be bothered by the per-user # ratelimiter. # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. # Prevent the address login ratelimiter from raising first # # This is normally covered by the default test homeserver config # which sets these values to 10000, but as we're overriding the entire # rc_login dict here, we need to set this manually as well # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. # Prevent the address login ratelimiter from raising first # # This is normally covered by the default test homeserver config # which sets these values to 10000, but as we're overriding the entire # rc_login dict here, we need to set this manually as well # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. # we shouldn't be able to make requests without an access token # log in as normal # we should now be able to make requests with the access token # time passes # ... and we should be soft-logouted # # test behaviour after deleting the expired device # # we now log in as a different device # more requests with the expired token should still return a soft-logout # ... but if we delete that device, it will be a proper logout Perform the UI-Auth to delete a device # check it's a UI-Auth fail # https://github.com/matrix-org/synapse/issues/5665 # "identifier": {"type": "m.id.user", "user": user_id}, # log in as normal # we should now be able to make requests with the access token # time passes # ... and we should be soft-logouted # Now try to hard logout this session # log in as normal # we should now be able to make requests with the access token # time passes # ... 
and we should be soft-logouted # Now try to hard log out all of the user's sessions # create a device_id longer than 512 characters # make a login request with the bad device_id # test that the login fails with the correct error code Tests for homeservers with multiple SSO providers enabled # use the XMLSecurity backend to avoid relying on xmlsec1 # default OIDC provider # additional OIDC providers GET /login should return password and SSO flows /login/sso/redirect should redirect to an identity picker # first hit the redirect url, which should redirect to our idp picker # hitting that picker should give us some HTML # parse the form to check it has fields assumed elsewhere in this class # there should be a link for each href If CAS is chosen, should redirect to the CAS server # it should redirect us to the login page of the cas server # check that the redirectUrl is correctly encoded in the service param - ie, the # place that CAS will redirect to If SAML is chosen, should redirect to the SAML server # it should redirect us to the login page of the SAML server # the RelayState is used to carry the client redirect url If OIDC is chosen, should redirect to the OIDC auth endpoint # pick the default OIDC provider # it should redirect us to the auth page of the OIDC server # ... and should have set a cookie including the redirect url # that should serve a confirmation page # ... which should contain our redirect link # it will have url-encoded the params properly, so we'll have to parse them # finally, submit the matrix login token to the login API, which gives us our # matrix access token, mxid, and device id. An unknown IdP should cause a 400 If the client tries to pick an unknown IdP, return a 404 If the client pick a known IdP, redirect to it # it should redirect us to the auth page of the OIDC server Send a request to /_matrix/client/r0/login/sso/redirect ... possibly specifying an IDP provider Return an example response payload from a call to the `/proxyValidate` endpoint of a CAS server, copied from https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20 This needs to be returned by an async function (as opposed to set as the mock's return value) because the corresponding Synapse code awaits on it. <cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'> <cas:authenticationSuccess> <cas:user>%s</cas:user> <cas:proxyGrantingTicket>PGTIOU-84678-8a9d...</cas:proxyGrantingTicket> <cas:proxies> <cas:proxy>https://proxy2/pgtUrl</cas:proxy> <cas:proxy>https://proxy1/pgtUrl</cas:proxy> </cas:proxies> </cas:authenticationSuccess> </cas:serviceResponse> Tests that the SSO login flow serves a confirmation page before redirecting a user to the redirect URL. # Get Synapse to call the fake CAS and serve the template. # Test that the response is HTML. # Test that the body isn't empty. # And that it contains our redirect link Tests that the SSO login flow serves a redirect to a whitelisted url Tests that the SSO login flow serves a redirect for the given redirect URL. # Get Synapse to call the fake CAS and serve the template. Logging in as a deactivated account should error. # First login (to create the user). # Deactivate the account. # Request the CAS ticket. # Get Synapse to call the fake CAS and serve the template. # Because the user is deactivated they are served an error template. # If jwt_config has been defined (eg via @override_config), don't replace it. # PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str. 
Test validating the issuer claim. # A valid issuer. # An invalid issuer. # Not providing an issuer. Test providing an issuer claim without requiring it in the configuration. Test validating the audience claim. # A valid audience. # An invalid audience. # Not providing an audience. Test providing an audience without requiring it in the configuration. Test reading user ID from the default subject claim. Test reading user ID from a custom subject claim. # The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use # RSS256, with a public key configured in synapse as "jwt_secret", and tokens # signed by the private key. # This key's pubkey is used as the jwt_secret setting of synapse. Valid # tokens are signed by this and validated using the pubkey. It is generated # with `openssl genrsa 512` (not a secure way to generate real keys, but # good enough for tests!) # Generated with `openssl rsa -in foo.key -pubout`, with the the above # private key placed in foo.key (jwt_privatekey). # This key is used to sign tokens that shouldn't be accepted by synapse. # Generated just like jwt_privatekey. # PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str. Test that an appservice user can use /login Test that the appservice bot can use /login Test that non-as users cannot login with the as token Test that as users cannot login with wrong as token Test that users must provide a token when using the appservice login method Tests for the username picker flow of SSO login # whitelist this client URI so we redirect straight to it rather than # serving a confirmation page Test the happy path of a username picker flow. # do the start of the login flow # that should redirect to the username picker # ... with a username_mapping_session cookie # introspect the sso handler a bit to check that the username mapping session # looks ok. # the expiry time should be about 15 minutes away # Now, submit a username to the username picker, which should serve a redirect # to the completion page # old versions of twisted don't do form-parsing without a valid # content-length header. # send a request to the completion page, which should 302 to the client redirectUrl # ensure that the returned location matches the requested redirect URL # it will have url-encoded the params properly, so we'll have to parse them # fish the login token out of the returned redirect uri # finally, submit the matrix login token to the login API, which gives us our # matrix access token, mxid, and device id. | 1.656335 | 2 |
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py | Kevinskwk/rmf_demos | 0 | 636 | <gh_stars>0
import argparse
import sys
from time import sleep
import uuid
import rclpy
from rmf_task_msgs.msg import Delivery
def main(argv = sys.argv):
rclpy.init(args=argv)
args_without_ros = rclpy.utilities.remove_ros_args(argv)
'''
# Example request:
task_id: randomid_001
items: [itemA, itemB....]
pickup_place_name: cssd_room
pickup_behavior:
- name: dispenser
- parameters: [request_guid: xxx, target_guid:cssdbot, transporter_type:mir]
dropoff_place_name: ot_prep_room
dropoff_behavior:
- name: dispenser
- parameters: [request_guid: yyy, target_guid:otbot, transporter_type:mir]
'''
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--pickup', default='pantry', help='Start waypoint')
parser.add_argument('-d', '--dropoff', default='hardware_2', help='Finish waypoint')
parser.add_argument('-i', '--task-id', help='Task ID', default='', type=str)
parser.add_argument('-r', '--robot-type', help='Type of robot', default='magni')
args = parser.parse_args(args_without_ros[1:])
node = rclpy.create_node('loop_request_publisher')
publisher = node.create_publisher(Delivery, 'delivery_requests', 10)
sleep(0.5)
request = Delivery()
if args.task_id:
request.task_id = args.task_id
else:
request.task_id = 'delivery#' + str(uuid.uuid1())
request.pickup_place_name = args.pickup
request.dropoff_place_name = args.dropoff
for _ in range(5):
publisher.publish(request)
sleep(0.5)
rclpy.shutdown()
print(f'Delivery request submitted to {args.robot_type}')
if __name__ == '__main__':
main(sys.argv)
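# --- Hedged usage sketch (not part of the original script) -------------------
# The same Delivery request can be published from other rclpy code; the topic
# name, QoS depth and field names below mirror main() and the example request in
# its docstring, while the node name and waypoint values are illustrative.
def publish_example_delivery() -> None:
    rclpy.init()
    node = rclpy.create_node('delivery_request_example')
    publisher = node.create_publisher(Delivery, 'delivery_requests', 10)
    sleep(0.5)
    request = Delivery()
    request.task_id = 'delivery#' + str(uuid.uuid1())
    request.pickup_place_name = 'pantry'
    request.dropoff_place_name = 'hardware_2'
    publisher.publish(request)
    sleep(0.5)
    node.destroy_node()
    rclpy.shutdown()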
| import argparse
import sys
from time import sleep
import uuid
import rclpy
from rmf_task_msgs.msg import Delivery
def main(argv = sys.argv):
rclpy.init(args=argv)
args_without_ros = rclpy.utilities.remove_ros_args(argv)
'''
# Example request:
task_id: randomid_001
items: [itemA, itemB....]
pickup_place_name: cssd_room
pickup_behavior:
- name: dispenser
- parameters: [request_guid: xxx, target_guid:cssdbot, transporter_type:mir]
dropoff_place_name: ot_prep_room
dropoff_behavior:
- name: dispenser
- parameters: [request_guid: yyy, target_guid:otbot, transporter_type:mir]
'''
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--pickup', default='pantry', help='Start waypoint')
parser.add_argument('-d', '--dropoff', default='hardware_2', help='Finish waypoint')
parser.add_argument('-i', '--task-id', help='Task ID', default='', type=str)
parser.add_argument('-r', '--robot-type', help='Type of robot', default='magni')
args = parser.parse_args(args_without_ros[1:])
node = rclpy.create_node('loop_request_publisher')
publisher = node.create_publisher(Delivery, 'delivery_requests', 10)
sleep(0.5)
request = Delivery()
if args.task_id:
request.task_id = args.task_id
else:
request.task_id = 'delivery#' + str(uuid.uuid1())
request.pickup_place_name = args.pickup
request.dropoff_place_name = args.dropoff
for _ in range(5):
publisher.publish(request)
sleep(0.5)
rclpy.shutdown()
print(f'Delivery request submitted to {args.robot_type}')
if __name__ == '__main__':
main(sys.argv) | en | 0.387129 | # Example request: task_id: randomid_001 items: [itemA, itemB....] pickup_place_name: cssd_room pickup_behavior: - name: dispenser - parameters: [request_guid: xxx, target_guid:cssdbot, transporter_type:mir] dropoff_place_name: ot_prep_room dropoff_behavior: - name: dispenser - parameters: [request_guid: yyy, target_guid:otbot, transporter_type:mir] #' + str(uuid.uuid1()) | 2.395863 | 2 |
dis_snek/api/http/http_client.py | BoredManCodes/Dis-Snek | 0 | 637 | <filename>dis_snek/api/http/http_client.py
"""This file handles the interaction with discords http endpoints."""
import asyncio
import logging
from typing import Any, Dict, Optional, Union
from urllib.parse import quote as _uriquote
from weakref import WeakValueDictionary
import aiohttp
from aiohttp import BaseConnector, ClientSession, ClientWebSocketResponse, FormData
from multidict import CIMultiDictProxy
from dis_snek.api.http.http_requests import (
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
)
from dis_snek.client.const import __py_version__, __repo_url__, __version__, logger_name, MISSING, Absent
from dis_snek.client.errors import DiscordError, Forbidden, GatewayNotFound, HTTPException, NotFound, LoginError
from dis_snek.client.utils.input_utils import response_decode
from dis_snek.client.utils.serializer import dict_filter_missing
from dis_snek.models import CooldownSystem
from .route import Route
__all__ = ["HTTPClient"]
log = logging.getLogger(logger_name)
class GlobalLock:
"""Manages the global ratelimit"""
def __init__(self) -> None:
self.cooldown_system: CooldownSystem = CooldownSystem(
45, 1
) # global rate-limit is 50 per second, conservatively we use 45
self._lock: asyncio.Lock = asyncio.Lock()
async def rate_limit(self) -> None:
async with self._lock:
while not self.cooldown_system.acquire_token():
await asyncio.sleep(self.cooldown_system.get_cooldown_time())
async def lock(self, delta: float) -> None:
"""
Lock the global lock for a given duration.
Args:
delta: The time to keep the lock acquired
"""
await self._lock.acquire()
await asyncio.sleep(delta)
self._lock.release()
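# --- Hedged illustration (not part of the library) ---------------------------
# How GlobalLock is used by request() further down: every outgoing call first
# awaits rate_limit() (consuming one of the ~45 tokens per second), and a global
# 429 response triggers lock(retry_after), which stalls *all* callers for that
# long. The coroutine name and the 1.5s value below are illustrative only.
async def _global_lock_demo() -> None:
    gl = GlobalLock()
    for _ in range(3):
        await gl.rate_limit()  # throttled by the shared CooldownSystem
    await gl.lock(1.5)  # simulate Discord reporting retry_after=1.5 seconds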
class BucketLock:
"""Manages the ratelimit for each bucket"""
def __init__(self) -> None:
self._lock: asyncio.Lock = asyncio.Lock()
self.unlock_on_exit: bool = True
self.bucket_hash: Optional[str] = None
self.limit: int = -1
self.remaining: int = -1
self.delta: float = 0.0
def __repr__(self) -> str:
return f"<BucketLock: {self.bucket_hash or 'Generic'}>"
@property
def locked(self) -> bool:
"""Return True if lock is acquired."""
return self._lock.locked()
def unlock(self) -> None:
"""Unlock this bucket."""
self._lock.release()
def ingest_ratelimit_header(self, header: CIMultiDictProxy) -> None:
"""
Ingests a discord rate limit header to configure this bucket lock.
Args:
header: A header from a http response
"""
self.bucket_hash = header.get("x-ratelimit-bucket")
self.limit = int(header.get("x-ratelimit-limit") or -1)
self.remaining = int(header.get("x-ratelimit-remaining") or -1)
self.delta = float(header.get("x-ratelimit-reset-after", 0.0))
async def blind_defer_unlock(self) -> None:
"""Unlocks the BucketLock but doesn't wait for completion."""
self.unlock_on_exit = False
loop = asyncio.get_running_loop()
loop.call_later(self.delta, self.unlock)
async def defer_unlock(self) -> None:
"""Unlocks the BucketLock after a specified delay."""
self.unlock_on_exit = False
await asyncio.sleep(self.delta)
self.unlock()
async def __aenter__(self) -> None:
await self._lock.acquire()
async def __aexit__(self, *args) -> None:
if self.unlock_on_exit and self._lock.locked():
self.unlock()
self.unlock_on_exit = True
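# --- Hedged illustration (not part of the library) ---------------------------
# Feeding BucketLock a synthetic Discord ratelimit header. The header names
# mirror ingest_ratelimit_header() above; the values, the helper name and the
# extra CIMultiDict import are assumptions of this example.
def _bucket_lock_demo() -> BucketLock:
    from multidict import CIMultiDict

    lock = BucketLock()
    headers = CIMultiDictProxy(
        CIMultiDict(
            {
                "x-ratelimit-bucket": "abcd1234",
                "x-ratelimit-limit": "5",
                "x-ratelimit-remaining": "0",
                "x-ratelimit-reset-after": "2.0",
            }
        )
    )
    lock.ingest_ratelimit_header(headers)
    # remaining == 0, so the request() loop below would call blind_defer_unlock()
    # and keep this bucket locked for lock.delta (2.0s) without blocking the caller.
    return lock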
class HTTPClient(
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
):
"""A http client for sending requests to the Discord API."""
def __init__(self, connector: Optional[BaseConnector] = None, loop: Optional[asyncio.AbstractEventLoop] = None):
self.connector: Optional[BaseConnector] = connector
self.loop = asyncio.get_event_loop() if loop is None else loop
self.__session: Absent[Optional[ClientSession]] = MISSING
self.token: Optional[str] = None
self.global_lock: GlobalLock = GlobalLock()
self._max_attempts: int = 3
self.ratelimit_locks: WeakValueDictionary[str, BucketLock] = WeakValueDictionary()
self._endpoints = {}
self.user_agent: str = (
f"DiscordBot ({__repo_url__} {__version__} Python/{__py_version__}) aiohttp/{aiohttp.__version__}"
)
def __del__(self):
if self.__session and not self.__session.closed:
self.loop.run_until_complete(self.__session.close())
def get_ratelimit(self, route: Route) -> BucketLock:
"""
Get a route's rate limit bucket.
Args:
route: The route to fetch the ratelimit bucket for
Returns:
The BucketLock object for this route
"""
if bucket_hash := self._endpoints.get(route.rl_bucket):
# we have seen this route before, we know which bucket it is associated with
lock = self.ratelimit_locks.get(bucket_hash)
if lock:
# if we have an active lock on this route, it'll still be in the cache
# return that lock
return lock
# if no cached lock exists, return a new lock
return BucketLock()
def ingest_ratelimit(self, route: Route, header: CIMultiDictProxy, bucket_lock: BucketLock) -> None:
"""
Ingests a ratelimit header from discord to determine ratelimit.
Args:
route: The route we're ingesting ratelimit for
header: The rate limit header in question
bucket_lock: The rate limit bucket for this route
"""
bucket_lock.ingest_ratelimit_header(header)
if bucket_lock.bucket_hash:
# We only ever try and cache the bucket if the bucket hash has been set (ignores unlimited endpoints)
log.debug(f"Caching ingested rate limit data for: {bucket_lock.bucket_hash}")
self._endpoints[route.rl_bucket] = bucket_lock.bucket_hash
self.ratelimit_locks[bucket_lock.bucket_hash] = bucket_lock
async def request(
self,
route: Route,
data: Absent[Union[dict, FormData]] = MISSING,
reason: Absent[str] = MISSING,
**kwargs: Dict[str, Any],
) -> Any:
"""
Make a request to discord.
parameters:
route: The route to take
json: A json payload to send in the request
reason: Attach a reason to this request, used for audit logs
"""
# Assemble headers
kwargs["headers"] = {"User-Agent": self.user_agent}
if self.token:
kwargs["headers"]["Authorization"] = f"Bot {self.token}"
if reason not in (None, MISSING):
kwargs["headers"]["X-Audit-Log-Reason"] = _uriquote(reason, safe="/ ")
if isinstance(data, (list, dict)):
kwargs["headers"]["Content-Type"] = "application/json"
# sanity check payload
if isinstance(data, list):
kwargs["json"] = [dict_filter_missing(x) if isinstance(x, dict) else x for x in data]
elif isinstance(data, dict):
kwargs["json"] = dict_filter_missing(data)
elif isinstance(data, FormData):
kwargs["data"] = data
lock = self.get_ratelimit(route)
# this gets a BucketLock for this route.
# If this endpoint has been used before, it will get an existing ratelimit for the respective buckethash
# otherwise a brand-new bucket lock will be returned
for attempt in range(self._max_attempts):
async with lock:
try:
await self.global_lock.rate_limit()
# prevent us exceeding the global rate limit by throttling http requests
if self.__session.closed:
await self.login(self.token)
async with self.__session.request(route.method, route.url, **kwargs) as response:
result = await response_decode(response)
self.ingest_ratelimit(route, response.headers, lock)
if response.status == 429:
# ratelimit exceeded
if result.get("global", False):
# if we get a global, that's pretty bad, this would usually happen if the user is hitting the api from 2 clients sharing a token
log.error(
f"Bot has exceeded global ratelimit, locking REST API for {result.get('retry_after')} seconds"
)
await self.global_lock.lock(float(result.get("retry_after")))
continue
else:
# 429's are unfortunately unavoidable, but we can attempt to avoid them
# so long as these are infrequent we're doing well
log.warning(
f"{route.endpoint} Has exceeded it's ratelimit ({lock.limit})! Reset in {lock.delta} seconds"
)
await lock.defer_unlock() # lock this route and wait for unlock
continue
elif lock.remaining == 0:
# Last call available in the bucket, lock until reset
log.debug(
f"{route.endpoint} Has exhausted its ratelimit ({lock.limit})! Locking route for {lock.delta} seconds"
)
await lock.blind_defer_unlock() # lock this route, but continue processing the current response
elif response.status in {500, 502, 504}:
# Server issues, retry
log.warning(
f"{route.endpoint} Received {response.status}... retrying in {1 + attempt * 2} seconds"
)
await asyncio.sleep(1 + attempt * 2)
continue
if not 300 > response.status >= 200:
await self._raise_exception(response, route, result)
log.debug(
f"{route.endpoint} Received {response.status} :: [{lock.remaining}/{lock.limit} calls remaining]"
)
return result
except OSError as e:
if attempt < self._max_attempts - 1 and e.errno in (54, 10054):
await asyncio.sleep(1 + attempt * 2)
continue
raise
async def _raise_exception(self, response, route, result):
log.error(f"{route.method}::{route.url}: {response.status}")
if response.status == 403:
raise Forbidden(response, response_data=result, route=route)
elif response.status == 404:
raise NotFound(response, response_data=result, route=route)
elif response.status >= 500:
raise DiscordError(response, response_data=result, route=route)
else:
raise HTTPException(response, response_data=result, route=route)
async def request_cdn(self, url, asset) -> bytes:
log.debug(f"{asset} requests {url} from CDN")
async with self.__session.get(url) as response:
if response.status == 200:
return await response.read()
await self._raise_exception(response, asset, await response_decode(response))
async def login(self, token: str) -> dict:
"""
"Login" to the gateway, basically validates the token and grabs user data.
parameters:
token: the token to use
returns:
The currently logged in bot's data
"""
self.__session = ClientSession(connector=self.connector)
self.token = token
try:
return await self.request(Route("GET", "/users/@me"))
except HTTPException as e:
if e.status == 401:
raise LoginError("An improper token was passed") from e
raise
async def close(self) -> None:
"""Close the session."""
if self.__session:
await self.__session.close()
async def get_gateway(self) -> str:
"""Get the gateway url."""
try:
data: dict = await self.request(Route("GET", "/gateway"))
except HTTPException as exc:
raise GatewayNotFound from exc
return "{0}?encoding={1}&v=9&compress=zlib-stream".format(data["url"], "json")
async def websocket_connect(self, url: str) -> ClientWebSocketResponse:
"""
Connect to the websocket.
parameters:
url: the url to connect to
"""
return await self.__session.ws_connect(
url, timeout=30, max_msg_size=0, autoclose=False, headers={"User-Agent": self.user_agent}, compress=0
)
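# --- Hedged end-to-end sketch (illustration only) ----------------------------
# Typical lifecycle implied by the methods above: validate the token, discover
# the gateway, then close the session. The coroutine name and the placeholder
# token are assumptions of this example.
async def _http_client_demo() -> None:
    client = HTTPClient()
    me = await client.login("BOT_TOKEN_PLACEHOLDER")  # GET /users/@me
    gateway_url = await client.get_gateway()  # gateway url with v=9 and zlib-stream
    log.debug(f"logged in as {me.get('username')}, gateway at {gateway_url}")
    await client.close()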
| <filename>dis_snek/api/http/http_client.py
"""This file handles the interaction with discords http endpoints."""
import asyncio
import logging
from typing import Any, Dict, Optional, Union
from urllib.parse import quote as _uriquote
from weakref import WeakValueDictionary
import aiohttp
from aiohttp import BaseConnector, ClientSession, ClientWebSocketResponse, FormData
from multidict import CIMultiDictProxy
from dis_snek.api.http.http_requests import (
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
)
from dis_snek.client.const import __py_version__, __repo_url__, __version__, logger_name, MISSING, Absent
from dis_snek.client.errors import DiscordError, Forbidden, GatewayNotFound, HTTPException, NotFound, LoginError
from dis_snek.client.utils.input_utils import response_decode
from dis_snek.client.utils.serializer import dict_filter_missing
from dis_snek.models import CooldownSystem
from .route import Route
__all__ = ["HTTPClient"]
log = logging.getLogger(logger_name)
class GlobalLock:
"""Manages the global ratelimit"""
def __init__(self) -> None:
self.cooldown_system: CooldownSystem = CooldownSystem(
45, 1
) # global rate-limit is 50 per second, conservatively we use 45
self._lock: asyncio.Lock = asyncio.Lock()
async def rate_limit(self) -> None:
async with self._lock:
while not self.cooldown_system.acquire_token():
await asyncio.sleep(self.cooldown_system.get_cooldown_time())
async def lock(self, delta: float) -> None:
"""
Lock the global lock for a given duration.
Args:
delta: The time to keep the lock acquired
"""
await self._lock.acquire()
await asyncio.sleep(delta)
self._lock.release()
class BucketLock:
"""Manages the ratelimit for each bucket"""
def __init__(self) -> None:
self._lock: asyncio.Lock = asyncio.Lock()
self.unlock_on_exit: bool = True
self.bucket_hash: Optional[str] = None
self.limit: int = -1
self.remaining: int = -1
self.delta: float = 0.0
def __repr__(self) -> str:
return f"<BucketLock: {self.bucket_hash or 'Generic'}>"
@property
def locked(self) -> bool:
"""Return True if lock is acquired."""
return self._lock.locked()
def unlock(self) -> None:
"""Unlock this bucket."""
self._lock.release()
def ingest_ratelimit_header(self, header: CIMultiDictProxy) -> None:
"""
Ingests a discord rate limit header to configure this bucket lock.
Args:
header: A header from a http response
"""
self.bucket_hash = header.get("x-ratelimit-bucket")
self.limit = int(header.get("x-ratelimit-limit") or -1)
self.remaining = int(header.get("x-ratelimit-remaining") or -1)
self.delta = float(header.get("x-ratelimit-reset-after", 0.0))
async def blind_defer_unlock(self) -> None:
"""Unlocks the BucketLock but doesn't wait for completion."""
self.unlock_on_exit = False
loop = asyncio.get_running_loop()
loop.call_later(self.delta, self.unlock)
async def defer_unlock(self) -> None:
"""Unlocks the BucketLock after a specified delay."""
self.unlock_on_exit = False
await asyncio.sleep(self.delta)
self.unlock()
async def __aenter__(self) -> None:
await self._lock.acquire()
async def __aexit__(self, *args) -> None:
if self.unlock_on_exit and self._lock.locked():
self.unlock()
self.unlock_on_exit = True
class HTTPClient(
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
):
"""A http client for sending requests to the Discord API."""
def __init__(self, connector: Optional[BaseConnector] = None, loop: Optional[asyncio.AbstractEventLoop] = None):
self.connector: Optional[BaseConnector] = connector
self.loop = asyncio.get_event_loop() if loop is None else loop
self.__session: Absent[Optional[ClientSession]] = MISSING
self.token: Optional[str] = None
self.global_lock: GlobalLock = GlobalLock()
self._max_attempts: int = 3
self.ratelimit_locks: WeakValueDictionary[str, BucketLock] = WeakValueDictionary()
self._endpoints = {}
self.user_agent: str = (
f"DiscordBot ({__repo_url__} {__version__} Python/{__py_version__}) aiohttp/{aiohttp.__version__}"
)
def __del__(self):
if self.__session and not self.__session.closed:
self.loop.run_until_complete(self.__session.close())
def get_ratelimit(self, route: Route) -> BucketLock:
"""
Get a route's rate limit bucket.
Args:
route: The route to fetch the ratelimit bucket for
Returns:
The BucketLock object for this route
"""
if bucket_hash := self._endpoints.get(route.rl_bucket):
# we have seen this route before, we know which bucket it is associated with
lock = self.ratelimit_locks.get(bucket_hash)
if lock:
# if we have an active lock on this route, it'll still be in the cache
# return that lock
return lock
# if no cached lock exists, return a new lock
return BucketLock()
def ingest_ratelimit(self, route: Route, header: CIMultiDictProxy, bucket_lock: BucketLock) -> None:
"""
Ingests a ratelimit header from discord to determine ratelimit.
Args:
route: The route we're ingesting ratelimit for
header: The rate limit header in question
bucket_lock: The rate limit bucket for this route
"""
bucket_lock.ingest_ratelimit_header(header)
if bucket_lock.bucket_hash:
# We only ever try and cache the bucket if the bucket hash has been set (ignores unlimited endpoints)
log.debug(f"Caching ingested rate limit data for: {bucket_lock.bucket_hash}")
self._endpoints[route.rl_bucket] = bucket_lock.bucket_hash
self.ratelimit_locks[bucket_lock.bucket_hash] = bucket_lock
async def request(
self,
route: Route,
data: Absent[Union[dict, FormData]] = MISSING,
reason: Absent[str] = MISSING,
**kwargs: Dict[str, Any],
) -> Any:
"""
Make a request to discord.
parameters:
route: The route to take
json: A json payload to send in the request
reason: Attach a reason to this request, used for audit logs
"""
# Assemble headers
kwargs["headers"] = {"User-Agent": self.user_agent}
if self.token:
kwargs["headers"]["Authorization"] = f"Bot {self.token}"
if reason not in (None, MISSING):
kwargs["headers"]["X-Audit-Log-Reason"] = _uriquote(reason, safe="/ ")
if isinstance(data, (list, dict)):
kwargs["headers"]["Content-Type"] = "application/json"
# sanity check payload
if isinstance(data, list):
kwargs["json"] = [dict_filter_missing(x) if isinstance(x, dict) else x for x in data]
elif isinstance(data, dict):
kwargs["json"] = dict_filter_missing(data)
elif isinstance(data, FormData):
kwargs["data"] = data
lock = self.get_ratelimit(route)
# this gets a BucketLock for this route.
# If this endpoint has been used before, it will get an existing ratelimit for the respective buckethash
# otherwise a brand-new bucket lock will be returned
for attempt in range(self._max_attempts):
async with lock:
try:
await self.global_lock.rate_limit()
# prevent us exceeding the global rate limit by throttling http requests
if self.__session.closed:
await self.login(self.token)
async with self.__session.request(route.method, route.url, **kwargs) as response:
result = await response_decode(response)
self.ingest_ratelimit(route, response.headers, lock)
if response.status == 429:
# ratelimit exceeded
if result.get("global", False):
# if we get a global, that's pretty bad, this would usually happen if the user is hitting the api from 2 clients sharing a token
log.error(
f"Bot has exceeded global ratelimit, locking REST API for {result.get('retry_after')} seconds"
)
await self.global_lock.lock(float(result.get("retry_after")))
continue
else:
# 429's are unfortunately unavoidable, but we can attempt to avoid them
# so long as these are infrequent we're doing well
log.warning(
f"{route.endpoint} Has exceeded it's ratelimit ({lock.limit})! Reset in {lock.delta} seconds"
)
await lock.defer_unlock() # lock this route and wait for unlock
continue
elif lock.remaining == 0:
# Last call available in the bucket, lock until reset
log.debug(
f"{route.endpoint} Has exhausted its ratelimit ({lock.limit})! Locking route for {lock.delta} seconds"
)
await lock.blind_defer_unlock() # lock this route, but continue processing the current response
elif response.status in {500, 502, 504}:
# Server issues, retry
log.warning(
f"{route.endpoint} Received {response.status}... retrying in {1 + attempt * 2} seconds"
)
await asyncio.sleep(1 + attempt * 2)
continue
if not 300 > response.status >= 200:
await self._raise_exception(response, route, result)
log.debug(
f"{route.endpoint} Received {response.status} :: [{lock.remaining}/{lock.limit} calls remaining]"
)
return result
except OSError as e:
if attempt < self._max_attempts - 1 and e.errno in (54, 10054):
await asyncio.sleep(1 + attempt * 2)
continue
raise
async def _raise_exception(self, response, route, result):
log.error(f"{route.method}::{route.url}: {response.status}")
if response.status == 403:
raise Forbidden(response, response_data=result, route=route)
elif response.status == 404:
raise NotFound(response, response_data=result, route=route)
elif response.status >= 500:
raise DiscordError(response, response_data=result, route=route)
else:
raise HTTPException(response, response_data=result, route=route)
async def request_cdn(self, url, asset) -> bytes:
log.debug(f"{asset} requests {url} from CDN")
async with self.__session.get(url) as response:
if response.status == 200:
return await response.read()
await self._raise_exception(response, asset, await response_decode(response))
async def login(self, token: str) -> dict:
"""
"Login" to the gateway, basically validates the token and grabs user data.
parameters:
token: the token to use
returns:
The currently logged in bot's data
"""
self.__session = ClientSession(connector=self.connector)
self.token = token
try:
return await self.request(Route("GET", "/users/@me"))
except HTTPException as e:
if e.status == 401:
raise LoginError("An improper token was passed") from e
raise
async def close(self) -> None:
"""Close the session."""
if self.__session:
await self.__session.close()
async def get_gateway(self) -> str:
"""Get the gateway url."""
try:
data: dict = await self.request(Route("GET", "/gateway"))
except HTTPException as exc:
raise GatewayNotFound from exc
return "{0}?encoding={1}&v=9&compress=zlib-stream".format(data["url"], "json")
async def websocket_connect(self, url: str) -> ClientWebSocketResponse:
"""
Connect to the websocket.
parameters:
url: the url to connect to
"""
return await self.__session.ws_connect(
url, timeout=30, max_msg_size=0, autoclose=False, headers={"User-Agent": self.user_agent}, compress=0
)
| en | 0.866382 | This file handles the interaction with discords http endpoints. Manages the global ratelimit # global rate-limit is 50 per second, conservatively we use 45 Lock the global lock for a given duration. Args: delta: The time to keep the lock acquired Manages the ratelimit for each bucket Return True if lock is acquired. Unlock this bucket. Ingests a discord rate limit header to configure this bucket lock. Args: header: A header from a http response Unlocks the BucketLock but doesn't wait for completion. Unlocks the BucketLock after a specified delay. A http client for sending requests to the Discord API. Get a route's rate limit bucket. Args: route: The route to fetch the ratelimit bucket for Returns: The BucketLock object for this route # we have seen this route before, we know which bucket it is associated with # if we have an active lock on this route, it'll still be in the cache # return that lock # if no cached lock exists, return a new lock Ingests a ratelimit header from discord to determine ratelimit. Args: route: The route we're ingesting ratelimit for header: The rate limit header in question bucket_lock: The rate limit bucket for this route # We only ever try and cache the bucket if the bucket hash has been set (ignores unlimited endpoints) Make a request to discord. parameters: route: The route to take json: A json payload to send in the request reason: Attach a reason to this request, used for audit logs # Assemble headers # sanity check payload # this gets a BucketLock for this route. # If this endpoint has been used before, it will get an existing ratelimit for the respective buckethash # otherwise a brand-new bucket lock will be returned # prevent us exceeding the global rate limit by throttling http requests # ratelimit exceeded # if we get a global, that's pretty bad, this would usually happen if the user is hitting the api from 2 clients sharing a token # 429's are unfortunately unavoidable, but we can attempt to avoid them # so long as these are infrequent we're doing well # lock this route and wait for unlock # Last call available in the bucket, lock until reset # lock this route, but continue processing the current response # Server issues, retry "Login" to the gateway, basically validates the token and grabs user data. parameters: token: the token to use returns: The currently logged in bot's data Close the session. Get the gateway url. Connect to the websocket. parameters: url: the url to connect to | 2.124131 | 2 |
config.py | conradsuuna/uac-computer-competency | 0 | 638 | from os import environ
import psycopg2
from datetime import timedelta
from dotenv import load_dotenv
load_dotenv()
class Config(object):
""" app configuration class """
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = environ.get('SECRET_KEY')
USER = environ.get('DB_USER')
PASSWORD = environ.get('DB_PASSWORD')
DB_NAME = environ.get('DB_NAME')
HOST = environ.get('DB_HOST')
SQLALCHEMY_DATABASE_URI = f"postgresql://{USER}:{PASSWORD}@{HOST}/{DB_NAME}"
SQLALCHEMY_TRACK_MODIFICATIONS = False
# jwt configurations for the user auth api
JWT_SECRET_KEY = environ.get('SECRET_KEY')
JWT_ACCESS_TOKEN_EXPIRES = timedelta(days=1)
# pagination
NUM_OF_ITEMS_PER_PAGE = 18
class DevelopmentConfig(Config):
""" app development configuration class """
ENV = "development"
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
class ProductionConfig(Config):
DEBUG = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
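# --- Hedged usage sketch (not part of this module) ---------------------------
# How these classes are typically wired into a Flask app; the Flask import, the
# helper name and the example env-var values are assumptions of this sketch.
# With DB_USER=uac, DB_PASSWORD=secret, DB_HOST=localhost and DB_NAME=uac_db the
# URI above resolves to postgresql://uac:secret@localhost/uac_db.
def _make_app_sketch(development: bool = True):
    from flask import Flask

    app = Flask(__name__)
    app.config.from_object(DevelopmentConfig if development else ProductionConfig)
    return app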
| from os import environ
import psycopg2
from datetime import timedelta
from dotenv import load_dotenv
load_dotenv()
class Config(object):
""" app configuration class """
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = environ.get('SECRET_KEY')
USER = environ.get('DB_USER')
PASSWORD = environ.get('DB_PASSWORD')
DB_NAME = environ.get('DB_NAME')
HOST = environ.get('DB_HOST')
SQLALCHEMY_DATABASE_URI = f"postgresql://{USER}:{PASSWORD}@{HOST}/{DB_NAME}"
SQLALCHEMY_TRACK_MODIFICATIONS = False
# jwt configurations for the user auth api
JWT_SECRET_KEY = environ.get('SECRET_KEY')
JWT_ACCESS_TOKEN_EXPIRES = timedelta(days=1)
# pagination
NUM_OF_ITEMS_PER_PAGE = 18
class DevelopmentConfig(Config):
""" app development configuration class """
ENV = "development"
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
class ProductionConfig(Config):
DEBUG = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
| en | 0.626592 | app configuration class # jwt configuarations for the user auth api # pagination app development configuration class | 2.229058 | 2 |
electrum/version.py | c4pt000/electrum-radiocoin | 0 | 639 | <filename>electrum/version.py
ELECTRUM_VERSION = '4.1.5-radc' # version of the client package
APK_VERSION = '4.1.5.0' # read by buildozer.spec
PROTOCOL_VERSION = '1.4' # protocol version requested
# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01' # Standard wallet
SEED_PREFIX_SW = '100' # Segwit wallet
SEED_PREFIX_2FA = '101' # Two-factor authentication
SEED_PREFIX_2FA_SW = '102' # Two-factor auth, using segwit
def seed_prefix(seed_type):
if seed_type == 'standard':
return SEED_PREFIX
elif seed_type == 'segwit':
return SEED_PREFIX_SW
elif seed_type == '2fa':
return SEED_PREFIX_2FA
elif seed_type == '2fa_segwit':
return SEED_PREFIX_2FA_SW
raise Exception(f"unknown seed_type: {seed_type}")
| <filename>electrum/version.py
ELECTRUM_VERSION = '4.1.5-radc' # version of the client package
APK_VERSION = '4.1.5.0' # read by buildozer.spec
PROTOCOL_VERSION = '1.4' # protocol version requested
# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01' # Standard wallet
SEED_PREFIX_SW = '100' # Segwit wallet
SEED_PREFIX_2FA = '101' # Two-factor authentication
SEED_PREFIX_2FA_SW = '102' # Two-factor auth, using segwit
def seed_prefix(seed_type):
if seed_type == 'standard':
return SEED_PREFIX
elif seed_type == 'segwit':
return SEED_PREFIX_SW
elif seed_type == '2fa':
return SEED_PREFIX_2FA
elif seed_type == '2fa_segwit':
return SEED_PREFIX_2FA_SW
raise Exception(f"unknown seed_type: {seed_type}")
| en | 0.831217 | # version of the client package # read by buildozer.spec # protocol version requested # The hash of the mnemonic seed must begin with this # Standard wallet # Segwit wallet # Two-factor authentication # Two-factor auth, using segwit | 1.874356 | 2 |
lib/layers/functions/prior_box.py | arleyzhang/object-detection-pytorch | 4 | 640 | from __future__ import division
from math import sqrt as sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter
def vis(func):
"""tensorboard visualization if has writer as input"""
def wrapper(*args, **kw):
return func(*args, **kw) if kw['tb_writer'] is not None else None
return wrapper
class PriorBoxBase(object):
"""Compute priorbox coordinates in center-offset form for each source
feature map.
"""
def __init__(self, cfg):
super(PriorBoxBase, self).__init__()
self.image_size = cfg.MODEL.IMAGE_SIZE
self._steps = cfg.MODEL.STEPS
self._cfg_list = []
self._prior_cfg = {}
self._clip = cfg.MODEL.CLIP
self._variance = cfg.MODEL.VARIANCE
for v in self._variance:
if v <= 0:
raise ValueError('Variances must be greater than 0')
def _setup(self, cfg):
num_feat = len(self._steps)
for item in self._cfg_list:
if item not in cfg.MODEL:
raise Exception("wrong anchor config!")
if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0:
raise Exception("config {} length does not match step length!".format(item))
self._prior_cfg[item] = cfg.MODEL[item]
@property
def num_priors(self):
"""allow prior num calculation before knowing feature map size"""
assert self._prior_cfg != {}
return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))]
def _create_prior(self, cx, cy, k):
raise NotImplementedError
@vis
def _image_proc(self, image=None, tb_writer=None):
# TODO test with image
if isinstance(image, type(None)):
image = np.ones((self.image_size[1], self.image_size[0], 3))
elif isinstance(image, str):
image = cv2.imread(image, -1)
image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
return image
@vis
def _prior_vis(self, anchor, image_ori, feat_idx, tb_writer=None):
# TODO add output path to the signature
writer = tb_writer.writer
prior_num = self.num_priors[feat_idx]
# transform coordinates
scale = [self.image_size[1], self.image_size[0], self.image_size[1], self.image_size[0]]
bboxs = np.array(anchor).reshape((-1, 4))
box_centers = bboxs[:, :2] * scale[:2] # [x, y]
# bboxs: [xmin, ymin, xmax, ymax]
bboxs = np.hstack((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)) * scale
box_centers = box_centers.astype(np.int32)
bboxs = bboxs.astype(np.int32)
# visualize each anchor box on a feature map
for prior_idx in range(prior_num):
image = image_ori.copy()
bboxs_ = bboxs[prior_idx::prior_num, :]
box_centers_ = box_centers[4 * prior_idx::prior_num, :]
for anchor, bbox in zip(box_centers_, bboxs_):
cv2.circle(image, (anchor[0], anchor[1]), 1, (0, 0, 255), -1)
if anchor[0] == anchor[1]: # only show diagonal anchors
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1)
image = image[..., ::-1]
image = image.transpose((2,0,1))
writer.add_image('base/feature_map_{}_{}'.format(feat_idx, prior_idx), image, 2)
def forward(self, layer_dims, tb_writer=None, image=None):
priors = []
image = self._image_proc(image=image, tb_writer=tb_writer)
for k in range(len(layer_dims)):
prior = []
for i, j in product(range(layer_dims[k][0]), range(layer_dims[k][1])):
steps_x = self.image_size[1] / self._steps[k]
steps_y = self.image_size[0] / self._steps[k]
cx = (j + 0.5) / steps_x # unit center x,y
cy = (i + 0.5) / steps_y
prior += self._create_prior(cx, cy, k)
priors += prior
self._prior_vis(prior, image, k, tb_writer=tb_writer)
output = torch.Tensor(priors).view(-1, 4)
# TODO this clip is meaningless, should clip on [xmin, ymin, xmax, ymax]
if self._clip:
output.clamp_(max=1, min=0)
return output
class PriorBoxSSD(PriorBoxBase):
def __init__(self, cfg):
super(PriorBoxSSD, self).__init__(cfg)
# self.image_size = cfg['image_size']
self._cfg_list = ['MIN_SIZES', 'MAX_SIZES', 'ASPECT_RATIOS']
self._flip = cfg.MODEL.FLIP
self._setup(cfg)
def _create_prior(self, cx, cy, k):
# as the original SSD paper does
prior = []
min_sizes = self._prior_cfg['MIN_SIZES'][k]
min_sizes = [min_sizes] if not isinstance(min_sizes, list) else min_sizes
for ms in min_sizes:
# min square
s_i = ms / self.image_size[0]
s_j = ms / self.image_size[1]
prior += [cx, cy, s_j, s_i]
# min max square
if len(self._prior_cfg['MAX_SIZES']) != 0:
assert type(self._prior_cfg['MAX_SIZES'][k]) is not list # one max size per layer
s_i_prime = sqrt(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))
s_j_prime = sqrt(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))
prior += [cx, cy, s_j_prime, s_i_prime]
# rectangles by min and aspect ratio
for ar in self._prior_cfg['ASPECT_RATIOS'][k]:
prior += [cx, cy, s_j * sqrt(ar), s_i / sqrt(ar)] # box with pixel aspect ratio ar; flip adds the transposed box
if self._flip:
prior += [cx, cy, s_j / sqrt(ar), s_i * sqrt(ar)]
return prior
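# --- Hedged worked example (illustration only, values not read from any cfg) --
# For the classic SSD300 first prediction layer (min_size=30, max_size=60,
# aspect_ratios=[2], flip=True) on a 300x300 input, _create_prior() above emits
# 4 priors per cell; the helper below recomputes their (w, h) in normalized
# coordinates.
def _ssd300_cell_example():
    s_min = 30 / 300                  # 0.1 -> min square
    s_prime = sqrt(s_min * 60 / 300)  # ~0.1414 -> geometric-mean square
    ar = 2
    return [
        (s_min, s_min),
        (s_prime, s_prime),
        (s_min * sqrt(ar), s_min / sqrt(ar)),  # wide box (pixel aspect ratio 2)
        (s_min / sqrt(ar), s_min * sqrt(ar)),  # flipped, tall box
    ]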
# PriorBox = PriorBoxSSD
def test_no_vis(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [[30], [60], 111, 162, 213, 264]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
print(p.num_priors)
p1 = p.forward(feat_dim)
print(p1)
def test_flip(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
cfg['flip'] = False
cfg['aspect_ratios'] = [[2, 1 / 2], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2, 3, 1 / 3],
[2, 1 / 2, 3, 1 / 3], [2, 1 / 2], [2, 1 / 2]]
p = PriorBoxSSD(cfg)  # the PriorBox alias above is commented out, so use the class directly
p2 = p.forward(feat_dim, tb_writer=tb_writer)
# print(p2)
assert (p2 - p1).sum() < 1e-8
def test_rectangle(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [30, 60, 111, 162, 213, 264]
cfg['flip'] = True
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
# cfg['image_size'] = [300, 300]
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], [item * 2 for item in cfg['feature_maps']])]
# cfg['image_size'] = [300, 600]
feat_dim = [list(a) for a in zip([item * 2 for item in cfg['feature_maps']], cfg['feature_maps'])]
cfg['image_size'] = [600, 300]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
print(p1.shape)
if __name__ == '__main__':
import copy
# from lib.datasets.config import ssd_voc_vgg as cfg
# from lib.utils.visualize_utils import TBWriter
# tb_writer = TBWriter(log_dir, {'epoch': 50})
#
# test_no_vis(cfg, tb_writer)
# test_flip(cfg, tb_writer)
# test_rectangle(cfg, tb_writer)
print('haha')
from lib.utils.config import cfg
print(cfg)
| from __future__ import division
from math import sqrt as sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter
def vis(func):
"""tensorboard visualization if has writer as input"""
def wrapper(*args, **kw):
return func(*args, **kw) if kw['tb_writer'] is not None else None
return wrapper
class PriorBoxBase(object):
"""Compute priorbox coordinates in center-offset form for each source
feature map.
"""
def __init__(self, cfg):
super(PriorBoxBase, self).__init__()
self.image_size = cfg.MODEL.IMAGE_SIZE
self._steps = cfg.MODEL.STEPS
self._cfg_list = []
self._prior_cfg = {}
self._clip = cfg.MODEL.CLIP
self._variance = cfg.MODEL.VARIANCE
for v in self._variance:
if v <= 0:
raise ValueError('Variances must be greater than 0')
def _setup(self, cfg):
num_feat = len(self._steps)
for item in self._cfg_list:
if item not in cfg.MODEL:
raise Exception("wrong anchor config!")
if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0:
raise Exception("config {} length does not match step length!".format(item))
self._prior_cfg[item] = cfg.MODEL[item]
@property
def num_priors(self):
"""allow prior num calculation before knowing feature map size"""
assert self._prior_cfg != {}
return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))]
def _create_prior(self, cx, cy, k):
raise NotImplementedError
@vis
def _image_proc(self, image=None, tb_writer=None):
# TODO test with image
if isinstance(image, type(None)):
image = np.ones((self.image_size[1], self.image_size[0], 3))
elif isinstance(image, str):
image = cv2.imread(image, -1)
image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
return image
@vis
def _prior_vis(self, anchor, image_ori, feat_idx, tb_writer=None):
# TODO add output path to the signature
writer = tb_writer.writer
prior_num = self.num_priors[feat_idx]
# transform coordinates
scale = [self.image_size[1], self.image_size[0], self.image_size[1], self.image_size[0]]
bboxs = np.array(anchor).reshape((-1, 4))
box_centers = bboxs[:, :2] * scale[:2] # [x, y]
# bboxs: [xmin, ymin, xmax, ymax]
bboxs = np.hstack((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)) * scale
box_centers = box_centers.astype(np.int32)
bboxs = bboxs.astype(np.int32)
# visualize each anchor box on a feature map
for prior_idx in range(prior_num):
image = image_ori.copy()
bboxs_ = bboxs[prior_idx::prior_num, :]
box_centers_ = box_centers[4 * prior_idx::prior_num, :]
for anchor, bbox in zip(box_centers_, bboxs_):
cv2.circle(image, (anchor[0], anchor[1]), 1, (0, 0, 255), -1)
if anchor[0] == anchor[1]: # only show diagonal anchors
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1)
image = image[..., ::-1]
image = image.transpose((2,0,1))
writer.add_image('base/feature_map_{}_{}'.format(feat_idx, prior_idx), image, 2)
def forward(self, layer_dims, tb_writer=None, image=None):
priors = []
image = self._image_proc(image=image, tb_writer=tb_writer)
for k in range(len(layer_dims)):
prior = []
for i, j in product(range(layer_dims[k][0]), range(layer_dims[k][1])):
steps_x = self.image_size[1] / self._steps[k]
steps_y = self.image_size[0] / self._steps[k]
cx = (j + 0.5) / steps_x # unit center x,y
cy = (i + 0.5) / steps_y
prior += self._create_prior(cx, cy, k)
priors += prior
self._prior_vis(prior, image, k, tb_writer=tb_writer)
output = torch.Tensor(priors).view(-1, 4)
# TODO this clip is meaningless, should clip on [xmin, ymin, xmax, ymax]
if self._clip:
output.clamp_(max=1, min=0)
return output
class PriorBoxSSD(PriorBoxBase):
def __init__(self, cfg):
super(PriorBoxSSD, self).__init__(cfg)
# self.image_size = cfg['image_size']
self._cfg_list = ['MIN_SIZES', 'MAX_SIZES', 'ASPECT_RATIOS']
self._flip = cfg.MODEL.FLIP
self._setup(cfg)
def _create_prior(self, cx, cy, k):
# as the original paper do
prior = []
min_sizes = self._prior_cfg['MIN_SIZES'][k]
min_sizes = [min_sizes] if not isinstance(min_sizes, list) else min_sizes
for ms in min_sizes:
# min square
s_i = ms / self.image_size[0]
s_j = ms / self.image_size[1]
prior += [cx, cy, s_j, s_i]
# min max square
if len(self._prior_cfg['MAX_SIZES']) != 0:
assert type(self._prior_cfg['MAX_SIZES'][k]) is not list # one max size per layer
s_i_prime = sqrt(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))
s_j_prime = sqrt(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))
prior += [cx, cy, s_j_prime, s_i_prime]
# rectangles by min and aspect ratio
for ar in self._prior_cfg['ASPECT_RATIOS'][k]:
prior += [cx, cy, s_j * sqrt(ar), s_i / sqrt(ar)] # a vertical box
if self._flip:
prior += [cx, cy, s_j / sqrt(ar), s_i * sqrt(ar)]
return prior
# PriorBox = PriorBoxSSD
def test_no_vis(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [[30], [60], 111, 162, 213, 264]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
print(p.num_priors)
p1 = p.forward(feat_dim)
print(p1)
def test_filp(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
cfg['flip'] = False
cfg['aspect_ratios'] = [[2, 1 / 2], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2, 3, 1 / 3],
[2, 1 / 2, 3, 1 / 3], [2, 1 / 2], [2, 1 / 2]]
p = PriorBoxSSD(cfg)
p2 = p.forward(feat_dim, tb_writer=tb_writer)
# print(p2)
assert (p2 - p1).sum() < 1e-8
def test_rectangle(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [30, 60, 111, 162, 213, 264]
cfg['flip'] = True
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
# cfg['image_size'] = [300, 300]
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], [item * 2 for item in cfg['feature_maps']])]
# cfg['image_size'] = [300, 600]
feat_dim = [list(a) for a in zip([item * 2 for item in cfg['feature_maps']], cfg['feature_maps'])]
cfg['image_size'] = [600, 300]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
print(p1.shape)
if __name__ == '__main__':
import copy
# from lib.datasets.config import ssd_voc_vgg as cfg
# from lib.utils.visualize_utils import TBWriter
# tb_writer = TBWriter(log_dir, {'epoch': 50})
#
# test_no_vis(cfg, tb_writer)
# test_filp(cfg, tb_writer)
# test_rectangle(cfg, tb_writer)
print('haha')
from lib.utils.config import cfg
print(cfg)
| en | 0.541816 | tensorboard visualization if has writer as input Compute priorbox coordinates in center-offset form for each source feature map. allow prior num calculation before knowing feature map size # TODO test with image # TODO add output path to the signature # transform coordinates # [x, y] # bboxs: [xmin, ymin, xmax, ymax] # visualize each anchor box on a feature map # only show diagnal anchors # unit center x,y # TODO this clip is meanless, should clip on [xmin, ymin, xmax, ymax] # self.image_size = cfg['image_size'] # as the original paper do # min square # min max square # one max size per layer # rectangles by min and aspect ratio # a vertical box # PriorBox = PriorBoxSSD # print(p2) # feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])] # cfg['image_size'] = [300, 300] # feat_dim = [list(a) for a in zip(cfg['feature_maps'], [item * 2 for item in cfg['feature_maps']])] # cfg['image_size'] = [300, 600] # from lib.datasets.config import ssd_voc_vgg as cfg # from lib.utils.visualize_utils import TBWriter # tb_writer = TBWriter(log_dir, {'epoch': 50}) # # test_no_vis(cfg, tb_writer) # test_filp(cfg, tb_writer) # test_rectangle(cfg, tb_writer) | 2.141236 | 2 |
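The per-cell box geometry in _create_prior above (a small square from the min size, an intermediate square from the geometric mean of min and max size, and a pair of flipped rectangles per aspect ratio) can be reproduced on its own. The sketch below uses made-up sizes for a single square feature level; none of the values come from the record's configs.
from math import sqrt

image_size = 300.0                  # assume a square input for simplicity
min_size, max_size = 30.0, 60.0     # one feature level (illustrative values)
aspect_ratios = (2, 3)
cx, cy = 0.5, 0.5                   # normalised centre of one cell

s = min_size / image_size
priors = [[cx, cy, s, s]]                                   # small square
s_prime = sqrt(s * (max_size / image_size))
priors.append([cx, cy, s_prime, s_prime])                   # intermediate square
for ar in aspect_ratios:
    priors.append([cx, cy, s * sqrt(ar), s / sqrt(ar)])     # rectangle
    priors.append([cx, cy, s / sqrt(ar), s * sqrt(ar)])     # flipped rectangle

print(len(priors))   # 6 priors for this cell with this configuration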
python/Gaffer/SequencePath.py | cwmartin/gaffer | 0 | 641 | ##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
class SequencePath( Gaffer.Path ) :
def __init__( self, path, root="/", minSequenceSize=1, filter=None ) :
if not isinstance( path, Gaffer.Path ) :
path = Gaffer.FileSystemPath( path, root )
Gaffer.Path.__init__( self, path[:], path.root(), filter=filter )
# we use the seed for creating base paths whenever we need them
self.__basePathSeed = path
self.__minSequenceSize = minSequenceSize
def isValid( self ) :
for p in self.__basePaths() :
if not p.isValid() :
return False
return True
def isLeaf( self ) :
for p in self.__basePaths() :
if not p.isLeaf() :
return False
return True
def info( self ) :
result = Gaffer.Path.info( self )
if result is None :
return None
def average( values ) :
return sum( values ) / len( values )
def mostCommon( values ) :
counter = {}
for value in values :
if value in counter :
counter[value] += 1
else :
counter[value] = 1
maxCount = 0
mostCommonValue = None
for value, count in counter.items() :
if count > maxCount :
mostCommonValue = value
maxCount = count
return mostCommonValue
combiners = {
"fileSystem:owner" : mostCommon,
"fileSystem:group" : mostCommon,
"fileSystem:modificationTime" : max,
"fileSystem:accessTime" : max,
"fileSystem:size" : sum,
}
infos = [ path.info() for path in self.__basePaths() ]
if len( infos ) :
for key, exampleValue in infos[0].items() :
if key in result :
continue
combiner = combiners.get( key, None )
if combiner is None :
if isinstance( exampleValue, ( int, float ) ) :
combiner = average
elif isinstance( exampleValue, basestring ) :
combiner = mostCommon
if combiner is not None :
values = [ i[key] for i in infos ]
result[key] = combiner( values )
return result
def _children( self ) :
p = self.__basePath( self )
children = p.children()
nonLeafPaths = []
leafPathStrings = []
for child in children :
if child.isLeaf() :
leafPathStrings.append( str( child ) )
else :
nonLeafPaths.append( child )
sequences = IECore.findSequences( leafPathStrings, self.__minSequenceSize )
result = []
for path in sequences + nonLeafPaths :
result.append( SequencePath( self.__basePath( str( path ) ), minSequenceSize=self.__minSequenceSize, filter = self.getFilter() ) )
return result
def copy( self ) :
result = SequencePath( self.__basePathSeed, minSequenceSize = self.__minSequenceSize, filter = self.getFilter() )
result.setFromPath( self )
return result
def __basePath( self, path ) :
result = self.__basePathSeed.copy()
if isinstance( path, basestring ) :
result.setFromString( path )
else :
result.setFromPath( path )
return result
def __basePaths( self ) :
sequence = None
with IECore.IgnoredExceptions( Exception ) :
sequence = IECore.FileSequence( str( self ) )
result = []
if sequence :
for f in sequence.fileNames() :
result.append( self.__basePath( f ) )
else :
result.append( self.__basePath( self ) )
return result
def __isSequence( self ) :
s = str( self )
if IECore.FileSequence.fileNameValidator().match( s ) :
return True
return False
| ##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
class SequencePath( Gaffer.Path ) :
def __init__( self, path, root="/", minSequenceSize=1, filter=None ) :
if not isinstance( path, Gaffer.Path ) :
path = Gaffer.FileSystemPath( path, root )
Gaffer.Path.__init__( self, path[:], path.root(), filter=filter )
# we use the seed for creating base paths whenever we need them
self.__basePathSeed = path
self.__minSequenceSize = minSequenceSize
def isValid( self ) :
for p in self.__basePaths() :
if not p.isValid() :
return False
return True
def isLeaf( self ) :
for p in self.__basePaths() :
if not p.isLeaf() :
return False
return True
def info( self ) :
result = Gaffer.Path.info( self )
if result is None :
return None
def average( values ) :
return sum( values ) / len( values )
def mostCommon( values ) :
counter = {}
for value in values :
if value in counter :
counter[value] += 1
else :
counter[value] = 1
maxCount = 0
mostCommonValue = None
for value, count in counter.items() :
if count > maxCount :
mostCommonValue = value
maxCount = count
return mostCommonValue
combiners = {
"fileSystem:owner" : mostCommon,
"fileSystem:group" : mostCommon,
"fileSystem:modificationTime" : max,
"fileSystem:accessTime" : max,
"fileSystem:size" : sum,
}
infos = [ path.info() for path in self.__basePaths() ]
if len( infos ) :
for key, exampleValue in infos[0].items() :
if key in result :
continue
combiner = combiners.get( key, None )
if combiner is None :
if isinstance( exampleValue, ( int, float ) ) :
combiner = average
elif isinstance( exampleValue, basestring ) :
combiner = mostCommon
if combiner is not None :
values = [ i[key] for i in infos ]
result[key] = combiner( values )
return result
def _children( self ) :
p = self.__basePath( self )
children = p.children()
nonLeafPaths = []
leafPathStrings = []
for child in children :
if child.isLeaf() :
leafPathStrings.append( str( child ) )
else :
nonLeafPaths.append( child )
sequences = IECore.findSequences( leafPathStrings, self.__minSequenceSize )
result = []
for path in sequences + nonLeafPaths :
result.append( SequencePath( self.__basePath( str( path ) ), minSequenceSize=self.__minSequenceSize, filter = self.getFilter() ) )
return result
def copy( self ) :
result = SequencePath( self.__basePathSeed, minSequenceSize = self.__minSequenceSize, filter = self.getFilter() )
result.setFromPath( self )
return result
def __basePath( self, path ) :
result = self.__basePathSeed.copy()
if isinstance( path, basestring ) :
result.setFromString( path )
else :
result.setFromPath( path )
return result
def __basePaths( self ) :
sequence = None
with IECore.IgnoredExceptions( Exception ) :
sequence = IECore.FileSequence( str( self ) )
result = []
if sequence :
for f in sequence.fileNames() :
result.append( self.__basePath( f ) )
else :
result.append( self.__basePath( self ) )
return result
def __isSequence( self ) :
s = str( self )
if IECore.FileSequence.fileNameValidator().match( s ) :
return True
return False
| en | 0.631856 | ########################################################################## # # Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## # we use the seed for creating base paths whenever we need them | 1.215576 | 1 |
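A self-contained sketch of the aggregation idea in SequencePath.info() above: per-frame info dictionaries are collapsed into one summary through a per-key combiner (most common value for owners, max for timestamps, sum for sizes). The dictionaries and values below are invented and do not come from Gaffer.
def most_common(values):
    counts = {}
    for value in values:
        counts[value] = counts.get(value, 0) + 1
    return max(counts, key=counts.get)

infos = [
    {"fileSystem:owner": "alice", "fileSystem:size": 1024, "fileSystem:modificationTime": 100},
    {"fileSystem:owner": "alice", "fileSystem:size": 2048, "fileSystem:modificationTime": 120},
    {"fileSystem:owner": "bob",   "fileSystem:size": 4096, "fileSystem:modificationTime": 110},
]

combiners = {
    "fileSystem:owner": most_common,       # most frequent value wins
    "fileSystem:size": sum,                # total size of the whole sequence
    "fileSystem:modificationTime": max,    # most recent modification
}

summary = {key: combiners[key]([info[key] for info in infos]) for key in infos[0]}
print(summary)   # owner 'alice', size 7168, modificationTime 120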
reo/migrations/0121_merge_20211001_1841.py | NREL/REopt_API | 7 | 642 | # Generated by Django 3.1.13 on 2021-10-01 18:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reo', '0117_financialmodel_generator_fuel_escalation_pct'),
('reo', '0120_auto_20210927_2046'),
('reo', '0121_auto_20211012_0305')
]
operations = [
]
| # Generated by Django 3.1.13 on 2021-10-01 18:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reo', '0117_financialmodel_generator_fuel_escalation_pct'),
('reo', '0120_auto_20210927_2046'),
('reo', '0121_auto_20211012_0305')
]
operations = [
]
| en | 0.841837 | # Generated by Django 3.1.13 on 2021-10-01 18:41 | 1.361027 | 1 |
PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py | ckamtsikis/cmssw | 852 | 643 | from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
def __init__(self, cfg_ana, cfg_comp, looperName ):
super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
#----------------------------------------
# DECLARATION OF HANDLES OF LEPTONS STUFF
#----------------------------------------
def declareHandles(self):
super(TauAnalyzer, self).declareHandles()
self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
def beginLoop(self, setup):
super(TauAnalyzer,self).beginLoop(setup)
self.counters.addCounter('events')
count = self.counters.counter('events')
count.register('all events')
count.register('has >=1 tau at preselection')
count.register('has >=1 selected taus')
count.register('has >=1 other taus')
#------------------
# MAKE LEPTON LISTS
#------------------
def makeTaus(self, event):
event.inclusiveTaus = []
event.selectedTaus = []
event.otherTaus = []
#get all
alltaus = map( Tau, self.handles['taus'].product() )
#make inclusive taus
for tau in alltaus:
tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0]
tau.lepVeto = False
tau.idDecayMode = tau.tauID("decayModeFinding")
tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs")
if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID):
continue
tau.inclusive_lepVeto = False
if self.cfg_ana.inclusive_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR:
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if self.cfg_ana.inclusive_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID):
tau.inclusive_lepVeto = True
if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID):
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if tau.pt() < self.cfg_ana.inclusive_ptMin: continue
if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue
if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue
def id3(tau,X):
"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""
return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
def id5(tau,X):
"""Create an integer equal to 1-2-3-4-5 for (very loose,
loose, medium, tight, very tight)"""
return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight")
def id6(tau,X):
"""Create an integer equal to 1-2-3-4-5-6 for (very loose,
loose, medium, tight, very tight, very very tight)"""
return id5(tau, X) + tau.tauID(X%"VVTight")
tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT")
tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT")
tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3")
tau.idAntiE = id5(tau, "againstElectron%sMVA6")
#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
if tau.tauID(self.cfg_ana.inclusive_tauID):
event.inclusiveTaus.append(tau)
for tau in event.inclusiveTaus:
tau.loose_lepVeto = False
if self.cfg_ana.loose_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR:
tau.loose_lepVeto = True
if self.cfg_ana.loose_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID):
tau.loose_lepVeto = True
if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID):
tau.loose_lepVeto = True
if tau.tauID(self.cfg_ana.loose_decayModeID) and \
tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \
abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \
tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto:
event.selectedTaus.append(tau)
else:
event.otherTaus.append(tau)
event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True)
event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
event.otherTaus.sort(key = lambda l : l.pt(), reverse = True)
self.counters.counter('events').inc('all events')
if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection')
if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus')
def matchTaus(self, event):
match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5)
for lep in event.inclusiveTaus:
gen = match[lep]
lep.mcMatchId = 1 if gen else 0
lep.genp = gen
def process(self, event):
self.readCollections( event.input )
self.makeTaus(event)
if not self.cfg_comp.isMC:
return True
if hasattr(event, 'gentaus'):
self.matchTaus(event)
return True
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
| from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
def __init__(self, cfg_ana, cfg_comp, looperName ):
super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
#----------------------------------------
# DECLARATION OF HANDLES OF LEPTONS STUFF
#----------------------------------------
def declareHandles(self):
super(TauAnalyzer, self).declareHandles()
self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
def beginLoop(self, setup):
super(TauAnalyzer,self).beginLoop(setup)
self.counters.addCounter('events')
count = self.counters.counter('events')
count.register('all events')
count.register('has >=1 tau at preselection')
count.register('has >=1 selected taus')
count.register('has >=1 other taus')
#------------------
# MAKE LEPTON LISTS
#------------------
def makeTaus(self, event):
event.inclusiveTaus = []
event.selectedTaus = []
event.otherTaus = []
#get all
alltaus = map( Tau, self.handles['taus'].product() )
#make inclusive taus
for tau in alltaus:
tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0]
tau.lepVeto = False
tau.idDecayMode = tau.tauID("decayModeFinding")
tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs")
if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID):
continue
tau.inclusive_lepVeto = False
if self.cfg_ana.inclusive_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR:
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if self.cfg_ana.inclusive_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID):
tau.inclusive_lepVeto = True
if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID):
tau.inclusive_lepVeto = True
if tau.inclusive_lepVeto: continue
if tau.pt() < self.cfg_ana.inclusive_ptMin: continue
if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue
if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue
def id3(tau,X):
"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""
return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
def id5(tau,X):
"""Create an integer equal to 1-2-3-4-5 for (very loose,
loose, medium, tight, very tight)"""
return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight")
def id6(tau,X):
"""Create an integer equal to 1-2-3-4-5-6 for (very loose,
loose, medium, tight, very tight, very very tight)"""
return id5(tau, X) + tau.tauID(X%"VVTight")
tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT")
tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT")
tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3")
tau.idAntiE = id5(tau, "againstElectron%sMVA6")
#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
if tau.tauID(self.cfg_ana.inclusive_tauID):
event.inclusiveTaus.append(tau)
for tau in event.inclusiveTaus:
tau.loose_lepVeto = False
if self.cfg_ana.loose_vetoLeptons:
for lep in event.selectedLeptons:
if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR:
tau.loose_lepVeto = True
if self.cfg_ana.loose_vetoLeptonsPOG:
if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID):
tau.loose_lepVeto = True
if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID):
tau.loose_lepVeto = True
if tau.tauID(self.cfg_ana.loose_decayModeID) and \
tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \
abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \
tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto:
event.selectedTaus.append(tau)
else:
event.otherTaus.append(tau)
event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True)
event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
event.otherTaus.sort(key = lambda l : l.pt(), reverse = True)
self.counters.counter('events').inc('all events')
if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection')
if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus')
def matchTaus(self, event):
match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5)
for lep in event.inclusiveTaus:
gen = match[lep]
lep.mcMatchId = 1 if gen else 0
lep.genp = gen
def process(self, event):
self.readCollections( event.input )
self.makeTaus(event)
if not self.cfg_comp.isMC:
return True
if hasattr(event, 'gentaus'):
self.matchTaus(event)
return True
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
| en | 0.593999 | #---------------------------------------- # DECLARATION OF HANDLES OF LEPTONS STUFF #---------------------------------------- #------------------ # MAKE LEPTON LISTS #------------------ #get all #make inclusive taus Create an integer equal to 1-2-3 for (loose,medium,tight) Create an integer equal to 1-2-3-4-5 for (very loose, loose, medium, tight, very tight) Create an integer equal to 1-2-3-4-5-6 for (very loose, loose, medium, tight, very tight, very very tight) #print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID)) # Find the definitions of the tau ID strings here: # http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py # inclusive very loose hadronic tau selection # ignored if not set or "" # If True, the following two IDs are required # loose hadronic tau selection # ignored if not set or "" # If True, the following two IDs are required | 2.110509 | 2 |
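The nested id3/id5/id6 helpers in the analyzer above pack the working points a tau passes into a single integer by summing boolean discriminator results. The standalone sketch below reproduces the id3 counting with a stub in place of pat::Tau::tauID(); the discriminator results are invented.
passed = {   # stub discriminator results; the analyzer reads these from tau.tauID()
    "byLooseCombinedIsolationDeltaBetaCorr3Hits": True,
    "byMediumCombinedIsolationDeltaBetaCorr3Hits": True,
    "byTightCombinedIsolationDeltaBetaCorr3Hits": False,
}

def tau_id(name):
    return passed.get(name, False)

def id3(pattern):
    """Return 0-3: one point for each working point passed (loose, medium, tight)."""
    return sum(tau_id(pattern % wp) for wp in ("Loose", "Medium", "Tight"))

print(id3("by%sCombinedIsolationDeltaBetaCorr3Hits"))   # 2 -> loose and medium pass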
vize/170401038.py | omuryorulmaz/kriptografi | 8 | 644 | # <NAME> 170401038
import math
import random
r = 3271
def egcd(a,b):
if(a == 0):
return(b,0,1)
else:
c,d,e = egcd(b % a, a)
return(c, e - (b // a) * d, d)
def modInvert(a,b):
c,d,e = egcd(a,b)
if c != 1:
raise Exception('moduler ters bulunamadi')
else:
return d % b
def randomInteger(n):
return random.randrange(2 ** (n-1), 2 ** n) | 1
def RabinMiller(f):
s = 5
if(f == 2):
return 1
if not (f & 1):
return 0
p = f-1
u = 0
r = f-1
while (r%2 == 0):
r >>= 1
u+=1
def Control(a):
z = pow(a, r, f)
if z == 1:
return 0
for i in range(u):
z = pow(a, (2**i) * r, f)
if z == p:
return 0
return 1
for i in range(s):
a = random.randrange(2, p-2)
if Control(a):
return 0
return 1
def Keygen(n):
while True:
p = randomInteger(n//2)
if (p - 1) % r == 0 and RabinMiller(p) and math.gcd(r, int((p - 1) / r)) == 1:
break
while True:
q = randomInteger(n//2)
if RabinMiller(q) and math.gcd(r, int(q - 1)) == 1:
break
N = p * q
phi = (p - 1) * (q - 1)
while True:
y = random.randrange(1, N)
if math.gcd(y, N) == 1:
x = pow(y, phi * modInvert(r, N) % N, N)
if x != 1:
break
publicKeyFile = open("publickey.txt", "w+")
publicKeyFile.write(str(N) + "\n" + str(y))
publicKeyFile.close()
privateKeyFile = open("privatekey.txt", "w+")
privateKeyFile.write(str(phi) + "\n" + str(x) + "\n" + str(N))
privateKeyFile.close()
def encrypt(plaintext, publickeytxt):
try:
open(publickeytxt, "r")
except FileNotFoundError:
print("The key pairs have not been generated, so encryption cannot be performed. Please run the Keygen function first.")
else:
publicKeyFile = open(publickeytxt, "r")
N, y = publicKeyFile.read().split("\n")
N = int(N)
y = int(y)
publicKeyFile.close()
plainTextFile = open(plaintext, "r")
plainCopy = int(plainTextFile.read().split("\n")[0])
plainTextFile.close()
while True:
u = random.randrange(1, int(N))
if math.gcd(u, N) == 1:
break
cipherText = pow(y, plainCopy, N) * pow(u, r, N) % N
cipherTextFile = open("ciphertext.txt", "w+")
cipherTextFile.write(str(cipherText))
cipherTextFile.close()
def decrypt(ciphertext, privatekeytxt):
try:
open(privatekeytxt, "r")
except FileNotFoundError:
print("The key pairs have not been generated, so decryption cannot be performed. Please run the Keygen function first.")
else:
privateKeyFile = open(privatekeytxt, "r")
phi, x, N = privateKeyFile.read().split("\n")
phi, x, N = int(phi), int(x), int(N)
privateKeyFile.close()
cipherTextFile = open(ciphertext, "r")
cipherCopy = int(cipherTextFile.read())
a = pow(cipherCopy, (phi * modInvert(r, N)) % N, N)
for i in range(r -1):
if(pow(x, i, N) == a):
break
plainText2File = open("plaintext2.txt", "w+")
plainText2File.write(str(i))
plainText2File.close()
plain2File = open("plaintext2.txt", "r")
plain1File = open("plaintext.txt", "r")
plain1 = plain1File.read().split("\n")[0]
plain2 = plain2File.read().split("\n")[0]
if plain1 == plain2:
print("The files are identical.")
else:
print("The files are not identical.")
n = int(input("Enter the bit length of the key pairs to be generated: "))
Keygen(n)
encrypt("plaintext.txt","publickey.txt")
decrypt("ciphertext.txt", "privatekey.txt")
| # <NAME> 170401038
import math
import random
r = 3271
def egcd(a,b):
if(a == 0):
return(b,0,1)
else:
c,d,e = egcd(b % a, a)
return(c, e - (b // a) * d, d)
def modInvert(a,b):
c,d,e = egcd(a,b)
if c != 1:
raise Exception('moduler ters bulunamadi')
else:
return d % b
def randomInteger(n):
return random.randrange(2 ** (n-1), 2 ** n) | 1
def RabinMiller(f):
s = 5
if(f == 2):
return 1
if not (f & 1):
return 0
p = f-1
u = 0
r = f-1
while (r%2 == 0):
r >>= 1
u+=1
def Control(a):
z = pow(a, r, f)
if z == 1:
return 0
for i in range(u):
z = pow(a, (2**i) * r, f)
if z == p:
return 0
return 1
for i in range(s):
a = random.randrange(2, p-2)
if Control(a):
return 0
return 1
def Keygen(n):
while True:
p = randomInteger(n//2)
if (p - 1) % r == 0 and RabinMiller(p) and math.gcd(r, int((p - 1) / r)) == 1:
break
while True:
q = randomInteger(n//2)
if RabinMiller(q) and math.gcd(r, int(q - 1)) == 1:
break
N = p * q
phi = (p - 1) * (q - 1)
while True:
y = random.randrange(1, N)
if math.gcd(y, N) == 1:
x = pow(y, phi * modInvert(r, N) % N, N)
if x != 1:
break
publicKeyFile = open("publickey.txt", "w+")
publicKeyFile.write(str(N) + "\n" + str(y))
publicKeyFile.close()
privateKeyFile = open("privatekey.txt", "w+")
privateKeyFile.write(str(phi) + "\n" + str(x) + "\n" + str(N))
privateKeyFile.close()
def encrypt(plaintext, publickeytxt):
try:
open(publickeytxt, "r")
except FileNotFoundError:
print("The key pairs have not been generated, so encryption cannot be performed. Please run the Keygen function first.")
else:
publicKeyFile = open(publickeytxt, "r")
N, y = publicKeyFile.read().split("\n")
N = int(N)
y = int(y)
publicKeyFile.close()
plainTextFile = open(plaintext, "r")
plainCopy = int(plainTextFile.read().split("\n")[0])
plainTextFile.close()
while True:
u = random.randrange(1, int(N))
if math.gcd(u, N) == 1:
break
cipherText = pow(y, plainCopy, N) * pow(u, r, N) % N
cipherTextFile = open("ciphertext.txt", "w+")
cipherTextFile.write(str(cipherText))
cipherTextFile.close()
def decrypt(ciphertext, privatekeytxt):
try:
open(privatekeytxt, "r")
except FileNotFoundError:
print("The key pairs have not been generated, so decryption cannot be performed. Please run the Keygen function first.")
else:
privateKeyFile = open(privatekeytxt, "r")
phi, x, N = privateKeyFile.read().split("\n")
phi, x, N = int(phi), int(x), int(N)
privateKeyFile.close()
cipherTextFile = open(ciphertext, "r")
cipherCopy = int(cipherTextFile.read())
a = pow(cipherCopy, (phi * modInvert(r, N)) % N, N)
for i in range(r -1):
if(pow(x, i, N) == a):
break
plainText2File = open("plaintext2.txt", "w+")
plainText2File.write(str(i))
plainText2File.close()
plain2File = open("plaintext2.txt", "r")
plain1File = open("plaintext.txt", "r")
plain1 = plain1File.read().split("\n")[0]
plain2 = plain2File.read().split("\n")[0]
if plain1 == plain2:
print("The files are identical.")
else:
print("The files are not identical.")
n = int(input("Enter the bit length of the key pairs to be generated: "))
Keygen(n)
encrypt("plaintext.txt","publickey.txt")
decrypt("ciphertext.txt", "privatekey.txt")
| en | 0.235886 | # <NAME> 170401038 | 3.125785 | 3 |
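The extended-GCD, modular-inverse and Miller-Rabin helpers in the record above can be sanity-checked with small, well-known numbers. The snippet below re-implements the first two independently so it runs on its own; 17 mod 3120 and the Carmichael number 561 are textbook examples, not values taken from the record.
def egcd(a, b):
    if a == 0:
        return (b, 0, 1)
    g, x, y = egcd(b % a, a)
    return (g, y - (b // a) * x, x)

def mod_invert(a, m):
    g, x, _ = egcd(a, m)
    if g != 1:
        raise ValueError("a has no inverse modulo m")
    return x % m

inv = mod_invert(17, 3120)
assert (17 * inv) % 3120 == 1      # inv == 2753, the classic textbook value
# 561 = 3 * 11 * 17 is a Carmichael number: a bare Fermat check is fooled,
# which is why the script relies on Miller-Rabin for its primality tests.
assert pow(2, 560, 561) == 1
print(inv)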
seqenv/ontology.py | xapple/seqenv | 7 | 645 | # Built-in modules #
# Internal modules #
from seqenv import module_dir
from seqenv.common.cache import property_cached
# Third party modules #
import sh, networkx
import matplotlib.colors
# A list of envos to help test this module #
test_envos = [
"ENVO:00000033",
"ENVO:00000043",
"ENVO:00000067",
"ENVO:00000143",
"ENVO:00000210",
"ENVO:00000215",
"ENVO:00000475",
]
################################################################################
class Ontology(object):
"""An object that gives you access to the graph (network with nodes and edges)
of the ENVO ontology from the OBO file's path.
Other libraries not used here that could be added:
* graphviz: http://graphviz.readthedocs.org/en/latest/api.html#digraph
* pydot: https://github.com/erocarrera/pydot
"""
def __init__(self, path=None):
"""Give the path to the OBO file"""
if path is None: path = module_dir + 'data_envo/envo.obo'
self.path = path
# --------------------------- In this section --------------------------- #
# orange_obo
# goatools
# orange_network
# pygraphviz
# networkx
@property_cached
def orange_obo(self):
"""The ontology loaded by the `orange` library.
* http://orange.biolab.si
* http://orange-bioinformatics.readthedocs.org/en/latest/
* https://github.com/biolab/orange-bio
* https://bitbucket.org/biolab/orange-bioinformatics
To install: $ pip install Orange-Bioinformatics
"""
from orangecontrib.bio.ontology import OBOOntology
return OBOOntology(self.path)
@property_cached
def goatools(self):
"""The network loaded into goatools' format.
* https://github.com/tanghaibao/goatools
To install: $ pip install goatools
"""
from goatools import obo_parser
return obo_parser.GODag(self.path)
@property_cached
def orange_network(self):
"""The network converted to `orange network` format.
Doesn't seem to work until they update PyPI.
* https://bitbucket.org/biolab/orange-network/
* http://orange-network.readthedocs.org/en/latest/
To install: $ pip install orange-network
"""
return self.orange_obo.to_network()
@property_cached
def pygraphviz(self):
"""The network converted to `pygraphviz` format.
* http://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/
To install: $ pip install pygraphviz
"""
g = self.orange_obo.to_graphviz()
assert g.is_directed()
assert g.is_strict()
return g
@property_cached
def networkx(self):
"""The network converted to `networkx` format.
Seems like it loses directionality.
* https://networkx.readthedocs.org/en/stable/
To install: $ pip install networkx
"""
g = self.orange_obo.to_networkx()
assert networkx.is_directed_acyclic_graph(g)
return g
# --------------------------- In this section --------------------------- #
# test
# get_subgraph
# add_weights
# draw_to_pdf
# write_to_dot
def get_subgraph(self, envos=None):
"""Given a list of ENVO terms, get the subgraph that contains them all
and all their ancestors, up to the root.
Outputs a networkx DiGraph object."""
# Testing mode #
if envos is None: envos = test_envos
# All nodes #
nodes = set(n for e in envos for n in networkx.descendants(self.networkx, e))
nodes.update(envos)
nodes = list(nodes)
# Return #
return self.networkx.subgraph(nodes)
def add_weights(self, g, weights=None):
"""Input a networkx DiGraph object.
Outputs a pygraphviz AGraph object."""
g = networkx.nx_agraph.to_agraph(g)
if weights is None: return g
for envo in weights:
node = g.get_node(envo)
weight = weights[envo]
color = matplotlib.colors.rgb2hex((1.0, 1.0 - weight, 0.0))
node.attr['fillcolor'] = color
return g
def add_style(self, g):
"""Input a pygraphviz AGraph object.
Outputs a pygraphviz AGraph object."""
for node in g.nodes():
text = node.attr['name']
node.attr['label'] = text.replace(' ','\\n')
node.attr['name'] = ''
node.attr['shape'] = 'Mrecord'
node.attr['style'] = 'filled'
# To add the envo id to each node, uncomment:
#envo = node.attr['label']
#node.attr['label'] = "{<f0> %s|<f1> %s}" % (envo, text)
for edge in g.edges():
if edge.attr['label'] == 'located_in': edge.attr['color'] = 'turquoise4'
edge.attr['label'] = ''
return g
def write_to_dot(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle: handle.write(g.to_string())
def add_legend(self, path):
"""Input the path to a dot file."""
legend_txt = """
digraph {
rankdir=LR
node [shape=plaintext,fontname="helvetica"]
subgraph cluster_01 {
label = "NB: darker nodes weigh more";
key [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td align="right" port="i1">Is</td></tr>
<tr><td align="right" port="i2">Part</td></tr>
<tr><td align="right" port="i3">Located</td></tr>
</table>>];
key2 [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td port="i1">a</td></tr>
<tr><td port="i2">of</td></tr>
<tr><td port="i3">in</td></tr>
</table>>];
key:i1:e -> key2:i1:w [color=red];
key:i2:e -> key2:i2:w [color=blue];
key:i3:e -> key2:i3:w [color=turquoise4];
}"""
orig_txt = [line.rstrip('\n') for line in open(path, 'r') if line]
new_text = [line.lstrip() for line in legend_txt.split('\n') if line]
new_text = '\n'.join(new_text + orig_txt[2:])
with open(path, 'w') as handle: handle.write(new_text)
def draw_to_pdf(self, in_path, out_path):
"""Input a path to a dot file."""
sh.dot(in_path, '-Tpdf', '-o', out_path)
# --------------------------- In this section --------------------------- #
# descends
def descends(self, e, root):
"""Does the envo term `e` descend from the node `root`?
Returns True or False."""
# Auto conversion #
if isinstance(e, int): e = "ENVO:%08d" % e
if isinstance(root, int): root = "ENVO:%08d" % root
# Return #
return e in networkx.ancestors(self.networkx, root)
# --------------------------- In this section --------------------------- #
# print_test
# draw_with_networkx
# draw_with_pygraphviz
def print_test(self, e=None):
"""Just a method to see a bit how the different libraries work."""
# Test node #
if e is None: e = test_envos[0]
# Goa #
print "Goa: "
print self.goatools[e]
# Pygraphviz #
print "pygraphviz: "
print self.pygraphviz[e]
print self.pygraphviz.successors(e)
print self.pygraphviz.predecessors(e)
print self.pygraphviz.get_node(e)
# Networkx #
import networkx
print "networkx: "
print self.networkx[e]
print self.networkx.successors(e)
print self.networkx.predecessors(e)
print networkx.ancestors(self.networkx, e) # same as predecessors
print networkx.descendants(self.networkx, e) # almost as child_to_parents
def draw_with_networkx(self, g, path):
"""Input a networkx DiGraph object."""
from matplotlib import pyplot
networkx.draw(g)
pyplot.savefig(path)
pyplot.close()
def draw_with_pygraphviz(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle:
handle.write(g.to_string())
| # Built-in modules #
# Internal modules #
from seqenv import module_dir
from seqenv.common.cache import property_cached
# Third party modules #
import sh, networkx
import matplotlib.colors
# A list of envos to help test this module #
test_envos = [
"ENVO:00000033",
"ENVO:00000043",
"ENVO:00000067",
"ENVO:00000143",
"ENVO:00000210",
"ENVO:00000215",
"ENVO:00000475",
]
################################################################################
class Ontology(object):
"""An object that gives you access to the graph (network with nodes and edges)
of the ENVO ontology from the OBO file's path.
Other libraries not used here that could be added:
* graphviz: http://graphviz.readthedocs.org/en/latest/api.html#digraph
* pydot: https://github.com/erocarrera/pydot
"""
def __init__(self, path=None):
"""Give the path to the OBO file"""
if path is None: path = module_dir + 'data_envo/envo.obo'
self.path = path
# --------------------------- In this section --------------------------- #
# orange_obo
# goatools
# orange_network
# pygraphviz
# networkx
@property_cached
def orange_obo(self):
"""The ontology loaded by the `orange` library.
* http://orange.biolab.si
* http://orange-bioinformatics.readthedocs.org/en/latest/
* https://github.com/biolab/orange-bio
* https://bitbucket.org/biolab/orange-bioinformatics
To install: $ pip install Orange-Bioinformatics
"""
from orangecontrib.bio.ontology import OBOOntology
return OBOOntology(self.path)
@property_cached
def goatools(self):
"""The network loaded into goatools' format.
* https://github.com/tanghaibao/goatools
To install: $ pip install goatools
"""
from goatools import obo_parser
return obo_parser.GODag(self.path)
@property_cached
def orange_network(self):
"""The network converted to `orange network` format.
Doesn't seem to work until they update PyPI.
* https://bitbucket.org/biolab/orange-network/
* http://orange-network.readthedocs.org/en/latest/
To install: $ pip install orange-network
"""
return self.orange_obo.to_network()
@property_cached
def pygraphviz(self):
"""The network converted to `pygraphviz` format.
* http://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/
To install: $ pip install pygraphviz
"""
g = self.orange_obo.to_graphviz()
assert g.is_directed()
assert g.is_strict()
return g
@property_cached
def networkx(self):
"""The network converted to `networkx` format.
Seems like it loses directionality.
* https://networkx.readthedocs.org/en/stable/
To install: $ pip install networkx
"""
g = self.orange_obo.to_networkx()
assert networkx.is_directed_acyclic_graph(g)
return g
# --------------------------- In this section --------------------------- #
# test
# get_subgraph
# add_weights
# draw_to_pdf
# write_to_dot
def get_subgraph(self, envos=None):
"""Given a list of ENVO terms, get the subgraph that contains them all
and all their ancestors, up to the root.
Outputs a networkx DiGraph object."""
# Testing mode #
if envos is None: envos = test_envos
# All nodes #
nodes = set(n for e in envos for n in networkx.descendants(self.networkx, e))
nodes.update(envos)
nodes = list(nodes)
# Return #
return self.networkx.subgraph(nodes)
def add_weights(self, g, weights=None):
"""Input a networkx DiGraph object.
Outputs a pygraphviz AGraph object."""
g = networkx.nx_agraph.to_agraph(g)
if weights is None: return g
for envo in weights:
node = g.get_node(envo)
weight = weights[envo]
color = matplotlib.colors.rgb2hex((1.0, 1.0 - weight, 0.0))
node.attr['fillcolor'] = color
return g
def add_style(self, g):
"""Input a pygraphviz AGraph object.
Outputs a pygraphviz AGraph object."""
for node in g.nodes():
text = node.attr['name']
node.attr['label'] = text.replace(' ','\\n')
node.attr['name'] = ''
node.attr['shape'] = 'Mrecord'
node.attr['style'] = 'filled'
# To add the envo id to each node, uncomment:
#envo = node.attr['label']
#node.attr['label'] = "{<f0> %s|<f1> %s}" % (envo, text)
for edge in g.edges():
if edge.attr['label'] == 'located_in': edge.attr['color'] = 'turquoise4'
edge.attr['label'] = ''
return g
def write_to_dot(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle: handle.write(g.to_string())
def add_legend(self, path):
"""Input the path to a dot file."""
legend_txt = """
digraph {
rankdir=LR
node [shape=plaintext,fontname="helvetica"]
subgraph cluster_01 {
label = "NB: darker nodes weigh more";
key [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td align="right" port="i1">Is</td></tr>
<tr><td align="right" port="i2">Part</td></tr>
<tr><td align="right" port="i3">Located</td></tr>
</table>>];
key2 [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td port="i1">a</td></tr>
<tr><td port="i2">of</td></tr>
<tr><td port="i3">in</td></tr>
</table>>];
key:i1:e -> key2:i1:w [color=red];
key:i2:e -> key2:i2:w [color=blue];
key:i3:e -> key2:i3:w [color=turquoise4];
}"""
orig_txt = [line.rstrip('\n') for line in open(path, 'r') if line]
new_text = [line.lstrip() for line in legend_txt.split('\n') if line]
new_text = '\n'.join(new_text + orig_txt[2:])
with open(path, 'w') as handle: handle.write(new_text)
def draw_to_pdf(self, in_path, out_path):
"""Input a path to a dot file."""
sh.dot(in_path, '-Tpdf', '-o', out_path)
# --------------------------- In this section --------------------------- #
# descends
def descends(self, e, root):
"""Does the envo term `e` descend from the node `root`?
Returns True or False."""
# Auto conversion #
if isinstance(e, int): e = "ENVO:%08d" % e
if isinstance(root, int): root = "ENVO:%08d" % root
# Return #
return e in networkx.ancestors(self.networkx, root)
# --------------------------- In this section --------------------------- #
# print_test
# draw_with_networkx
# draw_with_pygraphviz
def print_test(self, e=None):
"""Just a method to see a bit how the different libraries work."""
# Test node #
if e is None: e = test_envos[0]
# Goa #
print "Goa: "
print self.goatools[e]
# Pygraphviz #
print "pygraphviz: "
print self.pygraphviz[e]
print self.pygraphviz.successors(e)
print self.pygraphviz.predecessors(e)
print self.pygraphviz.get_node(e)
# Networkx #
import networkx
print "networkx: "
print self.networkx[e]
print self.networkx.successors(e)
print self.networkx.predecessors(e)
print networkx.ancestors(self.networkx, e) # same as predecessors
print networkx.descendants(self.networkx, e) # almost as child_to_parents
def draw_with_networkx(self, g, path):
"""Input a networkx DiGraph object."""
from matplotlib import pyplot
networkx.draw(g)
pyplot.savefig(path)
pyplot.close()
def draw_with_pygraphviz(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle:
handle.write(g.to_string())
| en | 0.578715 | # Built-in modules # # Internal modules # # Third party modules # # A list of envos to help test this module # ################################################################################ A object that gives you access to the graph (network with nodes and edges) of the ENVO ontology from the OBO file's path. Other libraries not used here that could be added: * graphviz: http://graphviz.readthedocs.org/en/latest/api.html#digraph * pydot: https://github.com/erocarrera/pydot Give the path to the OBO file # --------------------------- In this section --------------------------- # # orange_obo # goatools # orange_network # pygraphviz # networkx The ontology loaded by the `orange` library. * http://orange.biolab.si * http://orange-bioinformatics.readthedocs.org/en/latest/ * https://github.com/biolab/orange-bio * https://bitbucket.org/biolab/orange-bioinformatics To install: $ pip install Orange-Bioinformatics The network loaded into goatools' format. * https://github.com/tanghaibao/goatools To install: $ pip install goatools The network converted to `orange network` format. Doesn't seem to work until they update PyPI. * https://bitbucket.org/biolab/orange-network/ * http://orange-network.readthedocs.org/en/latest/ To install: $ pip install orange-network The network converted to `pygraphviz` format. * http://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/ To install: $ pip install pygraphviz The network converted to `networkx` format. Seems like it looses directionality. * https://networkx.readthedocs.org/en/stable/ To install: $ pip install networkx # --------------------------- In this section --------------------------- # # test # get_subgraph # add_weights # draw_to_pdf # write_to_dot Given a list of ENVO terms, get the subgraph that contains them all and all their ancestors, up to the root. Outputs a networkx DiGraph object. # Testing mode # # All nodes # # Return # Input a networkx DiGraph object. Outputs a pygraphviz AGraph object. Input a pygraphviz AGraph object. Outputs a pygraphviz AGraph object. # To add the envo id to each node, uncomment: #envo = node.attr['label'] #node.attr['label'] = "{<f0> %s|<f1> %s}" % (envo, text) Input a pygraphviz AGraph object. Input the path to a dot file. digraph { rankdir=LR node [shape=plaintext,fontname="helvetica"] subgraph cluster_01 { label = "NB: darker nodes weigh more"; key [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0"> <tr><td align="right" port="i1">Is</td></tr> <tr><td align="right" port="i2">Part</td></tr> <tr><td align="right" port="i3">Located</td></tr> </table>>]; key2 [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0"> <tr><td port="i1">a</td></tr> <tr><td port="i2">of</td></tr> <tr><td port="i3">in</td></tr> </table>>]; key:i1:e -> key2:i1:w [color=red]; key:i2:e -> key2:i2:w [color=blue]; key:i3:e -> key2:i3:w [color=turquoise4]; } Input a path to a dot file. # --------------------------- In this section --------------------------- # # descends Does the envo term `e` descend from the node `root`? Returns True or False. # Auto conversion # # Return # # --------------------------- In this section --------------------------- # # print_test # draw_with_networkx # draw_with_pygraphviz Just a method to see a bit how the different libraries work. # Test node # # Goa # # Pygraphviz # # Networkx # # same as predecessors # almost as child_to_parents Input a networkx DiGraph object. Input a pygraphviz AGraph object. | 2.338365 | 2 |
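The graph queries behind descends() and get_subgraph() above are easy to see on a toy graph. The three-node graph below is invented; its edges are assumed to point from a term to its parent, which is consistent with how the record's code uses networkx.ancestors and networkx.descendants.
import networkx

g = networkx.DiGraph()
g.add_edge("ENVO:child", "ENVO:parent")   # child -> parent
g.add_edge("ENVO:parent", "ENVO:root")    # parent -> root

# Everything that can reach the root, i.e. everything descending from it:
print(networkx.ancestors(g, "ENVO:root"))       # {'ENVO:child', 'ENVO:parent'}
# Everything reachable from the child, i.e. its lineage up to the root:
print(networkx.descendants(g, "ENVO:child"))    # {'ENVO:parent', 'ENVO:root'}
# Mirrors Ontology.descends("ENVO:child", "ENVO:root"):
print("ENVO:child" in networkx.ancestors(g, "ENVO:root"))   # True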
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py | adewaleo/azure-sdk-for-python | 1 | 646 | <reponame>adewaleo/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CertificateBodyDescription(msrest.serialization.Model):
"""The JSON-serialized X509 Certificate.
:param certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:type certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateBodyDescription, self).__init__(**kwargs)
self.certificate = kwargs.get('certificate', None)
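# Illustrative helper (an editorial addition, not part of the generated client):
# one way a caller could build the 'certificate' value documented above.  The file
# path is supplied by the caller; a DER-encoded .cer file is base-64 encoded here,
# while the text of a .pem file could be passed through unchanged.
def _example_certificate_body(path):
    import base64
    with open(path, "rb") as handle:
        encoded = base64.b64encode(handle.read()).decode("ascii")
    return CertificateBodyDescription(certificate=encoded)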
class CertificateDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The description of an X509 CA Certificate.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateDescription, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(msrest.serialization.Model):
"""The JSON-serialized array of Certificate objects.
:param value: The array of Certificate objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.CertificateDescription]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateDescription]'},
}
def __init__(
self,
**kwargs
):
super(CertificateListDescription, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class CertificateProperties(msrest.serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
    :ivar is_verified: Determines whether the certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:param certificate: The certificate content.
:type certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateProperties, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = kwargs.get('certificate', None)
class CertificatePropertiesWithNonce(msrest.serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
    :ivar is_verified: Determines whether the certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
'verification_code': {'readonly': True},
'certificate': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'verification_code': {'key': 'verificationCode', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificatePropertiesWithNonce, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(msrest.serialization.Model):
"""The JSON-serialized leaf certificate.
:param certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:type certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateVerificationDescription, self).__init__(**kwargs)
self.certificate = kwargs.get('certificate', None)
class CertificateWithNonceDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificatePropertiesWithNonce'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateWithNonceDescription, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(msrest.serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:param max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-
device-messages.
:type max_delivery_count: int
:param default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-
to-device-messages.
:type default_ttl_as_iso8601: ~datetime.timedelta
:param feedback: The properties of the feedback queue for cloud-to-device messages.
:type feedback: ~azure.mgmt.iothub.v2019_11_04.models.FeedbackProperties
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
'default_ttl_as_iso8601': {'key': 'defaultTtlAsIso8601', 'type': 'duration'},
'feedback': {'key': 'feedback', 'type': 'FeedbackProperties'},
}
def __init__(
self,
**kwargs
):
super(CloudToDeviceProperties, self).__init__(**kwargs)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
self.default_ttl_as_iso8601 = kwargs.get('default_ttl_as_iso8601', None)
self.feedback = kwargs.get('feedback', None)
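# Illustrative usage sketch (editor-added comment, not part of the generated code):
# cloud-to-device settings are plain kwargs; duration-typed fields take
# datetime.timedelta values. The numbers are placeholders, and FeedbackProperties
# is defined further down in this module.
#
#     import datetime
#     c2d = CloudToDeviceProperties(
#         max_delivery_count=10,
#         default_ttl_as_iso8601=datetime.timedelta(hours=1),
#         feedback=FeedbackProperties(max_delivery_count=10),
#     )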
class EndpointHealthData(msrest.serialization.Model):
"""The health data for an endpoint.
:param endpoint_id: Id of the endpoint.
:type endpoint_id: str
    :param health_status: Health statuses have the following meanings. The 'healthy' status shows that
the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint
is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint.
The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an
eventually consistent state of health. The 'dead' status shows that the endpoint is not
accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Possible values include: "unknown", "healthy", "unhealthy",
"dead".
:type health_status: str or ~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthStatus
"""
_attribute_map = {
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EndpointHealthData, self).__init__(**kwargs)
self.endpoint_id = kwargs.get('endpoint_id', None)
self.health_status = kwargs.get('health_status', None)
class EndpointHealthDataListResult(msrest.serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: JSON-serialized array of Endpoint health data.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EndpointHealthData]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EndpointHealthDataListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class EnrichmentProperties(msrest.serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:param key: Required. The key or name for the enrichment property.
:type key: str
:param value: Required. The value for the enrichment property.
:type value: str
:param endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:type endpoint_names: list[str]
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
'endpoint_names': {'required': True, 'min_items': 1},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(EnrichmentProperties, self).__init__(**kwargs)
self.key = kwargs['key']
self.value = kwargs['value']
self.endpoint_names = kwargs['endpoint_names']
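# Illustrative usage sketch (editor-added comment, not part of the generated code):
# key, value and endpoint_names are required, so omitting any of them raises a
# KeyError from __init__. "events" refers to the built-in endpoint and the key and
# value shown are placeholders.
#
#     enrichment = EnrichmentProperties(
#         key="deviceLocation", value="$twin.tags.location", endpoint_names=["events"]
#     )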
class ErrorDetails(msrest.serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
'code': {'readonly': True},
'http_status_code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupInfo(msrest.serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The tags.
:type properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
    :ivar type: The resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubConsumerGroupInfo, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupsListResult(msrest.serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of consumer groups objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubConsumerGroupsListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class EventHubProperties(msrest.serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:param retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:type retention_time_in_days: long
:param partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-
messaging#device-to-cloud-messages.
:type partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
'partition_ids': {'readonly': True},
'path': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'},
'partition_count': {'key': 'partitionCount', 'type': 'int'},
'partition_ids': {'key': 'partitionIds', 'type': '[str]'},
'path': {'key': 'path', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubProperties, self).__init__(**kwargs)
self.retention_time_in_days = kwargs.get('retention_time_in_days', None)
self.partition_count = kwargs.get('partition_count', None)
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:param export_blob_container_uri: Required. The export blob container URI.
:type export_blob_container_uri: str
:param exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:type exclude_keys: bool
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = kwargs['export_blob_container_uri']
self.exclude_keys = kwargs['exclude_keys']
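# Illustrative usage sketch (editor-added comment, not part of the generated code):
# both fields are required; the SAS container URI below is a placeholder.
#
#     export_request = ExportDevicesRequest(
#         export_blob_container_uri="https://<account>.blob.core.windows.net/<container>?<sas>",
#         exclude_keys=True,
#     )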
class FailoverInput(msrest.serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:param failover_region: Required. Region the hub will be failed over to.
:type failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = kwargs['failover_region']
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:param name: The name of the route. The name can only include alphanumeric characters, periods,
     underscores, and hyphens; it has a maximum length of 64 characters and must be unique.
:type name: str
    :param source: Required. The source to which the routing rule is to be applied. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:type condition: str
:param endpoint_names: Required. The list of endpoints to which the messages that satisfy the
     condition are routed. Currently only 1 endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether the fallback route is enabled.
:type is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.source = kwargs['source']
self.condition = kwargs.get('condition', None)
self.endpoint_names = kwargs['endpoint_names']
self.is_enabled = kwargs['is_enabled']
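# Illustrative usage sketch (editor-added comment, not part of the generated code):
# a fallback route that sends any unmatched device message to the built-in
# endpoint. "$fallback", "true" and "events" are the conventional defaults, used
# here as placeholder values.
#
#     fallback = FallbackRouteProperties(
#         name="$fallback",
#         source="DeviceMessages",
#         condition="true",
#         endpoint_names=["events"],
#         is_enabled=True,
#     )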
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:param lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:type lock_duration_as_iso8601: ~datetime.timedelta
:param ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-
messaging#cloud-to-device-messages.
:type ttl_as_iso8601: ~datetime.timedelta
:param max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-
to-device-messages.
:type max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None)
self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:param input_blob_container_uri: Required. The input blob container URI.
:type input_blob_container_uri: str
:param output_blob_container_uri: Required. The output blob container URI.
:type output_blob_container_uri: str
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = kwargs['input_blob_container_uri']
self.output_blob_container_uri = kwargs['output_blob_container_uri']
class IotHubCapacity(msrest.serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: long
:ivar maximum: The maximum number of units.
:vartype maximum: long
:ivar default: The default number of units.
:vartype default: long
:ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubScaleType
"""
_validation = {
'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'default': {'key': 'default', 'type': 'long'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubCapacity, self).__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(msrest.serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: Required. The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: Required. The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:type etag: str
:param properties: IotHub properties.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.IotHubProperties
:param sku: Required. IotHub SKU info.
:type sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IotHubProperties'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
}
def __init__(
self,
**kwargs
):
super(IotHubDescription, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.properties = kwargs.get('properties', None)
self.sku = kwargs['sku']
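# Illustrative usage sketch (editor-added comment, not part of the generated code):
# a minimal create/update payload needs a location and a SKU. IotHubSkuInfo and
# IotHubProperties are defined further down in this module; the region and SKU
# values are placeholders.
#
#     hub = IotHubDescription(
#         location="westus2",
#         sku=IotHubSkuInfo(name="S1", capacity=1),
#         properties=IotHubProperties(enable_file_upload_notifications=False),
#     )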
class IotHubDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of IotHubDescription objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubDescriptionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class IotHubLocationDescription(msrest.serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:param location: The name of the Azure region.
:type location: str
:param role: The role of the region, can be either primary or secondary. The primary region is
where the IoT hub is currently provisioned. The secondary region is the Azure disaster recovery
(DR) paired region and also the region where the IoT hub can failover to. Possible values
include: "primary", "secondary".
:type role: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubReplicaRoleType
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubLocationDescription, self).__init__(**kwargs)
self.location = kwargs.get('location', None)
self.role = kwargs.get('role', None)
class IotHubNameAvailabilityInfo(msrest.serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Possible values include: "Invalid",
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubNameUnavailabilityReason
:param message: The detailed reason message.
:type message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubNameAvailabilityInfo, self).__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = kwargs.get('message', None)
class IotHubProperties(msrest.serialization.Model):
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:param authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:type authorization_policies:
list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:param ip_filter_rules: The IP filter rules.
:type ip_filter_rules: list[~azure.mgmt.iothub.v2019_11_04.models.IpFilterRule]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
:param event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible
     key to this dictionary is events. This key has to be present in the dictionary while making
create or update calls for the IoT hub.
:type event_hub_endpoints: dict[str, ~azure.mgmt.iothub.v2019_11_04.models.EventHubProperties]
:param routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:type routing: ~azure.mgmt.iothub.v2019_11_04.models.RoutingProperties
:param storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True,
causes an error to be thrown.
:type storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.StorageEndpointProperties]
:param messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:type messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.MessagingEndpointProperties]
:param enable_file_upload_notifications: If True, file upload notifications are enabled.
:type enable_file_upload_notifications: bool
:param cloud_to_device: The IoT hub cloud-to-device messaging properties.
:type cloud_to_device: ~azure.mgmt.iothub.v2019_11_04.models.CloudToDeviceProperties
:param comments: IoT hub comments.
:type comments: str
:param features: The capabilities and features enabled for the IoT hub. Possible values
include: "None", "DeviceManagement".
:type features: str or ~azure.mgmt.iothub.v2019_11_04.models.Capabilities
:ivar locations: Primary and secondary location for iot hub.
:vartype locations: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubLocationDescription]
"""
_validation = {
'provisioning_state': {'readonly': True},
'state': {'readonly': True},
'host_name': {'readonly': True},
'locations': {'readonly': True},
}
_attribute_map = {
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'},
'routing': {'key': 'routing', 'type': 'RoutingProperties'},
'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'},
'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'},
'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'},
'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'},
'comments': {'key': 'comments', 'type': 'str'},
'features': {'key': 'features', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[IotHubLocationDescription]'},
}
def __init__(
self,
**kwargs
):
super(IotHubProperties, self).__init__(**kwargs)
self.authorization_policies = kwargs.get('authorization_policies', None)
self.ip_filter_rules = kwargs.get('ip_filter_rules', None)
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = kwargs.get('event_hub_endpoints', None)
self.routing = kwargs.get('routing', None)
self.storage_endpoints = kwargs.get('storage_endpoints', None)
self.messaging_endpoints = kwargs.get('messaging_endpoints', None)
self.enable_file_upload_notifications = kwargs.get('enable_file_upload_notifications', None)
self.cloud_to_device = kwargs.get('cloud_to_device', None)
self.comments = kwargs.get('comments', None)
self.features = kwargs.get('features', None)
self.locations = None
class IotHubQuotaMetricInfo(msrest.serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: long
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: long
"""
_validation = {
'name': {'readonly': True},
'current_value': {'readonly': True},
'max_value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'max_value': {'key': 'maxValue', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(IotHubQuotaMetricInfo, self).__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of quota metrics objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubQuotaMetricInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubQuotaMetricInfoListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class IotHubSkuDescription(msrest.serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
    :param sku: Required. The SKU information.
:type sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
:param capacity: Required. IotHub capacity.
:type capacity: ~azure.mgmt.iothub.v2019_11_04.models.IotHubCapacity
"""
_validation = {
'resource_type': {'readonly': True},
'sku': {'required': True},
'capacity': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'capacity': {'key': 'capacity', 'type': 'IotHubCapacity'},
}
def __init__(
self,
**kwargs
):
super(IotHubSkuDescription, self).__init__(**kwargs)
self.resource_type = None
self.sku = kwargs['sku']
self.capacity = kwargs['capacity']
class IotHubSkuDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of IotHubSkuDescription.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubSkuDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubSkuDescriptionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class IotHubSkuInfo(msrest.serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:type name: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Possible values include: "Free", "Standard",
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuTier
:param capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:type capacity: long
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(IotHubSkuInfo, self).__init__(**kwargs)
self.name = kwargs['name']
self.tier = None
self.capacity = kwargs.get('capacity', None)
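# Illustrative usage sketch (editor-added comment, not part of the generated code):
# only the SKU name and capacity are settable by the caller; tier is
# server-populated and stays None on the client side. Values are placeholders.
#
#     sku = IotHubSkuInfo(name="S1", capacity=2)
#     assert sku.tier is None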
class IpFilterRule(msrest.serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:param filter_name: Required. The name of the IP filter rule.
:type filter_name: str
:param action: Required. The desired action for requests captured by this rule. Possible values
include: "Accept", "Reject".
:type action: str or ~azure.mgmt.iothub.v2019_11_04.models.IpFilterActionType
:param ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:type ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'action': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IpFilterRule, self).__init__(**kwargs)
self.filter_name = kwargs['filter_name']
self.action = kwargs['action']
self.ip_mask = kwargs['ip_mask']
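# Illustrative usage sketch (editor-added comment, not part of the generated code):
# all three fields are required; the rule name and CIDR range are placeholders.
#
#     rule = IpFilterRule(filter_name="allow-office", action="Accept", ip_mask="203.0.113.0/24")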
class JobResponse(msrest.serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Possible values include: "unknown", "export", "import",
"backup", "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration",
"rebootDevice", "factoryResetDevice", "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2019_11_04.models.JobType
:ivar status: The status of the job. Possible values include: "unknown", "enqueued", "running",
"completed", "failed", "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2019_11_04.models.JobStatus
    :ivar failure_reason: If status == failed, this string contains the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
'job_id': {'readonly': True},
'start_time_utc': {'readonly': True},
'end_time_utc': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'failure_reason': {'readonly': True},
'status_message': {'readonly': True},
'parent_job_id': {'readonly': True},
}
_attribute_map = {
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time_utc': {'key': 'startTimeUtc', 'type': 'rfc-1123'},
'end_time_utc': {'key': 'endTimeUtc', 'type': 'rfc-1123'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'failure_reason': {'key': 'failureReason', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'parent_job_id': {'key': 'parentJobId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobResponse, self).__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(msrest.serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of JobResponse objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobResponseListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class MatchedRoute(msrest.serialization.Model):
"""Routes that matched.
:param properties: Properties of routes that matched.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RouteProperties'},
}
def __init__(
self,
**kwargs
):
super(MatchedRoute, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class MessagingEndpointProperties(msrest.serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
:param lock_duration_as_iso8601: The lock duration. See: https://docs.microsoft.com/azure/iot-
hub/iot-hub-devguide-file-upload.
:type lock_duration_as_iso8601: ~datetime.timedelta
:param ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-
upload.
:type ttl_as_iso8601: ~datetime.timedelta
:param max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:type max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(MessagingEndpointProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None)
self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
class Name(msrest.serialization.Model):
"""Name of Iot Hub type.
:param value: IotHub type.
:type value: str
:param localized_value: Localized value of name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Name, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.localized_value = kwargs.get('localized_value', None)
class Operation(msrest.serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.iothub.v2019_11_04.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = kwargs.get('display', None)
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(msrest.serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the IoT hub to check.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationInputs, self).__init__(**kwargs)
self.name = kwargs['name']
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RegistryStatistics(msrest.serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: long
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: long
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: long
"""
_validation = {
'total_device_count': {'readonly': True},
'enabled_device_count': {'readonly': True},
'disabled_device_count': {'readonly': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(msrest.serialization.Model):
"""Compilation error when evaluating route.
:param message: Route error message.
:type message: str
:param severity: Severity of the route error. Possible values include: "error", "warning".
:type severity: str or ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorSeverity
:param location: Location where the route error happened.
:type location: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorRange
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'location': {'key': 'location', 'type': 'RouteErrorRange'},
}
def __init__(
self,
**kwargs
):
super(RouteCompilationError, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.severity = kwargs.get('severity', None)
self.location = kwargs.get('location', None)
class RouteErrorPosition(msrest.serialization.Model):
"""Position where the route error happened.
:param line: Line where the route error happened.
:type line: int
:param column: Column where the route error happened.
:type column: int
"""
_attribute_map = {
'line': {'key': 'line', 'type': 'int'},
'column': {'key': 'column', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RouteErrorPosition, self).__init__(**kwargs)
self.line = kwargs.get('line', None)
self.column = kwargs.get('column', None)
class RouteErrorRange(msrest.serialization.Model):
"""Range of route errors.
:param start: Start where the route error happened.
:type start: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
:param end: End where the route error happened.
:type end: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'RouteErrorPosition'},
'end': {'key': 'end', 'type': 'RouteErrorPosition'},
}
def __init__(
self,
**kwargs
):
super(RouteErrorRange, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
class RouteProperties(msrest.serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the route. The name can only include alphanumeric
     characters, periods, underscores, and hyphens; it has a maximum length of 64 characters and
     must be unique.
:type name: str
:param source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:type condition: str
:param endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether a route is enabled.
:type is_enabled: bool
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RouteProperties, self).__init__(**kwargs)
self.name = kwargs['name']
self.source = kwargs['source']
self.condition = kwargs.get('condition', None)
self.endpoint_names = kwargs['endpoint_names']
self.is_enabled = kwargs['is_enabled']
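# Illustrative usage sketch (editor-added comment, not part of the generated code):
# a route that forwards matching telemetry to a single custom endpoint. The route
# name, condition and endpoint name are placeholders.
#
#     route = RouteProperties(
#         name="telemetry-to-storage",
#         source="DeviceMessages",
#         condition="$body.temperature > 0",
#         endpoint_names=["storage-endpoint"],
#         is_enabled=True,
#     )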
class RoutingEndpoints(msrest.serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:param service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:type service_bus_queues:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusQueueEndpointProperties]
:param service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:type service_bus_topics:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusTopicEndpointProperties]
:param event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:type event_hubs: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingEventHubProperties]
:param storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:type storage_containers:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'},
'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'},
'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'},
'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'},
}
def __init__(
self,
**kwargs
):
super(RoutingEndpoints, self).__init__(**kwargs)
self.service_bus_queues = kwargs.get('service_bus_queues', None)
self.service_bus_topics = kwargs.get('service_bus_topics', None)
self.event_hubs = kwargs.get('event_hubs', None)
self.storage_containers = kwargs.get('storage_containers', None)
class RoutingEventHubProperties(msrest.serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the event hub endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:type name: str
:param subscription_id: The subscription identifier of the event hub endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the event hub endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingEventHubProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
class RoutingMessage(msrest.serialization.Model):
"""Routing message.
:param body: Body of routing message.
:type body: str
:param app_properties: App properties.
:type app_properties: dict[str, str]
:param system_properties: System properties.
:type system_properties: dict[str, str]
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'app_properties': {'key': 'appProperties', 'type': '{str}'},
'system_properties': {'key': 'systemProperties', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(RoutingMessage, self).__init__(**kwargs)
self.body = kwargs.get('body', None)
self.app_properties = kwargs.get('app_properties', None)
self.system_properties = kwargs.get('system_properties', None)
class RoutingProperties(msrest.serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:param endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:type endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints
:param routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:type routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties]
:param fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:type fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties
:param enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid.
:type enrichments: list[~azure.mgmt.iothub.v2019_11_04.models.EnrichmentProperties]
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': 'RoutingEndpoints'},
'routes': {'key': 'routes', 'type': '[RouteProperties]'},
'fallback_route': {'key': 'fallbackRoute', 'type': 'FallbackRouteProperties'},
'enrichments': {'key': 'enrichments', 'type': '[EnrichmentProperties]'},
}
def __init__(
self,
**kwargs
):
super(RoutingProperties, self).__init__(**kwargs)
self.endpoints = kwargs.get('endpoints', None)
self.routes = kwargs.get('routes', None)
self.fallback_route = kwargs.get('fallback_route', None)
self.enrichments = kwargs.get('enrichments', None)
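# Illustrative usage sketch (editor-added comment, not part of the generated code):
# RoutingProperties ties the pieces above together; endpoints, routes, the
# fallback route and enrichments are all optional and default to None. The
# variables below refer to the RouteProperties and FallbackRouteProperties
# sketches shown earlier and are placeholders.
#
#     routing = RoutingProperties(
#         endpoints=RoutingEndpoints(event_hubs=[]),
#         routes=[route],
#         fallback_route=fallback,
#     )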
class RoutingServiceBusQueueEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the service bus queue endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:type name: str
:param subscription_id: The subscription identifier of the service bus queue endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the service bus queue endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingServiceBusQueueEndpointProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
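# Illustrative addition (not part of the generated code): a sketch of a Service Bus queue
# endpoint definition. The connection string, endpoint name, and resource group are
# placeholders; per the docstring above, the endpoint name need not match the queue name.
def _example_service_bus_queue_endpoint():
    return RoutingServiceBusQueueEndpointProperties(
        connection_string="<service-bus-queue-connection-string>",
        name="telemetry-queue-endpoint",
        resource_group="example-rg",
    )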
class RoutingServiceBusTopicEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the service bus topic endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:type name: str
:param subscription_id: The subscription identifier of the service bus topic endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the service bus topic endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingServiceBusTopicEndpointProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
class RoutingStorageContainerProperties(msrest.serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the storage account.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:type name: str
:param subscription_id: The subscription identifier of the storage account.
:type subscription_id: str
:param resource_group: The name of the resource group of the storage account.
:type resource_group: str
:param container_name: Required. The name of storage container in the storage account.
:type container_name: str
:param file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:type file_name_format: str
:param batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:type batch_frequency_in_seconds: int
:param max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:type max_chunk_size_in_bytes: int
:param encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:type encoding: str or
~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'container_name': {'required': True},
'batch_frequency_in_seconds': {'maximum': 720, 'minimum': 60},
'max_chunk_size_in_bytes': {'maximum': 524288000, 'minimum': 10485760},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'file_name_format': {'key': 'fileNameFormat', 'type': 'str'},
'batch_frequency_in_seconds': {'key': 'batchFrequencyInSeconds', 'type': 'int'},
'max_chunk_size_in_bytes': {'key': 'maxChunkSizeInBytes', 'type': 'int'},
'encoding': {'key': 'encoding', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingStorageContainerProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
self.container_name = kwargs['container_name']
self.file_name_format = kwargs.get('file_name_format', None)
self.batch_frequency_in_seconds = kwargs.get('batch_frequency_in_seconds', None)
self.max_chunk_size_in_bytes = kwargs.get('max_chunk_size_in_bytes', None)
self.encoding = kwargs.get('encoding', None)
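# Illustrative addition (not part of the generated code): a sketch of a storage container
# endpoint using the defaults described in the docstring above (300-second batches,
# 300 MB chunks, Avro encoding). All values are placeholders.
def _example_storage_container_endpoint():
    return RoutingStorageContainerProperties(
        connection_string="<storage-account-connection-string>",
        name="cold-storage-endpoint",
        container_name="telemetry",
        file_name_format="{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}",
        batch_frequency_in_seconds=300,
        max_chunk_size_in_bytes=314572800,
        encoding="Avro",
    )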
class RoutingTwin(msrest.serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:param tags: A set of tags. Twin Tags.
:type tags: object
:param properties:
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwinProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'properties': {'key': 'properties', 'type': 'RoutingTwinProperties'},
}
def __init__(
self,
**kwargs
):
super(RoutingTwin, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.properties = kwargs.get('properties', None)
class RoutingTwinProperties(msrest.serialization.Model):
"""RoutingTwinProperties.
:param desired: Twin desired properties.
:type desired: object
:param reported: Twin reported properties.
:type reported: object
"""
_attribute_map = {
'desired': {'key': 'desired', 'type': 'object'},
'reported': {'key': 'reported', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(RoutingTwinProperties, self).__init__(**kwargs)
self.desired = kwargs.get('desired', None)
self.reported = kwargs.get('reported', None)
class SharedAccessSignatureAuthorizationRule(msrest.serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. The name of the shared access policy.
:type key_name: str
:param primary_key: The primary key.
:type primary_key: str
:param secondary_key: The secondary key.
:type secondary_key: str
:param rights: Required. The permissions assigned to the shared access policy. Possible values
include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:type rights: str or ~azure.mgmt.iothub.v2019_11_04.models.AccessRights
"""
_validation = {
'key_name': {'required': True},
'rights': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'rights': {'key': 'rights', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.primary_key = kwargs.get('primary_key', None)
self.secondary_key = kwargs.get('secondary_key', None)
self.rights = kwargs['rights']
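# Illustrative addition (not part of the generated code): a sketch of a shared access
# policy granting registry-read and service-connect rights. The policy name is a
# placeholder and the keys are deliberately left unset in this sketch.
def _example_access_policy():
    return SharedAccessSignatureAuthorizationRule(
        key_name="telemetryReader",
        rights="RegistryRead, ServiceConnect",
    )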
class SharedAccessSignatureAuthorizationRuleListResult(msrest.serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of shared access policies.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedAccessSignatureAuthorizationRuleListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class StorageEndpointProperties(msrest.serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
:param sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-
upload#file-upload-notification-configuration-options.
:type sas_ttl_as_iso8601: ~datetime.timedelta
:param connection_string: Required. The connection string for the Azure Storage account to
which files are uploaded.
:type connection_string: str
:param container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:type container_name: str
"""
_validation = {
'connection_string': {'required': True},
'container_name': {'required': True},
}
_attribute_map = {
'sas_ttl_as_iso8601': {'key': 'sasTtlAsIso8601', 'type': 'duration'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageEndpointProperties, self).__init__(**kwargs)
self.sas_ttl_as_iso8601 = kwargs.get('sas_ttl_as_iso8601', None)
self.connection_string = kwargs['connection_string']
self.container_name = kwargs['container_name']
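# Illustrative addition (not part of the generated code): a sketch of the file-upload
# storage endpoint. The one-hour SAS TTL is an arbitrary illustrative choice.
def _example_file_upload_storage_endpoint():
    import datetime

    return StorageEndpointProperties(
        sas_ttl_as_iso8601=datetime.timedelta(hours=1),
        connection_string="<storage-account-connection-string>",
        container_name="fileuploads",
    )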
class TagsResource(msrest.serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(TagsResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class TestAllRoutesInput(msrest.serialization.Model):
"""Input for testing all routes.
:param routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type routing_source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param message: Routing message.
:type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:param twin: Routing Twin Reference.
:type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
_attribute_map = {
'routing_source': {'key': 'routingSource', 'type': 'str'},
'message': {'key': 'message', 'type': 'RoutingMessage'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
**kwargs
):
super(TestAllRoutesInput, self).__init__(**kwargs)
self.routing_source = kwargs.get('routing_source', None)
self.message = kwargs.get('message', None)
self.twin = kwargs.get('twin', None)
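# Illustrative addition (not part of the generated code): a sketch of a test-all-routes
# request built from the models above. The routing source and message body are
# illustrative placeholders.
def _example_test_all_routes_input():
    return TestAllRoutesInput(
        routing_source="DeviceMessages",
        message=RoutingMessage(body='{"temperature": 50}'),
    )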
class TestAllRoutesResult(msrest.serialization.Model):
"""Result of testing all routes.
:param routes: JSON-serialized array of matched routes.
:type routes: list[~azure.mgmt.iothub.v2019_11_04.models.MatchedRoute]
"""
_attribute_map = {
'routes': {'key': 'routes', 'type': '[MatchedRoute]'},
}
def __init__(
self,
**kwargs
):
super(TestAllRoutesResult, self).__init__(**kwargs)
self.routes = kwargs.get('routes', None)
class TestRouteInput(msrest.serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:param message: Routing message.
:type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:param route: Required. Route properties.
:type route: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
:param twin: Routing Twin Reference.
:type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
_validation = {
'route': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'RoutingMessage'},
'route': {'key': 'route', 'type': 'RouteProperties'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
**kwargs
):
super(TestRouteInput, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.route = kwargs['route']
self.twin = kwargs.get('twin', None)
class TestRouteResult(msrest.serialization.Model):
"""Result of testing one route.
:param result: Result of testing route. Possible values include: "undefined", "false", "true".
:type result: str or ~azure.mgmt.iothub.v2019_11_04.models.TestResultStatus
:param details: Detailed result of testing route.
:type details: ~azure.mgmt.iothub.v2019_11_04.models.TestRouteResultDetails
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'details': {'key': 'details', 'type': 'TestRouteResultDetails'},
}
def __init__(
self,
**kwargs
):
super(TestRouteResult, self).__init__(**kwargs)
self.result = kwargs.get('result', None)
self.details = kwargs.get('details', None)
class TestRouteResultDetails(msrest.serialization.Model):
"""Detailed result of testing a route.
:param compilation_errors: JSON-serialized list of route compilation errors.
:type compilation_errors: list[~azure.mgmt.iothub.v2019_11_04.models.RouteCompilationError]
"""
_attribute_map = {
'compilation_errors': {'key': 'compilationErrors', 'type': '[RouteCompilationError]'},
}
def __init__(
self,
**kwargs
):
super(TestRouteResultDetails, self).__init__(**kwargs)
self.compilation_errors = kwargs.get('compilation_errors', None)
class UserSubscriptionQuota(msrest.serialization.Model):
"""User subscription quota response.
:param id: IotHub type id.
:type id: str
:param type: Response type.
:type type: str
:param unit: Unit of IotHub type.
:type unit: str
:param current_value: Current number of IotHub type.
:type current_value: int
:param limit: Numerical limit on IotHub type.
:type limit: int
:param name: IotHub type.
:type name: ~azure.mgmt.iothub.v2019_11_04.models.Name
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'Name'},
}
def __init__(
self,
**kwargs
):
super(UserSubscriptionQuota, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.type = kwargs.get('type', None)
self.unit = kwargs.get('unit', None)
self.current_value = kwargs.get('current_value', None)
self.limit = kwargs.get('limit', None)
self.name = kwargs.get('name', None)
class UserSubscriptionQuotaListResult(msrest.serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:param value:
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UserSubscriptionQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserSubscriptionQuotaListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CertificateBodyDescription(msrest.serialization.Model):
"""The JSON-serialized X509 Certificate.
:param certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:type certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateBodyDescription, self).__init__(**kwargs)
self.certificate = kwargs.get('certificate', None)
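# Illustrative addition (not part of the generated code): a sketch of the body used when
# uploading a CA certificate. The PEM content is a truncated placeholder, not real
# certificate material.
def _example_certificate_body():
    return CertificateBodyDescription(
        certificate="-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
    )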
class CertificateDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The description of an X509 CA Certificate.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateDescription, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(msrest.serialization.Model):
"""The JSON-serialized array of Certificate objects.
:param value: The array of Certificate objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.CertificateDescription]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateDescription]'},
}
def __init__(
self,
**kwargs
):
super(CertificateListDescription, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class CertificateProperties(msrest.serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether the certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's creation date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:param certificate: The certificate content.
:type certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateProperties, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = kwargs.get('certificate', None)
class CertificatePropertiesWithNonce(msrest.serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether the certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's creation date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
'verification_code': {'readonly': True},
'certificate': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'verification_code': {'key': 'verificationCode', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificatePropertiesWithNonce, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(msrest.serialization.Model):
"""The JSON-serialized leaf certificate.
:param certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:type certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateVerificationDescription, self).__init__(**kwargs)
self.certificate = kwargs.get('certificate', None)
class CertificateWithNonceDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificatePropertiesWithNonce'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateWithNonceDescription, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(msrest.serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:param max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-
device-messages.
:type max_delivery_count: int
:param default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-
to-device-messages.
:type default_ttl_as_iso8601: ~datetime.timedelta
:param feedback: The properties of the feedback queue for cloud-to-device messages.
:type feedback: ~azure.mgmt.iothub.v2019_11_04.models.FeedbackProperties
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
'default_ttl_as_iso8601': {'key': 'defaultTtlAsIso8601', 'type': 'duration'},
'feedback': {'key': 'feedback', 'type': 'FeedbackProperties'},
}
def __init__(
self,
**kwargs
):
super(CloudToDeviceProperties, self).__init__(**kwargs)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
self.default_ttl_as_iso8601 = kwargs.get('default_ttl_as_iso8601', None)
self.feedback = kwargs.get('feedback', None)
class EndpointHealthData(msrest.serialization.Model):
"""The health data for an endpoint.
:param endpoint_id: Id of the endpoint.
:type endpoint_id: str
:param health_status: Health statuses have the following meanings. The 'healthy' status shows
that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the
endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this
endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has
established an eventually consistent state of health. The 'dead' status shows that the
endpoint is not accepting messages after IoT Hub retried sending messages for the retry
period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Possible values include: "unknown", "healthy", "unhealthy",
"dead".
:type health_status: str or ~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthStatus
"""
_attribute_map = {
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EndpointHealthData, self).__init__(**kwargs)
self.endpoint_id = kwargs.get('endpoint_id', None)
self.health_status = kwargs.get('health_status', None)
class EndpointHealthDataListResult(msrest.serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: JSON-serialized array of Endpoint health data.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EndpointHealthData]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EndpointHealthDataListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class EnrichmentProperties(msrest.serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:param key: Required. The key or name for the enrichment property.
:type key: str
:param value: Required. The value for the enrichment property.
:type value: str
:param endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:type endpoint_names: list[str]
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
'endpoint_names': {'required': True, 'min_items': 1},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(EnrichmentProperties, self).__init__(**kwargs)
self.key = kwargs['key']
self.value = kwargs['value']
self.endpoint_names = kwargs['endpoint_names']
class ErrorDetails(msrest.serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
'code': {'readonly': True},
'http_status_code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupInfo(msrest.serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The tags.
:type properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubConsumerGroupInfo, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupsListResult(msrest.serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of consumer groups objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubConsumerGroupsListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class EventHubProperties(msrest.serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:param retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:type retention_time_in_days: long
:param partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-
messaging#device-to-cloud-messages.
:type partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
'partition_ids': {'readonly': True},
'path': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'},
'partition_count': {'key': 'partitionCount', 'type': 'int'},
'partition_ids': {'key': 'partitionIds', 'type': '[str]'},
'path': {'key': 'path', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubProperties, self).__init__(**kwargs)
self.retention_time_in_days = kwargs.get('retention_time_in_days', None)
self.partition_count = kwargs.get('partition_count', None)
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:param export_blob_container_uri: Required. The export blob container URI.
:type export_blob_container_uri: str
:param exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:type exclude_keys: bool
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = kwargs['export_blob_container_uri']
self.exclude_keys = kwargs['exclude_keys']
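# Illustrative addition (not part of the generated code): a sketch of an export-devices
# request. The container URI (normally a SAS URI for a blob container) is a placeholder.
def _example_export_devices_request():
    return ExportDevicesRequest(
        export_blob_container_uri="<blob-container-sas-uri>",
        exclude_keys=True,
    )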
class FailoverInput(msrest.serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:param failover_region: Required. Region the hub will be failed over to.
:type failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = kwargs['failover_region']
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:param name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:type name: str
:param source: Required. The source to which the routing rule is to be applied. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:type condition: str
:param endpoint_names: Required. The list of endpoints to which the messages that satisfy the
condition are routed to. Currently only 1 endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether the fallback route is enabled.
:type is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.source = kwargs['source']
self.condition = kwargs.get('condition', None)
self.endpoint_names = kwargs['endpoint_names']
self.is_enabled = kwargs['is_enabled']
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:param lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:type lock_duration_as_iso8601: ~datetime.timedelta
:param ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-
messaging#cloud-to-device-messages.
:type ttl_as_iso8601: ~datetime.timedelta
:param max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-
to-device-messages.
:type max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None)
self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
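# Illustrative addition (not part of the generated code): a sketch of cloud-to-device
# settings with a feedback queue. The TTL, lock-duration, and delivery-count values are
# arbitrary illustrative choices within the documented ranges and types.
def _example_cloud_to_device_properties():
    import datetime

    feedback = FeedbackProperties(
        lock_duration_as_iso8601=datetime.timedelta(seconds=60),
        ttl_as_iso8601=datetime.timedelta(hours=1),
        max_delivery_count=10,
    )
    return CloudToDeviceProperties(
        max_delivery_count=10,
        default_ttl_as_iso8601=datetime.timedelta(hours=48),
        feedback=feedback,
    )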
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:param input_blob_container_uri: Required. The input blob container URI.
:type input_blob_container_uri: str
:param output_blob_container_uri: Required. The output blob container URI.
:type output_blob_container_uri: str
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = kwargs['input_blob_container_uri']
self.output_blob_container_uri = kwargs['output_blob_container_uri']
class IotHubCapacity(msrest.serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: long
:ivar maximum: The maximum number of units.
:vartype maximum: long
:ivar default: The default number of units.
:vartype default: long
:ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubScaleType
"""
_validation = {
'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'default': {'key': 'default', 'type': 'long'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubCapacity, self).__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(msrest.serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: Required. The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: Required. The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:type etag: str
:param properties: IotHub properties.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.IotHubProperties
:param sku: Required. IotHub SKU info.
:type sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IotHubProperties'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
}
def __init__(
self,
**kwargs
):
super(IotHubDescription, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.properties = kwargs.get('properties', None)
self.sku = kwargs['sku']
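# Illustrative addition (not part of the generated code): a sketch of a minimal hub
# description suitable for a create-or-update call. The region, SKU name, and capacity
# are illustrative; "S1" is one of the SKU names documented in IotHubSkuInfo below.
def _example_iot_hub_description():
    return IotHubDescription(
        location="westus2",
        sku=IotHubSkuInfo(name="S1", capacity=1),
        properties=IotHubProperties(enable_file_upload_notifications=False),
    )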
class IotHubDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of IotHubDescription objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubDescriptionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class IotHubLocationDescription(msrest.serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:param location: The name of the Azure region.
:type location: str
:param role: The role of the region, can be either primary or secondary. The primary region is
where the IoT hub is currently provisioned. The secondary region is the Azure disaster recovery
(DR) paired region and also the region where the IoT hub can failover to. Possible values
include: "primary", "secondary".
:type role: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubReplicaRoleType
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubLocationDescription, self).__init__(**kwargs)
self.location = kwargs.get('location', None)
self.role = kwargs.get('role', None)
class IotHubNameAvailabilityInfo(msrest.serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Possible values include: "Invalid",
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubNameUnavailabilityReason
:param message: The detailed reason message.
:type message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubNameAvailabilityInfo, self).__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = kwargs.get('message', None)
class IotHubProperties(msrest.serialization.Model):
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:param authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:type authorization_policies:
list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:param ip_filter_rules: The IP filter rules.
:type ip_filter_rules: list[~azure.mgmt.iothub.v2019_11_04.models.IpFilterRule]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
:param event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible
key for this dictionary is events. This key has to be present in the dictionary while making
create or update calls for the IoT hub.
:type event_hub_endpoints: dict[str, ~azure.mgmt.iothub.v2019_11_04.models.EventHubProperties]
:param routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:type routing: ~azure.mgmt.iothub.v2019_11_04.models.RoutingProperties
:param storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True
causes an error to be thrown.
:type storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.StorageEndpointProperties]
:param messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:type messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_11_04.models.MessagingEndpointProperties]
:param enable_file_upload_notifications: If True, file upload notifications are enabled.
:type enable_file_upload_notifications: bool
:param cloud_to_device: The IoT hub cloud-to-device messaging properties.
:type cloud_to_device: ~azure.mgmt.iothub.v2019_11_04.models.CloudToDeviceProperties
:param comments: IoT hub comments.
:type comments: str
:param features: The capabilities and features enabled for the IoT hub. Possible values
include: "None", "DeviceManagement".
:type features: str or ~azure.mgmt.iothub.v2019_11_04.models.Capabilities
:ivar locations: Primary and secondary location for the IoT hub.
:vartype locations: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubLocationDescription]
"""
_validation = {
'provisioning_state': {'readonly': True},
'state': {'readonly': True},
'host_name': {'readonly': True},
'locations': {'readonly': True},
}
_attribute_map = {
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'},
'routing': {'key': 'routing', 'type': 'RoutingProperties'},
'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'},
'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'},
'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'},
'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'},
'comments': {'key': 'comments', 'type': 'str'},
'features': {'key': 'features', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[IotHubLocationDescription]'},
}
def __init__(
self,
**kwargs
):
super(IotHubProperties, self).__init__(**kwargs)
self.authorization_policies = kwargs.get('authorization_policies', None)
self.ip_filter_rules = kwargs.get('ip_filter_rules', None)
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = kwargs.get('event_hub_endpoints', None)
self.routing = kwargs.get('routing', None)
self.storage_endpoints = kwargs.get('storage_endpoints', None)
self.messaging_endpoints = kwargs.get('messaging_endpoints', None)
self.enable_file_upload_notifications = kwargs.get('enable_file_upload_notifications', None)
self.cloud_to_device = kwargs.get('cloud_to_device', None)
self.comments = kwargs.get('comments', None)
self.features = kwargs.get('features', None)
self.locations = None
class IotHubQuotaMetricInfo(msrest.serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: long
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: long
"""
_validation = {
'name': {'readonly': True},
'current_value': {'readonly': True},
'max_value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'max_value': {'key': 'maxValue', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(IotHubQuotaMetricInfo, self).__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of quota metrics objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubQuotaMetricInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubQuotaMetricInfoListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class IotHubSkuDescription(msrest.serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:param sku: Required. The SKU information for the IoT hub.
:type sku: ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuInfo
:param capacity: Required. IotHub capacity.
:type capacity: ~azure.mgmt.iothub.v2019_11_04.models.IotHubCapacity
"""
_validation = {
'resource_type': {'readonly': True},
'sku': {'required': True},
'capacity': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'capacity': {'key': 'capacity', 'type': 'IotHubCapacity'},
}
def __init__(
self,
**kwargs
):
super(IotHubSkuDescription, self).__init__(**kwargs)
self.resource_type = None
self.sku = kwargs['sku']
self.capacity = kwargs['capacity']
class IotHubSkuDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of IotHubSkuDescription.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubSkuDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHubSkuDescriptionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class IotHubSkuInfo(msrest.serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:type name: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Possible values include: "Free", "Standard",
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubSkuTier
:param capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:type capacity: long
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(IotHubSkuInfo, self).__init__(**kwargs)
self.name = kwargs['name']
self.tier = None
self.capacity = kwargs.get('capacity', None)
class IpFilterRule(msrest.serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:param filter_name: Required. The name of the IP filter rule.
:type filter_name: str
:param action: Required. The desired action for requests captured by this rule. Possible values
include: "Accept", "Reject".
:type action: str or ~azure.mgmt.iothub.v2019_11_04.models.IpFilterActionType
:param ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:type ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'action': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IpFilterRule, self).__init__(**kwargs)
self.filter_name = kwargs['filter_name']
self.action = kwargs['action']
self.ip_mask = kwargs['ip_mask']
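# Illustrative addition (not part of the generated code): a sketch of an IP filter rule
# accepting a single CIDR range. The range shown is a documentation-only placeholder.
def _example_ip_filter_rule():
    return IpFilterRule(
        filter_name="allow-office-range",
        action="Accept",
        ip_mask="192.0.2.0/24",
    )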
class JobResponse(msrest.serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Possible values include: "unknown", "export", "import",
"backup", "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration",
"rebootDevice", "factoryResetDevice", "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2019_11_04.models.JobType
:ivar status: The status of the job. Possible values include: "unknown", "enqueued", "running",
"completed", "failed", "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2019_11_04.models.JobStatus
    :ivar failure_reason: If status == failed, this string contains the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
'job_id': {'readonly': True},
'start_time_utc': {'readonly': True},
'end_time_utc': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'failure_reason': {'readonly': True},
'status_message': {'readonly': True},
'parent_job_id': {'readonly': True},
}
_attribute_map = {
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time_utc': {'key': 'startTimeUtc', 'type': 'rfc-1123'},
'end_time_utc': {'key': 'endTimeUtc', 'type': 'rfc-1123'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'failure_reason': {'key': 'failureReason', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'parent_job_id': {'key': 'parentJobId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobResponse, self).__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(msrest.serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The array of JobResponse objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobResponseListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class MatchedRoute(msrest.serialization.Model):
"""Routes that matched.
:param properties: Properties of routes that matched.
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RouteProperties'},
}
def __init__(
self,
**kwargs
):
super(MatchedRoute, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class MessagingEndpointProperties(msrest.serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
    :param lock_duration_as_iso8601: The lock duration. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
    :type lock_duration_as_iso8601: ~datetime.timedelta
    :param ttl_as_iso8601: The period of time for which a message is available to consume before
     it is expired by the IoT hub. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:type ttl_as_iso8601: ~datetime.timedelta
:param max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:type max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(MessagingEndpointProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None)
self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
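# Hedged usage sketch (hand-written, not AutoRest output): the ISO 8601 duration
# fields above are declared with the msrest 'duration' type, which is expected to
# accept datetime.timedelta values. The durations and delivery count are sample values.
def _example_messaging_endpoint_properties():
    """Return sample file-upload notification queue settings."""
    import datetime
    return MessagingEndpointProperties(
        lock_duration_as_iso8601=datetime.timedelta(minutes=1),
        ttl_as_iso8601=datetime.timedelta(hours=1),
        max_delivery_count=10,  # _validation above allows 1-100
    )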
class Name(msrest.serialization.Model):
"""Name of Iot Hub type.
:param value: IotHub type.
:type value: str
:param localized_value: Localized value of name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Name, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.localized_value = kwargs.get('localized_value', None)
class Operation(msrest.serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.iothub.v2019_11_04.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = kwargs.get('display', None)
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(msrest.serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the IoT hub to check.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationInputs, self).__init__(**kwargs)
self.name = kwargs['name']
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2019_11_04.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RegistryStatistics(msrest.serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: long
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: long
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: long
"""
_validation = {
'total_device_count': {'readonly': True},
'enabled_device_count': {'readonly': True},
'disabled_device_count': {'readonly': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(msrest.serialization.Model):
"""Compilation error when evaluating route.
:param message: Route error message.
:type message: str
:param severity: Severity of the route error. Possible values include: "error", "warning".
:type severity: str or ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorSeverity
:param location: Location where the route error happened.
:type location: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorRange
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'location': {'key': 'location', 'type': 'RouteErrorRange'},
}
def __init__(
self,
**kwargs
):
super(RouteCompilationError, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.severity = kwargs.get('severity', None)
self.location = kwargs.get('location', None)
class RouteErrorPosition(msrest.serialization.Model):
"""Position where the route error happened.
:param line: Line where the route error happened.
:type line: int
:param column: Column where the route error happened.
:type column: int
"""
_attribute_map = {
'line': {'key': 'line', 'type': 'int'},
'column': {'key': 'column', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RouteErrorPosition, self).__init__(**kwargs)
self.line = kwargs.get('line', None)
self.column = kwargs.get('column', None)
class RouteErrorRange(msrest.serialization.Model):
"""Range of route errors.
:param start: Start where the route error happened.
:type start: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
:param end: End where the route error happened.
:type end: ~azure.mgmt.iothub.v2019_11_04.models.RouteErrorPosition
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'RouteErrorPosition'},
'end': {'key': 'end', 'type': 'RouteErrorPosition'},
}
def __init__(
self,
**kwargs
):
super(RouteErrorRange, self).__init__(**kwargs)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
class RouteProperties(msrest.serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
    :param name: Required. The name of the route. The name can only include alphanumeric
     characters, periods, underscores, and hyphens; it has a maximum length of 64 characters and
     must be unique.
:type name: str
:param source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:type condition: str
:param endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether a route is enabled.
:type is_enabled: bool
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RouteProperties, self).__init__(**kwargs)
self.name = kwargs['name']
self.source = kwargs['source']
self.condition = kwargs.get('condition', None)
self.endpoint_names = kwargs['endpoint_names']
self.is_enabled = kwargs['is_enabled']
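# Hedged usage sketch (hand-written, not AutoRest output): a RouteProperties
# instance that sends all device messages to a single custom endpoint. The route
# and endpoint names are hypothetical; exactly one endpoint name is allowed.
def _example_route_properties():
    """Return a sample route forwarding device telemetry to a custom endpoint."""
    return RouteProperties(
        name="telemetry-to-eventhub",
        source="DeviceMessages",
        condition="true",              # optional; an omitted condition also evaluates to true
        endpoint_names=["telemetry"],  # must match a configured custom endpoint name
        is_enabled=True,
    )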
class RoutingEndpoints(msrest.serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:param service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:type service_bus_queues:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusQueueEndpointProperties]
:param service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:type service_bus_topics:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingServiceBusTopicEndpointProperties]
:param event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:type event_hubs: list[~azure.mgmt.iothub.v2019_11_04.models.RoutingEventHubProperties]
:param storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:type storage_containers:
list[~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'},
'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'},
'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'},
'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'},
}
def __init__(
self,
**kwargs
):
super(RoutingEndpoints, self).__init__(**kwargs)
self.service_bus_queues = kwargs.get('service_bus_queues', None)
self.service_bus_topics = kwargs.get('service_bus_topics', None)
self.event_hubs = kwargs.get('event_hubs', None)
self.storage_containers = kwargs.get('storage_containers', None)
class RoutingEventHubProperties(msrest.serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the event hub endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:type name: str
:param subscription_id: The subscription identifier of the event hub endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the event hub endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingEventHubProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
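# Hedged usage sketch (hand-written, not AutoRest output): an Event Hub routing
# endpoint. The connection string, endpoint name, subscription id, and resource
# group are placeholders, not working values.
def _example_routing_event_hub_properties():
    """Return a sample Event Hub endpoint named 'telemetry'."""
    return RoutingEventHubProperties(
        connection_string="Endpoint=sb://example.servicebus.windows.net/;SharedAccessKeyName=send;SharedAccessKey=<placeholder>",
        name="telemetry",  # must avoid the reserved names: events, fileNotifications, $default
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group="example-rg",
    )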
class RoutingMessage(msrest.serialization.Model):
"""Routing message.
:param body: Body of routing message.
:type body: str
:param app_properties: App properties.
:type app_properties: dict[str, str]
:param system_properties: System properties.
:type system_properties: dict[str, str]
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'app_properties': {'key': 'appProperties', 'type': '{str}'},
'system_properties': {'key': 'systemProperties', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(RoutingMessage, self).__init__(**kwargs)
self.body = kwargs.get('body', None)
self.app_properties = kwargs.get('app_properties', None)
self.system_properties = kwargs.get('system_properties', None)
class RoutingProperties(msrest.serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:param endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:type endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints
:param routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:type routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties]
:param fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:type fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties
:param enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid.
:type enrichments: list[~azure.mgmt.iothub.v2019_11_04.models.EnrichmentProperties]
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': 'RoutingEndpoints'},
'routes': {'key': 'routes', 'type': '[RouteProperties]'},
'fallback_route': {'key': 'fallbackRoute', 'type': 'FallbackRouteProperties'},
'enrichments': {'key': 'enrichments', 'type': '[EnrichmentProperties]'},
}
def __init__(
self,
**kwargs
):
super(RoutingProperties, self).__init__(**kwargs)
self.endpoints = kwargs.get('endpoints', None)
self.routes = kwargs.get('routes', None)
self.fallback_route = kwargs.get('fallback_route', None)
self.enrichments = kwargs.get('enrichments', None)
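# Hedged usage sketch (hand-written, not AutoRest output): composing
# RoutingProperties from the sample route and Event Hub endpoint helpers defined
# earlier in this sketch series. fallback_route and enrichments are optional and omitted.
def _example_routing_properties():
    """Return sample routing configuration with one custom endpoint and one route."""
    return RoutingProperties(
        endpoints=RoutingEndpoints(event_hubs=[_example_routing_event_hub_properties()]),
        routes=[_example_route_properties()],
    )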
class RoutingServiceBusQueueEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the service bus queue endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:type name: str
:param subscription_id: The subscription identifier of the service bus queue endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the service bus queue endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingServiceBusQueueEndpointProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
class RoutingServiceBusTopicEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the service bus topic endpoint.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:type name: str
:param subscription_id: The subscription identifier of the service bus topic endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the service bus topic endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingServiceBusTopicEndpointProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
class RoutingStorageContainerProperties(msrest.serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:param connection_string: Required. The connection string of the storage account.
:type connection_string: str
:param name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:type name: str
:param subscription_id: The subscription identifier of the storage account.
:type subscription_id: str
:param resource_group: The name of the resource group of the storage account.
:type resource_group: str
:param container_name: Required. The name of storage container in the storage account.
:type container_name: str
:param file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:type file_name_format: str
:param batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:type batch_frequency_in_seconds: int
:param max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:type max_chunk_size_in_bytes: int
:param encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:type encoding: str or
~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'container_name': {'required': True},
'batch_frequency_in_seconds': {'maximum': 720, 'minimum': 60},
'max_chunk_size_in_bytes': {'maximum': 524288000, 'minimum': 10485760},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'file_name_format': {'key': 'fileNameFormat', 'type': 'str'},
'batch_frequency_in_seconds': {'key': 'batchFrequencyInSeconds', 'type': 'int'},
'max_chunk_size_in_bytes': {'key': 'maxChunkSizeInBytes', 'type': 'int'},
'encoding': {'key': 'encoding', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoutingStorageContainerProperties, self).__init__(**kwargs)
self.connection_string = kwargs['connection_string']
self.name = kwargs['name']
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group = kwargs.get('resource_group', None)
self.container_name = kwargs['container_name']
self.file_name_format = kwargs.get('file_name_format', None)
self.batch_frequency_in_seconds = kwargs.get('batch_frequency_in_seconds', None)
self.max_chunk_size_in_bytes = kwargs.get('max_chunk_size_in_bytes', None)
self.encoding = kwargs.get('encoding', None)
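# Hedged usage sketch (hand-written, not AutoRest output): a storage-container
# routing endpoint with explicit batching and encoding settings. The account
# details are placeholders; the numeric values respect the _validation bounds above.
def _example_routing_storage_container_properties():
    """Return a sample storage container endpoint writing Avro blobs."""
    return RoutingStorageContainerProperties(
        connection_string="DefaultEndpointsProtocol=https;AccountName=example;AccountKey=<placeholder>",
        name="cold-storage",
        container_name="telemetry",
        file_name_format="{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}",
        batch_frequency_in_seconds=300,      # allowed range: 60-720 seconds
        max_chunk_size_in_bytes=314572800,   # allowed range: 10 MB - 500 MB
        encoding="Avro",
    )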
class RoutingTwin(msrest.serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:param tags: A set of tags. Twin Tags.
:type tags: object
:param properties:
:type properties: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwinProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'properties': {'key': 'properties', 'type': 'RoutingTwinProperties'},
}
def __init__(
self,
**kwargs
):
super(RoutingTwin, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.properties = kwargs.get('properties', None)
class RoutingTwinProperties(msrest.serialization.Model):
"""RoutingTwinProperties.
:param desired: Twin desired properties.
:type desired: object
    :param reported: Twin reported properties.
:type reported: object
"""
_attribute_map = {
'desired': {'key': 'desired', 'type': 'object'},
'reported': {'key': 'reported', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(RoutingTwinProperties, self).__init__(**kwargs)
self.desired = kwargs.get('desired', None)
self.reported = kwargs.get('reported', None)
class SharedAccessSignatureAuthorizationRule(msrest.serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. The name of the shared access policy.
:type key_name: str
:param primary_key: The primary key.
:type primary_key: str
:param secondary_key: The secondary key.
:type secondary_key: str
:param rights: Required. The permissions assigned to the shared access policy. Possible values
include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:type rights: str or ~azure.mgmt.iothub.v2019_11_04.models.AccessRights
"""
_validation = {
'key_name': {'required': True},
'rights': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'rights': {'key': 'rights', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs)
self.key_name = kwargs['key_name']
self.primary_key = kwargs.get('primary_key', None)
self.secondary_key = kwargs.get('secondary_key', None)
self.rights = kwargs['rights']
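# Hedged usage sketch (hand-written, not AutoRest output): a shared access policy
# granting service-connect rights. The key material shown is a placeholder.
def _example_shared_access_policy():
    """Return a sample 'service' policy with ServiceConnect rights."""
    return SharedAccessSignatureAuthorizationRule(
        key_name="service",
        primary_key="<placeholder-primary-key>",
        secondary_key="<placeholder-secondary-key>",
        rights="ServiceConnect",  # one of the documented AccessRights combinations
    )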
class SharedAccessSignatureAuthorizationRuleListResult(msrest.serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of shared access policies.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedAccessSignatureAuthorizationRuleListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class StorageEndpointProperties(msrest.serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
    :param sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
     file upload is valid. See:
     https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:type sas_ttl_as_iso8601: ~datetime.timedelta
:param connection_string: Required. The connection string for the Azure Storage account to
which files are uploaded.
:type connection_string: str
:param container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:type container_name: str
"""
_validation = {
'connection_string': {'required': True},
'container_name': {'required': True},
}
_attribute_map = {
'sas_ttl_as_iso8601': {'key': 'sasTtlAsIso8601', 'type': 'duration'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageEndpointProperties, self).__init__(**kwargs)
self.sas_ttl_as_iso8601 = kwargs.get('sas_ttl_as_iso8601', None)
self.connection_string = kwargs['connection_string']
self.container_name = kwargs['container_name']
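# Hedged usage sketch (hand-written, not AutoRest output): a file-upload storage
# endpoint keyed by '$default', which is the only key the IotHubProperties
# docstring earlier in this module says is supported. The account details are placeholders.
def _example_storage_endpoints():
    """Return the storage_endpoints mapping expected by IotHubProperties."""
    import datetime
    return {
        "$default": StorageEndpointProperties(
            sas_ttl_as_iso8601=datetime.timedelta(hours=1),
            connection_string="DefaultEndpointsProtocol=https;AccountName=example;AccountKey=<placeholder>",
            container_name="fileuploads",
        )
    }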
class TagsResource(msrest.serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(TagsResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class TestAllRoutesInput(msrest.serialization.Model):
"""Input for testing all routes.
:param routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type routing_source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param message: Routing message.
:type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:param twin: Routing Twin Reference.
:type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
_attribute_map = {
'routing_source': {'key': 'routingSource', 'type': 'str'},
'message': {'key': 'message', 'type': 'RoutingMessage'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
**kwargs
):
super(TestAllRoutesInput, self).__init__(**kwargs)
self.routing_source = kwargs.get('routing_source', None)
self.message = kwargs.get('message', None)
self.twin = kwargs.get('twin', None)
class TestAllRoutesResult(msrest.serialization.Model):
"""Result of testing all routes.
:param routes: JSON-serialized array of matched routes.
:type routes: list[~azure.mgmt.iothub.v2019_11_04.models.MatchedRoute]
"""
_attribute_map = {
'routes': {'key': 'routes', 'type': '[MatchedRoute]'},
}
def __init__(
self,
**kwargs
):
super(TestAllRoutesResult, self).__init__(**kwargs)
self.routes = kwargs.get('routes', None)
class TestRouteInput(msrest.serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:param message: Routing message.
:type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage
:param route: Required. Route properties.
:type route: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties
:param twin: Routing Twin Reference.
:type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin
"""
_validation = {
'route': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'RoutingMessage'},
'route': {'key': 'route', 'type': 'RouteProperties'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
**kwargs
):
super(TestRouteInput, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.route = kwargs['route']
self.twin = kwargs.get('twin', None)
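# Hedged usage sketch (hand-written, not AutoRest output): a TestRouteInput that
# checks a hypothetical message against the sample route helper defined earlier
# in this sketch series. The message payload and properties are placeholders.
def _example_test_route_input():
    """Return input for testing one route against a sample telemetry message."""
    return TestRouteInput(
        message=RoutingMessage(
            body='{"temperature": 21.5}',
            app_properties={"alert": "false"},
            system_properties={"contentType": "application/json"},
        ),
        route=_example_route_properties(),
    )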
class TestRouteResult(msrest.serialization.Model):
"""Result of testing one route.
:param result: Result of testing route. Possible values include: "undefined", "false", "true".
:type result: str or ~azure.mgmt.iothub.v2019_11_04.models.TestResultStatus
:param details: Detailed result of testing route.
:type details: ~azure.mgmt.iothub.v2019_11_04.models.TestRouteResultDetails
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'details': {'key': 'details', 'type': 'TestRouteResultDetails'},
}
def __init__(
self,
**kwargs
):
super(TestRouteResult, self).__init__(**kwargs)
self.result = kwargs.get('result', None)
self.details = kwargs.get('details', None)
class TestRouteResultDetails(msrest.serialization.Model):
"""Detailed result of testing a route.
:param compilation_errors: JSON-serialized list of route compilation errors.
:type compilation_errors: list[~azure.mgmt.iothub.v2019_11_04.models.RouteCompilationError]
"""
_attribute_map = {
'compilation_errors': {'key': 'compilationErrors', 'type': '[RouteCompilationError]'},
}
def __init__(
self,
**kwargs
):
super(TestRouteResultDetails, self).__init__(**kwargs)
self.compilation_errors = kwargs.get('compilation_errors', None)
class UserSubscriptionQuota(msrest.serialization.Model):
"""User subscription quota response.
:param id: IotHub type id.
:type id: str
:param type: Response type.
:type type: str
:param unit: Unit of IotHub type.
:type unit: str
:param current_value: Current number of IotHub type.
:type current_value: int
:param limit: Numerical limit on IotHub type.
:type limit: int
:param name: IotHub type.
:type name: ~azure.mgmt.iothub.v2019_11_04.models.Name
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'Name'},
}
def __init__(
self,
**kwargs
):
super(UserSubscriptionQuota, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.type = kwargs.get('type', None)
self.unit = kwargs.get('unit', None)
self.current_value = kwargs.get('current_value', None)
self.limit = kwargs.get('limit', None)
self.name = kwargs.get('name', None)
class UserSubscriptionQuotaListResult(msrest.serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:param value:
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UserSubscriptionQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserSubscriptionQuotaListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
        self.next_link = None
The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. :type name: str :param subscription_id: The subscription identifier of the event hub endpoint. :type subscription_id: str :param resource_group: The name of the resource group of the event hub endpoint. :type resource_group: str Routing message. :param body: Body of routing message. :type body: str :param app_properties: App properties. :type app_properties: dict[str, str] :param system_properties: System properties. :type system_properties: dict[str, str] The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging. :param endpoints: The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs. :type endpoints: ~azure.mgmt.iothub.v2019_11_04.models.RoutingEndpoints :param routes: The list of user-provided routing rules that the IoT hub uses to route messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and a maximum of 5 routing rules are allowed for free hubs. :type routes: list[~azure.mgmt.iothub.v2019_11_04.models.RouteProperties] :param fallback_route: The properties of the route that is used as a fall-back route when none of the conditions specified in the 'routes' section are met. This is an optional parameter. When this property is not set, the messages which do not meet any of the conditions specified in the 'routes' section get routed to the built-in eventhub endpoint. :type fallback_route: ~azure.mgmt.iothub.v2019_11_04.models.FallbackRouteProperties :param enrichments: The list of user-provided enrichments that the IoT hub applies to messages to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid. :type enrichments: list[~azure.mgmt.iothub.v2019_11_04.models.EnrichmentProperties] The properties related to service bus queue endpoint types. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the service bus queue endpoint. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual queue name. :type name: str :param subscription_id: The subscription identifier of the service bus queue endpoint. :type subscription_id: str :param resource_group: The name of the resource group of the service bus queue endpoint. :type resource_group: str The properties related to service bus topic endpoint types. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the service bus topic endpoint. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. 
The name need not be the same as the actual topic name. :type name: str :param subscription_id: The subscription identifier of the service bus topic endpoint. :type subscription_id: str :param resource_group: The name of the resource group of the service bus topic endpoint. :type resource_group: str The properties related to a storage container endpoint. All required parameters must be populated in order to send to Azure. :param connection_string: Required. The connection string of the storage account. :type connection_string: str :param name: Required. The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. :type name: str :param subscription_id: The subscription identifier of the storage account. :type subscription_id: str :param resource_group: The name of the resource group of the storage account. :type resource_group: str :param container_name: Required. The name of storage container in the storage account. :type container_name: str :param file_name_format: File name format for the blob. Default format is {iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be reordered. :type file_name_format: str :param batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds. :type batch_frequency_in_seconds: int :param max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB). :type max_chunk_size_in_bytes: int :param encoding: Encoding that is used to serialize messages to blobs. Supported values are 'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro", "AvroDeflate", "JSON". :type encoding: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingStorageContainerPropertiesEncoding Twin reference input parameter. This is an optional parameter. :param tags: A set of tags. Twin Tags. :type tags: object :param properties: :type properties: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwinProperties RoutingTwinProperties. :param desired: Twin desired properties. :type desired: object :param reported: Twin desired properties. :type reported: object The properties of an IoT hub shared access policy. All required parameters must be populated in order to send to Azure. :param key_name: Required. The name of the shared access policy. :type key_name: str :param primary_key: The primary key. :type primary_key: str :param secondary_key: The secondary key. :type secondary_key: str :param rights: Required. The permissions assigned to the shared access policy. Possible values include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead, RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect", "RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect". 
:type rights: str or ~azure.mgmt.iothub.v2019_11_04.models.AccessRights The list of shared access policies with a next link. Variables are only populated by the server, and will be ignored when sending a request. :param value: The list of shared access policies. :type value: list[~azure.mgmt.iothub.v2019_11_04.models.SharedAccessSignatureAuthorizationRule] :ivar next_link: The next link. :vartype next_link: str The properties of the Azure Storage endpoint for file upload. All required parameters must be populated in order to send to Azure. :param sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file- upload#file-upload-notification-configuration-options. :type sas_ttl_as_iso8601: ~datetime.timedelta :param connection_string: Required. The connection string for the Azure Storage account to which files are uploaded. :type connection_string: str :param container_name: Required. The name of the root container where you upload files. The container need not exist but should be creatable using the connectionString specified. :type container_name: str A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance. :param tags: A set of tags. Resource tags. :type tags: dict[str, str] Input for testing all routes. :param routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents". :type routing_source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource :param message: Routing message. :type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage :param twin: Routing Twin Reference. :type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin Result of testing all routes. :param routes: JSON-serialized array of matched routes. :type routes: list[~azure.mgmt.iothub.v2019_11_04.models.MatchedRoute] Input for testing route. All required parameters must be populated in order to send to Azure. :param message: Routing message. :type message: ~azure.mgmt.iothub.v2019_11_04.models.RoutingMessage :param route: Required. Route properties. :type route: ~azure.mgmt.iothub.v2019_11_04.models.RouteProperties :param twin: Routing Twin Reference. :type twin: ~azure.mgmt.iothub.v2019_11_04.models.RoutingTwin Result of testing one route. :param result: Result of testing route. Possible values include: "undefined", "false", "true". :type result: str or ~azure.mgmt.iothub.v2019_11_04.models.TestResultStatus :param details: Detailed result of testing route. :type details: ~azure.mgmt.iothub.v2019_11_04.models.TestRouteResultDetails Detailed result of testing a route. :param compilation_errors: JSON-serialized list of route compilation errors. :type compilation_errors: list[~azure.mgmt.iothub.v2019_11_04.models.RouteCompilationError] User subscription quota response. :param id: IotHub type id. :type id: str :param type: Response type. :type type: str :param unit: Unit of IotHub type. :type unit: str :param current_value: Current number of IotHub type. :type current_value: int :param limit: Numerical limit on IotHub type. :type limit: int :param name: IotHub type. :type name: ~azure.mgmt.iothub.v2019_11_04.models.Name Json-serialized array of User subscription quota response. Variables are only populated by the server, and will be ignored when sending a request. 
:param value: :type value: list[~azure.mgmt.iothub.v2019_11_04.models.UserSubscriptionQuota] :ivar next_link: :vartype next_link: str | 1.902002 | 2 |
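The RouteProperties and RoutingProperties descriptions above translate directly into SDK calls. A minimal, hedged sketch follows (class and parameter names are taken from the docstrings above; the endpoint name and condition string are illustrative assumptions, not values from this document):

from azure.mgmt.iothub.v2019_11_04.models import RouteProperties, RoutingProperties

# One routing rule: forward device telemetry matching a condition to a single
# custom endpoint (only one endpoint name per route is allowed, as noted above).
critical_route = RouteProperties(
    name='critical-telemetry',                  # alphanumeric/._-, max 64 chars, unique
    source='DeviceMessages',
    condition='$body.severity = "critical"',    # optional; defaults to true
    endpoint_names=['my-servicebus-queue'],     # hypothetical endpoint name
    is_enabled=True,
)

# Attach the rule to the hub's routing properties; endpoints, fallback_route and
# enrichments are optional and omitted here.
routing = RoutingProperties(routes=[critical_route])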
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py | cognifloyd/st2-open-rbac | 0 | 647 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import six
import mock
from st2common.services import triggers as trigger_service
with mock.patch.object(trigger_service, 'create_trigger_type_db', mock.MagicMock()):
from st2api.controllers.v1.webhooks import HooksHolder
from st2common.persistence.rbac import UserRoleAssignment
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.service_setup import register_service_in_service_registry
from st2common.services import coordination
from st2tests import config as tests_config
from st2tests.fixturesloader import FixturesLoader
from open_rbac.tests import APIControllerWithRBACTestCase
from tests.unit.controllers.v1.test_webhooks import DUMMY_TRIGGER_DICT
http_client = six.moves.http_client
__all__ = [
'APIControllersRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = OrderedDict([
('runners', ['testrunner1.yaml', 'run-local.yaml']),
('sensors', ['sensor1.yaml']),
('actions', ['action1.yaml', 'local.yaml']),
('aliases', ['alias1.yaml']),
('triggers', ['trigger1.yaml', 'cron1.yaml']),
('rules', ['rule1.yaml']),
('triggertypes', ['triggertype1.yaml']),
('executions', ['execution1.yaml']),
('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']),
('enforcements', ['enforcement1.yaml']),
('apikeys', ['apikey1.yaml']),
('traces', ['trace_for_test_enforce.yaml'])
])
MOCK_RUNNER_1 = {
'name': 'test-runner-1',
'description': 'test',
'enabled': False
}
MOCK_ACTION_1 = {
'name': 'ma.dummy.action',
'pack': 'examples',
'description': 'test description',
'enabled': True,
'entry_point': '/tmp/test/action2.py',
'runner_type': 'local-shell-script',
'parameters': {
'c': {'type': 'string', 'default': 'C1', 'position': 0},
'd': {'type': 'string', 'default': 'D1', 'immutable': True}
}
}
MOCK_ACTION_ALIAS_1 = {
'name': 'alias3',
'pack': 'aliases',
'description': 'test description',
'action_ref': 'core.local',
'formats': ['a', 'b']
}
MOCK_RULE_1 = {
'enabled': True,
'name': 'st2.test.rule2',
'pack': 'yoyohoneysingh',
'trigger': {
'type': 'wolfpack.triggertype-1'
},
'criteria': {
'trigger.k1': {
'pattern': 't1_p_v',
'type': 'equals'
}
},
'action': {
'ref': 'sixpack.st2.test.action',
'parameters': {
'ip2': '{{rule.k1}}',
'ip1': '{{trigger.t1_p}}'
}
},
'description': ''
}
class APIControllersRBACTestCase(APIControllerWithRBACTestCase):
"""
Test class which hits all the API endpoints which are behind the RBAC wall with a user which
has no permissions and makes sure API returns access denied.
"""
register_packs = True
fixtures_loader = FixturesLoader()
coordinator = None
@classmethod
def setUpClass(cls):
tests_config.parse_args(coordinator_noop=True)
super(APIControllersRBACTestCase, cls).setUpClass()
cls.coordinator = coordination.get_coordinator(use_cache=False)
# Register mock service in the service registry for testing purposes
service = six.binary_type(six.text_type('mock_service').encode('ascii'))
register_service_in_service_registry(service=service,
capabilities={'key1': 'value1',
'name': 'mock_service'},
start_heart=True)
@classmethod
def tearDownClass(cls):
super(APIControllersRBACTestCase, cls).tearDownClass()
coordination.coordinator_teardown(cls.coordinator)
def setUp(self):
super(APIControllersRBACTestCase, self).setUp()
# Register packs
if self.register_packs:
self._register_packs()
# Insert mock objects - those objects are used to test get one, edit and delete operations
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
self.role_assignment_db_model = UserRoleAssignmentDB(
user='user', role='role', source='assignments/user.yaml')
UserRoleAssignment.add_or_update(self.role_assignment_db_model)
@mock.patch.object(HooksHolder, 'get_triggers_for_hook', mock.MagicMock(
return_value=[DUMMY_TRIGGER_DICT]))
def test_api_endpoints_behind_rbac_wall(self):
# alias_model = self.models['aliases']['alias1.yaml']
sensor_model = self.models['sensors']['sensor1.yaml']
rule_model = self.models['rules']['rule1.yaml']
enforcement_model = self.models['enforcements']['enforcement1.yaml']
execution_model = self.models['executions']['execution1.yaml']
trace_model = self.models['traces']['trace_for_test_enforce.yaml']
timer_model = self.models['triggers']['cron1.yaml']
supported_endpoints = [
# Runners
{
'path': '/v1/runnertypes',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'GET'
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'PUT',
'payload': MOCK_RUNNER_1
},
# Packs
{
'path': '/v1/packs',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/packs/dummy_pack_1',
'method': 'GET'
},
# Pack management
{
'path': '/v1/packs/install',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/uninstall',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/register',
'method': 'POST',
'payload': {'types': ['actions']}
},
{
'path': '/v1/packs/index/search',
'method': 'POST',
'payload': {'query': 'cloud'}
},
{
'path': '/v1/packs/index/health',
'method': 'GET'
},
# Pack views
{
'path': '/v1/packs/views/files/dummy_pack_1',
'method': 'GET'
},
# Pack config schemas
{
'path': '/v1/config_schemas',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/config_schemas/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/packs/views/file/dummy_pack_1/pack.yaml',
'method': 'GET'
},
# Pack configs
{
'path': '/v1/configs',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'PUT',
'payload': {
'foo': 'bar'
}
},
# Sensors
{
'path': '/v1/sensortypes',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'GET'
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'PUT',
'payload': {'enabled': False}
},
# Actions
{
'path': '/v1/actions',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'GET'
},
{
'path': '/v1/actions',
'method': 'POST',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'PUT',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'DELETE'
},
# Action aliases
{
'path': '/v1/actionalias',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'GET'
},
{
'path': '/v1/actionalias',
'method': 'POST',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'PUT',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'DELETE'
},
{
'path': '/v1/actionalias/match',
'method': 'POST',
'payload': {'command': 'test command string'}
},
# Rules
{
'path': '/v1/rules',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'GET'
},
{
'path': '/v1/rules',
'method': 'POST',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'PUT',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'DELETE'
},
# Rule enforcements
{
'path': '/v1/ruleenforcements',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/ruleenforcements/%s' % (enforcement_model.id),
'method': 'GET'
},
# Action Executions
{
'path': '/v1/executions',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions/%s/output' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions',
'method': 'POST',
'payload': {'action': 'core.local'} # schedule execution / run action
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'DELETE' # stop execution
},
{
'path': '/v1/executions/%s/re_run' % (execution_model.id),
'method': 'POST', # re-run execution
'payload': {'parameters': {}}
},
# Action execution nested controllers
{
'path': '/v1/executions/%s/attribute/trigger_instance' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions/%s/children' % (execution_model.id),
'method': 'GET'
},
# Alias executions
{
'path': '/v1/aliasexecution',
'method': 'POST',
'payload': {'name': 'alias1', 'format': 'foo bar ponies',
'command': 'foo bar ponies',
'user': 'channel', 'source_channel': 'bar'}
},
# Webhook
{
'path': '/v1/webhooks/st2',
'method': 'POST',
'payload': {
'trigger': 'some',
'payload': {
'some': 'thing'
}
}
},
# Traces
{
'path': '/v1/traces',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/traces/%s' % (trace_model.id),
'method': 'GET'
},
# Timers
{
'path': '/v1/timers',
'method': 'GET'
},
{
'path': '/v1/timers/%s' % (timer_model.id),
'method': 'GET'
},
# Webhooks
{
'path': '/v1/webhooks',
'method': 'GET'
},
{
'path': '/v1/webhooks/git',
'method': 'GET'
},
# RBAC - roles
{
'path': '/v1/rbac/roles',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rbac/roles/admin',
'method': 'GET'
},
# RBAC - user role assignments
{
'path': '/v1/rbac/role_assignments',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rbac/role_assignments/%s' % (self.role_assignment_db_model['id']),
'method': 'GET'
},
# RBAC - permission types
{
'path': '/v1/rbac/permission_types',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rbac/permission_types/action',
'method': 'GET'
},
# Action views
{
'path': '/v1/actions/views/overview',
'method': 'GET',
'is_getall': True
},
# Rule views
{
'path': '/v1/rules/views',
'method': 'GET',
'is_getall': True
},
# Service registry
{
'path': '/v1/service_registry/groups',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/service_registry/groups/mock_service/members',
'method': 'GET',
'is_getall': True
}
]
self.use_user(self.users['no_permissions'])
for endpoint in supported_endpoints:
response = self._perform_request_for_endpoint(endpoint=endpoint)
msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'],
endpoint['path'],
response.body)
self.assertEqual(response.status_code, http_client.FORBIDDEN, msg)
# Also test ?limit=-1 - non-admin user
self.use_user(self.users['observer'])
for endpoint in supported_endpoints:
if not endpoint.get('is_getall', False):
continue
response = self.app.get(endpoint['path'] + '?limit=-1', expect_errors=True)
msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'],
endpoint['path'],
response.body)
self.assertEqual(response.status_code, http_client.FORBIDDEN, msg)
# Also test ?limit=-1 - admin user
self.use_user(self.users['admin'])
for endpoint in supported_endpoints:
if not endpoint.get('is_getall', False):
continue
response = self.app.get(endpoint['path'] + '?limit=-1')
self.assertEqual(response.status_code, http_client.OK)
def test_icon_png_file_is_whitelisted(self):
self.use_user(self.users['no_permissions'])
# Test that access to icon.png file doesn't require any permissions
response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png')
self.assertEqual(response.status_code, http_client.OK)
# Other files should return forbidden
response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml',
expect_errors=True)
self.assertEqual(response.status_code, http_client.FORBIDDEN)
def _perform_request_for_endpoint(self, endpoint):
if endpoint['method'] == 'GET':
response = self.app.get(endpoint['path'], expect_errors=True)
elif endpoint['method'] == 'POST':
return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'PUT':
return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'DELETE':
return self.app.delete(endpoint['path'], expect_errors=True)
else:
raise ValueError('Unsupported method: %s' % (endpoint['method']))
return response
| # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import six
import mock
from st2common.services import triggers as trigger_service
with mock.patch.object(trigger_service, 'create_trigger_type_db', mock.MagicMock()):
from st2api.controllers.v1.webhooks import HooksHolder
from st2common.persistence.rbac import UserRoleAssignment
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.service_setup import register_service_in_service_registry
from st2common.services import coordination
from st2tests import config as tests_config
from st2tests.fixturesloader import FixturesLoader
from open_rbac.tests import APIControllerWithRBACTestCase
from tests.unit.controllers.v1.test_webhooks import DUMMY_TRIGGER_DICT
http_client = six.moves.http_client
__all__ = [
'APIControllersRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = OrderedDict([
('runners', ['testrunner1.yaml', 'run-local.yaml']),
('sensors', ['sensor1.yaml']),
('actions', ['action1.yaml', 'local.yaml']),
('aliases', ['alias1.yaml']),
('triggers', ['trigger1.yaml', 'cron1.yaml']),
('rules', ['rule1.yaml']),
('triggertypes', ['triggertype1.yaml']),
('executions', ['execution1.yaml']),
('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']),
('enforcements', ['enforcement1.yaml']),
('apikeys', ['apikey1.yaml']),
('traces', ['trace_for_test_enforce.yaml'])
])
MOCK_RUNNER_1 = {
'name': 'test-runner-1',
'description': 'test',
'enabled': False
}
MOCK_ACTION_1 = {
'name': 'ma.dummy.action',
'pack': 'examples',
'description': 'test description',
'enabled': True,
'entry_point': '/tmp/test/action2.py',
'runner_type': 'local-shell-script',
'parameters': {
'c': {'type': 'string', 'default': 'C1', 'position': 0},
'd': {'type': 'string', 'default': 'D1', 'immutable': True}
}
}
MOCK_ACTION_ALIAS_1 = {
'name': 'alias3',
'pack': 'aliases',
'description': 'test description',
'action_ref': 'core.local',
'formats': ['a', 'b']
}
MOCK_RULE_1 = {
'enabled': True,
'name': 'st2.test.rule2',
'pack': 'yoyohoneysingh',
'trigger': {
'type': 'wolfpack.triggertype-1'
},
'criteria': {
'trigger.k1': {
'pattern': 't1_p_v',
'type': 'equals'
}
},
'action': {
'ref': 'sixpack.st2.test.action',
'parameters': {
'ip2': '{{rule.k1}}',
'ip1': '{{trigger.t1_p}}'
}
},
'description': ''
}
class APIControllersRBACTestCase(APIControllerWithRBACTestCase):
"""
Test class which hits all the API endpoints which are behind the RBAC wall with a user which
has no permissions and makes sure API returns access denied.
"""
register_packs = True
fixtures_loader = FixturesLoader()
coordinator = None
@classmethod
def setUpClass(cls):
tests_config.parse_args(coordinator_noop=True)
super(APIControllersRBACTestCase, cls).setUpClass()
cls.coordinator = coordination.get_coordinator(use_cache=False)
# Register mock service in the service registry for testing purposes
service = six.binary_type(six.text_type('mock_service').encode('ascii'))
register_service_in_service_registry(service=service,
capabilities={'key1': 'value1',
'name': 'mock_service'},
start_heart=True)
@classmethod
def tearDownClass(cls):
super(APIControllersRBACTestCase, cls).tearDownClass()
coordination.coordinator_teardown(cls.coordinator)
def setUp(self):
super(APIControllersRBACTestCase, self).setUp()
# Register packs
if self.register_packs:
self._register_packs()
# Insert mock objects - those objects are used to test get one, edit and delete operations
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
self.role_assignment_db_model = UserRoleAssignmentDB(
user='user', role='role', source='assignments/user.yaml')
UserRoleAssignment.add_or_update(self.role_assignment_db_model)
@mock.patch.object(HooksHolder, 'get_triggers_for_hook', mock.MagicMock(
return_value=[DUMMY_TRIGGER_DICT]))
def test_api_endpoints_behind_rbac_wall(self):
# alias_model = self.models['aliases']['alias1.yaml']
sensor_model = self.models['sensors']['sensor1.yaml']
rule_model = self.models['rules']['rule1.yaml']
enforcement_model = self.models['enforcements']['enforcement1.yaml']
execution_model = self.models['executions']['execution1.yaml']
trace_model = self.models['traces']['trace_for_test_enforce.yaml']
timer_model = self.models['triggers']['cron1.yaml']
supported_endpoints = [
# Runners
{
'path': '/v1/runnertypes',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'GET'
},
{
'path': '/v1/runnertypes/test-runner-1',
'method': 'PUT',
'payload': MOCK_RUNNER_1
},
# Packs
{
'path': '/v1/packs',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/packs/dummy_pack_1',
'method': 'GET'
},
# Pack management
{
'path': '/v1/packs/install',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/uninstall',
'method': 'POST',
'payload': {'packs': 'libcloud'}
},
{
'path': '/v1/packs/register',
'method': 'POST',
'payload': {'types': ['actions']}
},
{
'path': '/v1/packs/index/search',
'method': 'POST',
'payload': {'query': 'cloud'}
},
{
'path': '/v1/packs/index/health',
'method': 'GET'
},
# Pack views
{
'path': '/v1/packs/views/files/dummy_pack_1',
'method': 'GET'
},
# Pack config schemas
{
'path': '/v1/config_schemas',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/config_schemas/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/packs/views/file/dummy_pack_1/pack.yaml',
'method': 'GET'
},
# Pack configs
{
'path': '/v1/configs',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'GET'
},
{
'path': '/v1/configs/dummy_pack_1',
'method': 'PUT',
'payload': {
'foo': 'bar'
}
},
# Sensors
{
'path': '/v1/sensortypes',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'GET'
},
{
'path': '/v1/sensortypes/%s' % (sensor_model.ref),
'method': 'PUT',
'payload': {'enabled': False}
},
# Actions
{
'path': '/v1/actions',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'GET'
},
{
'path': '/v1/actions',
'method': 'POST',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'PUT',
'payload': MOCK_ACTION_1
},
{
'path': '/v1/actions/wolfpack.action-1',
'method': 'DELETE'
},
# Action aliases
{
'path': '/v1/actionalias',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'GET'
},
{
'path': '/v1/actionalias',
'method': 'POST',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'PUT',
'payload': MOCK_ACTION_ALIAS_1
},
{
'path': '/v1/actionalias/aliases.alias1',
'method': 'DELETE'
},
{
'path': '/v1/actionalias/match',
'method': 'POST',
'payload': {'command': 'test command string'}
},
# Rules
{
'path': '/v1/rules',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'GET'
},
{
'path': '/v1/rules',
'method': 'POST',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'PUT',
'payload': MOCK_RULE_1
},
{
'path': '/v1/rules/%s' % (rule_model.ref),
'method': 'DELETE'
},
# Rule enforcements
{
'path': '/v1/ruleenforcements',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/ruleenforcements/%s' % (enforcement_model.id),
'method': 'GET'
},
# Action Executions
{
'path': '/v1/executions',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions/%s/output' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions',
'method': 'POST',
'payload': {'action': 'core.local'} # schedule execution / run action
},
{
'path': '/v1/executions/%s' % (execution_model.id),
'method': 'DELETE' # stop execution
},
{
'path': '/v1/executions/%s/re_run' % (execution_model.id),
'method': 'POST', # re-run execution
'payload': {'parameters': {}}
},
# Action execution nested controllers
{
'path': '/v1/executions/%s/attribute/trigger_instance' % (execution_model.id),
'method': 'GET'
},
{
'path': '/v1/executions/%s/children' % (execution_model.id),
'method': 'GET'
},
# Alias executions
{
'path': '/v1/aliasexecution',
'method': 'POST',
'payload': {'name': 'alias1', 'format': 'foo bar ponies',
'command': 'foo bar ponies',
'user': 'channel', 'source_channel': 'bar'}
},
# Webhook
{
'path': '/v1/webhooks/st2',
'method': 'POST',
'payload': {
'trigger': 'some',
'payload': {
'some': 'thing'
}
}
},
# Traces
{
'path': '/v1/traces',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/traces/%s' % (trace_model.id),
'method': 'GET'
},
# Timers
{
'path': '/v1/timers',
'method': 'GET'
},
{
'path': '/v1/timers/%s' % (timer_model.id),
'method': 'GET'
},
# Webhooks
{
'path': '/v1/webhooks',
'method': 'GET'
},
{
'path': '/v1/webhooks/git',
'method': 'GET'
},
# RBAC - roles
{
'path': '/v1/rbac/roles',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rbac/roles/admin',
'method': 'GET'
},
# RBAC - user role assignments
{
'path': '/v1/rbac/role_assignments',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rbac/role_assignments/%s' % (self.role_assignment_db_model['id']),
'method': 'GET'
},
# RBAC - permission types
{
'path': '/v1/rbac/permission_types',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/rbac/permission_types/action',
'method': 'GET'
},
# Action views
{
'path': '/v1/actions/views/overview',
'method': 'GET',
'is_getall': True
},
# Rule views
{
'path': '/v1/rules/views',
'method': 'GET',
'is_getall': True
},
# Service registry
{
'path': '/v1/service_registry/groups',
'method': 'GET',
'is_getall': True
},
{
'path': '/v1/service_registry/groups/mock_service/members',
'method': 'GET',
'is_getall': True
}
]
self.use_user(self.users['no_permissions'])
for endpoint in supported_endpoints:
response = self._perform_request_for_endpoint(endpoint=endpoint)
msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'],
endpoint['path'],
response.body)
self.assertEqual(response.status_code, http_client.FORBIDDEN, msg)
# Also test ?limit=-1 - non-admin user
self.use_user(self.users['observer'])
for endpoint in supported_endpoints:
if not endpoint.get('is_getall', False):
continue
response = self.app.get(endpoint['path'] + '?limit=-1', expect_errors=True)
msg = '%s "%s" didn\'t return 403 status code (body=%s)' % (endpoint['method'],
endpoint['path'],
response.body)
self.assertEqual(response.status_code, http_client.FORBIDDEN, msg)
# Also test ?limit=-1 - admin user
self.use_user(self.users['admin'])
for endpoint in supported_endpoints:
if not endpoint.get('is_getall', False):
continue
response = self.app.get(endpoint['path'] + '?limit=-1')
self.assertEqual(response.status_code, http_client.OK)
def test_icon_png_file_is_whitelisted(self):
self.use_user(self.users['no_permissions'])
# Test that access to icon.png file doesn't require any permissions
response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png')
self.assertEqual(response.status_code, http_client.OK)
# Other files should return forbidden
response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml',
expect_errors=True)
self.assertEqual(response.status_code, http_client.FORBIDDEN)
def _perform_request_for_endpoint(self, endpoint):
if endpoint['method'] == 'GET':
response = self.app.get(endpoint['path'], expect_errors=True)
elif endpoint['method'] == 'POST':
return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'PUT':
return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'DELETE':
return self.app.delete(endpoint['path'], expect_errors=True)
else:
raise ValueError('Unsupported method: %s' % (endpoint['method']))
return response | en | 0.840929 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test class which hits all the API endpoints which are behind the RBAC wall with a user which has no permissions and makes sure API returns access denied. # Register mock service in the service registry for testing purposes # Register packs # Insert mock objects - those objects are used to test get one, edit and delete operations # alias_model = self.models['aliases']['alias1.yaml'] # Runners # Packs # Pack management # Pack views # Pack config schemas # Pack configs # Sensors # Actions # Action aliases # Rules # Rule enforcements # Action Executions # schedule execution / run action # stop execution # re-run execution # Action execution nested controllers # Alias executions # Webhook # Traces # Timers # Webhooks # RBAC - roles # RBAC - user role assignments # RBAC - permission types # Action views # Rule views # Service registry # Also test ?limit=-1 - non-admin user # Also test ?limit=-1 - admin user # Test that access to icon.png file doesn't require any permissions # Other files should return forbidden | 1.573659 | 2 |
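The record above is a table-driven permission sweep: each endpoint/method pair is called as a user with no permissions and must return 403. A stripped-down sketch of the same pattern outside the st2 test harness (base URL, auth token and endpoint list are placeholders, not StackStorm's actual fixtures):

import requests

BASE_URL = 'http://localhost:9101'            # hypothetical API root
TOKEN = 'token-of-user-with-no-permissions'   # placeholder credential

ENDPOINTS = [
    ('GET', '/v1/actions', None),
    ('POST', '/v1/executions', {'action': 'core.local'}),
    ('DELETE', '/v1/rules/demo.rule', None),
]

def test_endpoints_require_permission():
    for method, path, payload in ENDPOINTS:
        resp = requests.request(method, BASE_URL + path,
                                headers={'X-Auth-Token': TOKEN}, json=payload)
        assert resp.status_code == 403, '%s %s returned %s' % (method, path, resp.status_code)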
testing/scripts/test_ksonnet_single_namespace.py | dtrawins/seldon-core | 0 | 648 | import pytest
import time
import subprocess
from subprocess import run,Popen
from seldon_utils import *
from k8s_utils import *
def wait_for_shutdown(deploymentName):
ret = run("kubectl get deploy/"+deploymentName, shell=True)
while ret.returncode == 0:
time.sleep(1)
ret = run("kubectl get deploy/"+deploymentName, shell=True)
def wait_for_rollout(deploymentName):
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
while ret.returncode > 0:
time.sleep(1)
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
def initial_rest_request():
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(1)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(5)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
return r
@pytest.mark.usefixtures("seldon_java_images")
@pytest.mark.usefixtures("single_namespace_seldon_ksonnet")
class TestSingleNamespace(object):
# Test single model helm script with 4 API methods
def test_single_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-serve-simple-v1alpha2 mymodel --image seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymodel', shell=True, check=True)
wait_for_rollout("mymodel-mymodel-025d03d")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymodel",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymodel",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c mymodel && ks component rm mymodel', shell=True)
# Test AB Test model helm script with 4 API methods
def test_abtest_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-abtest-v1alpha2 myabtest --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c myabtest', shell=True)
wait_for_rollout("myabtest-myabtest-41de5b8")
wait_for_rollout("myabtest-myabtest-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("myabtest",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("myabtest",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c myabtest && ks component rm myabtest', shell=True)
# Test MAB Test model helm script with 4 API methods
def test_mab_model(self):
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-mab-v1alpha2 mymab --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymab', shell=True)
wait_for_rollout("mymab-mymab-41de5b8")
wait_for_rollout("mymab-mymab-b8038b2")
wait_for_rollout("mymab-mymab-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymab",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymab",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
| import pytest
import time
import subprocess
from subprocess import run,Popen
from seldon_utils import *
from k8s_utils import *
def wait_for_shutdown(deploymentName):
ret = run("kubectl get deploy/"+deploymentName, shell=True)
while ret.returncode == 0:
time.sleep(1)
ret = run("kubectl get deploy/"+deploymentName, shell=True)
def wait_for_rollout(deploymentName):
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
while ret.returncode > 0:
time.sleep(1)
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
def initial_rest_request():
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(1)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(5)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
return r
@pytest.mark.usefixtures("seldon_java_images")
@pytest.mark.usefixtures("single_namespace_seldon_ksonnet")
class TestSingleNamespace(object):
# Test single model helm script with 4 API methods
def test_single_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-serve-simple-v1alpha2 mymodel --image seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymodel', shell=True, check=True)
wait_for_rollout("mymodel-mymodel-025d03d")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymodel",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymodel",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c mymodel && ks component rm mymodel', shell=True)
# Test AB Test model helm script with 4 API methods
def test_abtest_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-abtest-v1alpha2 myabtest --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c myabtest', shell=True)
wait_for_rollout("myabtest-myabtest-41de5b8")
wait_for_rollout("myabtest-myabtest-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("myabtest",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("myabtest",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c myabtest && ks component rm myabtest', shell=True)
# Test MAB Test model helm script with 4 API methods
def test_mab_model(self):
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-mab-v1alpha2 mymab --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymab', shell=True)
wait_for_rollout("mymab-mymab-41de5b8")
wait_for_rollout("mymab-mymab-b8038b2")
wait_for_rollout("mymab-mymab-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymab",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymab",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
| en | 0.742609 | # Test single model helm script with 4 API methods # Test AB Test model helm script with 4 API methods # Test MAB Test model helm script with 4 API methods | 2.008779 | 2 |
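wait_for_rollout and wait_for_shutdown above poll kubectl in an unbounded loop; a common hardening is to add a deadline so a stuck rollout fails instead of hanging. A hedged sketch of that variant (timeout values are illustrative):

import subprocess
import time

def wait_for_rollout(deployment, timeout_s=300, poll_s=2):
    """Block until `kubectl rollout status` succeeds or the deadline passes."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        ret = subprocess.run('kubectl rollout status deploy/' + deployment, shell=True)
        if ret.returncode == 0:
            return
        time.sleep(poll_s)
    raise TimeoutError('deployment %s did not roll out within %ss' % (deployment, timeout_s))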
enthought/envisage/safeweakref.py | enthought/etsproxy | 3 | 649 |
# proxy module
from __future__ import absolute_import
from envisage.safeweakref import *
| # proxy module
from __future__ import absolute_import
from envisage.safeweakref import * | es | 0.125187 | # proxy module | 1.082722 | 1 |
test_app/models.py | alissonmuller/django-group-by | 25 | 650 | from django.db import models
from .query import BookQuerySet
class Book(models.Model):
objects = BookQuerySet.as_manager()
title = models.CharField(max_length=50)
publication_date = models.DateTimeField()
author = models.ForeignKey('Author')
genres = models.ManyToManyField('Genre')
class Author(models.Model):
name = models.CharField(max_length=50)
nationality = models.ForeignKey('Nation', null=True)
class Genre(models.Model):
name = models.CharField(max_length=50)
class Nation(models.Model):
name = models.CharField(max_length=50)
demonym = models.CharField(max_length=50)
| from django.db import models
from .query import BookQuerySet
class Book(models.Model):
objects = BookQuerySet.as_manager()
title = models.CharField(max_length=50)
publication_date = models.DateTimeField()
author = models.ForeignKey('Author')
genres = models.ManyToManyField('Genre')
class Author(models.Model):
name = models.CharField(max_length=50)
nationality = models.ForeignKey('Nation', null=True)
class Genre(models.Model):
name = models.CharField(max_length=50)
class Nation(models.Model):
name = models.CharField(max_length=50)
demonym = models.CharField(max_length=50)
| none | 1 | 2.410215 | 2 |
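The .query module imported above is not included in this record. As a hedged guess at its shape (the django-group-by project documents a GroupByMixin that adds a group_by() queryset method; the exact import path and class body here are assumptions):

from django.db.models import QuerySet
from django_group_by import GroupByMixin   # assumed import path

class BookQuerySet(QuerySet, GroupByMixin):
    """Exposed as Book.objects via BookQuerySet.as_manager() in the models above."""
    pass

# Illustrative usage with the fields defined above:
#   Book.objects.group_by('author', 'author__nationality').distinct()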
|
ResNet/dropblock.py | whj363636/CamDrop | 0 | 651 |
# -*- coding: utf-8 -*-
# File: dropblock.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
# from tensorpack.tfutils.compat import tfv1 as tf # this should be avoided first in model code
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.models import GlobalAvgPooling, FullyConnected
import tensorflow as tf
__all__ = ['dropblock', 'dropblock2','dropblock3','dropblock4'] # 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout
def dropblock(net, keep_prob, dropblock_size, gap_w=None, label=None, G=None, CG=None, data_format='channels_first'):
"""DropBlock: a regularization method for convolutional neural networks.
DropBlock is a form of structured dropout, where units in a contiguous
region of a feature map are dropped together. DropBlock works better than
dropout on convolutional layers due to the fact that activation units in
convolutional layers are spatially correlated.
See https://arxiv.org/pdf/1810.12890.pdf for details.
Args:
net: `Tensor` input tensor.
is_training: inferred from the current tower context rather than passed in; DropBlock is only applied while training.
keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None"
means no DropBlock.
dropblock_size: `int` size of blocks to be dropped by DropBlock.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A version of input tensor with DropBlock applied.
Raises:
ValueError: if width and height of the input tensor are not equal.
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, _ = net.get_shape().as_list()
else:
_, _, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlock.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(
valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(
tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
block_pattern, net.dtype)
return net
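# --- Editor's usage sketch (added; shapes and hyper-parameters below are
# --- assumptions, not taken from this repository) ----------------------------
# Inside a tensorpack tower, dropblock is applied directly to an intermediate
# feature map; outside of training it simply returns its input:
#
#   l = some_residual_block(l)                    # e.g. NCHW, [N, 256, 32, 32]
#   l = dropblock(l, keep_prob=0.9, dropblock_size=7,
#                 data_format='channels_first')
#
# seed_drop_rate above implements the paper's gamma,
#   gamma = (1 - keep_prob) * W**2 / (block_size**2 * (W - block_size + 1)**2),
# so the expected fraction of dropped activations is roughly 1 - keep_prob.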
def dropblock2(net, keep_prob, dropblock_size, G=None, CG=None, data_format='channels_first'):
"""
mimic GN
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
if G is None: G = C // CG
if CG is None: CG = C // G
net = tf.reshape(net, [N, G, CG, height, width])
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlock.
# seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2)
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0) # for depth
valid_block_center = tf.expand_dims(valid_block_center, 0) # for batch
valid_block_center = tf.expand_dims(valid_block_center, 0) # for channel
randnoise = tf.random_uniform([N, G, 1, width, height], dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(block_pattern, axis=[2, 3, 4], keepdims=True)
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = tf.reduce_max(-block_pattern, reduction_indices=[2])
block_pattern = -tf.nn.max_pool(block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW')
block_pattern = tf.expand_dims(block_pattern, 2)
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
net = tf.reshape(net, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height, width])
return net
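# --- Editor's note on dropblock2 (descriptive, added) ------------------------
# The "mimic GN" variant reshapes the feature map to [N, G, CG, H, W] and draws
# its noise with shape [N, G, 1, W, H], so all CG channels within a group share
# one block mask, analogous to how GroupNorm shares statistics per group.
# Callers must supply either G (number of groups) or CG (channels per group);
# the missing one is derived as C // CG or C // G respectively.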
def CamDrop(net, keep_prob, dropblock_size, flag=None, label=None, G=None, CG=None, data_format='channels_first'):
'''CamDrop'''
def _get_cam(net, label, flag, dropblock_size, data_format='channels_first'):
'''
net: [N, C, H, W]
gap_w : [gap_C, num_of_class]
'''
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
gap_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'linear/W') if flag > 0 else None
if not gap_w is None:
gap_w = tf.convert_to_tensor(gap_w, tf.float32)
gap_C, num = tf.squeeze(gap_w, 0).get_shape().as_list() # [gap_C, num]
gap_w = tf.reshape(gap_w, [C, gap_C//C, num])
gap_w = tf.reduce_mean(gap_w, reduction_indices=[1]) # [C, num]
label = tf.gather(tf.transpose(gap_w), label) # [N, C]
# spatial
weights = tf.expand_dims(label, 2) # [N, C, 1]
net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
cam = tf.matmul(weights, net, transpose_a=True) # [N, 1, width*height]
# spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True))
# cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width])
# cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW')
# left_or_top = (dropblock_size-1) // 2
# right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1
# cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]])
# cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width])
k = tf.cast(height*width/dropblock_size**2, tf.int32)
topk, _ = tf.math.top_k(cam, k=k) # [N, 1, k]
topk = tf.gather(topk, indices=[k-1], axis=-1) # [N, 1, 1]
spt_mask = (cam < topk)
spt_mask = tf.reshape(spt_mask, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(spt_mask, [N, 1, height, width])
# channel
k = tf.cast(C/8, tf.int32)
topk, _ = tf.math.top_k(label, k=k+1) # [N, k]
topk = tf.gather(topk, indices=k, axis=1) # [N, 1]
topk = tf.expand_dims(topk, 1) # [N, C, 1]
chan_mask = (label < topk)
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1]
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1, 1]
cam_mask = tf.logical_or(spt_mask, chan_mask)
# chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width])
# chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width])
# chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width])
else:
cam_mask = False
return cam_mask
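# --- Editor's note on _get_cam (descriptive, added) --------------------------
# When flag > 0 the weights of the final 'linear/W' layer are used to build a
# class-activation map for the ground-truth label. spt_mask is True outside the
# top H*W/dropblock_size**2 CAM positions and chan_mask is True outside the top
# C/8 channels, so their union forces those regions to be kept and restricts
# dropping to the most discriminative blocks of the most discriminative
# channels. With flag <= 0 no CAM is available (cam_mask = False) and the
# caller falls back to plain DropBlock behaviour.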
# def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'):
# # Conv layer tensor [?,2048,10,10]
# def _compute_gradients(tensor, var_list):
# grads = tf.gradients(tensor, var_list)
# return [grad if grad is not None else tf.zeros_like(var)
# for var, grad in zip(var_list, grads)]
# # grads = tf.gradients(cost, net)[0]
# if not gap_w is None:
# # Normalizing the gradients
# if data_format == 'channels_last':
# N, height, width, C = net.get_shape().as_list()
# else:
# N, C, height, width = net.get_shape().as_list()
# N = tf.shape(net)[0]
# grads = _compute_gradients(cost, [net])[0]
# norm_grads = tf.divide(grads, tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5))
# weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3]) # [N, C]
# weights = tf.expand_dims(weights, 2) # [N, C, 1]
# net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
# # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True) # [N, width*height, 1]
# cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0) # [N, 1, width*height]
# cam_chan = tf.maximum(tf.multiply(net, weights), 0) # [N, C, width*height]
# cam = cam_mean*cam_chan
# # Passing through ReLU
# cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True)
# cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width])
# else:
# cam = 0.
# return cam
# def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20):
# if logits == False:
# return logits
# U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1)
# y = logits - tf.log(-tf.log(U + eps) + eps)
# cam_mask = tf.nn.softmax(y / tau)
# topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32)) # [N, 1]
# topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1)
# topk = tf.expand_dims(topk, 1) # [N, C, 1]
# cam_mask = (cam_mask < topk)
# # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32)
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1]
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1, 1]
# return cam_mask
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, C = net.get_shape().as_list()
else:
_, C, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
N = tf.shape(net)[0]
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlock.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
cam_mask = _get_cam(net, label, flag, dropblock_size, data_format)
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast((1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.logical_or(block_pattern, cam_mask)
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
return net | # -*- coding: utf-8 -*-
# File: dropblock.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
# from tensorpack.tfutils.compat import tfv1 as tf # this should be avoided first in model code
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.models import GlobalAvgPooling, FullyConnected
import tensorflow as tf
__all__ = ['dropblock', 'dropblock2','dropblock3','dropblock4'] # 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout
def dropblock(net, keep_prob, dropblock_size, gap_w=None, label=None, G=None, CG=None, data_format='channels_first'):
"""DropBlock: a regularization method for convolutional neural networks.
DropBlock is a form of structured dropout, where units in a contiguous
region of a feature map are dropped together. DropBlock works better than
dropout on convolutional layers due to the fact that activation units in
convolutional layers are spatially correlated.
See https://arxiv.org/pdf/1810.12890.pdf for details.
Training mode is taken from the current tower context; DropBlock is a
no-op at inference time.
Args:
net: `Tensor` input tensor.
keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None"
means no DropBlock.
dropblock_size: `int` size of blocks to be dropped by DropBlock.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A version of input tensor with DropBlock applied.
Raises:
ValueError: if width and height of the input tensor are not equal.
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, _ = net.get_shape().as_list()
else:
_, _, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlock.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(
valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(
tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
block_pattern, net.dtype)
return net
def dropblock2(net, keep_prob, dropblock_size, G=None, CG=None, data_format='channels_first'):
"""
mimic GN
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
if G is None: G = C // CG
if CG is None: CG = C // G
net = tf.reshape(net, [N, G, CG, height, width])
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlock.
# seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2)
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0) # for depth
valid_block_center = tf.expand_dims(valid_block_center, 0) # for batch
valid_block_center = tf.expand_dims(valid_block_center, 0) # for channel
randnoise = tf.random_uniform([N, G, 1, width, height], dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(block_pattern, axis=[2, 3, 4], keepdims=True)
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = tf.reduce_max(-block_pattern, reduction_indices=[2])
block_pattern = -tf.nn.max_pool(block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW')
block_pattern = tf.expand_dims(block_pattern, 2)
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
net = tf.reshape(net, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height, width])
return net
def CamDrop(net, keep_prob, dropblock_size, flag=None, label=None, G=None, CG=None, data_format='channels_first'):
'''CamDrop'''
def _get_cam(net, label, flag, dropblock_size, data_format='channels_first'):
'''
net: [N, C, H, W]
gap_w : [gap_C, num_of_class]
'''
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
gap_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'linear/W') if flag > 0 else None
if not gap_w is None:
gap_w = tf.convert_to_tensor(gap_w, tf.float32)
gap_C, num = tf.squeeze(gap_w, 0).get_shape().as_list() # [gap_C, num]
gap_w = tf.reshape(gap_w, [C, gap_C//C, num])
gap_w = tf.reduce_mean(gap_w, reduction_indices=[1]) # [C, num]
label = tf.gather(tf.transpose(gap_w), label) # [N, C]
# spatial
weights = tf.expand_dims(label, 2) # [N, C, 1]
net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
cam = tf.matmul(weights, net, transpose_a=True) # [N, 1, width*height]
# spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True))
# cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width])
# cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW')
# left_or_top = (dropblock_size-1) // 2
# right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1
# cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]])
# cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width])
k = tf.cast(height*width/dropblock_size**2, tf.int32)
topk, _ = tf.math.top_k(cam, k=k) # [N, 1, k]
topk = tf.gather(topk, indices=[k-1], axis=-1) # [N, 1, 1]
spt_mask = (cam < topk)
spt_mask = tf.reshape(spt_mask, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(spt_mask, [N, 1, height, width])
# channel
k = tf.cast(C/8, tf.int32)
topk, _ = tf.math.top_k(label, k=k+1) # [N, k]
topk = tf.gather(topk, indices=k, axis=1) # [N, 1]
topk = tf.expand_dims(topk, 1) # [N, C, 1]
chan_mask = (label < topk)
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1]
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1, 1]
cam_mask = tf.logical_or(spt_mask, chan_mask)
# chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width])
# chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width])
# chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width])
else:
cam_mask = False
return cam_mask
# def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'):
# # Conv layer tensor [?,2048,10,10]
# def _compute_gradients(tensor, var_list):
# grads = tf.gradients(tensor, var_list)
# return [grad if grad is not None else tf.zeros_like(var)
# for var, grad in zip(var_list, grads)]
# # grads = tf.gradients(cost, net)[0]
# if not gap_w is None:
# # Normalizing the gradients
# if data_format == 'channels_last':
# N, height, width, C = net.get_shape().as_list()
# else:
# N, C, height, width = net.get_shape().as_list()
# N = tf.shape(net)[0]
# grads = _compute_gradients(cost, [net])[0]
# norm_grads = tf.divide(grads, tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5))
# weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3]) # [N, C]
# weights = tf.expand_dims(weights, 2) # [N, C, 1]
# net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
# # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True) # [N, width*height, 1]
# cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0) # [N, 1, width*height]
# cam_chan = tf.maximum(tf.multiply(net, weights), 0) # [N, C, width*height]
# cam = cam_mean*cam_chan
# # Passing through ReLU
# cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True)
# cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width])
# else:
# cam = 0.
# return cam
# def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20):
# if logits == False:
# return logits
# U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1)
# y = logits - tf.log(-tf.log(U + eps) + eps)
# cam_mask = tf.nn.softmax(y / tau)
# topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32)) # [N, 1]
# topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1)
# topk = tf.expand_dims(topk, 1) # [N, C, 1]
# cam_mask = (cam_mask < topk)
# # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32)
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1]
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1, 1]
# return cam_mask
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, C = net.get_shape().as_list()
else:
_, C, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
N = tf.shape(net)[0]
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlock.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
cam_mask = _get_cam(net, label, flag, dropblock_size, data_format)
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast((1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.logical_or(block_pattern, cam_mask)
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
return net | en | 0.479543 | # -*- coding: utf-8 -*- # File: dropblock.py # from tensorpack.tfutils.compat import tfv1 as tf # this should be avoided first in model code # 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout DropBlock: a regularization method for convolutional neural networks. DropBlock is a form of structured dropout, where units in a contiguous region of a feature map are dropped together. DropBlock works better than dropout on convolutional layers due to the fact that activation units in convolutional layers are spatially correlated. See https://arxiv.org/pdf/1810.12890.pdf for details. Args: net: `Tensor` input tensor. is_training: `bool` for whether the model is training. keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None" means no DropBlock. dropblock_size: `int` size of blocks to be dropped by DropBlock. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A version of input tensor with DropBlock applied. Raises: if width and height of the input tensor are not equal. # seed_drop_rate is the gamma parameter of DropBlcok. # Forces the block to be inside the feature map. mimic GN # seed_drop_rate is the gamma parameter of DropBlcok. # seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2) # Forces the block to be inside the feature map. # for depth # for batch # for channel CamDrop net: [N, C, H, W] gap_w : [gap_C, num_of_class] # [gap_C, num] # [C, num] # [N, C] # spatial # [N, C, 1] # [N, 1, width*height] # spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True)) # cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width]) # cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW') # left_or_top = (dropblock_size-1) // 2 # right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1 # cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]]) # cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width]) # [N, 1, k] # [N, 1, 1] # channel # [N, k] # [N, 1] # [N, C, 1] # [N, C, 1] # [N, C, 1, 1] # chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) # chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width]) # chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width]) # def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'): # # Conv layer tensor [?,2048,10,10] # def _compute_gradients(tensor, var_list): # grads = tf.gradients(tensor, var_list) # return [grad if grad is not None else tf.zeros_like(var) # for var, grad in zip(var_list, grads)] # # grads = tf.gradients(cost, net)[0] # if not gap_w is None: # # Normalizing the gradients # if data_format == 'channels_last': # N, height, width, C = net.get_shape().as_list() # else: # N, C, height, width = net.get_shape().as_list() # N = tf.shape(net)[0] # grads = _compute_gradients(cost, [net])[0] # norm_grads = tf.divide(grads, 
tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5)) # weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3]) # [N, C] # weights = tf.expand_dims(weights, 2) # [N, C, 1] # net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width]) # # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True) # [N, width*height, 1] # cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0) # [N, 1, width*height] # cam_chan = tf.maximum(tf.multiply(net, weights), 0) # [N, C, width*height] # cam = cam_mean*cam_chan # # Passing through ReLU # cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True) # cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width]) # else: # cam = 0. # return cam # def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20): # if logits == False: # return logits # U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1) # y = logits - tf.log(-tf.log(U + eps) + eps) # cam_mask = tf.nn.softmax(y / tau) # topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32)) # [N, 1] # topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1) # topk = tf.expand_dims(topk, 1) # [N, C, 1] # cam_mask = (cam_mask < topk) # # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32) # cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1] # cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1, 1] # return cam_mask # seed_drop_rate is the gamma parameter of DropBlcok. # Forces the block to be inside the feature map. | 2.922466 | 3 |
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py | Purple-PI/rlstructures | 281 | 652 |
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures import logging
from rlstructures.env_wrappers import GymEnv, GymEnvInf
from rlstructures.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_recurrent_policy.agent import RecurrentAgent
from tutorial.tutorial_recurrent_policy.a2c import A2C
import gym
from gym.wrappers import TimeLimit
# We define the 'create_env' and 'create_agent' functions in the main file so that they can be pickled when the batcher processes are created
def create_gym_env(env_name):
return gym.make(env_name)
def create_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
envs = []
for k in range(n_envs):
e = create_gym_env(env_name)
e = TimeLimit(e, max_episode_steps=max_episode_steps)
envs.append(e)
return GymEnv(envs, seed)
def create_train_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
envs = []
for k in range(n_envs):
e = create_gym_env(env_name)
e = TimeLimit(e, max_episode_steps=max_episode_steps)
envs.append(e)
return GymEnvInf(envs, seed)
def create_agent(model, n_actions=1):
return RecurrentAgent(model=model, n_actions=n_actions)
class Experiment(A2C):
def __init__(self, config, create_env, create_train_env, create_agent):
super().__init__(config, create_env, create_train_env, create_agent)
if __name__ == "__main__":
# We use spawn mode such that most of the environment will run in multiple processes
import torch.multiprocessing as mp
mp.set_start_method("spawn")
config = {
"env_name": "CartPole-v0",
"a2c_timesteps": 3,
"n_envs": 4,
"max_episode_steps": 100,
"env_seed": 42,
"n_threads": 4,
"n_evaluation_threads": 2,
"n_evaluation_episodes": 256,
"time_limit": 3600,
"lr": 0.001,
"discount_factor": 0.95,
"critic_coef": 1.0,
"entropy_coef": 0.01,
"a2c_coef": 1.0,
"logdir": "./results",
}
exp = Experiment(config, create_env, create_train_env, create_agent)
exp.run()
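# --- Editor's illustrative note (added; the values below are assumptions) ----
# Other Gym tasks can be trained by overriding the relevant config entries
# before the Experiment is constructed, e.g.:
#
#   config.update({"env_name": "MountainCar-v0", "max_episode_steps": 200})
#   Experiment(config, create_env, create_train_env, create_agent).run()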
| #
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures import logging
from rlstructures.env_wrappers import GymEnv, GymEnvInf
from rlstructures.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_recurrent_policy.agent import RecurrentAgent
from tutorial.tutorial_recurrent_policy.a2c import A2C
import gym
from gym.wrappers import TimeLimit
# We define the 'create_env' and 'create_agent' functions in the main file so that they can be pickled when the batcher processes are created
def create_gym_env(env_name):
return gym.make(env_name)
def create_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
envs = []
for k in range(n_envs):
e = create_gym_env(env_name)
e = TimeLimit(e, max_episode_steps=max_episode_steps)
envs.append(e)
return GymEnv(envs, seed)
def create_train_env(n_envs, env_name=None, max_episode_steps=None, seed=None):
envs = []
for k in range(n_envs):
e = create_gym_env(env_name)
e = TimeLimit(e, max_episode_steps=max_episode_steps)
envs.append(e)
return GymEnvInf(envs, seed)
def create_agent(model, n_actions=1):
return RecurrentAgent(model=model, n_actions=n_actions)
class Experiment(A2C):
def __init__(self, config, create_env, create_train_env, create_agent):
super().__init__(config, create_env, create_train_env, create_agent)
if __name__ == "__main__":
# We use spawn mode such that most of the environment will run in multiple processes
import torch.multiprocessing as mp
mp.set_start_method("spawn")
config = {
"env_name": "CartPole-v0",
"a2c_timesteps": 3,
"n_envs": 4,
"max_episode_steps": 100,
"env_seed": 42,
"n_threads": 4,
"n_evaluation_threads": 2,
"n_evaluation_episodes": 256,
"time_limit": 3600,
"lr": 0.001,
"discount_factor": 0.95,
"critic_coef": 1.0,
"entropy_coef": 0.01,
"a2c_coef": 1.0,
"logdir": "./results",
}
exp = Experiment(config, create_env, create_train_env, create_agent)
exp.run() | en | 0.889574 | # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # We write the 'create_env' and 'create_agent' function in the main file to allow these functions to be used with pickle when creating the batcher processes # We use spawn mode such that most of the environment will run in multiple processes | 2.340159 | 2 |
dashboard/gnd-app.py | buchmuseum/GND_Dashboard | 5 | 653 | from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
@st.cache
def load_gnd_top_daten(typ):
gnd_top_df = pd.DataFrame()
for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
gnd_top_df = gnd_top_df.append(pd.read_csv(file, index_col=None))
return gnd_top_df
def sachbegriff_cloud():
# word cloud of the top 100 subject headings for a selectable day among the last 10 working days
st.header('TOP 100 Sachbegriffe pro Tag')
st.write('Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')
files = glob.glob(f'{path}/../stats/*Ts-count.csv')
daten = [x[-23:-13] for x in files]
daten.sort()
daten_filter = st.select_slider('Wählen Sie ein Datum', options=daten, value=daten[-1])
df = pd.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
dict = df.to_dict(orient='records')
worte = {}
for record in dict:
worte.update({record['sachbegriff']:record['count']})
wc = WordCloud(background_color="white", max_words=100, width=2000, height=800, colormap='tab20')
wc.generate_from_frequencies(worte)
return st.image(wc.to_array())
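# Editor's note (added): each daily CSV read above is expected to provide the
# two columns used here, 'sachbegriff' (subject heading) and 'count'; the word
# cloud is generated from that term-to-frequency mapping, so font size scales
# with the day's usage count.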
def wirkungsorte():
# ranking and map of the most frequently used places of activity of all persons in the GND
df = pd.read_csv(f'{path}/wirkungsorte-top50.csv')
df.drop(columns=['id'], inplace=True)
df.rename(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)
st.header('TOP Wirkungsorte von GND-Personen')
st.markdown('Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')
# bar chart
orte_filt = st.slider('Zeige Top …', min_value=3, max_value=len(df), value=10, step=1)
graph_count = alt.Chart(df.nlargest(orte_filt, 'Anzahl', keep='all')).mark_bar().encode(
alt.X('Name:N', sort='y'),
alt.Y('Anzahl'),
alt.Color('Name:N', legend=alt.Legend(columns=2)),
tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
)
st.altair_chart(graph_count, use_container_width=True)
# map
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
scatterplotlayer = pdk.Layer(
"ScatterplotLayer",
df,
pickable=True,
opacity=0.5,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
line_width_min_pixels=1,
get_position='[lon, lat]',
get_radius="Anzahl",
get_fill_color=[255, 140, 0],
get_line_color=[0, 0, 0]
)
st.pydeck_chart(pdk.Deck(
scatterplotlayer,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))
def wirkungsorte_musik():
# analysis of GND music works, music-related persons and their places of activity, filtered by decade between 1400 and 2010, with the derived centres of musical culture shown on a map
musiker_orte = pd.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
st.header('Wirkungszentren der Musik 1400–2010')
st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
limiter = st.slider('Jahresfilter', min_value=1400, max_value=int(musiker_orte['jahrzehnt'].max()), value=(1900), step=10)
musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)]
musik_filt['norm']=(musik_filt['count']-musik_filt['count'].min())/(musik_filt['count'].max()-musik_filt['count'].min())
# map
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
musiker_scatter = pdk.Layer(
"ScatterplotLayer",
musik_filt,
opacity=0.8,
get_position='[lon, lat]',
pickable=True,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
radiusscale=100,
line_width_min_pixels=1,
get_radius="norm*50000",
get_fill_color=[50, 168, 92],
get_line_color=[39, 71, 51]
)
st.pydeck_chart(pdk.Deck(
musiker_scatter,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{name}</b>"}))
st.subheader(f'TOP 10 Wirkungszentren der {limiter}er')
col1, col2 = st.beta_columns(2)
i = 1
for index, row in musik_filt.nlargest(10, 'norm').iterrows():
if i <= 5:
with col1:
st.write(f'{i}. {row["name"]}')
elif i > 5:
with col2:
st.write(f'{i}. {row["name"]}')
i += 1
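# Editor's note (added): the marker radius on the map above comes from a
# min-max normalisation of the per-place counts for the selected decade,
#   norm = (count - min) / (max - min),
# scaled via get_radius="norm*50000", so the busiest centre always gets the
# largest marker regardless of the absolute number of titles.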
def gesamt_entity_count():
# total number of GND entities
with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f:
entities = f'{int(f.read()):,}'
return st.write(f"GND-Entitäten gesamt: {entities.replace(',','.')}")
def relationen():
# top 10 GND relation codes
rels = pd.read_csv(f'{path}/../stats/gnd_codes_all.csv', index_col=False)
st.subheader('Relationen')
st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf).')
rels_filt = st.slider('Zeige Top ...', 5, len(rels), 10, 1)
relation_count = alt.Chart(rels.nlargest(rels_filt, 'count', keep='all')).mark_bar().encode(
alt.X('code', title='Relationierungs-Code', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('code', sort='-y', title='Relationierungscode'),
tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
)
st.altair_chart(relation_count, use_container_width=True)
with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
relations = f'{int(f.read()):,}'
st.write(f"Relationen zwischen Entitäten gesamt: {relations.replace(',','.')}")
def systematik():
# ranking of the most frequently used GND classification notations
classification = pd.read_csv(f'{path}/../stats/gnd_classification_all.csv', index_col=False)
st.subheader('Systematik')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_filt = st.slider('Zeige Top …', 5, len(classification), 10, 1)
classification_count = alt.Chart(classification.nlargest(class_filt, 'count', keep='all')).mark_bar().encode(
alt.X('id', title='Notation', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title="Bezeichnung"),
tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_count, use_container_width=True)
def systematik_ts():
# ranking of the classification notations of Ts (subject heading) records
classification_ts = pd.read_csv(f'{path}/../stats/gnd_classification_Ts_all.csv', index_col=False)
st.subheader('Systematik der Sachbegriffe')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgetragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_ts_filt = st.slider('Zeige TOP …', min_value=5, max_value=len(classification_ts), value=10, step=1)
classification_ts_count = alt.Chart(classification_ts.nlargest(class_ts_filt, 'count', keep='all')).mark_bar().encode(
alt.X('id:N', title='Notation', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Bezeichnung'),
tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_ts_count, use_container_width=True)
def zeitverlauf():
# timeline of GND record creation since January 1972
created_at = pd.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header=0, names=['created_at', 'count'])
st.subheader('Zeitverlauf der GND-Datensatzerstellung')
st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgetragen. Die ersten Sätze stammen aus dem Januar 1972')
created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1)
created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reset_index()).mark_line().encode(
alt.X('created_at:T', title='Erstelldatum'),
alt.Y('count:Q', title='Sätze pro Monat'),
tooltip=['count']
)
return st.altair_chart(created, use_container_width=True)
def entities():
# GND entities by record type and cataloguing level
df = pd.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count'])
df['level'] = df.entity.str[2:3]
df.entity = df.entity.str[:2]
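# Editor's note (added): the raw codes combine record type and cataloguing
# level (e.g. 'Tp1' would be a person record at level 1), so the two slices
# above split them into the 'entity' and 'level' columns used for the
# stacked bar chart below.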
if satzart == 'alle':
entity_count = alt.Chart(df).mark_bar().encode(
alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader('Entitäten und Katalogisierungslevel')
else:
entity_count = alt.Chart(df.loc[df['entity'].str.startswith(satzart[:2])]).mark_bar().encode(
alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader(f'Katalogisierungslevel in Satzart {satzart}')
st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.')
return st.altair_chart(entity_count, use_container_width=True)
def newcomer():
# top 10 entities created within the last 365 days
if satzart == 'alle':
st.subheader(f'TOP 10 GND-Newcomer')
st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = pd.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)
newcomer = alt.Chart(newcomer_daten).mark_bar().encode(
alt.X('gnd_id', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader(f'TOP 10 {satzart} GND-Newcomer')
st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = load_gnd_top_daten('newcomer_top10')
newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:O', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.altair_chart(newcomer, use_container_width=True)
def gnd_top():
# top 10 GND entities in DNB title data, filtered by record type
if satzart == 'alle':
st.subheader(f'TOP 10 GND-Entitäten in DNB-Titeldaten')
top_daten = pd.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None)
gnd_top = alt.Chart(top_daten).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader(f'TOP 10 {satzart} in DNB-Titeldaten')
top_daten = load_gnd_top_daten('top10')
gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine detaillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.')
st.altair_chart(gnd_top, use_container_width=True)
def dnb_links():
# GND links in DNB title data
if satzart == 'alle':
# number of GND links in DNB title data
with open(f"{path}/../stats/title_gnd_links.csv", "r") as f:
links = f'{int(f.read()):,}'
# GND entities linked by automated processes
with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f:
auto_entites = int(f.read())
# GND entities from external data
with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f:
fremd_entities = int(f.read())
# number of intellectually linked GND entities in DNB title data
with open(f"{path}/../stats/title_gnd_links_unique.csv", "r") as f:
uniques = int(f.read())
uniques_str = f'{uniques:,}'
# average number of GND links per DNB title record
with open(f"{path}/../stats/title_gnd_mean.csv", "r") as f:
mean = str(round(float(f.read()),2)).replace('.',',')
st.write(f"{links.replace(',','.')} intellektuell vergebene Verknüpfungen zu {uniques_str.replace(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {mean} GND-Verknüpfungen pro DNB-Titeldatensatz")
entity_df = pd.DataFrame.from_dict({"intellektuell verknüpfte Entitäten": uniques, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reset_index()
entity_df = entity_df.rename(columns={"index":"Datenart", 0:"Anzahl"})
st.subheader('Datenherkunft der GND-Entitäten in DNB-Titeldaten')
st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellen Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellen Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.')
entities = alt.Chart(entity_df).mark_bar().encode(
alt.X('sum(Datenart):N', title='Datenart'),
alt.Y('sum(Anzahl):Q', title='Anzahl'),
color='Datenart',
tooltip='Anzahl:N'
)
st.altair_chart(entities, use_container_width=True)
else:
with open(f"{path}/../stats/title_gnd_mean_{satzart[:2]}.csv", "r") as f:
mean = str(round(float(f.read()),2)).replace('.',',')
st.write(f'Durchschnittlich {mean} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz')
#main
st.title('GND-Dashboard')
# info area at the top
with st.beta_container():
st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.')
with st.beta_expander("Methodik und Datenherkunft"):
st.markdown('''
Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.
Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).
Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.
Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.
Die Daten werden monatlich aktualisiert.
''')
# sidebar with record type filter
st.sidebar.header("Satzart wählen")
satzart = st.sidebar.selectbox(
"Über welche GND-Satzart möchten Sie etwas erfahren?",
('alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')
gnd_allgemein = st.beta_container()
with gnd_allgemein:
st.header('GND Statistik allgemein')
# general statistics depending on the selected record type
if satzart == 'alle':
gesamt_entity_count()
entities()
newcomer()
zeitverlauf()
relationen()
systematik()
else:
entities()
newcomer()
# special widgets for individual record types
if satzart == "Tp - Personen":
wirkungsorte()
elif satzart == "Tg - Geografika":
wirkungsorte_musik()
wirkungsorte()
elif satzart == "Ts - Sachbegriffe":
sachbegriff_cloud()
systematik_ts()
dnb = st.beta_container()
with dnb:
st.header('GND in der Deutschen Nationalbibliothek')
gnd_top()
dnb_links()
streamlit_analytics.stop_tracking() | from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
@st.cache
def load_gnd_top_daten(typ):
gnd_top_df = pd.DataFrame()
for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
gnd_top_df = gnd_top_df.append(pd.read_csv(file, index_col=None))
return gnd_top_df
def sachbegriff_cloud():
# word cloud of the top 100 subject headings for a selectable day among the last 10 working days
st.header('TOP 100 Sachbegriffe pro Tag')
st.write('Wählen Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')
files = glob.glob(f'{path}/../stats/*Ts-count.csv')
daten = [x[-23:-13] for x in files]
daten.sort()
daten_filter = st.select_slider('Wählen Sie ein Datum', options=daten, value=daten[-1])
df = pd.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
dict = df.to_dict(orient='records')
worte = {}
for record in dict:
worte.update({record['sachbegriff']:record['count']})
wc = WordCloud(background_color="white", max_words=100, width=2000, height=800, colormap='tab20')
wc.generate_from_frequencies(worte)
return st.image(wc.to_array())
def wirkungsorte():
# ranking and map of the most frequently used places of activity of all persons in the GND
df = pd.read_csv(f'{path}/wirkungsorte-top50.csv')
df.drop(columns=['id'], inplace=True)
df.rename(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)
st.header('TOP Wirkungsorte von GND-Personen')
st.markdown('Von allen Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')
# bar chart
orte_filt = st.slider('Zeige Top …', min_value=3, max_value=len(df), value=10, step=1)
graph_count = alt.Chart(df.nlargest(orte_filt, 'Anzahl', keep='all')).mark_bar().encode(
alt.X('Name:N', sort='y'),
alt.Y('Anzahl'),
alt.Color('Name:N', legend=alt.Legend(columns=2)),
tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
)
st.altair_chart(graph_count, use_container_width=True)
# map
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
scatterplotlayer = pdk.Layer(
"ScatterplotLayer",
df,
pickable=True,
opacity=0.5,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
line_width_min_pixels=1,
get_position='[lon, lat]',
get_radius="Anzahl",
get_fill_color=[255, 140, 0],
get_line_color=[0, 0, 0]
)
st.pydeck_chart(pdk.Deck(
scatterplotlayer,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))
def wirkungsorte_musik():
# analysis of GND music works, music-related persons and their places of activity, filtered by decade between 1400 and 2010, with the derived centres of musical culture shown on a map
musiker_orte = pd.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
st.header('Wirkungszentren der Musik 1400–2010')
st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
limiter = st.slider('Jahresfilter', min_value=1400, max_value=int(musiker_orte['jahrzehnt'].max()), value=(1900), step=10)
    musik_filt = musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)].copy()
    musik_filt['norm'] = (musik_filt['count']-musik_filt['count'].min())/(musik_filt['count'].max()-musik_filt['count'].min())
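    # Min-max scaling: 'norm' maps the counts of the selected decade to [0, 1], so that
    # get_radius="norm*50000" below yields marker radii relative to the busiest place of that decade.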
#Karte
INITIAL_VIEW_STATE = pdk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
max_zoom=16,
bearing=0
)
musiker_scatter = pdk.Layer(
"ScatterplotLayer",
musik_filt,
opacity=0.8,
get_position='[lon, lat]',
pickable=True,
stroked=True,
filled=True,
radius_min_pixels=1,
radius_max_pixels=100,
radiusscale=100,
line_width_min_pixels=1,
get_radius="norm*50000",
get_fill_color=[50, 168, 92],
get_line_color=[39, 71, 51]
)
st.pydeck_chart(pdk.Deck(
musiker_scatter,
initial_view_state=INITIAL_VIEW_STATE,
map_style=pdk.map_styles.LIGHT,
tooltip={"html": "<b>{name}</b>"}))
st.subheader(f'TOP 10 Wirkungszentren der {limiter}er')
col1, col2 = st.beta_columns(2)
i = 1
for index, row in musik_filt.nlargest(10, 'norm').iterrows():
if i <= 5:
with col1:
st.write(f'{i}. {row["name"]}')
elif i > 5:
with col2:
st.write(f'{i}. {row["name"]}')
i += 1
def gesamt_entity_count():
#Gesamtzahl der GND-Entitäten
with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f:
entities = f'{int(f.read()):,}'
return st.write(f"GND-Entitäten gesamt: {entities.replace(',','.')}")
def relationen():
#Top 10 der GND-Relationierungscodes
rels = pd.read_csv(f'{path}/../stats/gnd_codes_all.csv', index_col=False)
st.subheader('Relationen')
st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pdf).')
rels_filt = st.slider('Zeige Top ...', 5, len(rels), 10, 1)
relation_count = alt.Chart(rels.nlargest(rels_filt, 'count', keep='all')).mark_bar().encode(
alt.X('code', title='Relationierungs-Code', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('code', sort='-y', title='Relationierungscode'),
tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
)
st.altair_chart(relation_count, use_container_width=True)
with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
relations = f'{int(f.read()):,}'
st.write(f"Relationen zwischen Entitäten gesamt: {relations.replace(',','.')}")
def systematik():
#Ranking der meistverwendeten GND-Systematik-Notationen
classification = pd.read_csv(f'{path}/../stats/gnd_classification_all.csv', index_col=False)
st.subheader('Systematik')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_filt = st.slider('Zeige Top …', 5, len(classification), 10, 1)
classification_count = alt.Chart(classification.nlargest(class_filt, 'count', keep='all')).mark_bar().encode(
alt.X('id', title='Notation', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title="Bezeichnung"),
tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_count, use_container_width=True)
def systematik_ts():
#Ranking der Systematik von Ts-Sätzen
classification_ts = pd.read_csv(f'{path}/../stats/gnd_classification_Ts_all.csv', index_col=False)
st.subheader('Systematik der Sachbegriffe')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgetragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_ts_filt = st.slider('Zeige TOP …', min_value=5, max_value=len(classification_ts), value=10, step=1)
classification_ts_count = alt.Chart(classification_ts.nlargest(class_ts_filt, 'count', keep='all')).mark_bar().encode(
alt.X('id:N', title='Notation', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Bezeichnung'),
tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_ts_count, use_container_width=True)
def zeitverlauf():
#zeitverlauf der erstellung der GND-Sätze ab Januar 1972
created_at = pd.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header=0, names=['created_at', 'count'])
st.subheader('Zeitverlauf der GND-Datensatzerstellung')
st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgetragen. Die ersten Sätze stammen aus dem Januar 1972')
created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1)
created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reset_index()).mark_line().encode(
alt.X('created_at:T', title='Erstelldatum'),
alt.Y('count:Q', title='Sätze pro Monat'),
tooltip=['count']
)
return st.altair_chart(created, use_container_width=True)
def entities():
#GND-Entitäten nach Satzart und Katalogisierungslevel
df = pd.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count'])
df['level'] = df.entity.str[2:3]
df.entity = df.entity.str[:2]
if satzart == 'alle':
entity_count = alt.Chart(df).mark_bar().encode(
alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader('Entitäten und Katalogisierungslevel')
else:
entity_count = alt.Chart(df.loc[df['entity'].str.startswith(satzart[:2])]).mark_bar().encode(
alt.X('sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader(f'Katalogisierungslevel in Satzart {satzart}')
    st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.')
return st.altair_chart(entity_count, use_container_width=True)
def newcomer():
#TOP 10 der Entitäten, die in den letzten 365 Tagen erstellt wurden
if satzart == 'alle':
st.subheader(f'TOP 10 GND-Newcomer')
st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = pd.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)
newcomer = alt.Chart(newcomer_daten).mark_bar().encode(
alt.X('gnd_id', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader(f'TOP 10 {satzart} GND-Newcomer')
st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = load_gnd_top_daten('newcomer_top10')
newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:O', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.altair_chart(newcomer, use_container_width=True)
def gnd_top():
#TOP 10 GND-Entitäten in DNB-Titeldaten, nach Satzart gefiltert
if satzart == 'alle':
st.subheader(f'TOP 10 GND-Entitäten in DNB-Titeldaten')
top_daten = pd.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None)
gnd_top = alt.Chart(top_daten).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader(f'TOP 10 {satzart} in DNB-Titeldaten')
top_daten = load_gnd_top_daten('top10')
gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine detaillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.')
st.altair_chart(gnd_top, use_container_width=True)
def dnb_links():
#GND-Verknüpfungen in DNB Titeldaten
if satzart == 'alle':
#Anzahl GND-Verknüpfungen in DNB-Titeldaten
with open(f"{path}/../stats/title_gnd_links.csv", "r") as f:
links = f'{int(f.read()):,}'
#GND-Entitäten maschinell verknüpft
with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f:
auto_entites = int(f.read())
#GND-Entitäten aus Fremddaten
with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f:
fremd_entities = int(f.read())
        #Anzahl der intellektuell verknüpften GND-Entitäten in DNB-Titeldaten
with open(f"{path}/../stats/title_gnd_links_unique.csv", "r") as f:
uniques = int(f.read())
uniques_str = f'{uniques:,}'
#Durchschnittliche Anzahl an GND-Verknüpfungen pro DNB-Titeldatensatz
with open(f"{path}/../stats/title_gnd_mean.csv", "r") as f:
mean = str(round(float(f.read()),2)).replace('.',',')
st.write(f"{links.replace(',','.')} intellektuell vergebene Verknüpfungen zu {uniques_str.replace(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {mean} GND-Verknüpfungen pro DNB-Titeldatensatz")
entity_df = pd.DataFrame.from_dict({"intellektuell verknüpfte Entitäten": uniques, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reset_index()
entity_df = entity_df.rename(columns={"index":"Datenart", 0:"Anzahl"})
st.subheader('Datenherkunft der GND-Entitäten in DNB-Titeldaten')
st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellen Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellen Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.')
entities = alt.Chart(entity_df).mark_bar().encode(
alt.X('sum(Datenart):N', title='Datenart'),
alt.Y('sum(Anzahl):Q', title='Anzahl'),
color='Datenart',
tooltip='Anzahl:N'
)
st.altair_chart(entities, use_container_width=True)
else:
with open(f"{path}/../stats/title_gnd_mean_{satzart[:2]}.csv", "r") as f:
mean = str(round(float(f.read()),2)).replace('.',',')
st.write(f'Durchschnittlich {mean} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz')
#main
st.title('GND-Dashboard')
#infobereich oben
with st.beta_container():
    st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statistiken. Verwenden Sie einen auf Chromium basierenden Browser.')
with st.beta_expander("Methodik und Datenherkunft"):
st.markdown('''
    Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normdatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.
Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).
Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.
Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.
Die Daten werden monatlich aktualisiert.
''')
#sidebar mit satzartenfilter
st.sidebar.header("Satzart wählen")
satzart = st.sidebar.selectbox(
"Über welche GND-Satzart möchten Sie etwas erfahren?",
('alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')
gnd_allgemein = st.beta_container()
with gnd_allgemein:
st.header('GND Statistik allgemein')
#allgemeine statistiken in abhängigkeit der satzart
if satzart == 'alle':
gesamt_entity_count()
entities()
newcomer()
zeitverlauf()
relationen()
systematik()
else:
entities()
newcomer()
#besondere widgets für einzelne satzarten
if satzart == "Tp - Personen":
wirkungsorte()
elif satzart == "Tg - Geografika":
wirkungsorte_musik()
wirkungsorte()
elif satzart == "Ts - Sachbegriffe":
sachbegriff_cloud()
systematik_ts()
dnb = st.beta_container()
with dnb:
st.header('GND in der Deutschen Nationalbibliothek')
gnd_top()
dnb_links()
streamlit_analytics.stop_tracking() | de | 0.980882 | #wordcloud der top 100 sachbegriffe eines auszuwählenden tages der letzten 10 werktage #ranking und karte der meistverwendeten wirkungsorte aller personen in der gnd #Balkendiagramm #Karte #nach jahrzehnten zwischen 1400 und 2010 gefilterte auswertung der GND-Musikwerke, Musik-Personen und Wikrungsorte und daraus abgeleitete Zentren der Musikkultur, dargestellt auf einer Karte #Karte #Gesamtzahl der GND-Entitäten #Top 10 der GND-Relationierungscodes #Ranking der meistverwendeten GND-Systematik-Notationen #Ranking der Systematik von Ts-Sätzen #zeitverlauf der erstellung der GND-Sätze ab Januar 1972 #GND-Entitäten nach Satzart und Katalogisierungslevel #TOP 10 der Entitäten, die in den letzten 365 Tagen erstellt wurden #TOP 10 GND-Entitäten in DNB-Titeldaten, nach Satzart gefiltert #GND-Verknüpfungen in DNB Titeldaten #Anzahl GND-Verknüpfungen in DNB-Titeldaten #GND-Entitäten maschinell verknüpft #GND-Entitäten aus Fremddaten #Anzahl der intellktuell verknüpften GND-Entitäten in DNB-Titeldaten #Durchschnittliche Anzahl an GND-Verknüpfungen pro DNB-Titeldatensatz #main #infoebereich oben Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen. Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden. Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html). Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert. Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden. Die Daten werden monatlich aktualisiert. #sidebar mit satzartenfilter #allgemeine statistiken in abhängigkeit der satzart #besondere widgets für einzelne satzarten | 2.93648 | 3 |
seamo/support/seamo_exceptions.py | amandalynne/Seattle-Mobility-Index | 3 | 654 | <filename>seamo/support/seamo_exceptions.py
"""
Class for all exceptions used in the following scripts
- geocoder.py
- geocoder_input.py
"""
class OverlappingGeographyError(Exception):
def __init__(self, message):
self.message = message
# msg: geodataframe has overlapping polygons representing geographic features
    # please check how shapefiles are processed.
class NoOverlapSpatialJoinError(Exception):
def __init__(self, message):
self.message = message
# msg: geodataframe has overlapping polygons representing geographic features
    # please check how shapefiles are processed.
class NoParkingAvailableError(NoOverlapSpatialJoinError):
def __init__(self, message):
self.message = message
class NoUrbanVillageError(Exception):
def __init__(self, message):
self.message = message
class NotInSeattleError(Exception):
def __init__(self, message):
self.message = message | <filename>seamo/support/seamo_exceptions.py
"""
Class for all exceptions used in the following scripts
- geocoder.py
- geocoder_input.py
"""
class OverlappingGeographyError(Exception):
def __init__(self, message):
self.message = message
# msg: geodataframe has overlapping polygons representing geographic features
    # please check how shapefiles are processed.
class NoOverlapSpatialJoinError(Exception):
def __init__(self, message):
self.message = message
# msg: geodataframe has overlapping polygons representing geographic features
    # please check how shapefiles are processed.
class NoParkingAvailableError(NoOverlapSpatialJoinError):
def __init__(self, message):
self.message = message
class NoUrbanVillageError(Exception):
def __init__(self, message):
self.message = message
class NotInSeattleError(Exception):
def __init__(self, message):
self.message = message | en | 0.905318 | Class for all excpetions used in following scripts - geocoder.py - geocoder_input.py # msg: geodataframe has overlapping polygons representing geographic features # please how shapefiles are processed. # msg: geodataframe has overlapping polygons representing geographic features # please how shapefiles are processed. | 2.538499 | 3 |
vize/150401052/sunucu.py | hasan-se/blm304 | 1 | 655 | #<NAME> 150401052
import os
import sys
import time
from socket import *
from os import system, name
ip = '127.0.0.1'
port = 42
s_soket = socket(AF_INET, SOCK_DGRAM)
s_soket.bind((ip, port))
print("\nSunucu Hazir\n")
kontrol, istemciAdres = s_soket.recvfrom(4096)
s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres)
i, istemciAdres = s_soket.recvfrom(4096)
if(i.decode("utf-8") == "listeleme yap"):
dosyalar = "\n".join(os.listdir())
s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres)
sys.exit()
elif(i.decode("utf-8") == "put yap"):
cevap = s_soket.recvfrom(4096)
if(cevap[0].decode("utf-8") == "mevcut"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
dosyaIcerigi = s_soket.recvfrom(4096)
if(os.path.exists(dosyaIsmi.decode("utf-8")) == True):
s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres)
karar = s_soket.recvfrom(4096)
if(karar[0].decode("utf-8") == "1"):
yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt"
dosyaYeni = open(yeniAd, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
print("\nPUT islemi basariyla gerceklesti..")
else:
dosyaYeni = open(dosyaIsmi, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres)
print("\nPUT islemi basariyla gerceklesti..")
else:
print("\nGirilen adda bir dosya istemcide bulunamadi..")
elif(i.decode("utf-8") == "get yap"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
if (os.path.exists(dosyaIsmi.decode("utf-8")) == True):
dosya = open(dosyaIsmi.decode("utf-8"), "rb")
s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres)
dosyaIcerik = dosya.read()
dosya.close()
s_soket.sendto(dosyaIcerik, istemciAdres)
kontrol = s_soket.recvfrom(4096)
print("\nGET islemi basariyla gerceklesti..")
sys.exit()
else:
print("\n! Bu isimde bir dosya sunucuda mevcut değil")
sys.exit()
elif(i.decode("utf-8") == "bitir"):
s_soket.close()
print("\nSunucu kapandi")
sys.exit() | #<NAME> 150401052
import os
import sys
import time
from socket import *
from os import system, name
ip = '127.0.0.1'
port = 42
s_soket = socket(AF_INET, SOCK_DGRAM)
s_soket.bind((ip, port))
print("\nSunucu Hazir\n")
kontrol, istemciAdres = s_soket.recvfrom(4096)
s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres)
i, istemciAdres = s_soket.recvfrom(4096)
if(i.decode("utf-8") == "listeleme yap"):
dosyalar = "\n".join(os.listdir())
s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres)
sys.exit()
elif(i.decode("utf-8") == "put yap"):
cevap = s_soket.recvfrom(4096)
if(cevap[0].decode("utf-8") == "mevcut"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
dosyaIcerigi = s_soket.recvfrom(4096)
if(os.path.exists(dosyaIsmi.decode("utf-8")) == True):
s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres)
karar = s_soket.recvfrom(4096)
if(karar[0].decode("utf-8") == "1"):
yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt"
dosyaYeni = open(yeniAd, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
print("\nPUT islemi basariyla gerceklesti..")
else:
dosyaYeni = open(dosyaIsmi, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres)
print("\nPUT islemi basariyla gerceklesti..")
else:
print("\nGirilen adda bir dosya istemcide bulunamadi..")
elif(i.decode("utf-8") == "get yap"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
if (os.path.exists(dosyaIsmi.decode("utf-8")) == True):
dosya = open(dosyaIsmi.decode("utf-8"), "rb")
s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres)
dosyaIcerik = dosya.read()
dosya.close()
s_soket.sendto(dosyaIcerik, istemciAdres)
kontrol = s_soket.recvfrom(4096)
print("\nGET islemi basariyla gerceklesti..")
sys.exit()
else:
print("\n! Bu isimde bir dosya sunucuda mevcut değil")
sys.exit()
elif(i.decode("utf-8") == "bitir"):
s_soket.close()
print("\nSunucu kapandi")
sys.exit() | de | 0.365313 | #<NAME> 150401052 | 2.472808 | 2 |
httprunner/compat.py | panyuan209/httprunner | 0 | 656 | <filename>httprunner/compat.py<gh_stars>0
"""
This module handles compatibility issues between testcase format v2 and v3.
Resolves testcase compatibility issues between httprunner 2 and 3.
"""
import os
import sys
from typing import List, Dict, Text, Union, Any
from loguru import logger
from httprunner import exceptions
from httprunner.loader import load_project_meta, convert_relative_project_root_dir
from httprunner.parser import parse_data
from httprunner.utils import sort_dict_by_custom_order
def convert_variables(
raw_variables: Union[Dict, List, Text], test_path: Text
) -> Dict[Text, Any]:
if isinstance(raw_variables, Dict):
return raw_variables
if isinstance(raw_variables, List):
# [{"var1": 1}, {"var2": 2}]
variables: Dict[Text, Any] = {}
for var_item in raw_variables:
if not isinstance(var_item, Dict) or len(var_item) != 1:
raise exceptions.TestCaseFormatError(
f"Invalid variables format: {raw_variables}"
)
variables.update(var_item)
return variables
elif isinstance(raw_variables, Text):
# get variables by function, e.g. ${get_variables()}
project_meta = load_project_meta(test_path)
variables = parse_data(raw_variables, {}, project_meta.functions)
return variables
else:
raise exceptions.TestCaseFormatError(
f"Invalid variables format: {raw_variables}"
)
def _convert_jmespath(raw: Text) -> Text:
if not isinstance(raw, Text):
raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}")
# content.xx/json.xx => body.xx
if raw.startswith("content"):
raw = f"body{raw[len('content'):]}"
elif raw.startswith("json"):
raw = f"body{raw[len('json'):]}"
raw_list = []
for item in raw.split("."):
if "-" in item:
# add quotes for field with separator
# e.g. headers.Content-Type => headers."Content-Type"
item = item.strip('"')
raw_list.append(f'"{item}"')
elif item.isdigit():
# convert lst.0.name to lst[0].name
if len(raw_list) == 0:
logger.error(f"Invalid jmespath: {raw}")
sys.exit(1)
last_item = raw_list.pop()
item = f"{last_item}[{item}]"
raw_list.append(item)
else:
raw_list.append(item)
return ".".join(raw_list)
def _convert_extractors(extractors: Union[List, Dict]) -> Dict:
""" convert extract list(v2) to dict(v3)
Args:
extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}]
Returns:
{"varA": "body.varA", "varB": "body.varB"}
"""
v3_extractors: Dict = {}
if isinstance(extractors, List):
# [{"varA": "content.varA"}, {"varB": "json.varB"}]
for extractor in extractors:
if not isinstance(extractor, Dict):
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in extractor.items():
v3_extractors[k] = v
elif isinstance(extractors, Dict):
# {"varA": "body.varA", "varB": "body.varB"}
v3_extractors = extractors
else:
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in v3_extractors.items():
v3_extractors[k] = _convert_jmespath(v)
return v3_extractors
def _convert_validators(validators: List) -> List:
for v in validators:
if "check" in v and "expect" in v:
# format1: {"check": "content.abc", "assert": "eq", "expect": 201}
v["check"] = _convert_jmespath(v["check"])
elif len(v) == 1:
# format2: {'eq': ['status_code', 201]}
comparator = list(v.keys())[0]
v[comparator][0] = _convert_jmespath(v[comparator][0])
return validators
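# Illustrative conversion of the two supported validator formats (hypothetical data):
#   {"check": "content.abc", "assert": "eq", "expect": 201}
#       -> {"check": "body.abc", "assert": "eq", "expect": 201}
#   {"eq": ["status_code", 201]} stays unchanged, since "status_code" needs no rewrite.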
def _sort_request_by_custom_order(request: Dict) -> Dict:
custom_order = [
"method",
"url",
"params",
"headers",
"cookies",
"data",
"json",
"files",
"timeout",
"allow_redirects",
"proxies",
"verify",
"stream",
"auth",
"cert",
]
return sort_dict_by_custom_order(request, custom_order)
def _sort_step_by_custom_order(step: Dict) -> Dict:
custom_order = [
"name",
"variables",
"request",
"testcase",
"setup_hooks",
"teardown_hooks",
"extract",
"validate",
"validate_script",
]
return sort_dict_by_custom_order(step, custom_order)
def _ensure_step_attachment(step: Dict) -> Dict:
test_dict = {
"name": step["name"],
}
if "variables" in step:
test_dict["variables"] = step["variables"]
if "setup_hooks" in step:
test_dict["setup_hooks"] = step["setup_hooks"]
if "teardown_hooks" in step:
test_dict["teardown_hooks"] = step["teardown_hooks"]
if "extract" in step:
test_dict["extract"] = _convert_extractors(step["extract"])
if "export" in step:
test_dict["export"] = step["export"]
if "validate" in step:
if not isinstance(step["validate"], List):
raise exceptions.TestCaseFormatError(
f'Invalid teststep validate: {step["validate"]}'
)
test_dict["validate"] = _convert_validators(step["validate"])
if "validate_script" in step:
test_dict["validate_script"] = step["validate_script"]
return test_dict
def ensure_testcase_v3_api(api_content: Dict) -> Dict:
logger.info("convert api in v2 to testcase format v3")
teststep = {
"request": _sort_request_by_custom_order(api_content["request"]),
}
teststep.update(_ensure_step_attachment(api_content))
teststep = _sort_step_by_custom_order(teststep)
config = {"name": api_content["name"]}
extract_variable_names: List = list(teststep.get("extract", {}).keys())
if extract_variable_names:
config["export"] = extract_variable_names
return {
"config": config,
"teststeps": [teststep],
}
def ensure_testcase_v3(test_content: Dict) -> Dict:
logger.info("ensure compatibility with testcase format v2")
v3_content = {"config": test_content["config"], "teststeps": []}
if "teststeps" not in test_content:
logger.error(f"Miss teststeps: {test_content}")
sys.exit(1)
if not isinstance(test_content["teststeps"], list):
logger.error(
f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}'
)
sys.exit(1)
for step in test_content["teststeps"]:
teststep = {}
if "request" in step:
teststep["request"] = _sort_request_by_custom_order(step.pop("request"))
elif "api" in step:
teststep["testcase"] = step.pop("api")
elif "testcase" in step:
teststep["testcase"] = step.pop("testcase")
else:
raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}")
teststep.update(_ensure_step_attachment(step))
teststep = _sort_step_by_custom_order(teststep)
v3_content["teststeps"].append(teststep)
return v3_content
def ensure_cli_args(args: List) -> List:
""" ensure compatibility with deprecated cli args in v2
"""
# remove deprecated --failfast
if "--failfast" in args:
logger.warning(f"remove deprecated argument: --failfast")
args.pop(args.index("--failfast"))
# convert --report-file to --html
if "--report-file" in args:
logger.warning(f"replace deprecated argument --report-file with --html")
index = args.index("--report-file")
args[index] = "--html"
args.append("--self-contained-html")
# keep compatibility with --save-tests in v2
if "--save-tests" in args:
logger.warning(
f"generate conftest.py keep compatibility with --save-tests in v2"
)
args.pop(args.index("--save-tests"))
_generate_conftest_for_summary(args)
return args
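# Illustrative call (hypothetical arguments):
#   ensure_cli_args(["--failfast", "--report-file", "report.html", "testcases/"])
#   -> ["--html", "report.html", "testcases/", "--self-contained-html"]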
def _generate_conftest_for_summary(args: List):
for arg in args:
if os.path.exists(arg):
test_path = arg
# FIXME: several test paths maybe specified
break
else:
logger.error(f"No valid test path specified! \nargs: {args}")
sys.exit(1)
conftest_content = '''# NOTICE: Generated By HttpRunner.
import json
import os
import time
import pytest
from loguru import logger
from httprunner.utils import get_platform, ExtendJSONEncoder
@pytest.fixture(scope="session", autouse=True)
def session_fixture(request):
"""setup and teardown each task"""
logger.info(f"start running testcases ...")
start_at = time.time()
yield
logger.info(f"task finished, generate task summary for --save-tests")
summary = {
"success": True,
"stat": {
"testcases": {"total": 0, "success": 0, "fail": 0},
"teststeps": {"total": 0, "failures": 0, "successes": 0},
},
"time": {"start_at": start_at, "duration": time.time() - start_at},
"platform": get_platform(),
"details": [],
}
for item in request.node.items:
testcase_summary = item.instance.get_summary()
summary["success"] &= testcase_summary.success
summary["stat"]["testcases"]["total"] += 1
summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas)
if testcase_summary.success:
summary["stat"]["testcases"]["success"] += 1
summary["stat"]["teststeps"]["successes"] += len(
testcase_summary.step_datas
)
else:
summary["stat"]["testcases"]["fail"] += 1
summary["stat"]["teststeps"]["successes"] += (
len(testcase_summary.step_datas) - 1
)
summary["stat"]["teststeps"]["failures"] += 1
testcase_summary_json = testcase_summary.dict()
testcase_summary_json["records"] = testcase_summary_json.pop("step_datas")
summary["details"].append(testcase_summary_json)
summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}"
summary_dir = os.path.dirname(summary_path)
os.makedirs(summary_dir, exist_ok=True)
with open(summary_path, "w", encoding="utf-8") as f:
json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder)
logger.info(f"generated task summary: {summary_path}")
'''
project_meta = load_project_meta(test_path)
project_root_dir = project_meta.RootDir
conftest_path = os.path.join(project_root_dir, "conftest.py")
test_path = os.path.abspath(test_path)
logs_dir_path = os.path.join(project_root_dir, "logs")
test_path_relative_path = convert_relative_project_root_dir(test_path)
if os.path.isdir(test_path):
file_foder_path = os.path.join(logs_dir_path, test_path_relative_path)
dump_file_name = "all.summary.json"
else:
file_relative_folder_path, test_file = os.path.split(test_path_relative_path)
file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path)
test_file_name, _ = os.path.splitext(test_file)
dump_file_name = f"{test_file_name}.summary.json"
summary_path = os.path.join(file_foder_path, dump_file_name)
conftest_content = conftest_content.replace(
"{{SUMMARY_PATH_PLACEHOLDER}}", summary_path
)
dir_path = os.path.dirname(conftest_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(conftest_path, "w", encoding="utf-8") as f:
f.write(conftest_content)
logger.info("generated conftest.py to generate summary.json")
def ensure_path_sep(path: Text) -> Text:
""" ensure compatibility with different path separators of Linux and Windows
"""
if "/" in path:
path = os.sep.join(path.split("/"))
if "\\" in path:
path = os.sep.join(path.split("\\"))
return path
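# Illustrative call: ensure_path_sep("testcases/demo\\login.yml") yields
# "testcases/demo/login.yml" on Linux/macOS and "testcases\\demo\\login.yml"
# (single backslashes at runtime) on Windows, since "/" and "\" both become os.sep.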
| <filename>httprunner/compat.py<gh_stars>0
"""
This module handles compatibility issues between testcase format v2 and v3.
Resolves testcase compatibility issues between httprunner 2 and 3.
"""
import os
import sys
from typing import List, Dict, Text, Union, Any
from loguru import logger
from httprunner import exceptions
from httprunner.loader import load_project_meta, convert_relative_project_root_dir
from httprunner.parser import parse_data
from httprunner.utils import sort_dict_by_custom_order
def convert_variables(
raw_variables: Union[Dict, List, Text], test_path: Text
) -> Dict[Text, Any]:
if isinstance(raw_variables, Dict):
return raw_variables
if isinstance(raw_variables, List):
# [{"var1": 1}, {"var2": 2}]
variables: Dict[Text, Any] = {}
for var_item in raw_variables:
if not isinstance(var_item, Dict) or len(var_item) != 1:
raise exceptions.TestCaseFormatError(
f"Invalid variables format: {raw_variables}"
)
variables.update(var_item)
return variables
elif isinstance(raw_variables, Text):
# get variables by function, e.g. ${get_variables()}
project_meta = load_project_meta(test_path)
variables = parse_data(raw_variables, {}, project_meta.functions)
return variables
else:
raise exceptions.TestCaseFormatError(
f"Invalid variables format: {raw_variables}"
)
def _convert_jmespath(raw: Text) -> Text:
if not isinstance(raw, Text):
raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}")
# content.xx/json.xx => body.xx
if raw.startswith("content"):
raw = f"body{raw[len('content'):]}"
elif raw.startswith("json"):
raw = f"body{raw[len('json'):]}"
raw_list = []
for item in raw.split("."):
if "-" in item:
# add quotes for field with separator
# e.g. headers.Content-Type => headers."Content-Type"
item = item.strip('"')
raw_list.append(f'"{item}"')
elif item.isdigit():
# convert lst.0.name to lst[0].name
if len(raw_list) == 0:
logger.error(f"Invalid jmespath: {raw}")
sys.exit(1)
last_item = raw_list.pop()
item = f"{last_item}[{item}]"
raw_list.append(item)
else:
raw_list.append(item)
return ".".join(raw_list)
def _convert_extractors(extractors: Union[List, Dict]) -> Dict:
""" convert extract list(v2) to dict(v3)
Args:
extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}]
Returns:
{"varA": "body.varA", "varB": "body.varB"}
"""
v3_extractors: Dict = {}
if isinstance(extractors, List):
# [{"varA": "content.varA"}, {"varB": "json.varB"}]
for extractor in extractors:
if not isinstance(extractor, Dict):
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in extractor.items():
v3_extractors[k] = v
elif isinstance(extractors, Dict):
# {"varA": "body.varA", "varB": "body.varB"}
v3_extractors = extractors
else:
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in v3_extractors.items():
v3_extractors[k] = _convert_jmespath(v)
return v3_extractors
def _convert_validators(validators: List) -> List:
for v in validators:
if "check" in v and "expect" in v:
# format1: {"check": "content.abc", "assert": "eq", "expect": 201}
v["check"] = _convert_jmespath(v["check"])
elif len(v) == 1:
# format2: {'eq': ['status_code', 201]}
comparator = list(v.keys())[0]
v[comparator][0] = _convert_jmespath(v[comparator][0])
return validators
def _sort_request_by_custom_order(request: Dict) -> Dict:
custom_order = [
"method",
"url",
"params",
"headers",
"cookies",
"data",
"json",
"files",
"timeout",
"allow_redirects",
"proxies",
"verify",
"stream",
"auth",
"cert",
]
return sort_dict_by_custom_order(request, custom_order)
def _sort_step_by_custom_order(step: Dict) -> Dict:
custom_order = [
"name",
"variables",
"request",
"testcase",
"setup_hooks",
"teardown_hooks",
"extract",
"validate",
"validate_script",
]
return sort_dict_by_custom_order(step, custom_order)
def _ensure_step_attachment(step: Dict) -> Dict:
test_dict = {
"name": step["name"],
}
if "variables" in step:
test_dict["variables"] = step["variables"]
if "setup_hooks" in step:
test_dict["setup_hooks"] = step["setup_hooks"]
if "teardown_hooks" in step:
test_dict["teardown_hooks"] = step["teardown_hooks"]
if "extract" in step:
test_dict["extract"] = _convert_extractors(step["extract"])
if "export" in step:
test_dict["export"] = step["export"]
if "validate" in step:
if not isinstance(step["validate"], List):
raise exceptions.TestCaseFormatError(
f'Invalid teststep validate: {step["validate"]}'
)
test_dict["validate"] = _convert_validators(step["validate"])
if "validate_script" in step:
test_dict["validate_script"] = step["validate_script"]
return test_dict
def ensure_testcase_v3_api(api_content: Dict) -> Dict:
logger.info("convert api in v2 to testcase format v3")
teststep = {
"request": _sort_request_by_custom_order(api_content["request"]),
}
teststep.update(_ensure_step_attachment(api_content))
teststep = _sort_step_by_custom_order(teststep)
config = {"name": api_content["name"]}
extract_variable_names: List = list(teststep.get("extract", {}).keys())
if extract_variable_names:
config["export"] = extract_variable_names
return {
"config": config,
"teststeps": [teststep],
}
def ensure_testcase_v3(test_content: Dict) -> Dict:
logger.info("ensure compatibility with testcase format v2")
v3_content = {"config": test_content["config"], "teststeps": []}
if "teststeps" not in test_content:
logger.error(f"Miss teststeps: {test_content}")
sys.exit(1)
if not isinstance(test_content["teststeps"], list):
logger.error(
f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}'
)
sys.exit(1)
for step in test_content["teststeps"]:
teststep = {}
if "request" in step:
teststep["request"] = _sort_request_by_custom_order(step.pop("request"))
elif "api" in step:
teststep["testcase"] = step.pop("api")
elif "testcase" in step:
teststep["testcase"] = step.pop("testcase")
else:
raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}")
teststep.update(_ensure_step_attachment(step))
teststep = _sort_step_by_custom_order(teststep)
v3_content["teststeps"].append(teststep)
return v3_content
def ensure_cli_args(args: List) -> List:
""" ensure compatibility with deprecated cli args in v2
"""
# remove deprecated --failfast
if "--failfast" in args:
logger.warning(f"remove deprecated argument: --failfast")
args.pop(args.index("--failfast"))
# convert --report-file to --html
if "--report-file" in args:
logger.warning(f"replace deprecated argument --report-file with --html")
index = args.index("--report-file")
args[index] = "--html"
args.append("--self-contained-html")
# keep compatibility with --save-tests in v2
if "--save-tests" in args:
logger.warning(
f"generate conftest.py keep compatibility with --save-tests in v2"
)
args.pop(args.index("--save-tests"))
_generate_conftest_for_summary(args)
return args
def _generate_conftest_for_summary(args: List):
for arg in args:
if os.path.exists(arg):
test_path = arg
# FIXME: several test paths maybe specified
break
else:
logger.error(f"No valid test path specified! \nargs: {args}")
sys.exit(1)
conftest_content = '''# NOTICE: Generated By HttpRunner.
import json
import os
import time
import pytest
from loguru import logger
from httprunner.utils import get_platform, ExtendJSONEncoder
@pytest.fixture(scope="session", autouse=True)
def session_fixture(request):
"""setup and teardown each task"""
logger.info(f"start running testcases ...")
start_at = time.time()
yield
logger.info(f"task finished, generate task summary for --save-tests")
summary = {
"success": True,
"stat": {
"testcases": {"total": 0, "success": 0, "fail": 0},
"teststeps": {"total": 0, "failures": 0, "successes": 0},
},
"time": {"start_at": start_at, "duration": time.time() - start_at},
"platform": get_platform(),
"details": [],
}
for item in request.node.items:
testcase_summary = item.instance.get_summary()
summary["success"] &= testcase_summary.success
summary["stat"]["testcases"]["total"] += 1
summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas)
if testcase_summary.success:
summary["stat"]["testcases"]["success"] += 1
summary["stat"]["teststeps"]["successes"] += len(
testcase_summary.step_datas
)
else:
summary["stat"]["testcases"]["fail"] += 1
summary["stat"]["teststeps"]["successes"] += (
len(testcase_summary.step_datas) - 1
)
summary["stat"]["teststeps"]["failures"] += 1
testcase_summary_json = testcase_summary.dict()
testcase_summary_json["records"] = testcase_summary_json.pop("step_datas")
summary["details"].append(testcase_summary_json)
summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}"
summary_dir = os.path.dirname(summary_path)
os.makedirs(summary_dir, exist_ok=True)
with open(summary_path, "w", encoding="utf-8") as f:
json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder)
logger.info(f"generated task summary: {summary_path}")
'''
project_meta = load_project_meta(test_path)
project_root_dir = project_meta.RootDir
conftest_path = os.path.join(project_root_dir, "conftest.py")
test_path = os.path.abspath(test_path)
logs_dir_path = os.path.join(project_root_dir, "logs")
test_path_relative_path = convert_relative_project_root_dir(test_path)
if os.path.isdir(test_path):
file_foder_path = os.path.join(logs_dir_path, test_path_relative_path)
dump_file_name = "all.summary.json"
else:
file_relative_folder_path, test_file = os.path.split(test_path_relative_path)
file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path)
test_file_name, _ = os.path.splitext(test_file)
dump_file_name = f"{test_file_name}.summary.json"
summary_path = os.path.join(file_foder_path, dump_file_name)
conftest_content = conftest_content.replace(
"{{SUMMARY_PATH_PLACEHOLDER}}", summary_path
)
dir_path = os.path.dirname(conftest_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(conftest_path, "w", encoding="utf-8") as f:
f.write(conftest_content)
logger.info("generated conftest.py to generate summary.json")
def ensure_path_sep(path: Text) -> Text:
""" ensure compatibility with different path separators of Linux and Windows
"""
if "/" in path:
path = os.sep.join(path.split("/"))
if "\\" in path:
path = os.sep.join(path.split("\\"))
return path
| en | 0.317848 | This module handles compatibility issues between testcase format v2 and v3. 解决httprunner2 和 3 之间测试用例兼容性问题 # [{"var1": 1}, {"var2": 2}] # get variables by function, e.g. ${get_variables()} # content.xx/json.xx => body.xx # add quotes for field with separator # e.g. headers.Content-Type => headers."Content-Type" # convert lst.0.name to lst[0].name convert extract list(v2) to dict(v3) Args: extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}] Returns: {"varA": "body.varA", "varB": "body.varB"} # [{"varA": "content.varA"}, {"varB": "json.varB"}] # {"varA": "body.varA", "varB": "body.varB"} # format1: {"check": "content.abc", "assert": "eq", "expect": 201} # format2: {'eq': ['status_code', 201]} ensure compatibility with deprecated cli args in v2 # remove deprecated --failfast # convert --report-file to --html # keep compatibility with --save-tests in v2 # FIXME: several test paths maybe specified # NOTICE: Generated By HttpRunner. import json import os import time import pytest from loguru import logger from httprunner.utils import get_platform, ExtendJSONEncoder @pytest.fixture(scope="session", autouse=True) def session_fixture(request): """setup and teardown each task""" logger.info(f"start running testcases ...") start_at = time.time() yield logger.info(f"task finished, generate task summary for --save-tests") summary = { "success": True, "stat": { "testcases": {"total": 0, "success": 0, "fail": 0}, "teststeps": {"total": 0, "failures": 0, "successes": 0}, }, "time": {"start_at": start_at, "duration": time.time() - start_at}, "platform": get_platform(), "details": [], } for item in request.node.items: testcase_summary = item.instance.get_summary() summary["success"] &= testcase_summary.success summary["stat"]["testcases"]["total"] += 1 summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas) if testcase_summary.success: summary["stat"]["testcases"]["success"] += 1 summary["stat"]["teststeps"]["successes"] += len( testcase_summary.step_datas ) else: summary["stat"]["testcases"]["fail"] += 1 summary["stat"]["teststeps"]["successes"] += ( len(testcase_summary.step_datas) - 1 ) summary["stat"]["teststeps"]["failures"] += 1 testcase_summary_json = testcase_summary.dict() testcase_summary_json["records"] = testcase_summary_json.pop("step_datas") summary["details"].append(testcase_summary_json) summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}" summary_dir = os.path.dirname(summary_path) os.makedirs(summary_dir, exist_ok=True) with open(summary_path, "w", encoding="utf-8") as f: json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder) logger.info(f"generated task summary: {summary_path}") ensure compatibility with different path separators of Linux and Windows | 2.296743 | 2 |
examples/demo/basic/scatter.py | ContinuumIO/chaco | 3 | 657 | """
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
    # Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
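    # PanTool pans on left-drag; holding "shift" (constrain_key) restricts the pan
    # to one axis. ZoomTool with always_on=False stays inactive until the "z" key
    # brings up the zoom box described in the module docstring.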
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
| """
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
    # Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
| en | 0.68297 | Scatter plot with panning and zooming Shows a scatter plot of a set of random points, with basic Chaco panning and zooming. Interacting with the plot: - Left-mouse-drag pans the plot. - Mouse wheel up and down zooms the plot in and out. - Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow and alt-right-arrow moves you forwards and backwards through the "zoom history". # Major library imports # Enthought library imports # Chaco imports #=============================================================================== # # Create the Chaco plot. #=============================================================================== # Create some data # Create a plot data obect and give it this data # Create the plot # Tweak some of the plot properties # Attach some tools to the plot #=============================================================================== # Attributes to use for the plot view. #=============================================================================== # # Demo class that is used by the demo.py application. #=============================================================================== #--EOF--- | 4.206877 | 4 |
webstr/core/config.py | fbalak/webstr | 3 | 658 | """
Central configuration module of webstr selenium tests.
This module provides configuration options along with default values and
function to redefine values.
"""
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
SELENIUM_LOG_LEVEL = logging.INFO
SCHEME = 'https'
PORT = 443
BROWSER = 'Firefox'
BROWSER_VERSION = ''
BROWSER_PLATFORM = 'ANY'
SELENIUM_SERVER = None
SELENIUM_PORT = 4444
BROWSER_WIDTH = 1280
BROWSER_HEIGHT = 1024
def update_value(key_name, value, force=False):
"""
Update single value of this config module.
"""
this_module = sys.modules[__name__]
key_name = key_name.upper()
# raise AttributeError if we try to define new value (unless force is used)
if not force:
getattr(this_module, key_name)
setattr(this_module, key_name, value)
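# Example usage (hypothetical option names):
#   update_value("browser", "Chrome")        # redefine an existing option
#   update_value("timeout", 30)              # raises AttributeError: unknown option
#   update_value("timeout", 30, force=True)  # explicitly define a new option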
| """
Central configuration module of webstr selenium tests.
This module provides configuration options along with default values and
function to redefine values.
"""
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
SELENIUM_LOG_LEVEL = logging.INFO
SCHEME = 'https'
PORT = 443
BROWSER = 'Firefox'
BROWSER_VERSION = ''
BROWSER_PLATFORM = 'ANY'
SELENIUM_SERVER = None
SELENIUM_PORT = 4444
BROWSER_WIDTH = 1280
BROWSER_HEIGHT = 1024
def update_value(key_name, value, force=False):
"""
Update single value of this config module.
"""
this_module = sys.modules[__name__]
key_name = key_name.upper()
# raise AttributeError if we try to define new value (unless force is used)
if not force:
getattr(this_module, key_name)
setattr(this_module, key_name, value)
| en | 0.760758 | Central configuration module of webstr selenium tests. This module provides configuration options along with default values and function to redefine values. # Copyright 2016 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Update single value of this config module. # raise AttributeError if we try to define new value (unless force is used) | 2.504798 | 3 |
operations/mutations/mutation.py | PiotrBosowski/feat-genes | 0 | 659 | <filename>operations/mutations/mutation.py
import random
class Mutation:
def __init__(self, chrom_mut_chance, gen_mut_chance):
self.chrom_mut_chance = chrom_mut_chance
self.gen_mut_chance = gen_mut_chance
def __call__(self, population):
chroms_to_mutate = random.sample(
population,
round(self.chrom_mut_chance * len(population)))
for chrom in chroms_to_mutate:
genes_to_mutate = random.sample(
range(len(chrom)),
round(self.gen_mut_chance * len(chrom)))
for gt in genes_to_mutate:
chrom[gt] = int(not bool(chrom[gt]))
return population
| <filename>operations/mutations/mutation.py
import random
class Mutation:
def __init__(self, chrom_mut_chance, gen_mut_chance):
self.chrom_mut_chance = chrom_mut_chance
self.gen_mut_chance = gen_mut_chance
def __call__(self, population):
chroms_to_mutate = random.sample(
population,
round(self.chrom_mut_chance * len(population)))
for chrom in chroms_to_mutate:
genes_to_mutate = random.sample(
range(len(chrom)),
round(self.gen_mut_chance * len(chrom)))
for gt in genes_to_mutate:
chrom[gt] = int(not bool(chrom[gt]))
return population
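# Hedged usage sketch (added for illustration, not part of the original file):
# chromosomes are assumed to be lists of 0/1 genes; here roughly half of the
# chromosomes get about 10% of their genes flipped in place.
if __name__ == '__main__':
    population = [[0, 1] * 5 for _ in range(6)]
    mutate = Mutation(chrom_mut_chance=0.5, gen_mut_chance=0.1)
    for chromosome in mutate(population):
        print(chromosome)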
| none | 1 | 3.120772 | 3 |
|
examples/CountLettersInList.py | Ellis0817/Introduction-to-Programming-Using-Python | 0 | 660 | <filename>examples/CountLettersInList.py
import RandomCharacter # Defined in Listing 6.9
def main():
"""Main."""
# Create a list of characters
chars = createList()
# Display the list
print("The lowercase letters are:")
displayList(chars)
# Count the occurrences of each letter
counts = countLetters(chars)
# Display counts
print("The occurrences of each letter are:")
displayCounts(counts)
def createList():
"""Create a list of characters."""
# Create an empty list
chars = []
# Create lowercase letters randomly and add them to the list
for i in range(100):
chars.append(RandomCharacter.getRandomLowerCaseLetter())
# Return the list
return chars
def displayList(chars):
"""Display the list of characters."""
# Display the characters in the list 20 on each line
for i in range(len(chars)):
if (i + 1) % 20 == 0:
print(chars[i])
else:
print(chars[i], end=' ')
def countLetters(chars):
"""Count the occurrences of each letter."""
# Create a list of 26 integers with initial value 0
counts = 26 * [0]
# For each lowercase letter in the list, count it
for i in range(len(chars)):
counts[ord(chars[i]) - ord('a')] += 1
return counts
def displayCounts(counts):
"""Display counts."""
for i in range(len(counts)):
if (i + 1) % 10 == 0:
print(counts[i], chr(i + ord('a')))
else:
print(counts[i], chr(i + ord('a')), end=' ')
print()
main() # Call the main function
| <filename>examples/CountLettersInList.py
import RandomCharacter # Defined in Listing 6.9
def main():
"""Main."""
# Create a list of characters
chars = createList()
# Display the list
print("The lowercase letters are:")
displayList(chars)
# Count the occurrences of each letter
counts = countLetters(chars)
# Display counts
print("The occurrences of each letter are:")
displayCounts(counts)
def createList():
"""Create a list of characters."""
# Create an empty list
chars = []
# Create lowercase letters randomly and add them to the list
for i in range(100):
chars.append(RandomCharacter.getRandomLowerCaseLetter())
# Return the list
return chars
def displayList(chars):
"""Display the list of characters."""
# Display the characters in the list 20 on each line
for i in range(len(chars)):
if (i + 1) % 20 == 0:
print(chars[i])
else:
print(chars[i], end=' ')
def countLetters(chars):
"""Count the occurrences of each letter."""
# Create a list of 26 integers with initial value 0
counts = 26 * [0]
# For each lowercase letter in the list, count it
for i in range(len(chars)):
counts[ord(chars[i]) - ord('a')] += 1
return counts
def displayCounts(counts):
"""Display counts."""
for i in range(len(counts)):
if (i + 1) % 10 == 0:
print(counts[i], chr(i + ord('a')))
else:
print(counts[i], chr(i + ord('a')), end=' ')
print()
main() # Call the main function
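# Hedged sketch (added for illustration, not part of the original listing): the
# RandomCharacter module imported above is Listing 6.9 from the same book; a
# minimal stand-in with the functions used here would look like this if saved as
# RandomCharacter.py next to this script.
#
# from random import randint
#
# def getRandomCharacter(ch1, ch2):
#     """Return a random character between ch1 and ch2."""
#     return chr(randint(ord(ch1), ord(ch2)))
#
# def getRandomLowerCaseLetter():
#     """Return a random lowercase letter."""
#     return getRandomCharacter('a', 'z')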
| en | 0.777494 | # Defined in Listing 6.9 Main. # Create a list of characters # Display the list # Count the occurrences of each letter # Display counts Create a list of characters. # Create an empty list # Create lowercase letters randomly and add them to the list # Return the list Display the list of characters. # Display the characters in the list 20 on each line Count the occurrences of each letter. # Create a list of 26 integers with initial value 0 # For each lowercase letter in the list, count it Display counts. # Call the main function | 4.286845 | 4 |
ddtrace/contrib/vertica/__init__.py | lightstep/dd-trace-py | 5 | 661 | """
The Vertica integration will trace queries made using the vertica-python
library.
Vertica will be automatically instrumented with ``patch_all``, or when using
the ``ls-trace-run`` command.
Vertica is instrumented on import. To instrument Vertica manually use the
``patch`` function. Note the ordering of the following statements::
from ddtrace import patch
patch(vertica=True)
import vertica_python
# use vertica_python like usual
To configure the Vertica integration globally you can use the ``Config`` API::
from ddtrace import config, patch
patch(vertica=True)
config.vertica['service_name'] = 'my-vertica-database'
To configure the Vertica integration on an instance-per-instance basis use the
``Pin`` API::
from ddtrace import Pin, patch, Tracer
patch(vertica=True)
import vertica_python
custom_tracer = Tracer()
conn = vertica_python.connect(**YOUR_VERTICA_CONFIG)
# override the service and tracer to be used
Pin.override(conn, service='myverticaservice', tracer=custom_tracer)
"""
from ...utils.importlib import require_modules
required_modules = ['vertica_python']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch, unpatch
        __all__ = ["patch", "unpatch"]  # __all__ entries must be strings
| """
The Vertica integration will trace queries made using the vertica-python
library.
Vertica will be automatically instrumented with ``patch_all``, or when using
the ``ls-trace-run`` command.
Vertica is instrumented on import. To instrument Vertica manually use the
``patch`` function. Note the ordering of the following statements::
from ddtrace import patch
patch(vertica=True)
import vertica_python
# use vertica_python like usual
To configure the Vertica integration globally you can use the ``Config`` API::
from ddtrace import config, patch
patch(vertica=True)
config.vertica['service_name'] = 'my-vertica-database'
To configure the Vertica integration on an instance-per-instance basis use the
``Pin`` API::
from ddtrace import Pin, patch, Tracer
patch(vertica=True)
import vertica_python
custom_tracer = Tracer()
conn = vertica_python.connect(**YOUR_VERTICA_CONFIG)
# override the service and tracer to be used
Pin.override(conn, service='myverticaservice', tracer=custom_tracer)
"""
from ...utils.importlib import require_modules
required_modules = ['vertica_python']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch, unpatch
        __all__ = ["patch", "unpatch"]  # __all__ entries must be strings
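# Hedged sketch (added for illustration, not part of ddtrace): require_modules is
# used above as a context manager that yields the subset of required modules that
# could not be imported, so patch/unpatch are only exported when vertica_python is
# installed. A simplified stand-alone equivalent could look like this:
from contextlib import contextmanager

@contextmanager
def _require_modules_sketch(modules):
    missing = []
    for module_name in modules:
        try:
            __import__(module_name)
        except ImportError:
            missing.append(module_name)
    yield missing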
| en | 0.595028 | The Vertica integration will trace queries made using the vertica-python library. Vertica will be automatically instrumented with ``patch_all``, or when using the ``ls-trace-run`` command. Vertica is instrumented on import. To instrument Vertica manually use the ``patch`` function. Note the ordering of the following statements:: from ddtrace import patch patch(vertica=True) import vertica_python # use vertica_python like usual To configure the Vertica integration globally you can use the ``Config`` API:: from ddtrace import config, patch patch(vertica=True) config.vertica['service_name'] = 'my-vertica-database' To configure the Vertica integration on an instance-per-instance basis use the ``Pin`` API:: from ddtrace import Pin, patch, Tracer patch(vertica=True) import vertica_python custom_tracer = Tracer() conn = vertica_python.connect(**YOUR_VERTICA_CONFIG) # override the service and tracer to be used Pin.override(conn, service='myverticaservice', tracer=custom_tracer) | 2.246038 | 2 |
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py | kokosing/hue | 5,079 | 662 | <filename>desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
from openid.consumer.discover import OpenIDServiceEndpoint
import datadriven
class BadLinksTestCase(datadriven.DataDrivenTestCase):
cases = [
'',
"http://not.in.a.link.tag/",
'<link rel="openid.server" href="not.in.html.or.head" />',
]
def __init__(self, data):
datadriven.DataDrivenTestCase.__init__(self, data)
self.data = data
def runOneTest(self):
actual = OpenIDServiceEndpoint.fromHTML('http://unused.url/', self.data)
expected = []
self.failUnlessEqual(expected, actual)
def pyUnitTests():
return datadriven.loadTests(__name__)
| <filename>desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
from openid.consumer.discover import OpenIDServiceEndpoint
import datadriven
class BadLinksTestCase(datadriven.DataDrivenTestCase):
cases = [
'',
"http://not.in.a.link.tag/",
'<link rel="openid.server" href="not.in.html.or.head" />',
]
def __init__(self, data):
datadriven.DataDrivenTestCase.__init__(self, data)
self.data = data
def runOneTest(self):
actual = OpenIDServiceEndpoint.fromHTML('http://unused.url/', self.data)
expected = []
self.failUnlessEqual(expected, actual)
def pyUnitTests():
return datadriven.loadTests(__name__)
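# Hedged sketch (added for illustration, not part of the original test): stripped
# of the data-driven harness, each case reduces to the assertion below -- HTML
# without a usable openid <link> in <head> must yield no service endpoints.
def _plain_assertion_sketch():
    for markup in BadLinksTestCase.cases:
        assert OpenIDServiceEndpoint.fromHTML('http://unused.url/', markup) == []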
| none | 1 | 2.431434 | 2 |
|
project/settings/production.py | chiehtu/kissaten | 0 | 663 | <reponame>chiehtu/kissaten<gh_stars>0
from .base import *
SECRET_KEY = get_env_var('SECRET_KEY')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''
USERENA_USE_HTTPS = True
| from .base import *
SECRET_KEY = get_env_var('SECRET_KEY')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''
USERENA_USE_HTTPS = True | none | 1 | 1.546202 | 2 |
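# Hedged sketch (added for illustration, not part of the original settings):
# get_env_var is imported from .base and is assumed to follow the usual fail-fast
# Django pattern below; the name _get_env_var_sketch is purely illustrative.
import os
from django.core.exceptions import ImproperlyConfigured

def _get_env_var_sketch(name):
    try:
        return os.environ[name]
    except KeyError:
        raise ImproperlyConfigured('Set the %s environment variable' % name)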
|
modelflow/graph_viz_from_outputs.py | ModelFlow/modelflow | 6 | 664 | import pandas as pd
import argparse
import json
try:
from graphviz import Digraph
except:
print("Note: Optional graphviz not installed")
def generate_graph(df, graph_format='pdf'):
g = Digraph('ModelFlow', filename='modelflow.gv', engine='neato', format=graph_format)
g.attr(overlap='false')
g.attr(splines='true')
column_names = df.columns
states = []
g.attr('node', shape='ellipse')
for column_name in column_names:
if column_name[:6] == 'state_':
states.append((column_name[6:], column_name))
g.node(column_name[6:])
models = []
g.attr('node', shape='box')
for column_name in column_names:
if column_name[:6] != 'state_':
models.append((column_name.split('_')[0], column_name))
g.node(column_name.split('_')[0])
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
state = '_'.join(parts[1:])[6:-7]
print(parts[0], state, df[column_name].min(),
df[column_name].max())
if df[column_name].min() < 0 and df[column_name].max() <= 0:
g.edge(state, parts[0])
elif df[column_name].min() >= 0 and df[column_name].max() > 0:
g.edge(parts[0], state)
else:
g.edge(parts[0], state)
g.edge(state, parts[0])
if graph_format == 'json':
# TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS
with open('modelflow.gv.json', 'r') as f:
return json.load(f)
else:
g.view()
def generate_react_flow_chart(outputs):
df = pd.DataFrame()
for key, value in outputs['output_states'].items():
df[key] = value['data']
return generate_react_flow_chart_from_df(df)
def generate_react_flow_chart_from_df(df):
column_names = df.columns
nodes = {}
    # Ellipses (state nodes)
for column_name in column_names:
if column_name[:6] == 'state_':
nodes[column_name[6:]] = dict(name=column_name[6:], kind='elipse')
# Boxes
for column_name in column_names:
if column_name[:6] != 'state_':
nodes[column_name.split('_')[0]] = dict(name=column_name.split('_')[0], kind='box')
edges = []
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
name1 = parts[0]
state = '_'.join(parts[1:])[6:-7]
# print(name1, state, df[column_name].min(),
# df[column_name].max())
if df[column_name].min() < 0 and df[column_name].max() <= 0:
edges.append([state, name1, 'one_way'])
elif df[column_name].min() >= 0 and df[column_name].max() > 0:
edges.append([name1, state, 'one_way'])
else:
edges.append([name1, state, 'both'])
return dict(nodes=list(nodes.values()), edges=edges)
def main(args):
df = pd.read_csv(args.output_file)
# generate_graph(df)
generate_react_flow_chart_from_df(df)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Graph Viz')
parser.add_argument('-f', '--output_file', type=str,
help='The output file to generate a graph of', required=True)
args = parser.parse_args()
main(args)
| import pandas as pd
import argparse
import json
try:
from graphviz import Digraph
except:
print("Note: Optional graphviz not installed")
def generate_graph(df, graph_format='pdf'):
g = Digraph('ModelFlow', filename='modelflow.gv', engine='neato', format=graph_format)
g.attr(overlap='false')
g.attr(splines='true')
column_names = df.columns
states = []
g.attr('node', shape='ellipse')
for column_name in column_names:
if column_name[:6] == 'state_':
states.append((column_name[6:], column_name))
g.node(column_name[6:])
models = []
g.attr('node', shape='box')
for column_name in column_names:
if column_name[:6] != 'state_':
models.append((column_name.split('_')[0], column_name))
g.node(column_name.split('_')[0])
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
state = '_'.join(parts[1:])[6:-7]
print(parts[0], state, df[column_name].min(),
df[column_name].max())
if df[column_name].min() < 0 and df[column_name].max() <= 0:
g.edge(state, parts[0])
elif df[column_name].min() >= 0 and df[column_name].max() > 0:
g.edge(parts[0], state)
else:
g.edge(parts[0], state)
g.edge(state, parts[0])
if graph_format == 'json':
# TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS
with open('modelflow.gv.json', 'r') as f:
return json.load(f)
else:
g.view()
def generate_react_flow_chart(outputs):
df = pd.DataFrame()
for key, value in outputs['output_states'].items():
df[key] = value['data']
return generate_react_flow_chart_from_df(df)
def generate_react_flow_chart_from_df(df):
column_names = df.columns
nodes = {}
    # Ellipses (state nodes)
for column_name in column_names:
if column_name[:6] == 'state_':
nodes[column_name[6:]] = dict(name=column_name[6:], kind='elipse')
# Boxes
for column_name in column_names:
if column_name[:6] != 'state_':
nodes[column_name.split('_')[0]] = dict(name=column_name.split('_')[0], kind='box')
edges = []
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
name1 = parts[0]
state = '_'.join(parts[1:])[6:-7]
# print(name1, state, df[column_name].min(),
# df[column_name].max())
if df[column_name].min() < 0 and df[column_name].max() <= 0:
edges.append([state, name1, 'one_way'])
elif df[column_name].min() >= 0 and df[column_name].max() > 0:
edges.append([name1, state, 'one_way'])
else:
edges.append([name1, state, 'both'])
return dict(nodes=list(nodes.values()), edges=edges)
def main(args):
df = pd.read_csv(args.output_file)
# generate_graph(df)
generate_react_flow_chart_from_df(df)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Graph Viz')
parser.add_argument('-f', '--output_file', type=str,
help='The output file to generate a graph of', required=True)
args = parser.parse_args()
main(args)
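# Hedged usage sketch (added for illustration): columns named "state_<name>" become
# ellipse nodes, while flow columns named "<model>_state_<name>_output" become box
# nodes, with edge direction taken from the sign of the flow values. The column
# names below are made up purely to show the convention.
def _demo_react_flow_chart():
    demo = pd.DataFrame({
        'state_battery_kwh': [10.0, 9.5, 9.8],
        'solar_state_battery_kwh_output': [1.0, 1.2, 0.8],
        'heater_state_battery_kwh_output': [-0.5, -0.4, -0.6],
    })
    # expected: nodes battery_kwh (elipse), solar and heater (box);
    # edges solar -> battery_kwh and battery_kwh -> heater
    return generate_react_flow_chart_from_df(demo)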
| en | 0.244667 | # TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS # Elipses # Boxes # print(name1, state, df[column_name].min(), # df[column_name].max()) # generate_graph(df) | 2.789418 | 3 |
src/command/voice_log/chart.py | link1345/Vol-GameClanTools-DiscordBot | 0 | 665 | import discord
import os
import json
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from pprint import pprint
import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
def most_old_Month() :
old_month = 1
labels = []
fileNameList = []
while True :
filetime = datetime.datetime.today() - relativedelta(months=old_month)
m_month = datetime.datetime.strftime(filetime,'%m')
m_year = datetime.datetime.strftime(filetime,'%Y')
filename = CSetting.baseLogFolder + CSetting.JSONPATH_row + m_year + m_month + ".json"
if not os.path.exists( filename ) :
			old_month -= 1 # old_month was advanced by 1 for this probe, so the real value is one less.
break
labels.append( m_year + "/" + m_month )
fileNameList.append( filename )
old_month += 1
return old_month , labels , fileNameList
async def makeOldTimeList( client: discord.Client, MonthFileList:list[str] , IndexLabel:list[str], RoleList: list[int] = CSetting.OneMonthOutput_RoleID ):
all_df = None
for fileName in MonthFileList :
df = await makeTimeList( client, Datafile_path=fileName , RoleList=RoleList)
#print( "test1" )
pprint( df )
if df is None :
break
labelname = IndexLabel[MonthFileList.index(fileName)]
df = df.rename(columns={'time': labelname })
if MonthFileList.index(fileName) == 0 :
all_df = df
else :
df = df.drop(columns=['name'])
all_df = pd.merge(all_df, df , left_index=True, right_index=True)
#all_df = pd.merge(all_df, df , left_index=True)
#df.loc[:,[labelname]]
#pprint(all_df)
return all_df
async def UserRoleMember( client: discord.Client, RoleList: list[int] ) :
"""
	[VC] Collect the members who belong to the specified roles.
	Args:
		client (discord.Client): the client
		RoleList (list[int]): role IDs
	return:
		list[discord.Member]: members who belong to the specified roles
"""
data = []
for guild_item in client.guilds :
		# Refresh the guild data
await guild_item.chunk()
		# If there is no role restriction, collect every member
if len(RoleList) == 0 :
data += guild_item.members
continue
		# Otherwise, collect only the members of the matching roles
for role_item in guild_item.roles :
if role_item.id in RoleList :
data += role_item.members
return data
async def makeTimeList( client: discord.Client, Datafile_path: str , RoleList: list[int]):
"""
	[VC] Aggregate the raw log data and return it as a table.
	Args:
		client (discord.Client): the client
		RoleList (list[int]): role IDs
		mode (string): what identifies a user (UserName or ID)
	return:
		pd.DataFrame: the computed data
"""
	# Get the user list
members = await UserRoleMember(client, RoleList)
	# Extract only the member IDs
def getID(members: list[discord.Member]):
IDlist = []
Namelist = []
for member in members :
IDlist.append( member.id )
Namelist.append( member.name + "#" + member.discriminator )
return IDlist , Namelist
members_IDlist , members_Namelist = getID(members=members)
if members_IDlist is None or members_IDlist == [] :
return None
	# Load the JSON log
orig_TimeData : dict
try :
with open( Datafile_path ) as f:
orig_TimeData = json.load(f)
except :
		CPrint.error_print("The file is not valid JSON")
import traceback
traceback.print_exc()
return None
if orig_TimeData is None :
return None
#df = pd.DataFrame({
# 'start': [None, None],
# 'end': [None, None],
# 'time': [13, 23]},
# index=['ONE', 'TWO']
#)
df_dict = {
'name': members_Namelist,
'start': [None] * len(members),
'exit': [None] * len(members),
'time': [0.0] * len(members),
}
	# Accumulate the time per member
for item in orig_TimeData :
try :
indexNum = members_IDlist.index(item["member.id"])
except ValueError as error :
			# Skip members who are no longer in the current server.
continue
if item["Flag"] == "entry" :
df_dict["start"][indexNum] = item["time"]
if item["Flag"] == "exit" :
			# An exit record without a matching entry record
if df_dict["start"][indexNum] is None :
				# Fallback: treat it as an entry at the start of the month (other handling, e.g. ignoring the session, is still under consideration)
				tmp_startTime = datetime.datetime.now().strftime("%Y/%m/01 00:00:00")
df_dict["start"][indexNum] = tmp_startTime
# --
df_dict["exit"][indexNum] = item["time"]
			# Compute the difference between entry and exit
a_time = datetime.datetime.strptime( df_dict["start"][indexNum] , '%Y/%m/%d %H:%M:%S')
b_time = datetime.datetime.strptime( df_dict["exit"][indexNum] , '%Y/%m/%d %H:%M:%S')
time : float = (b_time - a_time).total_seconds()
#print( "time : " + str(time) )
if time < 0.0 :
df_dict["time"][indexNum] += 0.0
else :
df_dict["time"][indexNum] += time
	# Convert to a DataFrame
df = pd.DataFrame(df_dict,
index=members_IDlist
)
	# Drop the temporary "start" and "exit" columns
df = df.drop(columns=['start','exit'])
	# Convert seconds to hours
df["time"] = df["time"] / 60 / 60
#pprint(df)
return df
| import discord
import os
import json
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from pprint import pprint
import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
def most_old_Month() :
old_month = 1
labels = []
fileNameList = []
while True :
filetime = datetime.datetime.today() - relativedelta(months=old_month)
m_month = datetime.datetime.strftime(filetime,'%m')
m_year = datetime.datetime.strftime(filetime,'%Y')
filename = CSetting.baseLogFolder + CSetting.JSONPATH_row + m_year + m_month + ".json"
if not os.path.exists( filename ) :
			old_month -= 1 # old_month was advanced by 1 for this probe, so the real value is one less.
break
labels.append( m_year + "/" + m_month )
fileNameList.append( filename )
old_month += 1
return old_month , labels , fileNameList
async def makeOldTimeList( client: discord.Client, MonthFileList:list[str] , IndexLabel:list[str], RoleList: list[int] = CSetting.OneMonthOutput_RoleID ):
all_df = None
for fileName in MonthFileList :
df = await makeTimeList( client, Datafile_path=fileName , RoleList=RoleList)
#print( "test1" )
pprint( df )
if df is None :
break
labelname = IndexLabel[MonthFileList.index(fileName)]
df = df.rename(columns={'time': labelname })
if MonthFileList.index(fileName) == 0 :
all_df = df
else :
df = df.drop(columns=['name'])
all_df = pd.merge(all_df, df , left_index=True, right_index=True)
#all_df = pd.merge(all_df, df , left_index=True)
#df.loc[:,[labelname]]
#pprint(all_df)
return all_df
async def UserRoleMember( client: discord.Client, RoleList: list[int] ) :
"""
	[VC] Collect the members who belong to the specified roles.
	Args:
		client (discord.Client): the client
		RoleList (list[int]): role IDs
	return:
		list[discord.Member]: members who belong to the specified roles
"""
data = []
for guild_item in client.guilds :
		# Refresh the guild data
await guild_item.chunk()
		# If there is no role restriction, collect every member
if len(RoleList) == 0 :
data += guild_item.members
continue
		# Otherwise, collect only the members of the matching roles
for role_item in guild_item.roles :
if role_item.id in RoleList :
data += role_item.members
return data
async def makeTimeList( client: discord.Client, Datafile_path: str , RoleList: list[int]):
"""
	[VC] Aggregate the raw log data and return it as a table.
	Args:
		client (discord.Client): the client
		RoleList (list[int]): role IDs
		mode (string): what identifies a user (UserName or ID)
	return:
		pd.DataFrame: the computed data
"""
	# Get the user list
members = await UserRoleMember(client, RoleList)
	# Extract only the member IDs
def getID(members: list[discord.Member]):
IDlist = []
Namelist = []
for member in members :
IDlist.append( member.id )
Namelist.append( member.name + "#" + member.discriminator )
return IDlist , Namelist
members_IDlist , members_Namelist = getID(members=members)
if members_IDlist is None or members_IDlist == [] :
return None
	# Load the JSON log
orig_TimeData : dict
try :
with open( Datafile_path ) as f:
orig_TimeData = json.load(f)
except :
		CPrint.error_print("The file is not valid JSON")
import traceback
traceback.print_exc()
return None
if orig_TimeData is None :
return None
#df = pd.DataFrame({
# 'start': [None, None],
# 'end': [None, None],
# 'time': [13, 23]},
# index=['ONE', 'TWO']
#)
df_dict = {
'name': members_Namelist,
'start': [None] * len(members),
'exit': [None] * len(members),
'time': [0.0] * len(members),
}
	# Accumulate the time per member
for item in orig_TimeData :
try :
indexNum = members_IDlist.index(item["member.id"])
except ValueError as error :
			# Skip members who are no longer in the current server.
continue
if item["Flag"] == "entry" :
df_dict["start"][indexNum] = item["time"]
if item["Flag"] == "exit" :
			# An exit record without a matching entry record
if df_dict["start"][indexNum] is None :
				# Fallback: treat it as an entry at the start of the month (other handling, e.g. ignoring the session, is still under consideration)
				tmp_startTime = datetime.datetime.now().strftime("%Y/%m/01 00:00:00")
df_dict["start"][indexNum] = tmp_startTime
# --
df_dict["exit"][indexNum] = item["time"]
			# Compute the difference between entry and exit
a_time = datetime.datetime.strptime( df_dict["start"][indexNum] , '%Y/%m/%d %H:%M:%S')
b_time = datetime.datetime.strptime( df_dict["exit"][indexNum] , '%Y/%m/%d %H:%M:%S')
time : float = (b_time - a_time).total_seconds()
#print( "time : " + str(time) )
if time < 0.0 :
df_dict["time"][indexNum] += 0.0
else :
df_dict["time"][indexNum] += time
	# Convert to a DataFrame
df = pd.DataFrame(df_dict,
index=members_IDlist
)
	# Drop the temporary "start" and "exit" columns
df = df.drop(columns=['start','exit'])
	# Convert seconds to hours
df["time"] = df["time"] / 60 / 60
#pprint(df)
return df
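# Hedged sketch (added for illustration, not part of the original file): the raw
# JSON log consumed by makeTimeList is a list of records with the keys and the
# '%Y/%m/%d %H:%M:%S' timestamp format used by the parsing code above.
_EXAMPLE_LOG_RECORDS = [
	{"member.id": 123456789012345678, "Flag": "entry", "time": "2022/04/01 20:00:00"},
	{"member.id": 123456789012345678, "Flag": "exit", "time": "2022/04/01 21:30:00"},
]
# For this member, makeTimeList() would report 1.5 hours for the month.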
| ja | 0.987151 | # 調査用に+1してあるので、実際の値は、これにold_monthに-1したものとなる。 #print( "test1" ) #all_df = pd.merge(all_df, df , left_index=True) #df.loc[:,[labelname]] #pprint(all_df) [VC] 指定ロールに参加しているメンバーを抽出する Args: client (discord.Client): クライアント RoleList (list[int]): 役職ID return: list[discord.Member]: 指定ロールに参加しているメンバー # ギルドデータ更新 # ロール制限がなければ、全員分を取ってくる # ロール制限がなければ、該当ロール部を取ってくる [VC] 生のログデータを計算して、表にして返す。 Args: client (discord.Client): クライアント RoleList (list[int]): 役職ID mode (string): ユーザーを示すものは、何か?(UserName or ID) return: pd.DataFrame: 計算済みデータ # ユーザーリスト取得 # IDだけ抽出 # JSON取得 #df = pd.DataFrame({ # 'start': [None, None], # 'end': [None, None], # 'time': [13, 23]}, # index=['ONE', 'TWO'] #) # 計算 # 現在の鯖に、存在しない人は処理しない。 # スタートがないのに、エンドがある場合 # とりあえず、月初めに入室した扱いにする(他の方法も検討中。そもそも入室してない扱いetc..) # -- # 差分計算 #print( "time : " + str(time) ) # DataFrameに変更 # 作業用の"start"と"end"を削除 # 計算 #pprint(df) | 2.652009 | 3 |
5kyu/(5 kyu) Count IP Addresses/(5 kyu) Count IP Addresses.py | e1r0nd/codewars | 49 | 666 | def ips_between(start, end):
calc = lambda n, m: (int(end.split(".")[n]) - int(start.split(".")[n])) * m
return calc(0, 256 * 256 * 256) + calc(1, 256 * 256) + calc(2, 256) + calc(3, 1)
| def ips_between(start, end):
calc = lambda n, m: (int(end.split(".")[n]) - int(start.split(".")[n])) * m
return calc(0, 256 * 256 * 256) + calc(1, 256 * 256) + calc(2, 256) + calc(3, 1)
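# Quick check (added for illustration): each octet difference is weighted by its
# place value (256**3, 256**2, 256, 1), which yields the count of addresses from
# start (inclusive) to end (exclusive).
assert ips_between("10.0.0.0", "10.0.0.50") == 50
assert ips_between("20.0.0.10", "20.0.1.0") == 246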
| none | 1 | 2.957941 | 3 |
|
python/src/vmaf/core/feature_extractor.py | jayholman/vmaf | 40 | 667 | from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class FeatureExtractor(Executor):
"""
FeatureExtractor takes in a list of assets, and run feature extraction on
them, and return a list of corresponding results. A FeatureExtractor must
specify a unique type and version combination (by the TYPE and VERSION
attribute), so that the Result generated by it can be identified.
A derived class of FeatureExtractor must:
1) Override TYPE and VERSION
2) Override _generate_result(self, asset), which call a
command-line executable and generate feature scores in a log file.
3) Override _get_feature_scores(self, asset), which read the feature
scores from the log file, and return the scores in a dictionary format.
For an example, follow VmafFeatureExtractor.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def ATOM_FEATURES(self):
raise NotImplementedError
def _read_result(self, asset):
result = {}
result.update(self._get_feature_scores(asset))
executor_id = self.executor_id
return Result(asset, executor_id, result)
@classmethod
def get_scores_key(cls, atom_feature):
return "{type}_{atom_feature}_scores".format(
type=cls.TYPE, atom_feature=atom_feature)
@classmethod
def get_score_key(cls, atom_feature):
return "{type}_{atom_feature}_score".format(
type=cls.TYPE, atom_feature=atom_feature)
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
atom_feature_scores_dict = {}
atom_feature_idx_dict = {}
for atom_feature in self.ATOM_FEATURES:
atom_feature_scores_dict[atom_feature] = []
atom_feature_idx_dict[atom_feature] = 0
with open(log_file_path, 'rt') as log_file:
for line in log_file.readlines():
for atom_feature in self.ATOM_FEATURES:
re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
mo = re.match(re_template, line)
if mo:
cur_idx = int(mo.group(1))
assert cur_idx == atom_feature_idx_dict[atom_feature]
# parse value, allowing NaN and inf
val = float(mo.group(2))
if np.isnan(val) or np.isinf(val):
val = None
atom_feature_scores_dict[atom_feature].append(val)
atom_feature_idx_dict[atom_feature] += 1
continue
len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
assert len_score != 0
for atom_feature in self.ATOM_FEATURES[1:]:
assert len_score == len(atom_feature_scores_dict[atom_feature]), \
"Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {}
for atom_feature in self.ATOM_FEATURES:
scores_key = self.get_scores_key(atom_feature)
feature_result[scores_key] = atom_feature_scores_dict[atom_feature]
return feature_result
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VmafFeatureExtractor, cls)._post_process_result(result)
# adm2 =
# (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)
adm2_scores_key = cls.get_scores_key('adm2')
adm_num_scores_key = cls.get_scores_key('adm_num')
adm_den_scores_key = cls.get_scores_key('adm_den')
result.result_dict[adm2_scores_key] = list(
(np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /
(np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)
)
# vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3
vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')
vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')
vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')
vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')
vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')
vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')
vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')
vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')
vif_scale0_scores_key = cls.get_scores_key('vif_scale0')
vif_scale1_scores_key = cls.get_scores_key('vif_scale1')
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')
vif_scale3_scores_key = cls.get_scores_key('vif_scale3')
result.result_dict[vif_scale0_scores_key] = list(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key]))
)
result.result_dict[vif_scale1_scores_key] = list(
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key]))
)
result.result_dict[vif_scale2_scores_key] = list(
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key]))
)
result.result_dict[vif_scale3_scores_key] = list(
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
)
# vif2 =
# ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +
# (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0
vif_scores_key = cls.get_scores_key('vif2')
result.result_dict[vif_scores_key] = list(
(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key])) +
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key])) +
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key])) +
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
) / 4.0
)
# adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3
adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')
adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')
adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')
adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')
adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')
adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')
adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')
adm_scale0_scores_key = cls.get_scores_key('adm_scale0')
adm_scale1_scores_key = cls.get_scores_key('adm_scale1')
adm_scale2_scores_key = cls.get_scores_key('adm_scale2')
adm_scale3_scores_key = cls.get_scores_key('adm_scale3')
result.result_dict[adm_scale0_scores_key] = list(
(np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale1_scores_key] = list(
(np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale2_scores_key] = list(
(np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale3_scores_key] = list(
(np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
# adm3 = \
# (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0
adm3_scores_key = cls.get_scores_key('adm3')
result.result_dict[adm3_scores_key] = list(
(
((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))
) / 4.0
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class VifFrameDifferenceFeatureExtractor(FeatureExtractor):
TYPE = "VifDiff_feature"
VERSION = '0.1'
ATOM_FEATURES = ['vifdiff',
'vifdiff_num', 'vifdiff_den',
'vifdiff_num_scale0', 'vifdiff_den_scale0',
'vifdiff_num_scale1', 'vifdiff_den_scale1',
'vifdiff_num_scale2', 'vifdiff_den_scale2',
'vifdiff_num_scale3', 'vifdiff_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)
# vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3
vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')
vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')
vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')
vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')
vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')
vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')
vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')
vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')
vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')
vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')
vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')
vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')
result.result_dict[vifdiff_scale0_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale0_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale0_scores_key]))
)
result.result_dict[vifdiff_scale1_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale1_scores_key]))
)
result.result_dict[vifdiff_scale2_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale2_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale2_scores_key]))
)
result.result_dict[vifdiff_scale3_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale3_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale3_scores_key]))
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class PsnrFeatureExtractor(FeatureExtractor):
TYPE = "PSNR_feature"
VERSION = "1.0"
ATOM_FEATURES = ['psnr']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MomentFeatureExtractor(FeatureExtractor):
TYPE = "Moment_feature"
# VERSION = "1.0" # call executable
VERSION = "1.1" # python only
ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_w, quality_h = asset.quality_width_height
ref_scores_mtx = None
with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:
scores_mtx_list = []
i = 0
for ref_yuv in ref_yuv_reader:
ref_y = ref_yuv[0]
firstm = ref_y.mean()
secondm = ref_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
ref_scores_mtx = np.vstack(scores_mtx_list)
dis_scores_mtx = None
with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:
scores_mtx_list = []
i = 0
for dis_yuv in dis_yuv_reader:
dis_y = dis_yuv[0]
firstm = dis_y.mean()
secondm = dis_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
dis_scores_mtx = np.vstack(scores_mtx_list)
assert ref_scores_mtx is not None and dis_scores_mtx is not None
log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),
'dis_scores_mtx': dis_scores_mtx.tolist()}
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'wt') as log_file:
log_file.write(str(log_dict))
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'rt') as log_file:
log_str = log_file.read()
log_dict = ast.literal_eval(log_str)
ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])
dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])
_, num_ref_features = ref_scores_mtx.shape
assert num_ref_features == 2 # ref1st, ref2nd
_, num_dis_features = dis_scores_mtx.shape
assert num_dis_features == 2 # dis1st, dis2nd
feature_result = {}
feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])
feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])
feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])
feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])
return feature_result
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(MomentFeatureExtractor, cls)._post_process_result(result)
# calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd
refvar_scores_key = cls.get_scores_key('refvar')
ref1st_scores_key = cls.get_scores_key('ref1st')
ref2nd_scores_key = cls.get_scores_key('ref2nd')
disvar_scores_key = cls.get_scores_key('disvar')
dis1st_scores_key = cls.get_scores_key('dis1st')
dis2nd_scores_key = cls.get_scores_key('dis2nd')
get_var = lambda m: m[1] - m[0] * m[0]
result.result_dict[refvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[ref1st_scores_key],
result.result_dict[ref2nd_scores_key])))
result.result_dict[disvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[dis1st_scores_key],
result.result_dict[dis2nd_scores_key])))
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class SsimFeatureExtractor(FeatureExtractor):
TYPE = "SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MsSsimFeatureExtractor(FeatureExtractor):
TYPE = "MS_SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ms_ssim',
'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',
'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',
'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',
'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',
'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',
]
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
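# Hedged sketch (added for illustration, not part of vmaf): the minimal shape of a
# derived FeatureExtractor following the three steps in the base class docstring.
# The TYPE, feature name and log format below are made up; the base class parses
# log lines of the form "<feature>: <frame_idx> <score>".
class ToyFeatureExtractor(FeatureExtractor):

    TYPE = "Toy_feature"
    VERSION = "0.1"

    ATOM_FEATURES = ['toy']

    def _generate_result(self, asset):
        # a real extractor would shell out to an executable here; this sketch just
        # writes one per-frame score in the format the base class expects
        log_file_path = self._get_log_file_path(asset)
        with open(log_file_path, 'wt') as log_file:
            log_file.write("toy: 0 1.0\n")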
| from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class FeatureExtractor(Executor):
"""
FeatureExtractor takes in a list of assets, and run feature extraction on
them, and return a list of corresponding results. A FeatureExtractor must
specify a unique type and version combination (by the TYPE and VERSION
attribute), so that the Result generated by it can be identified.
A derived class of FeatureExtractor must:
1) Override TYPE and VERSION
2) Override _generate_result(self, asset), which call a
command-line executable and generate feature scores in a log file.
3) Override _get_feature_scores(self, asset), which read the feature
scores from the log file, and return the scores in a dictionary format.
For an example, follow VmafFeatureExtractor.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def ATOM_FEATURES(self):
raise NotImplementedError
def _read_result(self, asset):
result = {}
result.update(self._get_feature_scores(asset))
executor_id = self.executor_id
return Result(asset, executor_id, result)
@classmethod
def get_scores_key(cls, atom_feature):
return "{type}_{atom_feature}_scores".format(
type=cls.TYPE, atom_feature=atom_feature)
@classmethod
def get_score_key(cls, atom_feature):
return "{type}_{atom_feature}_score".format(
type=cls.TYPE, atom_feature=atom_feature)
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
atom_feature_scores_dict = {}
atom_feature_idx_dict = {}
for atom_feature in self.ATOM_FEATURES:
atom_feature_scores_dict[atom_feature] = []
atom_feature_idx_dict[atom_feature] = 0
with open(log_file_path, 'rt') as log_file:
for line in log_file.readlines():
for atom_feature in self.ATOM_FEATURES:
re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
mo = re.match(re_template, line)
if mo:
cur_idx = int(mo.group(1))
assert cur_idx == atom_feature_idx_dict[atom_feature]
# parse value, allowing NaN and inf
val = float(mo.group(2))
if np.isnan(val) or np.isinf(val):
val = None
atom_feature_scores_dict[atom_feature].append(val)
atom_feature_idx_dict[atom_feature] += 1
continue
len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
assert len_score != 0
for atom_feature in self.ATOM_FEATURES[1:]:
assert len_score == len(atom_feature_scores_dict[atom_feature]), \
"Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {}
for atom_feature in self.ATOM_FEATURES:
scores_key = self.get_scores_key(atom_feature)
feature_result[scores_key] = atom_feature_scores_dict[atom_feature]
return feature_result
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VmafFeatureExtractor, cls)._post_process_result(result)
# adm2 =
# (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)
adm2_scores_key = cls.get_scores_key('adm2')
adm_num_scores_key = cls.get_scores_key('adm_num')
adm_den_scores_key = cls.get_scores_key('adm_den')
result.result_dict[adm2_scores_key] = list(
(np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /
(np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)
)
# vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3
vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')
vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')
vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')
vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')
vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')
vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')
vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')
vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')
vif_scale0_scores_key = cls.get_scores_key('vif_scale0')
vif_scale1_scores_key = cls.get_scores_key('vif_scale1')
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')
vif_scale3_scores_key = cls.get_scores_key('vif_scale3')
result.result_dict[vif_scale0_scores_key] = list(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key]))
)
result.result_dict[vif_scale1_scores_key] = list(
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key]))
)
result.result_dict[vif_scale2_scores_key] = list(
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key]))
)
result.result_dict[vif_scale3_scores_key] = list(
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
)
# vif2 =
# ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +
# (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0
vif_scores_key = cls.get_scores_key('vif2')
result.result_dict[vif_scores_key] = list(
(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key])) +
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key])) +
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key])) +
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
) / 4.0
)
# adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3
adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')
adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')
adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')
adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')
adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')
adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')
adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')
adm_scale0_scores_key = cls.get_scores_key('adm_scale0')
adm_scale1_scores_key = cls.get_scores_key('adm_scale1')
adm_scale2_scores_key = cls.get_scores_key('adm_scale2')
adm_scale3_scores_key = cls.get_scores_key('adm_scale3')
result.result_dict[adm_scale0_scores_key] = list(
(np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale1_scores_key] = list(
(np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale2_scores_key] = list(
(np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale3_scores_key] = list(
(np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
# adm3 = \
# (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0
adm3_scores_key = cls.get_scores_key('adm3')
result.result_dict[adm3_scores_key] = list(
(
((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))
) / 4.0
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class VifFrameDifferenceFeatureExtractor(FeatureExtractor):
TYPE = "VifDiff_feature"
VERSION = '0.1'
ATOM_FEATURES = ['vifdiff',
'vifdiff_num', 'vifdiff_den',
'vifdiff_num_scale0', 'vifdiff_den_scale0',
'vifdiff_num_scale1', 'vifdiff_den_scale1',
'vifdiff_num_scale2', 'vifdiff_den_scale2',
'vifdiff_num_scale3', 'vifdiff_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)
# vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3
vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')
vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')
vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')
vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')
vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')
vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')
vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')
vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')
vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')
vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')
vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')
vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')
result.result_dict[vifdiff_scale0_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale0_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale0_scores_key]))
)
result.result_dict[vifdiff_scale1_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale1_scores_key]))
)
result.result_dict[vifdiff_scale2_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale2_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale2_scores_key]))
)
result.result_dict[vifdiff_scale3_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale3_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale3_scores_key]))
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class PsnrFeatureExtractor(FeatureExtractor):
TYPE = "PSNR_feature"
VERSION = "1.0"
ATOM_FEATURES = ['psnr']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MomentFeatureExtractor(FeatureExtractor):
TYPE = "Moment_feature"
# VERSION = "1.0" # call executable
VERSION = "1.1" # python only
ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_w, quality_h = asset.quality_width_height
ref_scores_mtx = None
with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:
scores_mtx_list = []
i = 0
for ref_yuv in ref_yuv_reader:
ref_y = ref_yuv[0]
firstm = ref_y.mean()
secondm = ref_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
ref_scores_mtx = np.vstack(scores_mtx_list)
dis_scores_mtx = None
with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:
scores_mtx_list = []
i = 0
for dis_yuv in dis_yuv_reader:
dis_y = dis_yuv[0]
firstm = dis_y.mean()
secondm = dis_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
dis_scores_mtx = np.vstack(scores_mtx_list)
assert ref_scores_mtx is not None and dis_scores_mtx is not None
log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),
'dis_scores_mtx': dis_scores_mtx.tolist()}
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'wt') as log_file:
log_file.write(str(log_dict))
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'rt') as log_file:
log_str = log_file.read()
log_dict = ast.literal_eval(log_str)
ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])
dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])
_, num_ref_features = ref_scores_mtx.shape
assert num_ref_features == 2 # ref1st, ref2nd
_, num_dis_features = dis_scores_mtx.shape
assert num_dis_features == 2 # dis1st, dis2nd
feature_result = {}
feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])
feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])
feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])
feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])
return feature_result
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(MomentFeatureExtractor, cls)._post_process_result(result)
# calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd
refvar_scores_key = cls.get_scores_key('refvar')
ref1st_scores_key = cls.get_scores_key('ref1st')
ref2nd_scores_key = cls.get_scores_key('ref2nd')
disvar_scores_key = cls.get_scores_key('disvar')
dis1st_scores_key = cls.get_scores_key('dis1st')
dis2nd_scores_key = cls.get_scores_key('dis2nd')
get_var = lambda m: m[1] - m[0] * m[0]
result.result_dict[refvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[ref1st_scores_key],
result.result_dict[ref2nd_scores_key])))
result.result_dict[disvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[dis1st_scores_key],
result.result_dict[dis2nd_scores_key])))
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class SsimFeatureExtractor(FeatureExtractor):
TYPE = "SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MsSsimFeatureExtractor(FeatureExtractor):
TYPE = "MS_SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ms_ssim',
'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',
'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',
'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',
'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',
'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',
]
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
| en | 0.687904 | FeatureExtractor takes in a list of assets, and run feature extraction on them, and return a list of corresponding results. A FeatureExtractor must specify a unique type and version combination (by the TYPE and VERSION attribute), so that the Result generated by it can be identified. A derived class of FeatureExtractor must: 1) Override TYPE and VERSION 2) Override _generate_result(self, asset), which call a command-line executable and generate feature scores in a log file. 3) Override _get_feature_scores(self, asset), which read the feature scores from the log file, and return the scores in a dictionary format. For an example, follow VmafFeatureExtractor. # routine to read the feature scores from the log file, and return # the scores in a dictionary format. # parse value, allowing NaN and inf # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 # Modify by moving motion2 to c code # routine to call the command-line executable and generate feature # scores in the log file. # override Executor._post_process_result # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3 # adm3 = \ # (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0 # validate # routine to call the command-line executable and generate feature # scores in the log file. # override Executor._post_process_result # vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3 # validate # routine to call the command-line executable and generate quality # scores in the log file. # VERSION = "1.0" # call executable # python only # routine to call the command-line executable and generate feature # scores in the log file. # routine to read the feature scores from the log file, and return # the scores in a dictionary format. # ref1st, ref2nd # dis1st, dis2nd # override Executor._post_process_result # calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd # validate # VERSION = "1.0" # fix OPT_RANGE_PIXEL_OFFSET = 0 # routine to call the command-line executable and generate quality # scores in the log file. # VERSION = "1.0" # fix OPT_RANGE_PIXEL_OFFSET = 0 # routine to call the command-line executable and generate quality # scores in the log file. | 2.178416 | 2 |
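The comments field above spells out the FeatureExtractor contract: override TYPE and VERSION, write per-frame scores in _generate_result, and read them back in _get_feature_scores. Below is a minimal sketch of a derived extractor following that contract; the class name, feature name and log-file format are illustrative assumptions, not code from the original repository.
class ConstantFeatureExtractor(FeatureExtractor):
    # illustrative subclass only; TYPE/VERSION identify the Result it produces
    TYPE = "Constant_feature"
    VERSION = "0.1"
    ATOM_FEATURES = ['constant']
    def _generate_result(self, asset):
        # write one "constant: <value>" line per frame to the log file
        log_file_path = self._get_log_file_path(asset)
        with open(log_file_path, 'wt') as log_file:
            log_file.write("constant: 1.0\n")
    def _get_feature_scores(self, asset):
        # read the per-frame scores back and key them with get_scores_key
        log_file_path = self._get_log_file_path(asset)
        scores = []
        with open(log_file_path, 'rt') as log_file:
            for line in log_file:
                if line.startswith("constant:"):
                    scores.append(float(line.split(":")[1]))
        return {self.get_scores_key('constant'): scores}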
notebooks/_solutions/pandas_02_basic_operations28.py | rprops/Python_DS-WS | 65 | 668 | df['Age'].hist() #bins=30, log=True | df['Age'].hist() #bins=30, log=True | en | 0.218845 | #bins=30, log=True | 2.062892 | 2 |
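A self-contained version of this solution cell might look like the sketch below; the toy DataFrame is an assumption, and the bins/log keyword arguments come from the commented hint.
import pandas as pd
df = pd.DataFrame({'Age': [22, 35, 58, 41, 29, 63]})
df['Age'].hist()                    # default histogram of the Age column
df['Age'].hist(bins=30, log=True)   # finer bins and a log-scaled count axis, per the hint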
controller/base_service.py | oopsteams/pansite | 0 | 669 | # -*- coding: utf-8 -*-
"""
Created by susy at 2019/11/8
"""
from dao.dao import DataDao
import pytz
from dao.models import PanAccounts
from cfg import PAN_SERVICE, MASTER_ACCOUNT_ID
class BaseService:
def __init__(self):
self.default_tz = pytz.timezone('Asia/Chongqing')
# self.pan_acc: PanAccounts = DataDao.pan_account_list(MASTER_ACCOUNT_ID, False)
| # -*- coding: utf-8 -*-
"""
Created by susy at 2019/11/8
"""
from dao.dao import DataDao
import pytz
from dao.models import PanAccounts
from cfg import PAN_SERVICE, MASTER_ACCOUNT_ID
class BaseService:
def __init__(self):
self.default_tz = pytz.timezone('Asia/Chongqing')
# self.pan_acc: PanAccounts = DataDao.pan_account_list(MASTER_ACCOUNT_ID, False)
| en | 0.714344 | # -*- coding: utf-8 -*- Created by susy at 2019/11/8 # self.pan_acc: PanAccounts = DataDao.pan_account_list(MASTER_ACCOUNT_ID, False) | 2.114922 | 2 |
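As an aside, the default_tz attribute set above can be used to build timezone-aware timestamps; the snippet below is a generic pytz illustration, not code from the pansite project.
from datetime import datetime
import pytz
default_tz = pytz.timezone('Asia/Chongqing')
aware = default_tz.localize(datetime(2019, 11, 8, 12, 0, 0))  # attach the UTC+8 zone
print(aware.isoformat())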
transformerquant/modules/attention/multi_head.py | StateOfTheArt-quant/transformerquant | 22 | 670 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch.nn as nn
from .single import attention
class MultiHeadedAttention(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1):
super().__init__()
assert d_model % nhead ==0
# we assume d_v always equal d_k
self.d_k = d_model // nhead
self.nhead = nhead
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
if mask is not None:
mask = mask.unsqueeze(1)
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(batch_size, -1, self.nhead, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.nhead * self.d_k)
context = self.output_linear(x)
return context#, attn | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch.nn as nn
from .single import attention
class MultiHeadedAttention(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1):
super().__init__()
assert d_model % nhead ==0
# we assume d_v always equal d_k
self.d_k = d_model // nhead
self.nhead = nhead
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
if mask is not None:
mask = mask.unsqueeze(1)
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(batch_size, -1, self.nhead, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.nhead * self.d_k)
context = self.output_linear(x)
return context#, attn | en | 0.773081 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # we assume d_v always equal d_k # 1) Do all the linear projections in batch from d_model => h x d_k # 2) Apply attention on all the projected vectors in batch. # 3) "Concat" using a view and apply a final linear. #, attn | 2.599878 | 3 |
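A minimal usage sketch for the module above; the batch size, sequence length and model width are arbitrary assumptions chosen only to show the expected tensor shapes.
import torch
mha = MultiHeadedAttention(d_model=64, nhead=8, dropout=0.1)
x = torch.randn(2, 10, 64)   # (batch, sequence length, d_model)
context = mha(x, x, x)       # self-attention: query, key and value are the same tensor
print(context.shape)         # torch.Size([2, 10, 64]), same shape as the input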
avatar/generalization.py | Julian-Theis/AVATAR | 7 | 671 | import os, time, argparse
from datetime import datetime
from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.log.exporter.xes import factory as xes_exporter
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.petri.importer import pnml as pnml_importer
from pm4py.evaluation.replay_fitness import factory as replay_factory
from pm4py.evaluation.precision import factory as precision_factory
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
def readFile(f_name1, f_name2, unique=False):
traces = []
skipped = 0
with open(f_name1) as file:
file_contents = file.read()
file_contents = file_contents.split("\n")
print("Number of train traces are:", str(len(file_contents)))
for row in file_contents:
if unique:
if row not in traces:
traces.append(row)
else:
skipped += 1
else:
traces.append(row)
with open(f_name2) as file:
file_contents = file.read()
file_contents = file_contents.split("\n")
print("Number of generated traces are:", str(len(file_contents)))
for row in file_contents:
if unique:
if row not in traces:
traces.append(row)
else:
skipped += 1
else:
traces.append(row)
f_traces = []
for trace in traces:
f_trace = []
t = trace.split(" ")
for i in t:
if i != "" and "<" not in i:
f_trace.append(i)
if len(f_trace) > 0:
f_traces.append(f_trace)
print("Number of traces are:", str(len(f_traces)))
print("Number of skipped traces are:", str(skipped))
return f_traces
def writeToFile(file, lst):
with open(file, 'w') as outfile:
for entry in lst:
outfile.write(str(entry) + "\n")
def convertToCsv(traces, to_path):
lines = []
case = 0
timestamp = 0
line = "concept:name,case:concept:name,time:timestamp"
lines.append(line)
for trace in traces:
for event in trace:
timestamp = timestamp + 1
dt_object = datetime.fromtimestamp(timestamp)
line = str(event) + "_" + "," + str(case) + "," + str(dt_object)
lines.append(line)
case = case + 1
writeToFile(str(to_path), lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--system', help='Which system (e.g. pb_system_5_3)', required=True)
parser.add_argument('-sfx', '--suffix', help='Suffix (chosen epoch, e.g. 1981)', required=True)
parser.add_argument('-j', '--job', help='Job (0/1)', required=True)
parser.add_argument('-pn', '--pn', help='Petri net file to evaluate', required=True)
parser.add_argument('-strategy', '--strategy', help='naive/mh', required=True)
args = parser.parse_args()
system = args.system
suffix = int(args.suffix)
job = args.job
pn = args.pn
strategy = args.strategy
if DATA_PATH is None:
train_file = os.path.join(WORK_PATH, "data", "variants", system + "_train.txt")
gen_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(WORK_PATH, "data", "pns", system, pn)
else:
train_file = os.path.join(DATA_PATH, "variants", system + "_train.txt")
gen_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(DATA_PATH, "pns", system, pn)
""" READ FILES AND CONVERT TO XES """
traces = readFile(train_file,gen_file, unique=True)
convertToCsv(traces=traces, to_path=csv_file)
time.sleep(1)
log = csv_importer.import_event_log(csv_file)
xes_exporter.export_log(log, xes_file)
time.sleep(1)
""" PERFORM MEASUREMENT ON PN AND XES"""
log = xes_importer.import_log(xes_file)
net, initial_marking, final_marking = pnml_importer.import_net(pn_file)
fitness = replay_factory.apply(log, net, initial_marking, final_marking)
print("Fitness=", fitness)
precision = precision_factory.apply(log, net, initial_marking, final_marking)
print("Precision=", precision)
fitness = fitness["log_fitness"]
generalization = 2 * ((fitness * precision) / (fitness + precision))
if strategy == "mh":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using MH SAMPLING on suffix ", str(suffix)," ***")
elif strategy == "naive":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using NAIVE SAMPLING on suffix ", str(suffix), " ***")
else:
raise ValueError("Unknown strategy.")
print("AVATAR Generalization=", generalization) | import os, time, argparse
from datetime import datetime
from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.log.exporter.xes import factory as xes_exporter
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.petri.importer import pnml as pnml_importer
from pm4py.evaluation.replay_fitness import factory as replay_factory
from pm4py.evaluation.precision import factory as precision_factory
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
def readFile(f_name1, f_name2, unique=False):
traces = []
skipped = 0
with open(f_name1) as file:
file_contents = file.read()
file_contents = file_contents.split("\n")
print("Number of train traces are:", str(len(file_contents)))
for row in file_contents:
if unique:
if row not in traces:
traces.append(row)
else:
skipped += 1
else:
traces.append(row)
with open(f_name2) as file:
file_contents = file.read()
file_contents = file_contents.split("\n")
print("Number of generated traces are:", str(len(file_contents)))
for row in file_contents:
if unique:
if row not in traces:
traces.append(row)
else:
skipped += 1
else:
traces.append(row)
f_traces = []
for trace in traces:
f_trace = []
t = trace.split(" ")
for i in t:
if i != "" and "<" not in i:
f_trace.append(i)
if len(f_trace) > 0:
f_traces.append(f_trace)
print("Number of traces are:", str(len(f_traces)))
print("Number of skipped traces are:", str(skipped))
return f_traces
def writeToFile(file, lst):
with open(file, 'w') as outfile:
for entry in lst:
outfile.write(str(entry) + "\n")
def convertToCsv(traces, to_path):
lines = []
case = 0
timestamp = 0
line = "concept:name,case:concept:name,time:timestamp"
lines.append(line)
for trace in traces:
for event in trace:
timestamp = timestamp + 1
dt_object = datetime.fromtimestamp(timestamp)
line = str(event) + "_" + "," + str(case) + "," + str(dt_object)
lines.append(line)
case = case + 1
writeToFile(str(to_path), lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--system', help='Which system (e.g. pb_system_5_3)', required=True)
parser.add_argument('-sfx', '--suffix', help='Suffix (chosen epoch, e.g. 1981)', required=True)
parser.add_argument('-j', '--job', help='Job (0/1)', required=True)
parser.add_argument('-pn', '--pn', help='Petri net file to evaluate', required=True)
parser.add_argument('-strategy', '--strategy', help='naive/mh', required=True)
args = parser.parse_args()
system = args.system
suffix = int(args.suffix)
job = args.job
pn = args.pn
strategy = args.strategy
if DATA_PATH is None:
train_file = os.path.join(WORK_PATH, "data", "variants", system + "_train.txt")
gen_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(WORK_PATH, "data", "pns", system, pn)
else:
train_file = os.path.join(DATA_PATH, "variants", system + "_train.txt")
gen_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(DATA_PATH, "pns", system, pn)
""" READ FILES AND CONVERT TO XES """
traces = readFile(train_file,gen_file, unique=True)
convertToCsv(traces=traces, to_path=csv_file)
time.sleep(1)
log = csv_importer.import_event_log(csv_file)
xes_exporter.export_log(log, xes_file)
time.sleep(1)
""" PERFORM MEASUREMENT ON PN AND XES"""
log = xes_importer.import_log(xes_file)
net, initial_marking, final_marking = pnml_importer.import_net(pn_file)
fitness = replay_factory.apply(log, net, initial_marking, final_marking)
print("Fitness=", fitness)
precision = precision_factory.apply(log, net, initial_marking, final_marking)
print("Precision=", precision)
fitness = fitness["log_fitness"]
generalization = 2 * ((fitness * precision) / (fitness + precision))
if strategy == "mh":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using MH SAMPLING on suffix ", str(suffix)," ***")
elif strategy == "naive":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using NAIVE SAMPLING on suffix ", str(suffix), " ***")
else:
raise ValueError("Unknown strategy.")
print("AVATAR Generalization=", generalization) | en | 0.686189 | READ FILES AND CONVERT TO XES PERFORM MEASUREMENT ON PN AND XES | 2.218027 | 2 |
Introductions/The Rust Programming Language/embed/bindings/embed.py | uqtimes/Rust-SampleCodes | 0 | 672 | # $ python embed.py
from ctypes import cdll
lib = cdll.LoadLibrary("../target/release/libembed.dylib") #=> for Mac
#lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux
lib.process()
print("done!")
| # $ python embed.py
from ctypes import cdll
lib = cdll.LoadLibrary("../target/release/libembed.dylib") #=> for Mac
#lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux
lib.process()
print("done!")
| en | 0.281387 | # $ python embed.py #=> for Mac #lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux | 1.726415 | 2 |
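A platform-aware variant of the loader above could look like the following sketch; the library paths are the ones already given in the original comments, and the sys.platform switch is the only addition.
# $ python embed.py
import sys
from ctypes import cdll
if sys.platform == "darwin":
    lib = cdll.LoadLibrary("../target/release/libembed.dylib")  # macOS build artefact
else:
    lib = cdll.LoadLibrary("../target/release/libembed.so")     # Linux build artefact
lib.process()
print("done!")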
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py | handsome-baby/huaweicloud-sdk-python-v3 | 1 | 673 | # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class ImageClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(ImageClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkimage.v1.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "ImageClient":
raise TypeError("client type error, support client type is ImageClient")
return ClientBuilder(clazz)
def run_celebrity_recognition(self, request):
"""名人识别
分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。
:param RunCelebrityRecognitionRequest request
:return: RunCelebrityRecognitionResponse
"""
return self.run_celebrity_recognition_with_http_info(request)
def run_celebrity_recognition_with_http_info(self, request):
"""名人识别
分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。
:param RunCelebrityRecognitionRequest request
:return: RunCelebrityRecognitionResponse
"""
all_params = ['body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v1.0/image/celebrity-recognition',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RunCelebrityRecognitionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def run_image_tagging(self, request):
"""图像标签
自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。
:param RunImageTaggingRequest request
:return: RunImageTaggingResponse
"""
return self.run_image_tagging_with_http_info(request)
def run_image_tagging_with_http_info(self, request):
"""图像标签
自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。
:param RunImageTaggingRequest request
:return: RunImageTaggingResponse
"""
all_params = ['body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v1.0/image/tagging',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RunImageTaggingResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param response_headers: Header should be added to response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type)
| # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class ImageClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(ImageClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkimage.v1.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "ImageClient":
raise TypeError("client type error, support client type is ImageClient")
return ClientBuilder(clazz)
def run_celebrity_recognition(self, request):
"""名人识别
分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。
:param RunCelebrityRecognitionRequest request
:return: RunCelebrityRecognitionResponse
"""
return self.run_celebrity_recognition_with_http_info(request)
def run_celebrity_recognition_with_http_info(self, request):
"""名人识别
分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。
:param RunCelebrityRecognitionRequest request
:return: RunCelebrityRecognitionResponse
"""
all_params = ['body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v1.0/image/celebrity-recognition',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RunCelebrityRecognitionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def run_image_tagging(self, request):
"""图像标签
自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。
:param RunImageTaggingRequest request
:return: RunImageTaggingResponse
"""
return self.run_image_tagging_with_http_info(request)
def run_image_tagging_with_http_info(self, request):
"""图像标签
自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。
:param RunImageTaggingRequest request
:return: RunImageTaggingResponse
"""
all_params = ['body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v1.0/image/tagging',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RunImageTaggingResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param response_headers: Header should be added to response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type) | zh | 0.286565 | # coding: utf-8 :param configuration: .Configuration object for this client :param pool_threads: The number of threads to use for async requests to the API. More threads means more concurrent API requests. 名人识别 分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。 :param RunCelebrityRecognitionRequest request :return: RunCelebrityRecognitionResponse 名人识别 分析并识别图片中包含的政治人物、明星及网红人物,返回人物信息及人脸坐标。 :param RunCelebrityRecognitionRequest request :return: RunCelebrityRecognitionResponse 图像标签 自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。 :param RunImageTaggingRequest request :return: RunImageTaggingResponse 图像标签 自然图像的语义内容非常丰富,一个图像包含多个标签内容,图像标签服务准确识别自然图片中数百种场景、上千种通用物体及其属性,让智能相册管理、照片检索和分类、基于场景内容或者物体的广告推荐等功能更加直观。使用时用户发送待处理图片,返回图片标签内容及相应置信度。 :param RunImageTaggingRequest request :return: RunImageTaggingResponse Makes the HTTP request and returns deserialized data. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response_type: Response data type. :param response_headers: Header should be added to response data. :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param request_type: Request data type. :return: Return the response directly. | 2.34273 | 2 |
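To make the *_with_http_info flow above concrete, the sketch below mimics how the client gathers parameters from a request object via its attribute_map; DummyRequest is a stand-in for illustration only and is not a class from the SDK.
class DummyRequest:
    # stand-in exposing the same introspection surface the client relies on
    attribute_map = {'body': 'body'}
    def __init__(self, body=None):
        self.body = body

request = DummyRequest(body={'image': '<base64-encoded image>'})
local_var_params = {attr: getattr(request, attr)
                    for attr in request.attribute_map
                    if hasattr(request, attr)}
body_params = local_var_params.get('body')   # becomes the JSON payload of the POST
print(body_params)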
frank_wolfe.py | ebezzam/PolyatomicFW_SPL | 0 | 674 | import numpy as np
from typing import Optional, Any
from pandas import DataFrame
from copy import deepcopy
from abc import abstractmethod
from utils import TimedGenericIterativeAlgorithm
import pycsou.core as pcore
import pycsou.linop as pl
from pycsou.func.penalty import L1Norm
from pycsou.func.loss import SquaredL2Loss
from pycsou.opt.proxalgs import APGD
class GenericFWSolverForLasso(TimedGenericIterativeAlgorithm):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remove_positions: bool = False, remember_iterand: bool = False, decreasing: bool = False,
multi_spikes_threshold: float = .7, multi_spikes: bool = True, reweighting: str = 'ista', t_max: float = None):
self.data = data
self.forwardOp = forwardOp
self.stopping_strategy = stopping_strategy
self.accuracy_threshold = accuracy_threshold
self.multi_spikes = multi_spikes
self.multi_spikes_threshold = multi_spikes_threshold
self.reweighting = reweighting
self.remove_positions = remove_positions
self.decreasing = decreasing
self.dim = self.forwardOp.shape[1]
self.x0 = np.zeros(self.dim)
self.dual_certificate_value = 1 / lambda_factor
self.new_ind = None
self.epsilon = None
self.remember_iterand = remember_iterand
self.iterand_history = []
init_iterand = {'iterand': self.x0, 'positions': np.array([], dtype=int)}
l22_loss = (1 / 2) * SquaredL2Loss(dim=self.forwardOp.shape[0], data=self.data)
self.data_fidelity = l22_loss * self.forwardOp
if lambda_ is None:
lambda_ = lambda_factor * np.abs(self.forwardOp.adjoint(self.data)).max()
self.lambda_ = lambda_
self.penalty = self.lambda_ * L1Norm(dim=self.dim)
objective_functional = self.data_fidelity + self.penalty
self.bound = np.linalg.norm(self.data) ** 2 / (2 * self.lambda_)
self.start = None
if verbose is not None:
self.candidate_new = []
self.actual_new = []
super(GenericFWSolverForLasso, self).__init__(objective_functional=objective_functional,
init_iterand=init_iterand,
max_iter=max_iter, min_iter=min_iter,
accuracy_threshold=accuracy_threshold,
verbose=verbose, t_max=t_max)
def update_iterand(self) -> Any:
self.compute_new_impulse()
res = self.combine_new_impulse()
return res
def compute_new_impulse(self):
dual_certificate = - self.data_fidelity.gradient(self.old_iterand['iterand']) / self.lambda_
d = np.abs(dual_certificate)
if self.multi_spikes:
maxi = np.max(d)
if self.iter == 0:
threshold = self.multi_spikes_threshold * maxi
self.epsilon = (1 - self.multi_spikes_threshold) * maxi
else:
threshold = maxi - (1 / (self.iter + 2)) * self.epsilon
indices = np.where(d > max(threshold, 1.))[0]
# print("Threshold: {} / {}".format(threshold, maxi))
# print('Candidate indices: {}\n'.format(indices.shape))
self.new_ind = np.setdiff1d(indices, self.old_iterand['positions'], assume_unique=True)
if self.verbose is not None:
self.candidate_new.append(indices.shape[0])
self.actual_new.append(self.new_ind.size)
if len(self.new_ind) == 0:
self.new_ind = None
self.dual_certificate_value = max(dual_certificate.min(),
dual_certificate.max(),
key=abs)
else:
self.new_ind = np.argmax(d)
self.dual_certificate_value = dual_certificate[self.new_ind]
if self.new_ind in self.old_iterand['positions']:
self.new_ind = None # already present position
if abs(self.dual_certificate_value) < 1.:
if self.verbose is not None:
print('Warning, dual certificate lower than 1 at iteration {}'.format(self.iter))
@abstractmethod
def combine_new_impulse(self) -> Any:
pass
def update_diagnostics(self):
"""
        Dual certificate value is computed after the iteration
Returns
-------
"""
if self.iter == 0:
self.diagnostics = DataFrame(
columns=['Iter', 'Relative Improvement Objective', 'Relative Improvement Iterand',
'Dual Certificate Value', 'Objective Function'])
self.diagnostics.loc[self.iter, 'Iter'] = self.iter
if np.linalg.norm(self.old_iterand['iterand']) == 0:
self.diagnostics.loc[self.iter, 'Relative Improvement Iterand'] = np.infty
else:
self.diagnostics.loc[self.iter, 'Relative Improvement Iterand'] = np.linalg.norm(
self.old_iterand['iterand'] - self.iterand['iterand']) / np.linalg.norm(
self.old_iterand['iterand'])
self.diagnostics.loc[self.iter, 'Dual Certificate Value'] = self.dual_certificate_value # before iteration
self.diagnostics.loc[self.iter, 'Objective Function'] = self.objective_functional(self.iterand['iterand'])
if self.iter == 0:
self.diagnostics.loc[self.iter, 'Relative Improvement Objective'] = np.infty
else:
self.diagnostics.loc[self.iter, 'Relative Improvement Objective'] = (self.diagnostics.loc[
self.iter - 1,
'Objective Function'] -
self.diagnostics.loc[
self.iter,
'Objective Function']) / \
self.diagnostics.loc[
self.iter - 1,
'Objective Function']
if self.remember_iterand:
self.iterand_history.append(self.iterand['iterand'])
def print_diagnostics(self):
print(dict(self.diagnostics.loc[self.iter]))
def stopping_metric(self):
if self.iter == 0:
return np.infty
elif self.stopping_strategy == 'relative_improvement':
return abs(self.diagnostics.loc[self.iter - 1, 'Relative Improvement Objective'])
elif self.stopping_strategy == 'certificate':
value = self.diagnostics.loc[self.iter - 1, 'Dual Certificate Value']
return abs(abs(value) - 1)
else:
raise ValueError('Stopping strategy must be in ["relative_improvement", "certificate"]')
def restricted_support_lasso(self, active_indices: np.ndarray, accuracy: float, x0: np.ndarray = None, d: float = 75.):
if x0 is None:
x0 = np.zeros(active_indices.shape)
injection = pl.sampling.SubSampling(self.dim, active_indices, dtype=float).get_adjointOp()
restricted_forward = pl.DenseLinearOperator(
self.forwardOp.mat[:, active_indices])
restricted_forward.compute_lipschitz_cst(tol=1e-3)
restricted_data_fidelity = (1 / 2) * SquaredL2Loss(dim=restricted_forward.shape[0], data=self.data) \
* restricted_forward
# restricted_data_fidelity.lipschitz_cst = self.data_fidelity.lipschitz_cst
# restricted_data_fidelity.diff_lipschitz_cst = self.data_fidelity.diff_lipschitz_cst
restricted_regularization = self.lambda_ * L1Norm(dim=restricted_data_fidelity.shape[1])
if self.reweighting == 'fista':
acceleration = 'CD'
tau = None
elif self.reweighting == 'ista':
tau = 1.9 / restricted_data_fidelity.diff_lipschitz_cst
acceleration = None
else:
raise ValueError('Reweighting strategy must be in ["fista", "ista"]')
solver = APGD(dim=restricted_data_fidelity.shape[1], F=restricted_data_fidelity,
G=restricted_regularization, x0=x0, tau=tau,
acceleration=acceleration, verbose=None, accuracy_threshold=accuracy, d=d, max_iter=2000,
min_iter=1)
return injection(solver.iterate()[0]['iterand'])
class VanillaFWSolverForLasso(GenericFWSolverForLasso):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remember_iterand: bool = False, step_size: str = 'optimal', t_max: float = None):
if step_size in ['optimal', 'regular']:
self.step_size = step_size
else:
raise ValueError("Step size strategy must be in ['optimal', 'regular']")
super(VanillaFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_, lambda_factor=lambda_factor,
min_iter=min_iter, max_iter=max_iter,
stopping_strategy=stopping_strategy,
accuracy_threshold=accuracy_threshold, verbose=verbose,
remember_iterand=remember_iterand, multi_spikes=False, t_max=t_max)
def combine_new_impulse(self) -> Any:
iterand = deepcopy(self.old_iterand['iterand'])
if self.new_ind is not None:
new_positions = np.hstack([self.old_iterand['positions'], self.new_ind])
if self.step_size == 'optimal':
gamma = np.dot(self.data_fidelity.gradient(iterand), iterand) + self.lambda_ * (
1. * np.linalg.norm(iterand, 1) + (np.abs(self.dual_certificate_value) - 1.) * self.bound)
gamma /= np.linalg.norm(self.forwardOp.mat[:, self.new_ind] * self.bound * np.sign(
self.dual_certificate_value) - self.forwardOp @ iterand, 2) ** 2
else:
gamma = 2/(self.iter + 3)
else:
new_positions = self.old_iterand['positions']
if self.step_size == 'optimal':
gamma = np.dot(self.data_fidelity.gradient(iterand), iterand) + self.lambda_ * np.linalg.norm(iterand, 1)
gamma /= np.linalg.norm(self.forwardOp @ iterand, 2) ** 2
else:
gamma = 2/(self.iter + 3)
if not 0 < gamma < 1:
gamma = np.clip(gamma, 0., 1.)
iterand *= (1 - gamma)
if self.new_ind is not None:
iterand[self.new_ind] += gamma * np.sign(self.dual_certificate_value) * self.bound
return {'iterand': iterand, 'positions': new_positions}
class FullyCorrectiveFWSolverForLasso(VanillaFWSolverForLasso):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remember_iterand: bool = False, remove_positions: bool = False, reweighting_prec: float = 1e-4,
reweighting: str = 'fista', t_max: float = None):
self.remove_positions = remove_positions
self.reweighting_prec = reweighting_prec
super(FullyCorrectiveFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_,
lambda_factor=lambda_factor,
min_iter=min_iter, max_iter=max_iter,
stopping_strategy=stopping_strategy,
accuracy_threshold=accuracy_threshold, verbose=verbose,
remember_iterand=remember_iterand, t_max=t_max)
self.reweighting = reweighting
self.last_weight = self.bound
def combine_new_impulse(self) -> Any:
iterand = deepcopy(self.old_iterand['iterand'])
if self.new_ind is not None:
new_positions = np.unique(np.hstack([self.old_iterand['positions'], self.new_ind]))
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(np.hstack([iterand.nonzero()[0], self.new_ind]))
else:
active_indices = new_positions
else:
new_positions = self.old_iterand['positions']
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(iterand.nonzero()[0])
else:
active_indices = new_positions
if active_indices.shape[0] > 1:
iterand[self.new_ind] = np.sign(self.dual_certificate_value) * self.last_weight
x0 = iterand[active_indices]
iterand = self.restricted_support_lasso(active_indices, self.reweighting_prec, x0=x0)
if self.new_ind is not None:
self.last_weight = iterand[self.new_ind]
else:
tmp = np.zeros(self.dim)
tmp[active_indices] = 1.
column = self.forwardOp(tmp)
iterand[active_indices] = np.dot(self.data, column) / (np.linalg.norm(column, 2) ** 2)
self.last_weight = iterand[active_indices]
overvalue = np.abs(iterand) > self.bound
if overvalue.sum() > 0:
print("Overvalue at coordinates {}".format(np.arange(overvalue.shape[0])[overvalue]))
iterand[overvalue] = np.sign(iterand[overvalue]) * self.bound
return {'iterand': iterand, 'positions': new_positions}
class PolyatomicFWSolverForLasso(GenericFWSolverForLasso):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remove_positions: bool = False, remember_iterand: bool = False, final_reweighting_prec: float = 1e-4,
init_reweighting_prec: float = .2, decreasing: bool = False, multi_spikes_threshold: float = .7, t_max: float = None):
self.remove_positions = remove_positions
self.reweighting_prec = init_reweighting_prec
self.init_reweighting_prec = init_reweighting_prec
self.decreasing = decreasing
self.final_reweighting_prec = final_reweighting_prec
super(PolyatomicFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_,
lambda_factor=lambda_factor,
min_iter=min_iter, max_iter=max_iter,
stopping_strategy=stopping_strategy,
accuracy_threshold=accuracy_threshold,
verbose=verbose,
remember_iterand=remember_iterand,
multi_spikes=True,
multi_spikes_threshold=multi_spikes_threshold,
reweighting='ista', t_max=t_max)
def combine_new_impulse(self):
iterand = deepcopy(self.old_iterand['iterand'])
if self.new_ind is not None:
new_positions = np.unique(np.hstack([self.old_iterand['positions'], self.new_ind]))
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(np.hstack([iterand.nonzero()[0], self.new_ind]))
else:
active_indices = new_positions
else:
new_positions = self.old_iterand['positions']
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(iterand.nonzero()[0])
else:
active_indices = new_positions
if active_indices.shape[0] > 1:
x0 = iterand[active_indices]
iterand = self.restricted_support_lasso(active_indices, self.reweighting_prec, x0=x0)
else:
tmp = np.zeros(self.dim)
tmp[active_indices] = 1.
column = self.forwardOp(tmp)
iterand[active_indices] = np.dot(self.data, column) / (np.linalg.norm(column, 2) ** 2)
overvalue = np.abs(iterand) > self.bound
if overvalue.sum() > 0: #Sanity check, never been triggered in practice
print("Overvalue at coordinates {}".format(np.arange(overvalue.shape[0])[overvalue]))
iterand[overvalue] = np.sign(iterand[overvalue]) * self.bound
if self.decreasing:
self.reweighting_prec = self.init_reweighting_prec / (self.iter + 1)
self.reweighting_prec = max(self.reweighting_prec, self.final_reweighting_prec)
return {'iterand': iterand, 'positions': new_positions}
| import numpy as np
from typing import Optional, Any
from pandas import DataFrame
from copy import deepcopy
from abc import abstractmethod
from utils import TimedGenericIterativeAlgorithm
import pycsou.core as pcore
import pycsou.linop as pl
from pycsou.func.penalty import L1Norm
from pycsou.func.loss import SquaredL2Loss
from pycsou.opt.proxalgs import APGD
class GenericFWSolverForLasso(TimedGenericIterativeAlgorithm):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remove_positions: bool = False, remember_iterand: bool = False, decreasing: bool = False,
multi_spikes_threshold: float = .7, multi_spikes: bool = True, reweighting: str = 'ista', t_max: float = None):
self.data = data
self.forwardOp = forwardOp
self.stopping_strategy = stopping_strategy
self.accuracy_threshold = accuracy_threshold
self.multi_spikes = multi_spikes
self.multi_spikes_threshold = multi_spikes_threshold
self.reweighting = reweighting
self.remove_positions = remove_positions
self.decreasing = decreasing
self.dim = self.forwardOp.shape[1]
self.x0 = np.zeros(self.dim)
self.dual_certificate_value = 1 / lambda_factor
self.new_ind = None
self.epsilon = None
self.remember_iterand = remember_iterand
self.iterand_history = []
init_iterand = {'iterand': self.x0, 'positions': np.array([], dtype=int)}
l22_loss = (1 / 2) * SquaredL2Loss(dim=self.forwardOp.shape[0], data=self.data)
self.data_fidelity = l22_loss * self.forwardOp
if lambda_ is None:
lambda_ = lambda_factor * np.abs(self.forwardOp.adjoint(self.data)).max()
self.lambda_ = lambda_
self.penalty = self.lambda_ * L1Norm(dim=self.dim)
objective_functional = self.data_fidelity + self.penalty
self.bound = np.linalg.norm(self.data) ** 2 / (2 * self.lambda_)
self.start = None
if verbose is not None:
self.candidate_new = []
self.actual_new = []
super(GenericFWSolverForLasso, self).__init__(objective_functional=objective_functional,
init_iterand=init_iterand,
max_iter=max_iter, min_iter=min_iter,
accuracy_threshold=accuracy_threshold,
verbose=verbose, t_max=t_max)
def update_iterand(self) -> Any:
self.compute_new_impulse()
res = self.combine_new_impulse()
return res
def compute_new_impulse(self):
dual_certificate = - self.data_fidelity.gradient(self.old_iterand['iterand']) / self.lambda_
d = np.abs(dual_certificate)
if self.multi_spikes:
maxi = np.max(d)
if self.iter == 0:
threshold = self.multi_spikes_threshold * maxi
self.epsilon = (1 - self.multi_spikes_threshold) * maxi
else:
threshold = maxi - (1 / (self.iter + 2)) * self.epsilon
indices = np.where(d > max(threshold, 1.))[0]
# print("Threshold: {} / {}".format(threshold, maxi))
# print('Candidate indices: {}\n'.format(indices.shape))
self.new_ind = np.setdiff1d(indices, self.old_iterand['positions'], assume_unique=True)
if self.verbose is not None:
self.candidate_new.append(indices.shape[0])
self.actual_new.append(self.new_ind.size)
if len(self.new_ind) == 0:
self.new_ind = None
self.dual_certificate_value = max(dual_certificate.min(),
dual_certificate.max(),
key=abs)
else:
self.new_ind = np.argmax(d)
self.dual_certificate_value = dual_certificate[self.new_ind]
if self.new_ind in self.old_iterand['positions']:
self.new_ind = None # already present position
if abs(self.dual_certificate_value) < 1.:
if self.verbose is not None:
print('Warning, dual certificate lower than 1 at iteration {}'.format(self.iter))
@abstractmethod
def combine_new_impulse(self) -> Any:
pass
def update_diagnostics(self):
"""
        Dual certificate value is computed after the iteration
Returns
-------
"""
if self.iter == 0:
self.diagnostics = DataFrame(
columns=['Iter', 'Relative Improvement Objective', 'Relative Improvement Iterand',
'Dual Certificate Value', 'Objective Function'])
self.diagnostics.loc[self.iter, 'Iter'] = self.iter
if np.linalg.norm(self.old_iterand['iterand']) == 0:
self.diagnostics.loc[self.iter, 'Relative Improvement Iterand'] = np.infty
else:
self.diagnostics.loc[self.iter, 'Relative Improvement Iterand'] = np.linalg.norm(
self.old_iterand['iterand'] - self.iterand['iterand']) / np.linalg.norm(
self.old_iterand['iterand'])
self.diagnostics.loc[self.iter, 'Dual Certificate Value'] = self.dual_certificate_value # before iteration
self.diagnostics.loc[self.iter, 'Objective Function'] = self.objective_functional(self.iterand['iterand'])
if self.iter == 0:
self.diagnostics.loc[self.iter, 'Relative Improvement Objective'] = np.infty
else:
self.diagnostics.loc[self.iter, 'Relative Improvement Objective'] = (self.diagnostics.loc[
self.iter - 1,
'Objective Function'] -
self.diagnostics.loc[
self.iter,
'Objective Function']) / \
self.diagnostics.loc[
self.iter - 1,
'Objective Function']
if self.remember_iterand:
self.iterand_history.append(self.iterand['iterand'])
def print_diagnostics(self):
print(dict(self.diagnostics.loc[self.iter]))
def stopping_metric(self):
if self.iter == 0:
return np.infty
elif self.stopping_strategy == 'relative_improvement':
return abs(self.diagnostics.loc[self.iter - 1, 'Relative Improvement Objective'])
elif self.stopping_strategy == 'certificate':
value = self.diagnostics.loc[self.iter - 1, 'Dual Certificate Value']
return abs(abs(value) - 1)
else:
raise ValueError('Stopping strategy must be in ["relative_improvement", "certificate"]')
def restricted_support_lasso(self, active_indices: np.ndarray, accuracy: float, x0: np.ndarray = None, d: float = 75.):
if x0 is None:
x0 = np.zeros(active_indices.shape)
injection = pl.sampling.SubSampling(self.dim, active_indices, dtype=float).get_adjointOp()
restricted_forward = pl.DenseLinearOperator(
self.forwardOp.mat[:, active_indices])
restricted_forward.compute_lipschitz_cst(tol=1e-3)
restricted_data_fidelity = (1 / 2) * SquaredL2Loss(dim=restricted_forward.shape[0], data=self.data) \
* restricted_forward
# restricted_data_fidelity.lipschitz_cst = self.data_fidelity.lipschitz_cst
# restricted_data_fidelity.diff_lipschitz_cst = self.data_fidelity.diff_lipschitz_cst
restricted_regularization = self.lambda_ * L1Norm(dim=restricted_data_fidelity.shape[1])
if self.reweighting == 'fista':
acceleration = 'CD'
tau = None
elif self.reweighting == 'ista':
tau = 1.9 / restricted_data_fidelity.diff_lipschitz_cst
acceleration = None
else:
raise ValueError('Reweighting strategy must be in ["fista", "ista"]')
solver = APGD(dim=restricted_data_fidelity.shape[1], F=restricted_data_fidelity,
G=restricted_regularization, x0=x0, tau=tau,
acceleration=acceleration, verbose=None, accuracy_threshold=accuracy, d=d, max_iter=2000,
min_iter=1)
return injection(solver.iterate()[0]['iterand'])
class VanillaFWSolverForLasso(GenericFWSolverForLasso):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remember_iterand: bool = False, step_size: str = 'optimal', t_max: float = None):
if step_size in ['optimal', 'regular']:
self.step_size = step_size
else:
raise ValueError("Step size strategy must be in ['optimal', 'regular']")
super(VanillaFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_, lambda_factor=lambda_factor,
min_iter=min_iter, max_iter=max_iter,
stopping_strategy=stopping_strategy,
accuracy_threshold=accuracy_threshold, verbose=verbose,
remember_iterand=remember_iterand, multi_spikes=False, t_max=t_max)
def combine_new_impulse(self) -> Any:
iterand = deepcopy(self.old_iterand['iterand'])
if self.new_ind is not None:
new_positions = np.hstack([self.old_iterand['positions'], self.new_ind])
if self.step_size == 'optimal':
gamma = np.dot(self.data_fidelity.gradient(iterand), iterand) + self.lambda_ * (
1. * np.linalg.norm(iterand, 1) + (np.abs(self.dual_certificate_value) - 1.) * self.bound)
gamma /= np.linalg.norm(self.forwardOp.mat[:, self.new_ind] * self.bound * np.sign(
self.dual_certificate_value) - self.forwardOp @ iterand, 2) ** 2
else:
gamma = 2/(self.iter + 3)
else:
new_positions = self.old_iterand['positions']
if self.step_size == 'optimal':
gamma = np.dot(self.data_fidelity.gradient(iterand), iterand) + self.lambda_ * np.linalg.norm(iterand, 1)
gamma /= np.linalg.norm(self.forwardOp @ iterand, 2) ** 2
else:
gamma = 2/(self.iter + 3)
if not 0 < gamma < 1:
gamma = np.clip(gamma, 0., 1.)
iterand *= (1 - gamma)
if self.new_ind is not None:
iterand[self.new_ind] += gamma * np.sign(self.dual_certificate_value) * self.bound
return {'iterand': iterand, 'positions': new_positions}
class FullyCorrectiveFWSolverForLasso(VanillaFWSolverForLasso):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remember_iterand: bool = False, remove_positions: bool = False, reweighting_prec: float = 1e-4,
reweighting: str = 'fista', t_max: float = None):
self.remove_positions = remove_positions
self.reweighting_prec = reweighting_prec
super(FullyCorrectiveFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_,
lambda_factor=lambda_factor,
min_iter=min_iter, max_iter=max_iter,
stopping_strategy=stopping_strategy,
accuracy_threshold=accuracy_threshold, verbose=verbose,
remember_iterand=remember_iterand, t_max=t_max)
self.reweighting = reweighting
self.last_weight = self.bound
def combine_new_impulse(self) -> Any:
iterand = deepcopy(self.old_iterand['iterand'])
if self.new_ind is not None:
new_positions = np.unique(np.hstack([self.old_iterand['positions'], self.new_ind]))
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(np.hstack([iterand.nonzero()[0], self.new_ind]))
else:
active_indices = new_positions
else:
new_positions = self.old_iterand['positions']
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(iterand.nonzero()[0])
else:
active_indices = new_positions
if active_indices.shape[0] > 1:
iterand[self.new_ind] = np.sign(self.dual_certificate_value) * self.last_weight
x0 = iterand[active_indices]
iterand = self.restricted_support_lasso(active_indices, self.reweighting_prec, x0=x0)
if self.new_ind is not None:
self.last_weight = iterand[self.new_ind]
else:
tmp = np.zeros(self.dim)
tmp[active_indices] = 1.
column = self.forwardOp(tmp)
iterand[active_indices] = np.dot(self.data, column) / (np.linalg.norm(column, 2) ** 2)
self.last_weight = iterand[active_indices]
overvalue = np.abs(iterand) > self.bound
if overvalue.sum() > 0:
print("Overvalue at coordinates {}".format(np.arange(overvalue.shape[0])[overvalue]))
iterand[overvalue] = np.sign(iterand[overvalue]) * self.bound
return {'iterand': iterand, 'positions': new_positions}
class PolyatomicFWSolverForLasso(GenericFWSolverForLasso):
def __init__(self, data: np.ndarray, forwardOp: pcore.linop.LinearOperator, lambda_: Optional[float] = None,
lambda_factor: Optional[float] = 0.1, min_iter: int = 10, max_iter: int = 500,
stopping_strategy: str = 'certificate', accuracy_threshold: float = 1e-4, verbose: Optional[int] = 10,
remove_positions: bool = False, remember_iterand: bool = False, final_reweighting_prec: float = 1e-4,
init_reweighting_prec: float = .2, decreasing: bool = False, multi_spikes_threshold: float = .7, t_max: float = None):
self.remove_positions = remove_positions
self.reweighting_prec = init_reweighting_prec
self.init_reweighting_prec = init_reweighting_prec
self.decreasing = decreasing
self.final_reweighting_prec = final_reweighting_prec
super(PolyatomicFWSolverForLasso, self).__init__(data, forwardOp, lambda_=lambda_,
lambda_factor=lambda_factor,
min_iter=min_iter, max_iter=max_iter,
stopping_strategy=stopping_strategy,
accuracy_threshold=accuracy_threshold,
verbose=verbose,
remember_iterand=remember_iterand,
multi_spikes=True,
multi_spikes_threshold=multi_spikes_threshold,
reweighting='ista', t_max=t_max)
def combine_new_impulse(self):
iterand = deepcopy(self.old_iterand['iterand'])
if self.new_ind is not None:
new_positions = np.unique(np.hstack([self.old_iterand['positions'], self.new_ind]))
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(np.hstack([iterand.nonzero()[0], self.new_ind]))
else:
active_indices = new_positions
else:
new_positions = self.old_iterand['positions']
if self.iter > 0 and self.remove_positions:
active_indices = np.unique(iterand.nonzero()[0])
else:
active_indices = new_positions
if active_indices.shape[0] > 1:
x0 = iterand[active_indices]
iterand = self.restricted_support_lasso(active_indices, self.reweighting_prec, x0=x0)
else:
tmp = np.zeros(self.dim)
tmp[active_indices] = 1.
column = self.forwardOp(tmp)
iterand[active_indices] = np.dot(self.data, column) / (np.linalg.norm(column, 2) ** 2)
overvalue = np.abs(iterand) > self.bound
if overvalue.sum() > 0: #Sanity check, never been triggered in practice
print("Overvalue at coordinates {}".format(np.arange(overvalue.shape[0])[overvalue]))
iterand[overvalue] = np.sign(iterand[overvalue]) * self.bound
if self.decreasing:
self.reweighting_prec = self.init_reweighting_prec / (self.iter + 1)
self.reweighting_prec = max(self.reweighting_prec, self.final_reweighting_prec)
return {'iterand': iterand, 'positions': new_positions} | en | 0.527427 | # print("Threshold: {} / {}".format(threshold, maxi)) # print('Candidate indices: {}\n'.format(indices.shape)) # already present position Dual ceritificate value is computed after iteration Returns ------- # before iteration # restricted_data_fidelity.lipschitz_cst = self.data_fidelity.lipschitz_cst # restricted_data_fidelity.diff_lipschitz_cst = self.data_fidelity.diff_lipschitz_cst #Sanity check, never been triggered in practice | 2.077783 | 2 |
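The solvers above grow the support from the LASSO dual certificate eta = A.T (y - A x) / lambda and treat max |eta| close to 1 as the optimality signal. A minimal NumPy sketch of that atom-selection rule, standalone and independent of the pycsou operators used above, is:

import numpy as np

def select_new_atom(A, y, x, lambda_):
    # Dual certificate at x for f(x) = 0.5 * ||A x - y||^2 + lambda * ||x||_1:
    # eta = A.T @ (y - A @ x) / lambda. Frank-Wolfe adds the column where |eta|
    # is largest; once max |eta| drops to about 1 the iterate is (near) optimal.
    eta = A.T @ (y - A @ x) / lambda_
    idx = int(np.argmax(np.abs(eta)))
    return idx, float(eta[idx])

# Toy usage on a random sparse recovery problem.
rng = np.random.default_rng(0)
A = rng.standard_normal((50, 200))
x_true = np.zeros(200)
x_true[[3, 17]] = [1.0, -2.0]
y = A @ x_true
lambda_ = 0.1 * np.abs(A.T @ y).max()   # same lambda_factor rule as in the solver above
idx, value = select_new_atom(A, y, np.zeros(200), lambda_)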
lib/rdflib-3.1.0/test/test_trix_serialize.py | suzuken/xbrlparser | 3 | 675 | #!/usr/bin/env python
import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
class TestTrixSerialize(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSerialize(self):
s1 = URIRef('store:1')
r1 = URIRef('resource:1')
r2 = URIRef('resource:2')
label = URIRef('predicate:label')
g1 = Graph(identifier = s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
s2 = URIRef('store:2')
g2 = Graph(identifier = s2)
g2.add((r2, label, Literal("label 3")))
g = ConjunctiveGraph()
for s,p,o in g1.triples((None, None, None)):
g.addN([(s,p,o,g1)])
for s,p,o in g2.triples((None, None, None)):
g.addN([(s,p,o,g2)])
r3 = URIRef('resource:3')
g.add((r3, label, Literal(4)))
r = g.serialize(format='trix')
g3 = ConjunctiveGraph()
from StringIO import StringIO
g3.parse(StringIO(r), format='trix')
for q in g3.quads((None,None,None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
if isinstance(q[3].identifier, URIRef):
tg=Graph(store=g.store, identifier=q[3].identifier)
else:
# BNode, this is a bit ugly
# we cannot match the bnode to the right graph automagically
# here I know there is only one anonymous graph,
# and that is the default one, but this is not always the case
tg=g.default_context
self.assertTrue(q[0:3] in tg)
if __name__=='__main__':
unittest.main()
| #!/usr/bin/env python
import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
class TestTrixSerialize(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSerialize(self):
s1 = URIRef('store:1')
r1 = URIRef('resource:1')
r2 = URIRef('resource:2')
label = URIRef('predicate:label')
g1 = Graph(identifier = s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
s2 = URIRef('store:2')
g2 = Graph(identifier = s2)
g2.add((r2, label, Literal("label 3")))
g = ConjunctiveGraph()
for s,p,o in g1.triples((None, None, None)):
g.addN([(s,p,o,g1)])
for s,p,o in g2.triples((None, None, None)):
g.addN([(s,p,o,g2)])
r3 = URIRef('resource:3')
g.add((r3, label, Literal(4)))
r = g.serialize(format='trix')
g3 = ConjunctiveGraph()
from StringIO import StringIO
g3.parse(StringIO(r), format='trix')
for q in g3.quads((None,None,None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
if isinstance(q[3].identifier, URIRef):
tg=Graph(store=g.store, identifier=q[3].identifier)
else:
# BNode, this is a bit ugly
# we cannot match the bnode to the right graph automagically
# here I know there is only one anonymous graph,
# and that is the default one, but this is not always the case
tg=g.default_context
self.assertTrue(q[0:3] in tg)
if __name__=='__main__':
unittest.main()
| en | 0.88715 | #!/usr/bin/env python # TODO: Fix once getGraph/getContext is in conjunctive graph # BNode, this is a bit ugly # we cannot match the bnode to the right graph automagically # here I know there is only one anonymous graph, # and that is the default one, but this is not always the case | 2.414291 | 2 |
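The test above is written for Python 2 (from StringIO import StringIO) against rdflib 3.x. A rough Python 3 sketch of the same TriX round-trip, assuming a recent rdflib in which serialize returns a string when no destination is given, is:

from rdflib import ConjunctiveGraph, Graph, Literal, URIRef

def trix_roundtrip(g: ConjunctiveGraph) -> ConjunctiveGraph:
    # Serialize every named graph to TriX, then parse the result back
    # into a fresh conjunctive graph.
    data = g.serialize(format="trix")
    g2 = ConjunctiveGraph()
    g2.parse(data=data, format="trix")
    return g2

g = ConjunctiveGraph()
ctx = Graph(store=g.store, identifier=URIRef("store:1"))
ctx.add((URIRef("resource:1"), URIRef("predicate:label"), Literal("label 1", lang="en")))
assert len(list(trix_roundtrip(g).quads((None, None, None)))) == 1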
dbschema/revertDBinstall.py | leschzinerlab/myami-3.2-freeHand | 0 | 676 | #!/usr/bin/env python
from sinedon import dbupgrade, dbconfig
import updatelib
project_dbupgrade = dbupgrade.DBUpgradeTools('projectdata', drop=True)
if __name__ == "__main__":
updatelib_inst = updatelib.UpdateLib(project_dbupgrade)
checkout_version = raw_input('Revert to checkout version, for example, 2.1 -->')
if checkout_version != 'trunk':
try:
map((lambda x:int(x)),checkout_version.split('.')[:2])
except:
print "valid versions are 'trunk', '2.1', or '2.1.2' etc"
raise
checkout_revision = int(raw_input('Revert to checkout revision, for example, 16500 -->'))
updatelib_inst.updateDatabaseVersion(checkout_version)
print "\033[35mVersion Updated in the database %s\033[0m" % checkout_version
updatelib_inst.updateDatabaseRevision(checkout_revision)
print "\033[35mRevision Updated in the database as %d\033[0m" % checkout_revision
| #!/usr/bin/env python
from sinedon import dbupgrade, dbconfig
import updatelib
project_dbupgrade = dbupgrade.DBUpgradeTools('projectdata', drop=True)
if __name__ == "__main__":
updatelib_inst = updatelib.UpdateLib(project_dbupgrade)
checkout_version = raw_input('Revert to checkout version, for example, 2.1 -->')
if checkout_version != 'trunk':
try:
map((lambda x:int(x)),checkout_version.split('.')[:2])
except:
print "valid versions are 'trunk', '2.1', or '2.1.2' etc"
raise
checkout_revision = int(raw_input('Revert to checkout revision, for example, 16500 -->'))
updatelib_inst.updateDatabaseVersion(checkout_version)
print "\033[35mVersion Updated in the database %s\033[0m" % checkout_version
updatelib_inst.updateDatabaseRevision(checkout_revision)
print "\033[35mRevision Updated in the database as %d\033[0m" % checkout_revision
| ru | 0.26433 | #!/usr/bin/env python | 2.47407 | 2 |
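The script above validates the version string with a Python 2 map over the first two dotted fields. An equivalent, slightly more explicit Python 3 check could look like:

def is_valid_checkout_version(version: str) -> bool:
    # 'trunk' is accepted verbatim; otherwise the first two dotted
    # components must parse as integers (e.g. '2.1' or '2.1.2').
    if version == 'trunk':
        return True
    try:
        for part in version.split('.')[:2]:
            int(part)
        return True
    except ValueError:
        return False

assert is_valid_checkout_version('trunk')
assert is_valid_checkout_version('2.1.2')
assert not is_valid_checkout_version('beta')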
fightchurn/listings/chap9/listing_9_4_regression_cparam.py | guy4261/fight-churn | 151 | 677 | from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
def regression_cparam(data_set_path, C_param):
X,y = prepare_data(data_set_path)
retain_reg = LogisticRegression( C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
retain_reg.fit(X, y)
c_ext = '_c{:.3f}'.format(C_param)
save_regression_summary(data_set_path,retain_reg,ext=c_ext)
save_regression_model(data_set_path,retain_reg,ext=c_ext)
save_dataset_predictions(data_set_path,retain_reg,X,ext=c_ext)
| from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
def regression_cparam(data_set_path, C_param):
X,y = prepare_data(data_set_path)
retain_reg = LogisticRegression( C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
retain_reg.fit(X, y)
c_ext = '_c{:.3f}'.format(C_param)
save_regression_summary(data_set_path,retain_reg,ext=c_ext)
save_regression_model(data_set_path,retain_reg,ext=c_ext)
save_dataset_predictions(data_set_path,retain_reg,X,ext=c_ext)
| none | 1 | 2.143035 | 2 |
|
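The listing above is usually driven over a grid of regularization strengths. A minimal driver sketch (the import path follows the repository layout shown above; the data-set path is a placeholder) is:

from fightchurn.listings.chap9.listing_9_4_regression_cparam import regression_cparam

data_set_path = 'churn_data'  # placeholder for the real data-set path
for C in (0.64, 0.32, 0.16, 0.08, 0.04, 0.02):
    # One L1-regularized fit per C; outputs are saved with a '_c{:.3f}' suffix.
    regression_cparam(data_set_path, C)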
notebooks/classical_clustering.py | prise6/smart-iss-posts | 0 | 678 | #%% [markdown]
# # Clustering classique
#%% [markdown]
# ## import classique
import os
#%%
%load_ext autoreload
%autoreload 2
os.chdir('/home/jovyan/work')
#%% [markdown]
# ## Import iss
#%%
from iss.tools import Config
from iss.tools import Tools
from iss.models import SimpleConvAutoEncoder
from iss.clustering import ClassicalClustering
from iss.clustering import AdvancedClustering
from dotenv import find_dotenv, load_dotenv
import numpy as np
import matplotlib.pyplot as plt  # used by the plotting cells below
#%% [markdown]
# ## Chargement de la config
#%%
load_dotenv(find_dotenv())
cfg = Config(project_dir = os.getenv("PROJECT_DIR"), mode = os.getenv("MODE"))
#%% [markdown]
# ## Chargement du modèle
#%%
## charger le modèle
model_type = 'simple_conv'
cfg.get('models')[model_type]['model_name'] = 'model_colab'
model = SimpleConvAutoEncoder(cfg.get('models')[model_type])
#%% [markdown]
## Chargement des images
#%%
filenames = Tools.list_directory_filenames('data/processed/models/autoencoder/train/k/')
generator_imgs = Tools.generator_np_picture_from_filenames(filenames, target_size = (27, 48), batch = 496, nb_batch = 10)
#%%
pictures_id, pictures_preds = Tools.encoded_pictures_from_generator(generator_imgs, model)
#%%
intermediate_output = pictures_preds.reshape((pictures_preds.shape[0], 3*6*16))
#%% [markdown]
# ## ACP
# Réduction de la dimension
#%%
clustering = ClassicalClustering(cfg.get('clustering')['classical'], pictures_id, intermediate_output)
#%%
clustering.compute_pca()
#%% [markdown]
# ## Kmeans
# Premiers clusters
#%%
clustering.compute_kmeans()
clustering.compute_kmeans_centers()
#%% [markdown]
# ## CAH
# Seconds clusters
#%%
clustering.compute_cah()
clustering.compute_cah_labels()
#%% [markdown]
# ## Résultats
#%% [markdown]
# ### Clusters intermediaires
#%%
fig = plt.figure(1, figsize=(12, 7))
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.kmeans_labels)
#%% [markdown]
# ### Clusters finaux
#%%
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.final_labels)
#%% [markdown]
# ### Sauvegarde des modèles
#%%
clustering.save()
#%%
# clustering = ClassicalClustering(cfg.get('clustering')['classical'])
clustering.load()
#%% [markdown]
# ## Visualisation des clusters
#%%
def select_cluster(clustering, id_cluster):
return [os.path.join('data/processed/models/autoencoder/train/k/', res[0] + '.jpg') for res in clustering.get_zip_results() if res[2] == id_cluster]
#%%
from IPython.display import Image
#%%
for cl in range(0,19):
print("Cluster %s" % (cl))
res_tmp = select_cluster(clustering, cl)
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp[:100]]
# img = Tools.display_mosaic(image_array, nrow = 10)
# fig = plt.figure(1, figsize=(12, 7))
# plt.imshow(img, aspect = 'auto')
# plt.show()
#%% [markdown]
# ## Zoom sur le cluster 0
#%%
res_tmp = select_cluster(clustering, 1)
#%%
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp]
#%%
Tools.display_mosaic(image_array, nrow = 18)
#%%
col = [1 if l == 1 else 0 for l in clustering.kmeans_labels]
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = col)
#%%
plt.scatter(clustering.pca_reduction[np.array(col) == 1, 0], clustering.pca_reduction[np.array(col) == 1, 1])
| #%% [markdown]
# # Clustering classique
#%% [markdown]
# ## import classique
import os
#%%
%load_ext autoreload
%autoreload 2
os.chdir('/home/jovyan/work')
#%% [markdown]
# ## Import iss
#%%
from iss.tools import Config
from iss.tools import Tools
from iss.models import SimpleConvAutoEncoder
from iss.clustering import ClassicalClustering
from iss.clustering import AdvancedClustering
from dotenv import find_dotenv, load_dotenv
import numpy as np
import matplotlib.pyplot as plt  # used by the plotting cells below
#%% [markdown]
# ## Chargement de la config
#%%
load_dotenv(find_dotenv())
cfg = Config(project_dir = os.getenv("PROJECT_DIR"), mode = os.getenv("MODE"))
#%% [markdown]
# ## Chargement du modèle
#%%
## charger le modèle
model_type = 'simple_conv'
cfg.get('models')[model_type]['model_name'] = 'model_colab'
model = SimpleConvAutoEncoder(cfg.get('models')[model_type])
#%% [markdown]
## Chargement des images
#%%
filenames = Tools.list_directory_filenames('data/processed/models/autoencoder/train/k/')
generator_imgs = Tools.generator_np_picture_from_filenames(filenames, target_size = (27, 48), batch = 496, nb_batch = 10)
#%%
pictures_id, pictures_preds = Tools.encoded_pictures_from_generator(generator_imgs, model)
#%%
intermediate_output = pictures_preds.reshape((pictures_preds.shape[0], 3*6*16))
#%% [markdown]
# ## ACP
# Réduction de la dimension
#%%
clustering = ClassicalClustering(cfg.get('clustering')['classical'], pictures_id, intermediate_output)
#%%
clustering.compute_pca()
#%% [markdown]
# ## Kmeans
# Premiers clusters
#%%
clustering.compute_kmeans()
clustering.compute_kmeans_centers()
#%% [markdown]
# ## CAH
# Seconds clusters
#%%
clustering.compute_cah()
clustering.compute_cah_labels()
#%% [markdown]
# ## Résultats
#%% [markdown]
# ### Clusters intermediaires
#%%
fig = plt.figure(1, figsize=(12, 7))
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.kmeans_labels)
#%% [markdown]
# ### Clusters finaux
#%%
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.final_labels)
#%% [markdown]
# ### Sauvegarde des modèles
#%%
clustering.save()
#%%
# clustering = ClassicalClustering(cfg.get('clustering')['classical'])
clustering.load()
#%% [markdown]
# ## Visualisation des clusters
#%%
def select_cluster(clustering, id_cluster):
return [os.path.join('data/processed/models/autoencoder/train/k/', res[0] + '.jpg') for res in clustering.get_zip_results() if res[2] == id_cluster]
#%%
from IPython.display import Image
#%%
for cl in range(0,19):
print("Cluster %s" % (cl))
res_tmp = select_cluster(clustering, cl)
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp[:100]]
# img = Tools.display_mosaic(image_array, nrow = 10)
# fig = plt.figure(1, figsize=(12, 7))
# plt.imshow(img, aspect = 'auto')
# plt.show()
#%% [markdown]
# ## Zoom sur le cluster 0
#%%
res_tmp = select_cluster(clustering, 1)
#%%
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp]
#%%
Tools.display_mosaic(image_array, nrow = 18)
#%%
col = [1 if l == 1 else 0 for l in clustering.kmeans_labels]
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = col)
#%%
plt.scatter(clustering.pca_reduction[np.array(col) == 1, 0], clustering.pca_reduction[np.array(col) == 1, 1])
| zh | 0.260204 | #%% [markdown] # # Clustering classique #%% [markdown] # ## import classique #%% #%% [markdown] # ## Import iss #%% #%% [markdown] # ## Chargement de la config #%% #%% [markdown] # ## Chargement du modèle #%% ## charger le modèle #%% [markdown] ## Chargement des images #%% #%% #%% #%% [markdown] # ## ACP # Réduction de la dimension #%% #%% #%% [markdown] # ## Kmeans # Premiers clusters #%% #%% [markdown] # ## CAH # Seconds clusters #%% #%% [markdown] # ## Résultats #%% [markdown] # ### Clusters intermediaires #%% #%% [markdown] # ### Clusters finaux #%% #%% [markdown] # ### Sauvegarde des modèles #%% #%% # clustering = ClassicalClustering(cfg.get('clustering')['classical']) #%% [markdown] # ## Visualisation des clusters #%% #%% #%% # img = Tools.display_mosaic(image_array, nrow = 10) # fig = plt.figure(1, figsize=(12, 7)) # plt.imshow(img, aspect = 'auto') # plt.show() #%% [markdown] # ## Zoom sur le cluster 0 #%% #%% #%% #%% #%% | 2.018637 | 2 |
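The notebook above chains PCA, k-means and hierarchical clustering (CAH) through the project's ClassicalClustering wrapper. A rough scikit-learn sketch of that two-stage scheme, independent of the iss package and with hypothetical cluster counts, is:

from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, AgglomerativeClustering

def two_stage_clustering(features, n_components=50, n_kmeans=200, n_final=20, seed=0):
    # 1) reduce dimension with PCA, 2) over-segment with k-means,
    # 3) merge the k-means centers hierarchically into the final clusters.
    reduced = PCA(n_components=n_components, random_state=seed).fit_transform(features)
    km = KMeans(n_clusters=n_kmeans, n_init=10, random_state=seed).fit(reduced)
    cah = AgglomerativeClustering(n_clusters=n_final).fit(km.cluster_centers_)
    final_labels = cah.labels_[km.labels_]  # map each sample through its k-means cluster
    return reduced, km.labels_, final_labels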
SM_28BYJ48/logger/logger.py | kaulketh/stepper-motor-stuff | 0 | 679 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# created 02.02.2021, tkaulke
# <NAME>, <EMAIL>
# https://github.com/kaulketh
# -----------------------------------------------------------
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import errno
import logging
import os
from logging.config import fileConfig
# runtime location
this_folder = os.path.dirname(os.path.abspath(__file__))
# define log folder related to location
log_folder = os.path.join(this_folder, '../logs')
# define ini and log files
ini_file = 'debug.ini'
info_log_file = log_folder + '/info.log'
error_log_file = log_folder + '/error.log'
# check if exists or create log folder
try:
os.makedirs(log_folder, exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs(log_folder)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(log_folder):
pass
else:
raise
# setup configuration
config_file = os.path.join(this_folder, ini_file)
fileConfig(config_file, disable_existing_loggers=True)
# create handlers
handler_info = logging.FileHandler(os.path.join(this_folder, info_log_file))
handler_error = logging.FileHandler(os.path.join(this_folder, error_log_file))
# set levels
handler_info.setLevel(logging.INFO)
handler_error.setLevel(logging.ERROR)
# create formatters and add to handlers
format_info = \
logging.Formatter('%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'%(message).180s', datefmt='%Y-%m-%d %H:%M:%S')
format_error = \
logging.Formatter(
'%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'[ thread: %(threadName)s ] %(message)s')
handler_info.setFormatter(format_info)
handler_error.setFormatter(format_error)
def get_logger(name: str = __name__):
logger = logging.getLogger(name)
# add handler
logger.addHandler(handler_info)
logger.addHandler(handler_error)
return logger
if __name__ == '__main__':
pass
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# created 02.02.2021, tkaulke
# <NAME>, <EMAIL>
# https://github.com/kaulketh
# -----------------------------------------------------------
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import errno
import logging
import os
from logging.config import fileConfig
# runtime location
this_folder = os.path.dirname(os.path.abspath(__file__))
# define log folder related to location
log_folder = os.path.join(this_folder, '../logs')
# define ini and log files
ini_file = 'debug.ini'
info_log_file = log_folder + '/info.log'
error_log_file = log_folder + '/error.log'
# check if exists or create log folder
try:
os.makedirs(log_folder, exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs(log_folder)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(log_folder):
pass
else:
raise
# setup configuration
config_file = os.path.join(this_folder, ini_file)
fileConfig(config_file, disable_existing_loggers=True)
# create handlers
handler_info = logging.FileHandler(os.path.join(this_folder, info_log_file))
handler_error = logging.FileHandler(os.path.join(this_folder, error_log_file))
# set levels
handler_info.setLevel(logging.INFO)
handler_error.setLevel(logging.ERROR)
# create formatters and add to handlers
format_info = \
logging.Formatter('%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'%(message).180s', datefmt='%Y-%m-%d %H:%M:%S')
format_error = \
logging.Formatter(
'%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'[ thread: %(threadName)s ] %(message)s')
handler_info.setFormatter(format_info)
handler_error.setFormatter(format_error)
def get_logger(name: str = __name__):
logger = logging.getLogger(name)
# add handler
logger.addHandler(handler_info)
logger.addHandler(handler_error)
return logger
if __name__ == '__main__':
pass
| en | 0.35187 | #!/usr/bin/python3 # -*- coding: utf-8 -*- # ----------------------------------------------------------- # created 02.02.2021, tkaulke # <NAME>, <EMAIL> # https://github.com/kaulketh # ----------------------------------------------------------- # runtime location # define log folder related to location # define ini and log files # check if exists or create log folder # Python>3.2 # Python >2.5 # setup configuration # create handlers # set levels # create formatters and add to handlers # add handler | 2.50964 | 3 |
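Typical use of the module above from application code, assuming it is importable as logger and that the levels configured in debug.ini let INFO records through, is:

from logger import get_logger

log = get_logger(__name__)
log.info("stepper motor initialised")   # written to logs/info.log
log.error("driver reported a fault")    # written to logs/error.log and logs/info.log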
tests/test_mr_uplift.py | Ibotta/mr_uplift | 48 | 680 | import numpy as np
import pytest
from mr_uplift.dataset.data_simulation import get_no_noise_data, get_simple_uplift_data, get_observational_uplift_data_1
from mr_uplift.mr_uplift import MRUplift, get_t_data
from mr_uplift.keras_model_functionality import prepare_data_optimized_loss
import sys
import pandas as pd
class TestMRUplift(object):
def test_get_t_data(self):
num_obs_1 = 10
num_obs_2 = 3
test_1 = get_t_data(0, num_obs_1)
test_2 = get_t_data(np.array([0, 1]), num_obs_2)
test_1_values = np.zeros(num_obs_1).reshape(-1, 1)
test_2_values = np.concatenate([np.zeros(num_obs_2).reshape(-1, 1),
np.ones(num_obs_2).reshape(-1, 1)], axis=1)
assert np.mean(test_1 == test_1_values) == 1
assert np.mean(test_2 == test_2_values) == 1
def test_model_mean_outputs(self):
true_ATE = np.array([[0, 0], [1, .5]])
rmse_tolerance = .05
num_obs = 10000
y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1)
oos_ice = uplift_model.predict_ice(response_transformer = True)
assert np.sqrt(np.mean((oos_ice.mean(axis=1) -true_ATE)**2)) < rmse_tolerance
def test_model_pred_oos_shapes(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = np.concatenate([t.reshape(-1, 1),
np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=[
'relu'], num_layers=[1], epochs=[1], batch_size=[1000])
uplift_model = MRUplift()
uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1)
x_1 = x.copy()
x_1 = pd.DataFrame(x_1)
x_1.columns = ['var_'+str(x) for x in range(x.shape[1])]
y_1 = y.copy()
y_1 = pd.DataFrame(y_1)
y_1.columns = ['var_'+str(x) for x in range(y.shape[1])]
uplift_model_named = MRUplift()
uplift_model_named.fit(x_1, y_1, t, param_grid = param_grid, n_jobs=1)
assert uplift_model.predict_ice().shape == (
np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1])
assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0],
num_obs,
y.shape[1])
assert uplift_model.get_erupt_curves()
assert uplift_model.get_erupt_curves(x = x, y = y, t = t)
assert uplift_model_named.get_erupt_curves()
def test_model_pred_oos_shapes_single_col_tmt(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = t.reshape(-1, 1)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=[
'relu'], num_layers=[1], epochs=[1], batch_size=[1000])
uplift_model = MRUplift()
uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1)
assert uplift_model.predict_ice().shape == (
np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1])
assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0],
num_obs,
y.shape[1])
assert uplift_model.get_erupt_curves()
assert uplift_model.get_erupt_curves(x = x, y = y, t = t)
def test_model_pred_oos_shapes_single_col_tmt_propensity(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = t.reshape(-1, 1)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2],
epochs=[1], batch_size=[100],
alpha = [.5], copy_several_times = [1])
uplift_model = MRUplift()
uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1,
optimized_loss = True, use_propensity = True)
assert uplift_model.predict_ice().shape == (
np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1])
assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0],
num_obs,
y.shape[1])
assert uplift_model.get_erupt_curves()
assert uplift_model.get_erupt_curves(x = x, y = y, t = t)
def test_prepare_data_optimized_loss_one_col_tmt(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = t.reshape(len(t),1)
unique_treatments = np.unique(t, axis = 0)
masks = np.ones(num_obs).reshape(num_obs,1)
x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t, masks ,unique_treatments)
assert(utility_weights.shape == (num_obs, y.shape[1]))
assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1]))
for q in range(unique_treatments.shape[0]):
assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 )
def test_prepare_data_optimized_loss_two_col_tmt(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = np.concatenate([t.reshape(-1, 1),
np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1)
unique_treatments = np.unique(t, axis = 0)
masks = np.ones(num_obs*len(unique_treatments)).reshape(num_obs,len(unique_treatments))
x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t,masks, unique_treatments)
assert(utility_weights.shape == (num_obs, y.shape[1]))
assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1]))
for q in range(unique_treatments.shape[0]):
assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 )
def test_model_optim_mean_outputs(self):
true_ATE = np.array([[0, 0], [1, .5]])
rmse_tolerance = .05
num_obs = 10000
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100])
y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1, param_grid = param_grid, optimized_loss = False)
oos_ice = uplift_model.predict_ice(response_transformer = True)
assert np.sqrt(np.mean((oos_ice.mean(axis=1) - true_ATE)**2)) < rmse_tolerance
def test_model_get_random_erupts(self):
true_ATE = np.array([[0, 0], [1, .5]])
rmse_tolerance = .05
num_obs = 10000
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100],
alpha = [.5], copy_several_times = [2])
y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1, param_grid = param_grid, optimized_loss = True)
oos_re = uplift_model.get_random_erupts()
uplift_model_propensity = MRUplift()
uplift_model_propensity.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1, param_grid = param_grid,
optimized_loss = True, use_propensity = True)
oos_re_propensity = uplift_model_propensity.get_random_erupts()
assert oos_re['mean'].iloc[0] > 0
assert oos_re_propensity['mean'].iloc[0] > 0
def test_varimp(self):
num_obs = 10000
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100])
y, x, t = get_simple_uplift_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x, y, t.reshape(-1, 1),
n_jobs=1, param_grid = param_grid)
varimp = uplift_model.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1))
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100],
alpha = [.5], copy_several_times = [2])
uplift_model_propensity = MRUplift()
uplift_model_propensity.fit(x, y, t.reshape(-1, 1),
n_jobs=1, param_grid = param_grid,
optimized_loss = True, use_propensity = True)
varimp_propensity = uplift_model_propensity.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1))
assert varimp['permutation_varimp_metric'].iloc[0]>varimp['permutation_varimp_metric'].iloc[1]
assert varimp_propensity['permutation_varimp_metric'].iloc[0]>varimp_propensity['permutation_varimp_metric'].iloc[1]
def test_model_propensity(self):
num_obs = 10000
TOLERANCE = .98
y, x, t, rule_assignment = get_observational_uplift_data_1(num_obs)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[1],
epochs=[20], batch_size=[512],
alpha = [.9999,.99], copy_several_times = [1])
uplift_model = MRUplift()
uplift_model.fit(x, y[:,0].reshape(-1,1), t, param_grid = param_grid, n_jobs=1,
optimized_loss = True, use_propensity = True, test_size = 0)
uplift_model.best_params_net
y_test, x_test, t_test, rule_assignment_test = get_observational_uplift_data_1(num_obs)
experiment_groups = np.zeros(num_obs)+2
experiment_groups[np.where(x_test[:,-2]<.5)[0]] = 1
experiment_groups[np.where(x_test[:,-2]<.33)[0]] = 0
experiment_groups[np.where(x_test[:,-1]>.8)[0]] = 3
optim_treatments_no_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = False)
optim_treatments_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = True)
optim_treatments_cuttoff_cat = optim_treatments_cuttoff.argmax(axis = 1)
optim_treatments_no_cuttoff_cat = optim_treatments_no_cuttoff.argmax(axis = 1)
correct_tmts_1 = np.array([x in [0,1] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 0)[0]] ]).mean()
correct_tmts_2 = np.array([x in [1,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] ]).mean()
correct_tmts_3 = np.array([x in [0,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 2)[0]] ]).mean()
correct_tmts_4 = np.array([x in [0] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 3)[0]] ]).mean()
correct_tmts_experiment_groups_1 = ((optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] == 1) == x_test[np.where(experiment_groups == 1)[0],0]).mean()
correct_tmts_no_cutoff = np.mean((optim_treatments_no_cuttoff_cat==1 ) == x_test[:,0])
assert correct_tmts_1>TOLERANCE
assert correct_tmts_2>TOLERANCE
assert correct_tmts_3>TOLERANCE
assert correct_tmts_4>TOLERANCE
assert correct_tmts_experiment_groups_1>TOLERANCE
assert np.array_equal(optim_treatments_cuttoff_cat,optim_treatments_no_cuttoff_cat) is False
assert correct_tmts_no_cutoff>TOLERANCE
| import numpy as np
import pytest
from mr_uplift.dataset.data_simulation import get_no_noise_data, get_simple_uplift_data, get_observational_uplift_data_1
from mr_uplift.mr_uplift import MRUplift, get_t_data
from mr_uplift.keras_model_functionality import prepare_data_optimized_loss
import sys
import pandas as pd
class TestMRUplift(object):
def test_get_t_data(self):
num_obs_1 = 10
num_obs_2 = 3
test_1 = get_t_data(0, num_obs_1)
test_2 = get_t_data(np.array([0, 1]), num_obs_2)
test_1_values = np.zeros(num_obs_1).reshape(-1, 1)
test_2_values = np.concatenate([np.zeros(num_obs_2).reshape(-1, 1),
np.ones(num_obs_2).reshape(-1, 1)], axis=1)
assert np.mean(test_1 == test_1_values) == 1
assert np.mean(test_2 == test_2_values) == 1
def test_model_mean_outputs(self):
true_ATE = np.array([[0, 0], [1, .5]])
rmse_tolerance = .05
num_obs = 10000
y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1)
oos_ice = uplift_model.predict_ice(response_transformer = True)
assert np.sqrt(np.mean((oos_ice.mean(axis=1) -true_ATE)**2)) < rmse_tolerance
def test_model_pred_oos_shapes(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = np.concatenate([t.reshape(-1, 1),
np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=[
'relu'], num_layers=[1], epochs=[1], batch_size=[1000])
uplift_model = MRUplift()
uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1)
x_1 = x.copy()
x_1 = pd.DataFrame(x_1)
x_1.columns = ['var_'+str(x) for x in range(x.shape[1])]
y_1 = y.copy()
y_1 = pd.DataFrame(y_1)
y_1.columns = ['var_'+str(x) for x in range(y.shape[1])]
uplift_model_named = MRUplift()
uplift_model_named.fit(x_1, y_1, t, param_grid = param_grid, n_jobs=1)
assert uplift_model.predict_ice().shape == (
np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1])
assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0],
num_obs,
y.shape[1])
assert uplift_model.get_erupt_curves()
assert uplift_model.get_erupt_curves(x = x, y = y, t = t)
assert uplift_model_named.get_erupt_curves()
def test_model_pred_oos_shapes_single_col_tmt(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = t.reshape(-1, 1)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=[
'relu'], num_layers=[1], epochs=[1], batch_size=[1000])
uplift_model = MRUplift()
uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1)
assert uplift_model.predict_ice().shape == (
np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1])
assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0],
num_obs,
y.shape[1])
assert uplift_model.get_erupt_curves()
assert uplift_model.get_erupt_curves(x = x, y = y, t = t)
def test_model_pred_oos_shapes_single_col_tmt_propensity(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = t.reshape(-1, 1)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2],
epochs=[1], batch_size=[100],
alpha = [.5], copy_several_times = [1])
uplift_model = MRUplift()
uplift_model.fit(x, y, t, param_grid = param_grid, n_jobs=1,
optimized_loss = True, use_propensity = True)
assert uplift_model.predict_ice().shape == (
np.unique(t, axis=0).shape[0], num_obs * .7, y.shape[1])
assert uplift_model.predict_ice(x=x).shape == (np.unique(t,axis=0).shape[0],
num_obs,
y.shape[1])
assert uplift_model.get_erupt_curves()
assert uplift_model.get_erupt_curves(x = x, y = y, t = t)
def test_prepare_data_optimized_loss_one_col_tmt(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = t.reshape(len(t),1)
unique_treatments = np.unique(t, axis = 0)
masks = np.ones(num_obs).reshape(num_obs,1)
x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t, masks ,unique_treatments)
assert(utility_weights.shape == (num_obs, y.shape[1]))
assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1]))
for q in range(unique_treatments.shape[0]):
assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 )
def test_prepare_data_optimized_loss_two_col_tmt(self):
num_obs = 1000
y, x, t = get_simple_uplift_data(num_obs)
t = np.concatenate([t.reshape(-1, 1),
np.random.binomial(1, .5, num_obs).reshape(-1, 1)], axis=1)
unique_treatments = np.unique(t, axis = 0)
masks = np.ones(num_obs*len(unique_treatments)).reshape(num_obs,len(unique_treatments))
x, utility_weights, missing_utility, missing_y_mat, masks, weights = prepare_data_optimized_loss(x,y,t,masks, unique_treatments)
assert(utility_weights.shape == (num_obs, y.shape[1]))
assert(missing_y_mat.shape == (num_obs, unique_treatments.shape[0], y.shape[1]))
for q in range(unique_treatments.shape[0]):
assert( ((missing_utility[:,q]==0) == (missing_y_mat[:,q,0] == -999)).mean() ==1 )
def test_model_optim_mean_outputs(self):
true_ATE = np.array([[0, 0], [1, .5]])
rmse_tolerance = .05
num_obs = 10000
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100])
y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1, param_grid = param_grid, optimized_loss = False)
oos_ice = uplift_model.predict_ice(response_transformer = True)
assert np.sqrt(np.mean((oos_ice.mean(axis=1) - true_ATE)**2)) < rmse_tolerance
def test_model_get_random_erupts(self):
true_ATE = np.array([[0, 0], [1, .5]])
rmse_tolerance = .05
num_obs = 10000
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100],
alpha = [.5], copy_several_times = [2])
y_no_noise, x_no_noise, tmt_no_noise = get_no_noise_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1, param_grid = param_grid, optimized_loss = True)
oos_re = uplift_model.get_random_erupts()
uplift_model_propensity = MRUplift()
uplift_model_propensity.fit(x_no_noise, y_no_noise, tmt_no_noise.reshape(-1, 1),
n_jobs=1, param_grid = param_grid,
optimized_loss = True, use_propensity = True)
oos_re_propensity = uplift_model_propensity.get_random_erupts()
assert oos_re['mean'].iloc[0] > 0
assert oos_re_propensity['mean'].iloc[0] > 0
def test_varimp(self):
num_obs = 10000
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100])
y, x, t = get_simple_uplift_data(num_obs)
uplift_model = MRUplift()
uplift_model.fit(x, y, t.reshape(-1, 1),
n_jobs=1, param_grid = param_grid)
varimp = uplift_model.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1))
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[2], epochs=[30], batch_size=[100],
alpha = [.5], copy_several_times = [2])
uplift_model_propensity = MRUplift()
uplift_model_propensity.fit(x, y, t.reshape(-1, 1),
n_jobs=1, param_grid = param_grid,
optimized_loss = True, use_propensity = True)
varimp_propensity = uplift_model_propensity.permutation_varimp(objective_weights = np.array([.7,-.3,0]).reshape(1,-1))
assert varimp['permutation_varimp_metric'].iloc[0]>varimp['permutation_varimp_metric'].iloc[1]
assert varimp_propensity['permutation_varimp_metric'].iloc[0]>varimp_propensity['permutation_varimp_metric'].iloc[1]
def test_model_propensity(self):
num_obs = 10000
TOLERANCE = .98
y, x, t, rule_assignment = get_observational_uplift_data_1(num_obs)
param_grid = dict(num_nodes=[8], dropout=[.1], activation=['relu'], num_layers=[1],
epochs=[20], batch_size=[512],
alpha = [.9999,.99], copy_several_times = [1])
uplift_model = MRUplift()
uplift_model.fit(x, y[:,0].reshape(-1,1), t, param_grid = param_grid, n_jobs=1,
optimized_loss = True, use_propensity = True, test_size = 0)
uplift_model.best_params_net
y_test, x_test, t_test, rule_assignment_test = get_observational_uplift_data_1(num_obs)
experiment_groups = np.zeros(num_obs)+2
experiment_groups[np.where(x_test[:,-2]<.5)[0]] = 1
experiment_groups[np.where(x_test[:,-2]<.33)[0]] = 0
experiment_groups[np.where(x_test[:,-1]>.8)[0]] = 3
optim_treatments_no_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = False)
optim_treatments_cuttoff = uplift_model.predict_optimal_treatments(x = x_test, use_propensity_score_cutoff = True)
optim_treatments_cuttoff_cat = optim_treatments_cuttoff.argmax(axis = 1)
optim_treatments_no_cuttoff_cat = optim_treatments_no_cuttoff.argmax(axis = 1)
correct_tmts_1 = np.array([x in [0,1] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 0)[0]] ]).mean()
correct_tmts_2 = np.array([x in [1,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] ]).mean()
correct_tmts_3 = np.array([x in [0,2] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 2)[0]] ]).mean()
correct_tmts_4 = np.array([x in [0] for x in optim_treatments_cuttoff_cat[np.where(experiment_groups == 3)[0]] ]).mean()
correct_tmts_experiment_groups_1 = ((optim_treatments_cuttoff_cat[np.where(experiment_groups == 1)[0]] == 1) == x_test[np.where(experiment_groups == 1)[0],0]).mean()
correct_tmts_no_cutoff = np.mean((optim_treatments_no_cuttoff_cat==1 ) == x_test[:,0])
assert correct_tmts_1>TOLERANCE
assert correct_tmts_2>TOLERANCE
assert correct_tmts_3>TOLERANCE
assert correct_tmts_4>TOLERANCE
assert correct_tmts_experiment_groups_1>TOLERANCE
assert np.array_equal(optim_treatments_cuttoff_cat,optim_treatments_no_cuttoff_cat) is False
assert correct_tmts_no_cutoff>TOLERANCE | none | 1 | 2.22797 | 2 |
|
lambdataalchemani/lambda_test.py | Full-Data-Alchemist/lambdata-Mani-alch | 0 | 681 | """
"""
import unittest
from example_module import COLORS, increment
class ExampleTest(unittest.TestCase):
"""
#TODO
"""
def test_increment(self):
x0 = 0
y0 = increment(x0) #y0 == 1
self.assertEqual(y0, 1)
x1 = 100
y1 = increment(x1) #y1 == 101
self.assertEqual(y1, 101)
| """
"""
import unittest
from example_module import COLORS, increment
class ExampleTest(unittest.TestCase):
"""
#TODO
"""
def test_increment(self):
x0 = 0
y0 = increment(x0) #y0 == 1
self.assertEqual(y0, 1)
x1 = 100
y1 = increment(x1) #y1 == 101
self.assertEqual(y1, 101)
| en | 0.10984 | #TODO #y0 == 1 #y1 == 101 | 3.490497 | 3 |
desktop/core/src/desktop/auth/views.py | bopopescu/hue-5 | 1 | 682 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import oauth2 as oauth
except:
oauth = None
import cgi
import logging
import urllib
from datetime import datetime
from axes.decorators import watch_login
import django.contrib.auth.views
from django.core import urlresolvers
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth import login, get_backends, authenticate
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from desktop.auth import forms as auth_forms
from desktop.lib.django_util import render
from desktop.lib.django_util import login_notrequired
from desktop.lib.django_util import JsonResponse
from desktop.log.access import access_warn, last_access_map
from desktop.conf import LDAP, OAUTH, DEMO_ENABLED
from hadoop.fs.exceptions import WebHdfsException
from useradmin.models import get_profile
from useradmin.views import ensure_home_directory, require_change_password
LOG = logging.getLogger(__name__)
def get_current_users():
"""Return dictionary of User objects and
a dictionary of the user's IP address and last access time"""
current_users = { }
for session in Session.objects.all():
try:
uid = session.get_decoded().get(django.contrib.auth.SESSION_KEY)
except SuspiciousOperation:
# If secret_key changed, this resolution won't work.
uid = None
if uid is not None:
try:
userobj = User.objects.get(pk=uid)
current_users[userobj] = last_access_map.get(userobj.username, { })
except User.DoesNotExist:
LOG.debug("User with id=%d does not exist" % uid)
return current_users
def first_login_ever():
backends = get_backends()
for backend in backends:
if hasattr(backend, 'is_first_login_ever') and backend.is_first_login_ever():
return True
return False
def get_backend_names():
return get_backends and [backend.__class__.__name__ for backend in get_backends()]
@login_notrequired
@watch_login
def dt_login(request, from_modal=False):
redirect_to = request.REQUEST.get('next', '/')
is_first_login_ever = first_login_ever()
backend_names = get_backend_names()
is_active_directory = 'LdapBackend' in backend_names and ( bool(LDAP.NT_DOMAIN.get()) or bool(LDAP.LDAP_SERVERS.get()) )
if is_active_directory:
UserCreationForm = auth_forms.LdapUserCreationForm
AuthenticationForm = auth_forms.LdapAuthenticationForm
else:
UserCreationForm = auth_forms.UserCreationForm
AuthenticationForm = auth_forms.AuthenticationForm
if request.method == 'POST':
request.audit = {
'operation': 'USER_LOGIN',
'username': request.POST.get('username')
}
# For first login, need to validate user info!
first_user_form = is_first_login_ever and UserCreationForm(data=request.POST) or None
first_user = first_user_form and first_user_form.is_valid()
if first_user or not is_first_login_ever:
auth_form = AuthenticationForm(data=request.POST)
if auth_form.is_valid():
# Must login by using the AuthenticationForm.
# It provides 'backends' on the User object.
user = auth_form.get_user()
userprofile = get_profile(user)
login(request, user)
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
auto_create_home_backends = ['AllowAllBackend', 'LdapBackend', 'SpnegoDjangoBackend']
if is_first_login_ever or any(backend in backend_names for backend in auto_create_home_backends):
# Create home directory for first user.
try:
ensure_home_directory(request.fs, user.username)
except (IOError, WebHdfsException), e:
LOG.error(_('Could not create home directory.'), exc_info=e)
request.error(_('Could not create home directory.'))
if require_change_password(userprofile):
return HttpResponseRedirect(urlresolvers.reverse('useradmin.views.edit_user', kwargs={'username': user.username}))
userprofile.first_login = False
userprofile.last_activity = datetime.now()
userprofile.save()
msg = 'Successful login for user: %s' % user.username
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.REQUEST.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': True})
else:
return HttpResponseRedirect(redirect_to)
else:
request.audit['allowed'] = False
msg = 'Failed login for user: %s' % request.POST.get('username')
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.REQUEST.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': False})
else:
first_user_form = None
auth_form = AuthenticationForm()
if DEMO_ENABLED.get() and not 'admin' in request.REQUEST:
user = authenticate(username=request.user.username, password='<PASSWORD>')
login(request, user)
ensure_home_directory(request.fs, user.username)
return HttpResponseRedirect(redirect_to)
if not from_modal:
request.session.set_test_cookie()
renderable_path = 'login.mako'
if from_modal:
renderable_path = 'login_modal.mako'
return render(renderable_path, request, {
'action': urlresolvers.reverse('desktop.auth.views.dt_login'),
'form': first_user_form or auth_form,
'next': redirect_to,
'first_login_ever': is_first_login_ever,
'login_errors': request.method == 'POST',
'backend_names': backend_names,
'active_directory': is_active_directory
})
def dt_logout(request, next_page=None):
"""Log out the user"""
username = request.user.get_username()
request.audit = {
'username': username,
'operation': 'USER_LOGOUT',
'operationText': 'Logged out user: %s' % username
}
backends = get_backends()
if backends:
for backend in backends:
if hasattr(backend, 'logout'):
response = backend.logout(request, next_page)
if response:
return response
return django.contrib.auth.views.logout(request, next_page)
def profile(request):
"""
Dumps JSON for user-profile information.
"""
return render(None, request, _profile_dict(request.user))
def _profile_dict(user):
return dict(
username=user.username,
first_name=user.first_name,
last_name=user.last_name,
last_login=str(user.last_login), # datetime object needs to be converted
email=user.email)
# OAuth is based on Twitter as example.
@login_notrequired
def oauth_login(request):
assert oauth is not None
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
client = oauth.Client(consumer)
resp, content = client.request(OAUTH.REQUEST_TOKEN_URL.get(), "POST", body=urllib.urlencode({
'oauth_callback': 'http://' + request.get_host() + '/login/oauth_authenticated/'
}))
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
request.session['request_token'] = dict(cgi.parse_qsl(content))
url = "%s?oauth_token=%s" % (OAUTH.AUTHENTICATE_URL.get(), request.session['request_token']['oauth_token'])
return HttpResponseRedirect(url)
@login_notrequired
def oauth_authenticated(request):
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
token = oauth.Token(request.session['request_token']['oauth_token'], request.session['request_token']['oauth_token_secret'])
client = oauth.Client(consumer, token)
resp, content = client.request(OAUTH.ACCESS_TOKEN_URL.get(), "GET")
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
access_token = dict(cgi.parse_qsl(content))
user = authenticate(access_token=access_token)
login(request, user)
redirect_to = request.REQUEST.get('next', '/')
return HttpResponseRedirect(redirect_to)
| #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import oauth2 as oauth
except:
oauth = None
import cgi
import logging
import urllib
from datetime import datetime
from axes.decorators import watch_login
import django.contrib.auth.views
from django.core import urlresolvers
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth import login, get_backends, authenticate
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from desktop.auth import forms as auth_forms
from desktop.lib.django_util import render
from desktop.lib.django_util import login_notrequired
from desktop.lib.django_util import JsonResponse
from desktop.log.access import access_warn, last_access_map
from desktop.conf import LDAP, OAUTH, DEMO_ENABLED
from hadoop.fs.exceptions import WebHdfsException
from useradmin.models import get_profile
from useradmin.views import ensure_home_directory, require_change_password
LOG = logging.getLogger(__name__)
def get_current_users():
"""Return dictionary of User objects and
a dictionary of the user's IP address and last access time"""
current_users = { }
for session in Session.objects.all():
try:
uid = session.get_decoded().get(django.contrib.auth.SESSION_KEY)
except SuspiciousOperation:
# If secret_key changed, this resolution won't work.
uid = None
if uid is not None:
try:
userobj = User.objects.get(pk=uid)
current_users[userobj] = last_access_map.get(userobj.username, { })
except User.DoesNotExist:
LOG.debug("User with id=%d does not exist" % uid)
return current_users
def first_login_ever():
backends = get_backends()
for backend in backends:
if hasattr(backend, 'is_first_login_ever') and backend.is_first_login_ever():
return True
return False
def get_backend_names():
return get_backends and [backend.__class__.__name__ for backend in get_backends()]
@login_notrequired
@watch_login
def dt_login(request, from_modal=False):
redirect_to = request.REQUEST.get('next', '/')
is_first_login_ever = first_login_ever()
backend_names = get_backend_names()
is_active_directory = 'LdapBackend' in backend_names and ( bool(LDAP.NT_DOMAIN.get()) or bool(LDAP.LDAP_SERVERS.get()) )
if is_active_directory:
UserCreationForm = auth_forms.LdapUserCreationForm
AuthenticationForm = auth_forms.LdapAuthenticationForm
else:
UserCreationForm = auth_forms.UserCreationForm
AuthenticationForm = auth_forms.AuthenticationForm
if request.method == 'POST':
request.audit = {
'operation': 'USER_LOGIN',
'username': request.POST.get('username')
}
# For first login, need to validate user info!
first_user_form = is_first_login_ever and UserCreationForm(data=request.POST) or None
first_user = first_user_form and first_user_form.is_valid()
if first_user or not is_first_login_ever:
auth_form = AuthenticationForm(data=request.POST)
if auth_form.is_valid():
# Must login by using the AuthenticationForm.
# It provides 'backends' on the User object.
user = auth_form.get_user()
userprofile = get_profile(user)
login(request, user)
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
auto_create_home_backends = ['AllowAllBackend', 'LdapBackend', 'SpnegoDjangoBackend']
if is_first_login_ever or any(backend in backend_names for backend in auto_create_home_backends):
# Create home directory for first user.
try:
ensure_home_directory(request.fs, user.username)
except (IOError, WebHdfsException), e:
LOG.error(_('Could not create home directory.'), exc_info=e)
request.error(_('Could not create home directory.'))
if require_change_password(userprofile):
return HttpResponseRedirect(urlresolvers.reverse('useradmin.views.edit_user', kwargs={'username': user.username}))
userprofile.first_login = False
userprofile.last_activity = datetime.now()
userprofile.save()
msg = 'Successful login for user: %s' % user.username
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.REQUEST.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': True})
else:
return HttpResponseRedirect(redirect_to)
else:
request.audit['allowed'] = False
msg = 'Failed login for user: %s' % request.POST.get('username')
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.REQUEST.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': False})
else:
first_user_form = None
auth_form = AuthenticationForm()
if DEMO_ENABLED.get() and not 'admin' in request.REQUEST:
user = authenticate(username=request.user.username, password='<PASSWORD>')
login(request, user)
ensure_home_directory(request.fs, user.username)
return HttpResponseRedirect(redirect_to)
if not from_modal:
request.session.set_test_cookie()
renderable_path = 'login.mako'
if from_modal:
renderable_path = 'login_modal.mako'
return render(renderable_path, request, {
'action': urlresolvers.reverse('desktop.auth.views.dt_login'),
'form': first_user_form or auth_form,
'next': redirect_to,
'first_login_ever': is_first_login_ever,
'login_errors': request.method == 'POST',
'backend_names': backend_names,
'active_directory': is_active_directory
})
def dt_logout(request, next_page=None):
"""Log out the user"""
username = request.user.get_username()
request.audit = {
'username': username,
'operation': 'USER_LOGOUT',
'operationText': 'Logged out user: %s' % username
}
backends = get_backends()
if backends:
for backend in backends:
if hasattr(backend, 'logout'):
response = backend.logout(request, next_page)
if response:
return response
return django.contrib.auth.views.logout(request, next_page)
def profile(request):
"""
Dumps JSON for user-profile information.
"""
return render(None, request, _profile_dict(request.user))
def _profile_dict(user):
return dict(
username=user.username,
first_name=user.first_name,
last_name=user.last_name,
last_login=str(user.last_login), # datetime object needs to be converted
email=user.email)
# OAuth is based on Twitter as example.
@login_notrequired
def oauth_login(request):
assert oauth is not None
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
client = oauth.Client(consumer)
resp, content = client.request(OAUTH.REQUEST_TOKEN_URL.get(), "POST", body=urllib.urlencode({
'oauth_callback': 'http://' + request.get_host() + '/login/oauth_authenticated/'
}))
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
request.session['request_token'] = dict(cgi.parse_qsl(content))
url = "%s?oauth_token=%s" % (OAUTH.AUTHENTICATE_URL.get(), request.session['request_token']['oauth_token'])
return HttpResponseRedirect(url)
@login_notrequired
def oauth_authenticated(request):
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
token = oauth.Token(request.session['request_token']['oauth_token'], request.session['request_token']['oauth_token_secret'])
client = oauth.Client(consumer, token)
resp, content = client.request(OAUTH.ACCESS_TOKEN_URL.get(), "GET")
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
access_token = dict(cgi.parse_qsl(content))
user = authenticate(access_token=access_token)
login(request, user)
redirect_to = request.REQUEST.get('next', '/')
return HttpResponseRedirect(redirect_to)
| en | 0.852755 | #!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Return dictionary of User objects and a dictionary of the user's IP address and last access time # If secret_key changed, this resolution won't work. # For first login, need to validate user info! # Must login by using the AuthenticationForm. # It provides 'backends' on the User object. # Create home directory for first user. Log out the user Dumps JSON for user-profile information. # datetime object needs to be converted # OAuth is based on Twitter as example. | 1.70082 | 2 |
models/node.py | AlonsoReyes/t-intersection-graph | 0 | 683 |
class Node(object):
def __init__(self, name, follow_list, intention, lane):
self.name = name
self.follow_list = follow_list
self.intention = intention
self.lane = lane
def __eq__(self, other):
if isinstance(other, Node):
if self.name == other.get_name() and self.follow_list == other.get_follow_list() \
and self.intention == other.get_intention() and self.lane == other.get_lane():
return True
return False
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_follow_list(self):
return self.follow_list
def set_follow_list(self, follow_list):
self.follow_list = follow_list
def get_intention(self):
return self.intention
def set_intention(self, intention):
self.intention = intention
def get_lane(self):
return self.lane
def set_lane(self, lane):
self.lane = lane
|
class Node(object):
def __init__(self, name, follow_list, intention, lane):
self.name = name
self.follow_list = follow_list
self.intention = intention
self.lane = lane
def __eq__(self, other):
if isinstance(other, Node):
if self.name == other.get_name() and self.follow_list == other.get_follow_list() \
and self.intention == other.get_intention() and self.lane == other.get_lane():
return True
return False
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_follow_list(self):
return self.follow_list
def set_follow_list(self, follow_list):
self.follow_list = follow_list
def get_intention(self):
return self.intention
def set_intention(self, intention):
self.intention = intention
def get_lane(self):
return self.lane
def set_lane(self, lane):
self.lane = lane
| none | 1 | 3.515883 | 4 |
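A quick usage sketch for the Node class in the record above. The import path follows the record's file path, and the vehicle names, follow lists, intentions and lane numbers are made-up illustration values, not data from the original repository.

# Hypothetical usage of the Node class above; all values are illustrative.
from models.node import Node  # import path taken from the record header

a = Node('car_1', ['car_0'], 'straight', 2)
b = Node('car_1', ['car_0'], 'straight', 2)
c = Node('car_2', [], 'left', 1)

print(a == b)        # True: name, follow list, intention and lane all match
print(a == c)        # False: the attributes differ
print(a == 'car_1')  # False: non-Node values never compare equal

c.set_follow_list(['car_1'])
print(c.get_follow_list())  # ['car_1']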
|
gsheetsdb/url.py | tim-werner/gsheets-db-api | 3 | 684 | <reponame>tim-werner/gsheets-db-api<filename>gsheetsdb/url.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse
FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
def get_url(url, headers=0, gid=0, sheet=None):
parts = parse.urlparse(url)
if parts.path.endswith('/edit'):
path = parts.path[:-len('/edit')]
else:
path = parts.path
path = '/'.join((path.rstrip('/'), 'gviz/tq'))
qs = parse.parse_qs(parts.query)
if 'headers' in qs:
headers = int(qs['headers'][-1])
if 'gid' in qs:
gid = qs['gid'][-1]
if 'sheet' in qs:
sheet = qs['sheet'][-1]
if parts.fragment.startswith('gid='):
gid = parts.fragment[len('gid='):]
args = OrderedDict()
if headers > 0:
args['headers'] = headers
if sheet is not None:
args['sheet'] = sheet
else:
args['gid'] = gid
params = parse.urlencode(args)
return parse.urlunparse(
(parts.scheme, parts.netloc, path, None, params, None))
def extract_url(sql):
try:
return parse_sql(sql)['from']
except pyparsing.ParseException:
# fallback to regex to extract from
match = FROM_REGEX.search(sql)
if match:
return match.group(1).strip('"')
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse
FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
def get_url(url, headers=0, gid=0, sheet=None):
parts = parse.urlparse(url)
if parts.path.endswith('/edit'):
path = parts.path[:-len('/edit')]
else:
path = parts.path
path = '/'.join((path.rstrip('/'), 'gviz/tq'))
qs = parse.parse_qs(parts.query)
if 'headers' in qs:
headers = int(qs['headers'][-1])
if 'gid' in qs:
gid = qs['gid'][-1]
if 'sheet' in qs:
sheet = qs['sheet'][-1]
if parts.fragment.startswith('gid='):
gid = parts.fragment[len('gid='):]
args = OrderedDict()
if headers > 0:
args['headers'] = headers
if sheet is not None:
args['sheet'] = sheet
else:
args['gid'] = gid
params = parse.urlencode(args)
return parse.urlunparse(
(parts.scheme, parts.netloc, path, None, params, None))
def extract_url(sql):
try:
return parse_sql(sql)['from']
except pyparsing.ParseException:
# fallback to regex to extract from
match = FROM_REGEX.search(sql)
if match:
return match.group(1).strip('"') | en | 0.923934 | # fallback to regex to extract from | 2.135828 | 2 |
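A short usage sketch for the two helpers above: get_url normalizes a Google Sheets link to its gviz/tq query endpoint, and extract_url pulls the quoted sheet URL out of a SQL FROM clause (falling back to the regex when the parser rejects the statement). The spreadsheet id is invented and the import path follows the record's file path.

# Illustrative only: the spreadsheet id is made up.
from gsheetsdb.url import get_url, extract_url

sheet = 'https://docs.google.com/spreadsheets/d/abc123/edit#gid=0'
print(get_url(sheet, headers=1))
# https://docs.google.com/spreadsheets/d/abc123/gviz/tq?headers=1&gid=0

sql = 'SELECT country, SUM(cnt) FROM "%s" GROUP BY country' % sheet
print(extract_url(sql))  # the quoted URL from the FROM clause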
detr/datasets/construction_panoptic.py | joyjeni/detr-fine | 0 | 685 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
# from util.box_ops import masks_to_boxes
from .construction import make_construction_transforms
import logging
def box_xywh_to_xyxy(x):
xs, ys, w, h = x.unbind(-1)
b = [xs, ys, (xs + w), (ys + h)]
return torch.stack(b, dim=-1)
def masks_to_boxes(segments):
boxes = []
labels = []
iscrowd = []
area = []
for ann in segments:
if len(ann["bbox"]) == 4:
boxes.append(ann["bbox"])
area.append(ann['area'])
else:
boxes.append([0, 0, 2, 2])
area.append(4)
labels.append(ann["category_id"])
iscrowd.append(ann['iscrowd'])
if len(boxes) == 0 and len(labels) == 0:
boxes.append([0, 0, 2, 2])
labels.append(1)
area.append(4)
iscrowd.append(0)
boxes = torch.tensor(boxes, dtype=torch.int64)
labels = torch.tensor(labels, dtype=torch.int64)
iscrowd = torch.tensor(iscrowd)
area = torch.tensor(area)
boxes = box_xywh_to_xyxy(boxes)
return boxes, labels, iscrowd, area
class ConstructionPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
with open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
# sanity check
if "annotations" in self.coco:
for img, ann in zip(self.coco["images"], self.coco["annotations"]):
assert img["file_name"][:-4] == ann["file_name"][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
try:
ann_info = (
self.coco["annotations"][idx]
if "annotations" in self.coco
else self.coco["images"][idx]
)
img_path = Path(self.img_folder) / ann_info["file_name"].replace(".png", ".jpg")
ann_path = Path(self.ann_folder) / ann_info["file_name"]
img = Image.open(img_path).convert("RGB")
w, h = img.size
if "segments_info" in ann_info:
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
# labels = torch.tensor(
# [ann["category_id"] for ann in ann_info["segments_info"]],
# dtype=torch.int64,
# )
target = {}
target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
if self.return_masks:
target['masks'] = masks
boxes, labels, iscrowd, area = masks_to_boxes(ann_info["segments_info"])
target['labels'] = labels
# Instead of finding boxes, just take the one from json info available
# target["boxes"] = masks_to_boxes(ann_info["segments_info"])
target["boxes"] = boxes
target['size'] = torch.as_tensor([int(h), int(w)])
target['orig_size'] = torch.as_tensor([int(h), int(w)])
target['iscrowd'] = iscrowd
target['area'] = area
# if "segments_info" in ann_info:
# for name in ['iscrowd', 'area']:
# target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
except Exception as e:
logging.error(ann_info)
raise e
def __len__(self):
return len(self.coco['images'])
def get_height_and_width(self, idx):
img_info = self.coco['images'][idx]
height = img_info['height']
width = img_info['width']
return height, width
def build(image_set, args):
root = Path(args.data_path)
assert (
root.exists()
), f"provided Panoptic path {root} does not exist"
mode = "panoptic"
PATHS = {
"train": ("images", f"{mode}", f"{mode}.json"),
"val": ("images", f"val_{mode}", f"val_{mode}.json"),
}
img_folder, ann_folder, ann_file = PATHS[image_set]
img_folder_path = root / img_folder
ann_folder_path = root / ann_folder
ann_file = root / ann_file
dataset = ConstructionPanoptic(
img_folder_path,
ann_folder_path,
ann_file,
transforms=make_construction_transforms(image_set),
return_masks=args.masks,
)
return dataset
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
# from util.box_ops import masks_to_boxes
from .construction import make_construction_transforms
import logging
def box_xywh_to_xyxy(x):
xs, ys, w, h = x.unbind(-1)
b = [xs, ys, (xs + w), (ys + h)]
return torch.stack(b, dim=-1)
def masks_to_boxes(segments):
boxes = []
labels = []
iscrowd = []
area = []
for ann in segments:
if len(ann["bbox"]) == 4:
boxes.append(ann["bbox"])
area.append(ann['area'])
else:
boxes.append([0, 0, 2, 2])
area.append(4)
labels.append(ann["category_id"])
iscrowd.append(ann['iscrowd'])
if len(boxes) == 0 and len(labels) == 0:
boxes.append([0, 0, 2, 2])
labels.append(1)
area.append(4)
iscrowd.append(0)
boxes = torch.tensor(boxes, dtype=torch.int64)
labels = torch.tensor(labels, dtype=torch.int64)
iscrowd = torch.tensor(iscrowd)
area = torch.tensor(area)
boxes = box_xywh_to_xyxy(boxes)
return boxes, labels, iscrowd, area
class ConstructionPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
with open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
# sanity check
if "annotations" in self.coco:
for img, ann in zip(self.coco["images"], self.coco["annotations"]):
assert img["file_name"][:-4] == ann["file_name"][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
try:
ann_info = (
self.coco["annotations"][idx]
if "annotations" in self.coco
else self.coco["images"][idx]
)
img_path = Path(self.img_folder) / ann_info["file_name"].replace(".png", ".jpg")
ann_path = Path(self.ann_folder) / ann_info["file_name"]
img = Image.open(img_path).convert("RGB")
w, h = img.size
if "segments_info" in ann_info:
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
# labels = torch.tensor(
# [ann["category_id"] for ann in ann_info["segments_info"]],
# dtype=torch.int64,
# )
target = {}
target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
if self.return_masks:
target['masks'] = masks
boxes, labels, iscrowd, area = masks_to_boxes(ann_info["segments_info"])
target['labels'] = labels
# Instead of finding boxes, just take the one from json info available
# target["boxes"] = masks_to_boxes(ann_info["segments_info"])
target["boxes"] = boxes
target['size'] = torch.as_tensor([int(h), int(w)])
target['orig_size'] = torch.as_tensor([int(h), int(w)])
target['iscrowd'] = iscrowd
target['area'] = area
# if "segments_info" in ann_info:
# for name in ['iscrowd', 'area']:
# target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
except Exception as e:
logging.error(ann_info)
raise e
def __len__(self):
return len(self.coco['images'])
def get_height_and_width(self, idx):
img_info = self.coco['images'][idx]
height = img_info['height']
width = img_info['width']
return height, width
def build(image_set, args):
root = Path(args.data_path)
assert (
root.exists()
), f"provided Panoptic path {root} does not exist"
mode = "panoptic"
PATHS = {
"train": ("images", f"{mode}", f"{mode}.json"),
"val": ("images", f"val_{mode}", f"val_{mode}.json"),
}
img_folder, ann_folder, ann_file = PATHS[image_set]
img_folder_path = root / img_folder
ann_folder_path = root / ann_folder
ann_file = root / ann_file
dataset = ConstructionPanoptic(
img_folder_path,
ann_folder_path,
ann_file,
transforms=make_construction_transforms(image_set),
return_masks=args.masks,
)
return dataset
| en | 0.757888 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # from util.box_ops import masks_to_boxes # sort 'images' field so that they are aligned with 'annotations' # i.e., in alphabetical order # sanity check # labels = torch.tensor( # [ann["category_id"] for ann in ann_info["segments_info"]], # dtype=torch.int64, # ) # Instead of finding boxes, just take the one from json info available # target["boxes"] = masks_to_boxes(ann_info["segments_info"]) # if "segments_info" in ann_info: # for name in ['iscrowd', 'area']: # target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) | 2.265004 | 2 |
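A small self-contained check of the xywh-to-xyxy box conversion used by the dataset class above. The helper is copied verbatim from the record so the snippet runs without the rest of the module; the box values are arbitrary, and the second row is the dummy 2x2 box the record substitutes when a segment has no usable bbox.

import torch

def box_xywh_to_xyxy(x):
    xs, ys, w, h = x.unbind(-1)
    return torch.stack([xs, ys, xs + w, ys + h], dim=-1)

boxes_xywh = torch.tensor([[10, 20, 30, 40],   # x, y, width, height
                           [0, 0, 2, 2]])      # dummy box for empty segments
print(box_xywh_to_xyxy(boxes_xywh))
# tensor([[10, 20, 40, 60],
#         [ 0,  0,  2,  2]])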
Code/all-starter-code/search.py | diyarkudrat/CS-1.3-Core-Data-Structures | 0 | 686 | #!python
"""
ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!!
"""
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
return linear_search_iterative(array, item)
# return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
"""Time complexity: O(n) because you iterate through n amount of items in array
Space Complexity: O(n) because there are n amount of items"""
# loop over all array values until item is found
for index, value in enumerate(array): #O(n)
if item == value: #O(1)
return index # found O(1)
return None # not found O(1)
def linear_search_recursive(array, item, index=0):
"""Time complexity: O(n) because you are returning the function continuously until index equals to nth-item
"""
if len(array) <= index:
return index
if array[index] == item:
return index
else:
return linear_search_recursive(array, item, index + 1)
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
return binary_search_iterative(array, item)
# return binary_search_recursive(array, item)
def binary_search_iterative(array, item):
"""Time Complexity: O(log*n) because you are constantly dividing the length of array by 2 until array length is 1
Space Complexity: O(1) """
left, right = 0, len(array) - 1
if len(array) == 0:
return None
while left <= right:
middle = left + (right - left) // 2
if item == array[middle]:
return middle
elif item > array[middle]:
left = middle + 1
else:
right = middle - 1
return None
def binary_search_recursive(array, item, left=None, right=None):
"""Time Complexity: O(log*n)
Space Complexity: 0(log*n) recursion call stack space"""
# TODO: implement binary search recursively here
if left is None and right is None:
left, right = 0, len(array) - 1
middle = left + (right - left) // 2
if left > right:
return None
if array[middle] == item:
return middle
elif item > array[middle]:
return binary_search_recursive(array, item, middle + 1, right)
else:
return binary_search_recursive(array, item, left, middle - 1)
| #!python
"""
ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!!
"""
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
return linear_search_iterative(array, item)
# return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
"""Time complexity: O(n) because you iterate through n amount of items in array
Space Complexity: O(n) because there are n amount of items"""
# loop over all array values until item is found
for index, value in enumerate(array): #O(n)
if item == value: #O(1)
return index # found O(1)
return None # not found O(1)
def linear_search_recursive(array, item, index=0):
"""Time complexity: O(n) because you are returning the function continuously until index equals to nth-item
"""
if len(array) <= index:
return index
if array[index] == item:
return index
else:
return linear_search_recursive(array, item, index + 1)
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
return binary_search_iterative(array, item)
# return binary_search_recursive(array, item)
def binary_search_iterative(array, item):
"""Time Complexity: O(log*n) because you are constantly dividing the length of array by 2 until array length is 1
Space Complexity: O(1) """
left, right = 0, len(array) - 1
if len(array) == 0:
return None
while left <= right:
middle = left + (right - left) // 2
if item == array[middle]:
return middle
elif item > array[middle]:
left = middle + 1
else:
right = middle - 1
return None
def binary_search_recursive(array, item, left=None, right=None):
"""Time Complexity: O(log*n)
Space Complexity: 0(log*n) recursion call stack space"""
# TODO: implement binary search recursively here
if left is None and right is None:
left, right = 0, len(array) - 1
middle = left + (right - left) // 2
if left > right:
return None
if array[middle] == item:
return middle
elif item > array[middle]:
return binary_search_recursive(array, item, middle + 1, right)
else:
return binary_search_recursive(array, item, left, middle - 1)
 | en | 0.758516 | #!python ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!! return the first index of item in array or None if item is not found # return linear_search_recursive(array, item) Time complexity: O(n) because you iterate through n amount of items in array Space Complexity: O(n) because there are n amount of items # loop over all array values until item is found #O(n) #O(1) # found O(1) # not found O(1) Time complexity: O(n) because the function recurses once per element until the item is found or the end of the array has been reached return the index of item in sorted array or None if item is not found # return binary_search_recursive(array, item) Time Complexity: O(log n) because you are constantly dividing the length of array by 2 until array length is 1 Space Complexity: O(1) Time Complexity: O(log n) Space Complexity: O(log n) recursion call stack space # TODO: implement binary search recursively here | 4.43564 | 4 
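A usage sketch for the search helpers above. It assumes the functions have been imported from the module (or pasted into the same file); the name list is arbitrary but must already be sorted for the binary searches to be meaningful.

friends = ['Alex', 'Brian', 'Julia', 'Kojin', 'Nick', 'Winnie']  # already sorted

print(linear_search(friends, 'Winnie'))         # 5
print(linear_search(friends, 'Dan'))            # None
print(binary_search(friends, 'Julia'))          # 2
print(binary_search_recursive(friends, 'Zed'))  # None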
max_ai/src/max_ai/mem_db.py | mat-heim/max_ros | 0 | 687 | #!/usr/bin/python
'''
memory class
stored in a SQLite database
holds raw input and memories in parse-tagged columns
'''
import sys
import re
import sqlite3
import os
from datetime import date, datetime
from pattern.en import parse
from pattern.en import pprint
from pattern.en import parsetree
from pattern.en import wordnet
from pattern.en import pluralize, singularize
from pattern.en import conjugate, lemma, lexeme
#dir = os.path.dirname(os.path.abspath(__file__))
dir = '/home/erni/catkin_ws/src/max_ros/max_ai/src/max_ai/'
RM = sqlite3.connect(dir +'robbie_memory.sqlite')
#RM = sqlite3.connect(dir + '/data/robbie_memory.db')
cursor = RM.cursor()
# Information about a single concept
class conceptClass:
def __init__(self, state='none', locality='none'):
self.state = state # what/how is 'concept'
self.reference = 'none' # unused
self.locality = locality # where is 'concept'
self.person = '3sg' # e.g. a thing is 3rd-person, singular
self.isProperNoun = False # True if proper noun: e.g. Robert
self.properties = {} # Dict of custom properties, e.g. 'age' = 39, 'color' = 'blue'
# Robbie memory class. Collection of concepts
class memoryClass():
def __init__(self):
self.concepts = {}
self.person = {'I': '1sg',
'you': '2sg'
}
self.posessivePronouns = {'1sg': 'my',
'2sg': 'your',
'3sg': 'its'
}
# Add a concept to memory
def add(self, c):
        # add concept to raw_input table in robbie_memory
# x=
# dt = datetime.now()
# RM.execute("insert into RAW_INPUT (RAW, DATE) values (?, ?)",(c, dt))
# RM.commit()
self.concepts[c] = conceptClass()
if c in self.person:
self.concepts[c].person = self.person[c]
else:
self.concepts[c].person = '3sg'
    # Return the stored (concept, location) row if concept 'c' is in memory, else None
def known(self, c):
cursor.execute('''SELECT concept, location FROM memory WHERE concept =?''', (c,))
user = cursor.fetchone()
# if user == 'None':
return user
def add_memory(self, a, b):
c = '3sg'
dt = datetime.now()
RM.execute("insert into memory (concept, location, person,DATE) values (?, ?, ?, ?)", (a, b, c, dt))
RM.commit()
def update_memory(self, a, b):
cursor.execute('''UPDATE memory SET location = ? WHERE concept = ? ''', (b, a))
RM.commit()
def search_memory(self, a):
cursor.execute('''SELECT concept,location, person FROM memory WHERE concept =?''', (a,))
user = cursor.fetchone()
return user
def search_profile(self, a):
cursor.execute('''SELECT value FROM profile WHERE item =?''', (a,))
user = cursor.fetchone()
return user
def Dump(self):
return (self.concepts.state)
| #!/usr/bin/python
'''
memory class
stored in a SQLite database
holds raw input and memories in parse-tagged columns
'''
import sys
import re
import sqlite3
import os
from datetime import date, datetime
from pattern.en import parse
from pattern.en import pprint
from pattern.en import parsetree
from pattern.en import wordnet
from pattern.en import pluralize, singularize
from pattern.en import conjugate, lemma, lexeme
#dir = os.path.dirname(os.path.abspath(__file__))
dir = '/home/erni/catkin_ws/src/max_ros/max_ai/src/max_ai/'
RM = sqlite3.connect(dir +'robbie_memory.sqlite')
#RM = sqlite3.connect(dir + '/data/robbie_memory.db')
cursor = RM.cursor()
# Information about a single concept
class conceptClass:
def __init__(self, state='none', locality='none'):
self.state = state # what/how is 'concept'
self.reference = 'none' # unused
self.locality = locality # where is 'concept'
self.person = '3sg' # e.g. a thing is 3rd-person, singular
self.isProperNoun = False # True if proper noun: e.g. Robert
self.properties = {} # Dict of custom properties, e.g. 'age' = 39, 'color' = 'blue'
# Robbie memory class. Collection of concepts
class memoryClass():
def __init__(self):
self.concepts = {}
self.person = {'I': '1sg',
'you': '2sg'
}
self.posessivePronouns = {'1sg': 'my',
'2sg': 'your',
'3sg': 'its'
}
# Add a concept to memory
def add(self, c):
        # add concept to raw_input table in robbie_memory
# x=
# dt = datetime.now()
# RM.execute("insert into RAW_INPUT (RAW, DATE) values (?, ?)",(c, dt))
# RM.commit()
self.concepts[c] = conceptClass()
if c in self.person:
self.concepts[c].person = self.person[c]
else:
self.concepts[c].person = '3sg'
    # Return the stored (concept, location) row if concept 'c' is in memory, else None
def known(self, c):
cursor.execute('''SELECT concept, location FROM memory WHERE concept =?''', (c,))
user = cursor.fetchone()
# if user == 'None':
return user
def add_memory(self, a, b):
c = '3sg'
dt = datetime.now()
RM.execute("insert into memory (concept, location, person,DATE) values (?, ?, ?, ?)", (a, b, c, dt))
RM.commit()
def update_memory(self, a, b):
cursor.execute('''UPDATE memory SET location = ? WHERE concept = ? ''', (b, a))
RM.commit()
def search_memory(self, a):
cursor.execute('''SELECT concept,location, person FROM memory WHERE concept =?''', (a,))
user = cursor.fetchone()
return user
def search_profile(self, a):
cursor.execute('''SELECT value FROM profile WHERE item =?''', (a,))
user = cursor.fetchone()
return user
def Dump(self):
return (self.concepts.state)
 | en | 0.625569 | #!/usr/bin/python memory class stored in a SQLite database holds raw input and memories in parse-tagged columns #dir = os.path.dirname(os.path.abspath(__file__)) #RM = sqlite3.connect(dir + '/data/robbie_memory.db') # Information about a single concept # what/how is 'concept' # unused # where is 'concept' # e.g. a thing is 3rd-person, singular # True if proper noun: e.g. Robert # Dict of custom properties, e.g. 'age' = 39, 'color' = 'blue' # Robbie memory class. Collection of concepts # Add a concept to memory # add concept to raw_input table in robbie_memory # x= # dt = datetime.now() # RM.execute("insert into RAW_INPUT (RAW, DATE) values (?, ?)",(c, dt)) # RM.commit() # Return the stored (concept, location) row if concept 'c' is in memory, else None SELECT concept, location FROM memory WHERE concept =? # if user == 'None': UPDATE memory SET location = ? WHERE concept = ? SELECT concept,location, person FROM memory WHERE concept =? SELECT value FROM profile WHERE item =? | 3.410358 | 3 
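The module above assumes a robbie_memory.sqlite database already exists at a hard-coded path, with memory and profile tables (plus a RAW_INPUT table used only by commented-out code). A minimal bootstrap consistent with the SQL in the record might look like the sketch below; the column types are guesses inferred from the queries, not taken from the original project.

import sqlite3

db = sqlite3.connect('robbie_memory.sqlite')  # path is an assumption for the sketch
db.execute("CREATE TABLE IF NOT EXISTS memory (concept TEXT, location TEXT, person TEXT, DATE TEXT)")
db.execute("CREATE TABLE IF NOT EXISTS profile (item TEXT, value TEXT)")
db.commit()

# With the tables in place, memoryClass can round-trip a concept:
#   mem = memoryClass()
#   mem.add_memory('keys', 'kitchen')
#   print(mem.search_memory('keys'))   # ('keys', 'kitchen', '3sg')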
examples/pylab_examples/image_masked.py | pierre-haessig/matplotlib | 16 | 688 | <filename>examples/pylab_examples/image_masked.py
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.
The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''
from pylab import *
from numpy import ma
import matplotlib.colors as colors
delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1) # difference of Gaussians
# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
cmap=palette,
norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)
subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
cmap=palette,
norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
ncolors=256, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
orientation='horizontal', shrink=0.8)
show()
| <filename>examples/pylab_examples/image_masked.py
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.
The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''
from pylab import *
from numpy import ma
import matplotlib.colors as colors
delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1) # difference of Gaussians
# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
cmap=palette,
norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)
subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
cmap=palette,
norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
ncolors=256, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
orientation='horizontal', shrink=0.8)
show()
| en | 0.821941 | #!/usr/bin/env python imshow with masked array input and out-of-range colors. The second subplot illustrates the use of BoundaryNorm to get a filled contour effect. # difference of Gaussians # Set up a colormap: # Alternatively, we could use # palette.set_bad(alpha = 0.0) # to make the bad region transparent. This is the default. # If you comment out all the palette.set* lines, you will see # all the defaults; under and over will be colored with the # first and last colors in the palette, respectively. # By setting vmin and vmax in the norm, we establish the # range to which the regular palette color scale is applied. # Anything above that range is colored based on palette.set_over, etc. | 2.836006 | 3 |
app/schemas/socket.py | d3vzer0/reternal-backend | 6 | 689 |
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
class Authenticate(BaseModel):
access_token: str |
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
class Authenticate(BaseModel):
access_token: str | none | 1 | 2.281393 | 2 |
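A minimal sketch of how the Authenticate schema above behaves; the token string is a placeholder and the import path follows the record's file path.

from pydantic import ValidationError
from app.schemas.socket import Authenticate  # path from the record header

auth = Authenticate(access_token='example-token')
print(auth.access_token)   # example-token

try:
    Authenticate()         # missing required field
except ValidationError as exc:
    print(exc.errors()[0]['loc'])   # ('access_token',)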
|
meme/meme.py | aniket091/modmail-plugins-1 | 8 | 690 | <reponame>aniket091/modmail-plugins-1
import discord
from discord.ext import commands
import requests
import random
from box import Box
class WildMemes(commands.Cog):
"""
Randomly spawns memes.
"""
subreddits = [
"dankmemes",
"wholesomememes",
"memes",
"terriblefacebookmemes",
"historymemes",
"me_irl",
"2meirl4meirl",
"fellowkids",
"tumblr"
]
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
elif random.randint(0, 100) < 25:
async with message.channel.typing():
chosen_sub = random.choice(self.subreddits)
r = requests.get(f"https://api.reddit.com/r/{chosen_sub}/top.json?sort=top&t=day&limit=500",
headers={'User-agent': 'Super Bot 9000'})
r = r.json()
boxed = Box(r)
data = (random.choice(boxed.data.children)).data
image = data.url
upvotes = data.ups
title = data.title
subreddit = data.subreddit_name_prefixed
embed = discord.Embed(title=f'Meme Title: {title}', color=0x6bdcd7)
embed.set_author(name="A wild meme has appeared!")
embed.set_image(url=image)
embed.set_footer(text=f"On {subreddit} with {upvotes} upvotes.")
await message.channel.send(embed=embed)
def setup(bot):
bot.add_cog(WildMemes(bot)) | import discord
from discord.ext import commands
import requests
import random
from box import Box
class WildMemes(commands.Cog):
"""
Randomly spawns memes.
"""
subreddits = [
"dankmemes",
"wholesomememes",
"memes",
"terriblefacebookmemes",
"historymemes",
"me_irl",
"2meirl4meirl",
"fellowkids",
"tumblr"
]
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
elif random.randint(0, 100) < 25:
async with message.channel.typing():
chosen_sub = random.choice(self.subreddits)
r = requests.get(f"https://api.reddit.com/r/{chosen_sub}/top.json?sort=top&t=day&limit=500",
headers={'User-agent': 'Super Bot 9000'})
r = r.json()
boxed = Box(r)
data = (random.choice(boxed.data.children)).data
image = data.url
upvotes = data.ups
title = data.title
subreddit = data.subreddit_name_prefixed
embed = discord.Embed(title=f'Meme Title: {title}', color=0x6bdcd7)
embed.set_author(name="A wild meme has appeared!")
embed.set_image(url=image)
embed.set_footer(text=f"On {subreddit} with {upvotes} upvotes.")
await message.channel.send(embed=embed)
def setup(bot):
bot.add_cog(WildMemes(bot)) | en | 0.659378 | Randomly spawns memes. | 2.789676 | 3 |
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 0 | 691 | #!/usr/bin/python
################################################################################
# 20bdcef0-5cc5-11e4-af55-00155d01fe08
#
# <NAME>
# <EMAIL>
# <EMAIL>
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20bdcef0-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = True
# Get Registry MultiSZ
multi_sz = cli.get_reg_multi_sz(r'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths', 'Machine')
# Output Lines
self.output = [r'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths', ('Machine=')] + multi_sz
# Recommended MultiSZ
rec_multi_sz = ("System\CurrentControlSet\Control\ProductOptions,System\CurrentControlSet\Control\Server Applications,Software\Microsoft\Windows NT\CurrentVersion")
for sz in multi_sz:
if sz.lower() not in rec_multi_sz.lower():
self.is_compliant = False
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths' -name 'Machine' -Type MultiString -value System\CurrentControlSet\Control\ProductOptions,System\CurrentControlSet\Control\Server Applications,Software\Microsoft\Windows NT\CurrentVersion")
| #!/usr/bin/python
################################################################################
# 20bdcef0-5cc5-11e4-af55-00155d01fe08
#
# <NAME>
# <EMAIL>
# <EMAIL>
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20bdcef0-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = True
# Get Registry MultiSZ
multi_sz = cli.get_reg_multi_sz(r'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths', 'Machine')
# Output Lines
self.output = [r'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths', ('Machine=')] + multi_sz
# Recommended MultiSZ
rec_multi_sz = ("System\CurrentControlSet\Control\ProductOptions,System\CurrentControlSet\Control\Server Applications,Software\Microsoft\Windows NT\CurrentVersion")
for sz in multi_sz:
if sz.lower() not in rec_multi_sz.lower():
self.is_compliant = False
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths' -name 'Machine' -Type MultiString -value System\CurrentControlSet\Control\ProductOptions,System\CurrentControlSet\Control\Server Applications,Software\Microsoft\Windows NT\CurrentVersion")
| de | 0.574357 | #!/usr/bin/python ################################################################################ # 20bdcef0-5cc5-11e4-af55-00155d01fe08 # # <NAME> # <EMAIL> # <EMAIL> # # 10/24/2014 Original Construction ################################################################################ # Initialize Compliance # Get Registry MultiSZ # Output Lines # Recommended MultiSZ | 2.156621 | 2 |
mikan/exceptions.py | dzzhvks94vd2/mikan | 1 | 692 | <reponame>dzzhvks94vd2/mikan
class MikanException(Exception):
"""Generic Mikan exception"""
class ConversionError(MikanException, ValueError):
"""Cannot convert a string"""
| class MikanException(Exception):
"""Generic Mikan exception"""
class ConversionError(MikanException, ValueError):
"""Cannot convert a string""" | en | 0.138963 | Generic Mikan exception Cannot convert a string | 2.198275 | 2 |
cv_recommender/account/urls.py | hhhameem/CV-Recommender | 1 | 693 | <reponame>hhhameem/CV-Recommender<gh_stars>1-10
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('register/', views.register, name='register'),
path('login/', views.userlogin, name='login'),
path('logout/', views.userlogout, name='logout'),
path('password_change/', auth_views.PasswordChangeView.as_view(),
name='password_change'),
path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(),
name='password_change_done'),
path('password_reset/', auth_views.PasswordResetView.as_view(),
name='password_reset'),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(),
name='password_reset_done'),
path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path('reset/done/', auth_views.PasswordResetCompleteView.as_view(),
name='password_reset_complete'),
path('applicantdashboard/', views.applicantdashboard,
name='applicantdashboard'),
path('recruiterdashboard/', views.recruiterdashboard,
name='recruiterdashboard'),
path('applicantdashboard/profile-edit/', views.applicantedit,
name='editapplicantprofile'),
path('recruiterdashboard/profile-edit/', views.recruiteredit,
name='editrecruiterprofile'),
]
| from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('register/', views.register, name='register'),
path('login/', views.userlogin, name='login'),
path('logout/', views.userlogout, name='logout'),
path('password_change/', auth_views.PasswordChangeView.as_view(),
name='password_change'),
path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(),
name='password_change_done'),
path('password_reset/', auth_views.PasswordResetView.as_view(),
name='password_reset'),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(),
name='password_reset_done'),
path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path('reset/done/', auth_views.PasswordResetCompleteView.as_view(),
name='password_reset_complete'),
path('applicantdashboard/', views.applicantdashboard,
name='applicantdashboard'),
path('recruiterdashboard/', views.recruiterdashboard,
name='recruiterdashboard'),
path('applicantdashboard/profile-edit/', views.applicantedit,
name='editapplicantprofile'),
path('recruiterdashboard/profile-edit/', views.recruiteredit,
name='editrecruiterprofile'),
] | none | 1 | 1.803471 | 2 |
|
Moodle/scripts/edit_conf.py | nii-gakunin-cloud/ocs-templates | 4 | 694 | from datetime import datetime
from difflib import unified_diff
from logging import basicConfig, getLogger, INFO
import os
from pathlib import Path
import shutil
import subprocess
import sys
import yaml
from urllib.parse import urlparse
from notebook import notebookapp
from IPython.core.display import HTML
WORKDIR = 'edit'
META_YML = '.vcp-meta.yml'
MOODLE_DIR = '/opt/moodle'
CONF_RELATIVE = '/etc'
ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE']
logger = getLogger(__name__)
basicConfig(level=INFO, format='%(message)s')
def generate_local_path(host, conf_path, version=None):
ret = Path(WORKDIR).absolute() / host
if version is None:
ret /= datetime.now().strftime("%Y%m%d%H%M%S%f")
else:
ret /= version
ret /= Path(conf_path).name
return ret
def generate_remote_path(container, conf_path, relative_to=CONF_RELATIVE):
return (Path(MOODLE_DIR) / container / 'conf' /
Path(conf_path).relative_to(relative_to))
def get_local_path(host, container, conf_path, version=None):
if version is None:
version = find_latest_version(host, container, conf_path)
return generate_local_path(host, conf_path, version)
def _match_metainfo(parent, container, conf_path):
p = parent / META_YML
if not p.exists():
return False
with p.open() as f:
params = yaml.safe_load(f)
return (
isinstance(params, dict) and
'container' in params and
'container_path' in params and
params['container'] == container and
params['container_path'] == conf_path)
def _match_metainfo_by_remote_path(parent, remote_path):
p = parent / META_YML
if not p.exists():
return False
with p.open() as f:
params = yaml.safe_load(f)
return (
isinstance(params, dict) and
'remote_path' in params and
params['remote_path'] == remote_path)
def get_versions(host, *args, match=_match_metainfo):
pdir = Path(WORKDIR).absolute() / host
return sorted([
x.name for x in pdir.glob('*')
if x.is_dir() and match(x, *args)])
def find_latest_version(host, container, conf_path):
return get_versions(host, container, conf_path)[-1]
def find_latest_version_by_remote_path(host, remote_path):
return get_versions(
host, remote_path, match=_match_metainfo_by_remote_path)[-1]
def download_file(host, remote_path, conf_path=None):
if conf_path is None:
conf_path = Path(remote_path).name
dest = generate_local_path(host, conf_path)
ansible_arg = f'src={remote_path} dest={dest} flat=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'fetch', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Downloading {remote_path} from {host_1} to {dest}')
return dest
def download_conf_file(host, container, conf_path, relative_to=CONF_RELATIVE):
src = generate_remote_path(container, conf_path, relative_to)
return download_file(host, src, conf_path)
def create_conf_file(host, conf_path):
dest = generate_local_path(host, conf_path)
dest.parent.mkdir(parents=True, exist_ok=True)
dest.touch()
return dest
def _to_backup(conf):
return conf.parent / (conf.name + '.orig')
def make_backup(conf, quiet=False):
org = _to_backup(conf)
if not quiet:
logger.info(f'Copy {conf} {org}')
shutil.copy2(conf, org)
def make_metainfo(local_path, container, conf_path, relative_to=CONF_RELATIVE):
params = {
'container': container,
'container_path': conf_path,
'remote_path':
str(generate_remote_path(container, conf_path, relative_to)),
'version': list(local_path.parts)[-2],
}
with (local_path.parent / META_YML).open(mode='w') as f:
yaml.safe_dump(params, stream=f, default_flow_style=False)
def make_simple_metainfo(local_path, remote_path):
params = {
'remote_path': remote_path,
'version': list(local_path.parts)[-2],
}
with (local_path.parent / META_YML).open(mode='w') as f:
yaml.safe_dump(params, stream=f, default_flow_style=False)
def generate_edit_link(conf):
nb_conf = list(notebookapp.list_running_servers())[0]
p = (Path(nb_conf['base_url']) / 'edit' /
conf.absolute().relative_to(nb_conf['notebook_dir']))
return HTML(f'<a href={p} target="_blank">{p.name}</a>')
def show_diff(path_a, path_b):
lines_a = []
lines_b = []
with path_a.open() as f:
lines_a = f.readlines()
with path_b.open() as f:
lines_b = f.readlines()
diff = list(unified_diff(
lines_a, lines_b, fromfile=path_a.name, tofile=path_b.name))
sys.stdout.writelines(diff)
return len(diff)
def upload_conf_file(src, host, container, conf_path,
relative_to=CONF_RELATIVE):
dest = generate_remote_path(container, conf_path, relative_to)
ansible_arg = f'mkdir -p {dest.parent}'
subprocess.run(
['ansible', host, '-a', ansible_arg])
ansible_arg = f'dest={dest} src={src} backup=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Uploading {dest} from {src} to {host_1}')
def restart_container(host, container):
cmd = f'chdir={MOODLE_DIR} docker-compose restart {container}'
logger.info(f'Restart container {container}')
subprocess.check_call(['ansible', host, '-a', cmd])
def fetch_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, create=False):
local_path = download_conf_file(host, container, conf_path, relative_to)
make_backup(local_path)
make_metainfo(local_path, container, conf_path, relative_to)
return generate_edit_link(local_path)
def create_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, create=False):
local_path = create_conf_file(host, conf_path)
make_backup(local_path, quiet=True)
make_metainfo(local_path, container, conf_path, relative_to)
return generate_edit_link(local_path)
def apply_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None, restart=True):
diff = show_local_conf_diff(host, container, conf_path, version)
local_path = get_local_path(host, container, conf_path, version)
upload_conf_file(local_path, host, container, conf_path, relative_to)
if restart:
restart_container(host, container)
def revert_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
local_path = get_local_path(host, container, conf_path, version)
backup_path = _to_backup(local_path)
show_diff(local_path, backup_path)
upload_conf_file(backup_path, host, container, conf_path, relative_to)
restart_container(host, container)
local_path.rename(local_path.parent / (local_path.name + '.revert'))
def show_local_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
conf = get_local_path(host, container, conf_path, version)
with conf.open() as f:
print(f.read())
def edit_local_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
conf = get_local_path(host, container, conf_path, version)
return generate_edit_link(conf)
def show_local_conf_diff(host, container, conf_path, version=None):
local_path = get_local_path(host, container, conf_path, version)
show_diff(_to_backup(local_path), local_path)
def save_shibboleth_part(conf_path):
with conf_path.open() as f:
data = yaml.safe_load(f)
params = {}
if 'shibboleth' in data['services']:
params['shibboleth_container'] = yaml.safe_dump(
data['services']['shibboleth'])
vars_path = conf_path.parent / 'extra_vars.yml'
with vars_path.open(mode='w') as f:
yaml.safe_dump(params, f)
return vars_path
def init_shibboleth_part(conf_dir, hostname, volumes):
shibboleth_volumes = ['/sys/fs/cgroup:/sys/fs/cgroup']
shibboleth_volumes.extend(volumes)
params = {
'shibboleth_container': yaml.safe_dump({
'image': 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4',
'privileged': True,
'ports': ['443:443'],
'volumes': shibboleth_volumes,
'container_name': 'shibboleth',
'hostname': hostname,
}),
}
vars_path = conf_dir / 'shibboleth.yml'
with vars_path.open(mode='w') as f:
yaml.safe_dump(params, f)
return vars_path
def setup_shibboleth_part(local_path, **params):
if params is None or len(params) == 0:
return save_shibboleth_part(local_path)
else:
return init_shibboleth_part(local_path.parent, **params)
def generate_docker_compose(host, conf_path, extra_vars, extra_vars_file):
template = 'template/docker/compose/docker-compose.yml'
ansible_arg = f'src={template} dest={conf_path.parent}/'
env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
args = ['ansible', host, '-m', 'template', '-c', 'local',
'-a', ansible_arg]
for k, v in extra_vars.items():
args.extend(['-e', f'{k}={v}'])
for x in extra_vars_file:
args.extend(['-e', f'@{str(x)}'])
subprocess.run(args=args, env=env, check=True)
def update_docker_compose(host, extra_vars={}, shibboleth_params={}):
remote_path = MOODLE_DIR + '/docker-compose.yml'
local_path = download_file(host, remote_path)
make_backup(local_path)
make_simple_metainfo(local_path, remote_path)
shibboleth_vars = setup_shibboleth_part(local_path, **shibboleth_params)
generate_docker_compose(host, local_path, extra_vars, [shibboleth_vars])
show_diff(_to_backup(local_path), local_path)
return generate_edit_link(local_path)
def append_shibboleth_container(host, moodle_url, volumes=[], extra_vars={}):
hostname = urlparse(moodle_url).netloc
return update_docker_compose(
host, extra_vars,
shibboleth_params={'hostname': hostname, 'volumes': volumes},
)
def upload_docker_compose(host, version=None, apply=False):
remote_path = MOODLE_DIR + '/docker-compose.yml'
if version is None:
version = find_latest_version_by_remote_path(host, remote_path)
local_path = (
Path(WORKDIR).absolute() / host / version / 'docker-compose.yml')
ansible_arg = f'dest={remote_path} src={local_path} backup=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Uploading {remote_path} from {local_path} to {host_1}')
if not apply:
return
ansible_arg = f'chdir=/opt/moodle docker-compose up -d --remove-orphans'
args = ['ansible', host, '-a', ansible_arg]
logger.info('Apply the changes in docker-compose.yml.')
subprocess.run(args=args, check=True)
def generate_proxy_conf(host, conf_path, extra_vars):
template = 'template/docker/compose/moodle-proxy.conf.template'
ansible_arg = f'src={template} dest={conf_path.parent}/moodle-proxy.conf'
env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
args = [
'ansible', host, '-m', 'template', '-c', 'local', '-a', ansible_arg]
for k, v in extra_vars.items():
args.extend(['-e', f'{k}={v}'])
subprocess.run(args=args, env=env, check=True)
def update_proxy_conf(host, extra_vars={}):
conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
container = 'proxy'
link = fetch_conf(host, container, str(conf_path), str(conf_path.parent))
version = find_latest_version(host, container, str(conf_path))
local_path = generate_local_path(host, conf_path, version)
generate_proxy_conf(host, local_path, extra_vars)
show_local_conf_diff(host, container, conf_path, version)
return link
def apply_proxy_conf(host, version=None, restart=True):
conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
apply_conf(host, 'proxy', str(conf_path), str(conf_path.parent),
version, restart)
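# --- Editor's note (illustrative, not part of the original notebook code) -----------
# Typical round trip for the proxy config, assuming an Ansible inventory host named
# "moodle-host" (a placeholder) and whatever variables moodle-proxy.conf.template expects:
#   link = update_proxy_conf("moodle-host", {"server_name": "moodle.example.org"})
#   # ... edit the downloaded copy via the returned link, review the printed diff ...
#   apply_proxy_conf("moodle-host")   # upload the newest local version and restart "proxy"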
| from datetime import datetime
from difflib import unified_diff
from logging import basicConfig, getLogger, INFO
import os
from pathlib import Path
import shutil
import subprocess
import sys
import yaml
from urllib.parse import urlparse
from notebook import notebookapp
from IPython.core.display import HTML
WORKDIR = 'edit'
META_YML = '.vcp-meta.yml'
MOODLE_DIR = '/opt/moodle'
CONF_RELATIVE = '/etc'
ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE']
logger = getLogger(__name__)
basicConfig(level=INFO, format='%(message)s')
def generate_local_path(host, conf_path, version=None):
ret = Path(WORKDIR).absolute() / host
if version is None:
ret /= datetime.now().strftime("%Y%m%d%H%M%S%f")
else:
ret /= version
ret /= Path(conf_path).name
return ret
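# --- Editor's note (illustrative, not part of the original notebook code) -----------
# Local working copies produced above are laid out as edit/<host>/<version>/<filename>,
# where <version> is a timestamp such as 20200101123456000000. Each version directory
# also gains a <filename>.orig backup and a .vcp-meta.yml metadata file (see
# make_backup / make_metainfo below).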
def generate_remote_path(container, conf_path, relative_to=CONF_RELATIVE):
return (Path(MOODLE_DIR) / container / 'conf' /
Path(conf_path).relative_to(relative_to))
def get_local_path(host, container, conf_path, version=None):
if version is None:
version = find_latest_version(host, container, conf_path)
return generate_local_path(host, conf_path, version)
def _match_metainfo(parent, container, conf_path):
p = parent / META_YML
if not p.exists():
return False
with p.open() as f:
params = yaml.safe_load(f)
return (
isinstance(params, dict) and
'container' in params and
'container_path' in params and
params['container'] == container and
params['container_path'] == conf_path)
def _match_metainfo_by_remote_path(parent, remote_path):
p = parent / META_YML
if not p.exists():
return False
with p.open() as f:
params = yaml.safe_load(f)
return (
isinstance(params, dict) and
'remote_path' in params and
params['remote_path'] == remote_path)
def get_versions(host, *args, match=_match_metainfo):
pdir = Path(WORKDIR).absolute() / host
return sorted([
x.name for x in pdir.glob('*')
if x.is_dir() and match(x, *args)])
def find_latest_version(host, container, conf_path):
return get_versions(host, container, conf_path)[-1]
def find_latest_version_by_remote_path(host, remote_path):
return get_versions(
host, remote_path, match=_match_metainfo_by_remote_path)[-1]
def download_file(host, remote_path, conf_path=None):
if conf_path is None:
conf_path = Path(remote_path).name
dest = generate_local_path(host, conf_path)
ansible_arg = f'src={remote_path} dest={dest} flat=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'fetch', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Downloading {remote_path} from {host_1} to {dest}')
return dest
def download_conf_file(host, container, conf_path, relative_to=CONF_RELATIVE):
src = generate_remote_path(container, conf_path, relative_to)
return download_file(host, src, conf_path)
def create_conf_file(host, conf_path):
dest = generate_local_path(host, conf_path)
dest.parent.mkdir(parents=True, exist_ok=True)
dest.touch()
return dest
def _to_backup(conf):
return conf.parent / (conf.name + '.orig')
def make_backup(conf, quiet=False):
org = _to_backup(conf)
if not quiet:
logger.info(f'Copy {conf} {org}')
shutil.copy2(conf, org)
def make_metainfo(local_path, container, conf_path, relative_to=CONF_RELATIVE):
params = {
'container': container,
'container_path': conf_path,
'remote_path':
str(generate_remote_path(container, conf_path, relative_to)),
'version': list(local_path.parts)[-2],
}
with (local_path.parent / META_YML).open(mode='w') as f:
yaml.safe_dump(params, stream=f, default_flow_style=False)
def make_simple_metainfo(local_path, remote_path):
params = {
'remote_path': remote_path,
'version': list(local_path.parts)[-2],
}
with (local_path.parent / META_YML).open(mode='w') as f:
yaml.safe_dump(params, stream=f, default_flow_style=False)
def generate_edit_link(conf):
nb_conf = list(notebookapp.list_running_servers())[0]
p = (Path(nb_conf['base_url']) / 'edit' /
conf.absolute().relative_to(nb_conf['notebook_dir']))
return HTML(f'<a href={p} target="_blank">{p.name}</a>')
def show_diff(path_a, path_b):
lines_a = []
lines_b = []
with path_a.open() as f:
lines_a = f.readlines()
with path_b.open() as f:
lines_b = f.readlines()
diff = list(unified_diff(
lines_a, lines_b, fromfile=path_a.name, tofile=path_b.name))
sys.stdout.writelines(diff)
return len(diff)
def upload_conf_file(src, host, container, conf_path,
relative_to=CONF_RELATIVE):
dest = generate_remote_path(container, conf_path, relative_to)
ansible_arg = f'mkdir -p {dest.parent}'
subprocess.run(
['ansible', host, '-a', ansible_arg])
ansible_arg = f'dest={dest} src={src} backup=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Uploading {dest} from {src} to {host_1}')
def restart_container(host, container):
cmd = f'chdir={MOODLE_DIR} docker-compose restart {container}'
logger.info(f'Restart container {container}')
subprocess.check_call(['ansible', host, '-a', cmd])
def fetch_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, create=False):
local_path = download_conf_file(host, container, conf_path, relative_to)
make_backup(local_path)
make_metainfo(local_path, container, conf_path, relative_to)
return generate_edit_link(local_path)
def create_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, create=False):
local_path = create_conf_file(host, conf_path)
make_backup(local_path, quiet=True)
make_metainfo(local_path, container, conf_path, relative_to)
return generate_edit_link(local_path)
def apply_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None, restart=True):
diff = show_local_conf_diff(host, container, conf_path, version)
local_path = get_local_path(host, container, conf_path, version)
upload_conf_file(local_path, host, container, conf_path, relative_to)
if restart:
restart_container(host, container)
def revert_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
local_path = get_local_path(host, container, conf_path, version)
backup_path = _to_backup(local_path)
show_diff(local_path, backup_path)
upload_conf_file(backup_path, host, container, conf_path, relative_to)
restart_container(host, container)
local_path.rename(local_path.parent / (local_path.name + '.revert'))
def show_local_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
conf = get_local_path(host, container, conf_path, version)
with conf.open() as f:
print(f.read())
def edit_local_conf(host, container, conf_path,
relative_to=CONF_RELATIVE, version=None):
conf = get_local_path(host, container, conf_path, version)
return generate_edit_link(conf)
def show_local_conf_diff(host, container, conf_path, version=None):
local_path = get_local_path(host, container, conf_path, version)
show_diff(_to_backup(local_path), local_path)
def save_shibboleth_part(conf_path):
with conf_path.open() as f:
data = yaml.safe_load(f)
params = {}
if 'shibboleth' in data['services']:
params['shibboleth_container'] = yaml.safe_dump(
data['services']['shibboleth'])
vars_path = conf_path.parent / 'extra_vars.yml'
with vars_path.open(mode='w') as f:
yaml.safe_dump(params, f)
return vars_path
def init_shibboleth_part(conf_dir, hostname, volumes):
shibboleth_volumes = ['/sys/fs/cgroup:/sys/fs/cgroup']
shibboleth_volumes.extend(volumes)
params = {
'shibboleth_container': yaml.safe_dump({
'image': 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4',
'privileged': True,
'ports': ['443:443'],
'volumes': shibboleth_volumes,
'container_name': 'shibboleth',
'hostname': hostname,
}),
}
vars_path = conf_dir / 'shibboleth.yml'
with vars_path.open(mode='w') as f:
yaml.safe_dump(params, f)
return vars_path
def setup_shibboleth_part(local_path, **params):
if params is None or len(params) == 0:
return save_shibboleth_part(local_path)
else:
return init_shibboleth_part(local_path.parent, **params)
def generate_docker_compose(host, conf_path, extra_vars, extra_vars_file):
template = 'template/docker/compose/docker-compose.yml'
ansible_arg = f'src={template} dest={conf_path.parent}/'
env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
args = ['ansible', host, '-m', 'template', '-c', 'local',
'-a', ansible_arg]
for k, v in extra_vars.items():
args.extend(['-e', f'{k}={v}'])
for x in extra_vars_file:
args.extend(['-e', f'@{str(x)}'])
subprocess.run(args=args, env=env, check=True)
def update_docker_compose(host, extra_vars={}, shibboleth_params={}):
remote_path = MOODLE_DIR + '/docker-compose.yml'
local_path = download_file(host, remote_path)
make_backup(local_path)
make_simple_metainfo(local_path, remote_path)
shibboleth_vars = setup_shibboleth_part(local_path, **shibboleth_params)
generate_docker_compose(host, local_path, extra_vars, [shibboleth_vars])
show_diff(_to_backup(local_path), local_path)
return generate_edit_link(local_path)
def append_shibboleth_container(host, moodle_url, volumes=[], extra_vars={}):
hostname = urlparse(moodle_url).netloc
return update_docker_compose(
host, extra_vars,
shibboleth_params={'hostname': hostname, 'volumes': volumes},
)
def upload_docker_compose(host, version=None, apply=False):
remote_path = MOODLE_DIR + '/docker-compose.yml'
if version is None:
version = find_latest_version_by_remote_path(host, remote_path)
local_path = (
Path(WORKDIR).absolute() / host / version / 'docker-compose.yml')
ansible_arg = f'dest={remote_path} src={local_path} backup=yes'
out = subprocess.check_output(
['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
host_1 = out.decode('utf-8').split("\n")[0].split()[0]
logger.info(f'Uploading {remote_path} from {local_path} to {host_1}')
if not apply:
return
    ansible_arg = f'chdir={MOODLE_DIR} docker-compose up -d --remove-orphans'
args = ['ansible', host, '-a', ansible_arg]
logger.info('Apply the changes in docker-compose.yml.')
subprocess.run(args=args, check=True)
def generate_proxy_conf(host, conf_path, extra_vars):
template = 'template/docker/compose/moodle-proxy.conf.template'
ansible_arg = f'src={template} dest={conf_path.parent}/moodle-proxy.conf'
env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
args = [
'ansible', host, '-m', 'template', '-c', 'local', '-a', ansible_arg]
for k, v in extra_vars.items():
args.extend(['-e', f'{k}={v}'])
subprocess.run(args=args, env=env, check=True)
def update_proxy_conf(host, extra_vars={}):
conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
container = 'proxy'
link = fetch_conf(host, container, str(conf_path), str(conf_path.parent))
version = find_latest_version(host, container, str(conf_path))
local_path = generate_local_path(host, conf_path, version)
generate_proxy_conf(host, local_path, extra_vars)
show_local_conf_diff(host, container, conf_path, version)
return link
def apply_proxy_conf(host, version=None, restart=True):
conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
apply_conf(host, 'proxy', str(conf_path), str(conf_path.parent),
version, restart)
| none | 1 | 2.001923 | 2 |
|
other/minimum_edit_distance.py | newvicklee/nlp_algorithms | 0 | 695 | """
Minimum edit distance computes the cost it takes to get from one string to another string.
This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions.
Resource: https://en.wikipedia.org/wiki/Edit_distance
For example, getting from "intention" to "execution" is a cost of 8.
minimum_edit_distance("intention", "execution")
# 8
"""
def minimum_edit_distance(source, target):
n = len(source)
m = len(target)
D = {}
# Initialization
for i in range(0, n+1):
D[i,0] = i
for j in range(0, m+1):
D[0,j] = j
for i in range(1, n+1):
for j in range(1, m+1):
if source[i-1] == target[j-1]:
D[i,j] = D[i-1, j-1]
else:
D[i,j] = min(
D[i-1, j] + 1,
D[i, j-1] + 1,
D[i-1, j-1] + 2
)
    return D[n, m]
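# --- Editor's illustrative check (not part of the original file) --------------------
# Two hand-checkable cases under the cost model above (insert/delete = 1, substitute = 2):
#   "intention" -> "execution" costs 8 (the docstring's example)
#   "kitten"    -> "sitting"   costs 5 (two substitutions + one insertion)
if __name__ == "__main__":
    for src, tgt in [("intention", "execution"), ("kitten", "sitting")]:
        print(f"{src} -> {tgt}: {minimum_edit_distance(src, tgt)}")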
| """
Minimum edit distance computes the cost it takes to get from one string to another string.
This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions.
Resource: https://en.wikipedia.org/wiki/Edit_distance
For example, getting from "intention" to "execution" is a cost of 8.
minimum_edit_distance("intention", "execution")
# 8
"""
def minimum_edit_distance(source, target):
n = len(source)
m = len(target)
D = {}
# Initialization
for i in range(0, n+1):
D[i,0] = i
for j in range(0, m+1):
D[0,j] = j
for i in range(1, n+1):
for j in range(1, m+1):
if source[i-1] == target[j-1]:
D[i,j] = D[i-1, j-1]
else:
D[i,j] = min(
D[i-1, j] + 1,
D[i, j-1] + 1,
D[i-1, j-1] + 2
)
    return D[n, m]
| en | 0.867158 | Minimum edit distance computes the cost it takes to get from one string to another string. This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions. Resource: https://en.wikipedia.org/wiki/Edit_distance For example, getting from "intention" to "execution" is a cost of 8. minimum_edit_distance("intention", "execution") # 8 # Initialization | 3.882446 | 4 |
varifier/dnadiff.py | iqbal-lab-org/varifier | 11 | 696 | from operator import attrgetter
import logging
import os
import shutil
import subprocess
import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils
# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs and inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.
def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True):
delta = f"{outfile}.tmp.delta"
delta_1 = f"{outfile}.tmp.1delta"
subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True)
maxmatch_opt = "--maxmatch" if maxmatch else ""
commands = [
f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}",
f"delta-filter -1 {delta} > {delta_1}",
f"show-snps -rlTHC {delta_1} > {outfile}",
]
for command in commands:
logging.info("Start run command: " + command)
subprocess.check_output(command, shell=True)
logging.info("Finish run command: " + command)
os.unlink(delta)
os.unlink(delta_1)
def _run_dnadiff(
ref_fasta,
query_fasta,
outfile,
split_query=False,
debug=False,
threads=1,
maxmatch=True,
):
if not split_query:
_run_dnadiff_one_split(
ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch
)
else:
tmp_snp_files = []
seq_reader = pyfastaq.sequences.file_reader(query_fasta)
for seq in seq_reader:
prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}"
tmp_fasta = f"{prefix}.fasta"
with open(tmp_fasta, "w") as f:
print(seq, file=f)
snp_file = f"{prefix}.snps"
_run_dnadiff_one_split(
ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch
)
os.unlink(tmp_fasta)
tmp_snp_files.append(snp_file)
with open(outfile, "wb") as f_out:
for snp_file in tmp_snp_files:
with open(snp_file, "rb") as f_in:
shutil.copyfileobj(f_in, f_out)
if not debug:
os.unlink(snp_file)
def _snps_file_to_vcf(snps_file, query_fasta, outfile):
"""Loads the .snps file made by dnadiff.
query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
vcf_records = {}
variants = pymummer.snp_file.get_all_variants(snps_file)
query_seqs = utils.file_to_dict_of_seqs(query_fasta)
for variant in variants:
# If the variant is reversed, it means that either the ref or query had to be
# reverse complemented when aligned by mummer. Need to do the appropriate
# reverse (complement) fixes so the VCF has the correct REF and ALT sequences
if variant.reverse:
qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
qry_seq.revcomp()
variant.qry_base = "".join(reversed(qry_seq.seq))
ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
ref_seq.revcomp()
variant.ref_base = ref_seq.seq
if variant.var_type == pymummer.variant.SNP:
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
variant.qry_base,
variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_SNP",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.DEL:
# The query has sequence missing, compared to the
# reference. We're making VCF records w.r.t. the
# query, so this is an insertion. So need to
# get the nucleotide before the insertion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
query_seqs[variant.qry_name][variant.qry_start],
query_seqs[variant.qry_name][variant.qry_start]
+ variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_INS",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.INS:
# The ref has sequence missing, compared to the
# query. We're making VCF records w.r.t. the
# query, so this is a deletion. So need to
# get the nucleotide before the deletion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start),
".",
query_seqs[variant.qry_name][variant.qry_start - 1]
+ variant.qry_base,
query_seqs[variant.qry_name][variant.qry_start - 1],
".",
".",
"SVTYPE=DNADIFF_DEL",
"GT",
"1/1",
]
)
)
else:
raise Exception("Unknown variant type: " + str(variant))
assert (
new_record.REF
== query_seqs[new_record.CHROM][
new_record.POS : new_record.POS + len(new_record.REF)
]
)
if new_record.CHROM not in vcf_records:
vcf_records[new_record.CHROM] = []
vcf_records[new_record.CHROM].append(new_record)
for vcf_list in vcf_records.values():
vcf_list.sort(key=attrgetter("POS"))
with open(outfile, "w") as f:
print("##fileformat=VCFv4.2", file=f)
for seq in query_seqs.values():
print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
for key, vcf_list in sorted(vcf_records.items()):
for record in vcf_list:
print(record, file=f)
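# --- Editor's illustrative sketch (not part of the original module) -----------------
# The INS branch above (the reference lacks a base that the query has) is written as a
# deletion w.r.t. the query, anchored on the preceding query base. This helper restates
# that construction on a toy query string; the name, defaults, and values are hypothetical.
def _example_query_deletion_record(query="ACGTACGT", qry_start=3, qry_base="T"):
    """Return (POS, REF, ALT) the way the INS branch above builds them."""
    ref_field = query[qry_start - 1] + qry_base  # preceding base + query-only base
    alt_field = query[qry_start - 1]             # preceding base alone
    return qry_start, ref_field, alt_field       # -> (3, "GT", "G")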
def make_truth_vcf(
ref_fasta,
truth_fasta,
outfile,
debug=False,
split_ref=False,
threads=1,
maxmatch=True,
):
snps_file = f"{outfile}.tmp.snps"
_run_dnadiff(
truth_fasta,
ref_fasta,
snps_file,
split_query=split_ref,
debug=debug,
threads=threads,
maxmatch=maxmatch,
)
_snps_file_to_vcf(snps_file, ref_fasta, outfile)
if not debug:
os.unlink(snps_file)
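# --- Editor's illustrative usage sketch (not part of the original module) ------------
# Direct use of make_truth_vcf defined above; the paths are placeholders and the MUMmer
# binaries (nucmer, delta-filter, show-snps) must be on PATH:
# make_truth_vcf("ref.fasta", "truth.fasta", "truth.vcf", split_ref=True, threads=4)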
| from operator import attrgetter
import logging
import os
import shutil
import subprocess
import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils
# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs and inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.
def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True):
delta = f"{outfile}.tmp.delta"
delta_1 = f"{outfile}.tmp.1delta"
subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True)
maxmatch_opt = "--maxmatch" if maxmatch else ""
commands = [
f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}",
f"delta-filter -1 {delta} > {delta_1}",
f"show-snps -rlTHC {delta_1} > {outfile}",
]
for command in commands:
logging.info("Start run command: " + command)
subprocess.check_output(command, shell=True)
logging.info("Finish run command: " + command)
os.unlink(delta)
os.unlink(delta_1)
def _run_dnadiff(
ref_fasta,
query_fasta,
outfile,
split_query=False,
debug=False,
threads=1,
maxmatch=True,
):
if not split_query:
_run_dnadiff_one_split(
ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch
)
else:
tmp_snp_files = []
seq_reader = pyfastaq.sequences.file_reader(query_fasta)
for seq in seq_reader:
prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}"
tmp_fasta = f"{prefix}.fasta"
with open(tmp_fasta, "w") as f:
print(seq, file=f)
snp_file = f"{prefix}.snps"
_run_dnadiff_one_split(
ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch
)
os.unlink(tmp_fasta)
tmp_snp_files.append(snp_file)
with open(outfile, "wb") as f_out:
for snp_file in tmp_snp_files:
with open(snp_file, "rb") as f_in:
shutil.copyfileobj(f_in, f_out)
if not debug:
os.unlink(snp_file)
def _snps_file_to_vcf(snps_file, query_fasta, outfile):
"""Loads the .snps file made by dnadiff.
query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
vcf_records = {}
variants = pymummer.snp_file.get_all_variants(snps_file)
query_seqs = utils.file_to_dict_of_seqs(query_fasta)
for variant in variants:
# If the variant is reversed, it means that either the ref or query had to be
# reverse complemented when aligned by mummer. Need to do the appropriate
# reverse (complement) fixes so the VCF has the correct REF and ALT sequences
if variant.reverse:
qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
qry_seq.revcomp()
variant.qry_base = "".join(reversed(qry_seq.seq))
ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
ref_seq.revcomp()
variant.ref_base = ref_seq.seq
if variant.var_type == pymummer.variant.SNP:
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
variant.qry_base,
variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_SNP",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.DEL:
# The query has sequence missing, compared to the
# reference. We're making VCF records w.r.t. the
# query, so this is an insertion. So need to
# get the nucleotide before the insertion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
query_seqs[variant.qry_name][variant.qry_start],
query_seqs[variant.qry_name][variant.qry_start]
+ variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_INS",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.INS:
# The ref has sequence missing, compared to the
# query. We're making VCF records w.r.t. the
# query, so this is a deletion. So need to
# get the nucleotide before the deletion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start),
".",
query_seqs[variant.qry_name][variant.qry_start - 1]
+ variant.qry_base,
query_seqs[variant.qry_name][variant.qry_start - 1],
".",
".",
"SVTYPE=DNADIFF_DEL",
"GT",
"1/1",
]
)
)
else:
raise Exception("Unknown variant type: " + str(variant))
assert (
new_record.REF
== query_seqs[new_record.CHROM][
new_record.POS : new_record.POS + len(new_record.REF)
]
)
if new_record.CHROM not in vcf_records:
vcf_records[new_record.CHROM] = []
vcf_records[new_record.CHROM].append(new_record)
for vcf_list in vcf_records.values():
vcf_list.sort(key=attrgetter("POS"))
with open(outfile, "w") as f:
print("##fileformat=VCFv4.2", file=f)
for seq in query_seqs.values():
print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
for key, vcf_list in sorted(vcf_records.items()):
for record in vcf_list:
print(record, file=f)
def make_truth_vcf(
ref_fasta,
truth_fasta,
outfile,
debug=False,
split_ref=False,
threads=1,
maxmatch=True,
):
snps_file = f"{outfile}.tmp.snps"
_run_dnadiff(
truth_fasta,
ref_fasta,
snps_file,
split_query=split_ref,
debug=debug,
threads=threads,
maxmatch=maxmatch,
)
_snps_file_to_vcf(snps_file, ref_fasta, outfile)
if not debug:
os.unlink(snps_file)
| en | 0.919413 | # We only want the .snps file from the dnadiff script from MUMmer. From reading # the docs inspecting that script, we need to run these commands: # # nucmer --maxmatch --delta out.delta ref.fasta query.fasta # delta-filter -1 out.delta > out.1delta # show-snps -rlTHC out.1delta > out.snps # # This is instead of just running show-snps, which runs several other commands # in addition to making the snps file. Loads the .snps file made by dnadiff. query_fasta = fasta file of query sequences. Writes a new VCF file unmerged records. # If the variant is reversed, it means that either the ref or query had to be # reverse complemented when aligned by mummer. Need to do the appropriate # reverse (complement) fixes so the VCF has the correct REF and ALT sequences # The query has sequence missing, compared to the # reference. We're making VCF records w.r.t. the # query, so this is an insertion. So need to # get the nucleotide before the insertion as well. # The ref has sequence missing, compared to the # query. We're making VCF records w.r.t. the # query, so this is a deletion. So need to # get the nucleotide before the deletion as well. #fileformat=VCFv4.2", file=f) #contig=<ID={seq.id},length={len(seq)}>", file=f) | 2.425909 | 2 |
modules/models.py | sbj-ss/github-watcher | 0 | 697 |
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Type
@dataclass(frozen=True)
class StatsBaseModel:
"""Base model for various reports"""
@classmethod
def key(cls: Type) -> str:
name = cls.__name__
return name[0].lower() + name[1:]
def to_table(self) -> List[str]:
raise NotImplementedError
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
@dataclass(frozen=True)
class Contributor:
name: str
commit_count: int
@dataclass(frozen=True)
class ContributorStats(StatsBaseModel):
contributors: List[Contributor]
def to_table(self) -> List[str]:
return [
'Most active contributors:',
'-------------------------',
'Name' + (' ' * 20) + 'Commits',
] + [f'{c.name.ljust(24)}{c.commit_count}' for c in self.contributors]
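    # --- Editor's note (illustrative, not part of the original module) ---------------
    # Example: ContributorStats([Contributor("alice", 42), Contributor("bob", 7)]).to_table()
    # yields the three header lines above followed by one line per contributor, with each
    # name left-justified to 24 characters so the commit counts line up under "Commits".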
@dataclass(frozen=True)
class PullRequestStats(StatsBaseModel):
open_count: int
closed_count: int
old_count: int
def to_table(self) -> List[str]:
return [
'Pull requests:',
'--------------',
'Open Closed Old',
f'{str(self.open_count).ljust(8)}{str(self.closed_count).ljust(8)}{str(self.old_count).ljust(8)}'
]
@dataclass(frozen=True)
class IssueStats(StatsBaseModel):
open_count: int
closed_count: int
old_count: int
def to_table(self) -> List[str]:
return [
'Issues:',
'-------',
'Open Closed Old',
f'{str(self.open_count).ljust(8)}{str(self.closed_count).ljust(8)}{str(self.old_count).ljust(8)}'
]
| from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Type
@dataclass(frozen=True)
class StatsBaseModel:
"""Base model for various reports"""
@classmethod
def key(cls: Type) -> str:
name = cls.__name__
return name[0].lower() + name[1:]
def to_table(self) -> List[str]:
raise NotImplementedError
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
@dataclass(frozen=True)
class Contributor:
name: str
commit_count: int
@dataclass(frozen=True)
class ContributorStats(StatsBaseModel):
contributors: List[Contributor]
def to_table(self) -> List[str]:
return [
'Most active contributors:',
'-------------------------',
'Name' + (' ' * 20) + 'Commits',
] + [f'{c.name.ljust(24)}{c.commit_count}' for c in self.contributors]
@dataclass(frozen=True)
class PullRequestStats(StatsBaseModel):
open_count: int
closed_count: int
old_count: int
def to_table(self) -> List[str]:
return [
'Pull requests:',
'--------------',
'Open Closed Old',
f'{str(self.open_count).ljust(8)}{str(self.closed_count).ljust(8)}{str(self.old_count).ljust(8)}'
]
@dataclass(frozen=True)
class IssueStats(StatsBaseModel):
open_count: int
closed_count: int
old_count: int
def to_table(self) -> List[str]:
return [
'Issues:',
'-------',
'Open Closed Old',
f'{str(self.open_count).ljust(8)}{str(self.closed_count).ljust(8)}{str(self.old_count).ljust(8)}'
] | en | 0.950531 | Base model for various reports | 2.83256 | 3 |
queryfilter/datetimefilter.py | iCHEF/queryfilter | 4 | 698 | from __future__ import absolute_import
import datetime
from dateutil import parser
import pytz
from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter
WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)
@QueryFilter.register_type_condition('datetime')
class DatetimeRangeFilter(DjangoQueryFilterMixin, DictFilterMixin,
FieldFilter):
@property
def start(self):
return get_start(self.filter_args.get("start"))
@property
def end(self):
end_datetime = get_end(self.filter_args.get("end"))
if not end_datetime:
return None
if _has_no_time_info(end_datetime):
end_datetime = end_datetime + WHOLE_DAY - ONE_SECOND
return end_datetime
def on_dicts(self, dicts):
def in_range(datum):
datetime_string = self.get(datum, self.field_name)
if isinstance(datetime_string, datetime.datetime):
to_compare = datetime_string
else:
to_compare = parse(datetime_string)
if not self.start and not self.end:
return False
if self.start and (to_compare < self.start):
return False
if self.end and (self.end < to_compare):
return False
return True
return list(filter(in_range, dicts))
@property
def query_params(self):
if not any((self.start, self.end)):
return None
query_params = dict()
if self.start:
query_params["{}__gte".format(self.field_name)] = self.start
if self.end:
query_params["{}__lte".format(self.field_name)] = self.end
return query_params
def _do_django_query(self, queryset):
query_params = self.query_params
if query_params:
return queryset.filter(**query_params)
else:
return queryset.none()
min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)
def get_start(start_date_str):
if not start_date_str:
return None
return parse(start_date_str)
def get_end(end_date_str):
if not end_date_str:
return None
return parse(end_date_str)
def parse(datetime_string):
return make_time_aware(parser.parse(datetime_string))
def make_time_aware(datetime_data):
if not datetime_data.tzinfo:
datetime_data = datetime_data.replace(tzinfo=pytz.utc)
return datetime_data
def _has_no_time_info(value):
return value.hour == 0 and \
value.minute == 0 and \
value.second == 0 and \
value.microsecond == 0
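# --- Editor's illustrative sketch (not part of the original module; helper name is the
# editor's) ---------------------------------------------------------------------------
# How the `end` handling above treats a date-only bound: it is padded to the last
# second of that day, whereas a bound with an explicit time is used as given.
def _example_end_padding():
    end = parse("2020-01-31")            # -> 2020-01-31 00:00:00+00:00 (no time info)
    if _has_no_time_info(end):
        end = end + WHOLE_DAY - ONE_SECOND
    return end                           # -> 2020-01-31 23:59:59+00:00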
| from __future__ import absolute_import
import datetime
from dateutil import parser
import pytz
from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter
WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)
@QueryFilter.register_type_condition('datetime')
class DatetimeRangeFilter(DjangoQueryFilterMixin, DictFilterMixin,
FieldFilter):
@property
def start(self):
return get_start(self.filter_args.get("start"))
@property
def end(self):
end_datetime = get_end(self.filter_args.get("end"))
if not end_datetime:
return None
if _has_no_time_info(end_datetime):
end_datetime = end_datetime + WHOLE_DAY - ONE_SECOND
return end_datetime
def on_dicts(self, dicts):
def in_range(datum):
datetime_string = self.get(datum, self.field_name)
if isinstance(datetime_string, datetime.datetime):
to_compare = datetime_string
else:
to_compare = parse(datetime_string)
if not self.start and not self.end:
return False
if self.start and (to_compare < self.start):
return False
if self.end and (self.end < to_compare):
return False
return True
return list(filter(in_range, dicts))
@property
def query_params(self):
if not any((self.start, self.end)):
return None
query_params = dict()
if self.start:
query_params["{}__gte".format(self.field_name)] = self.start
if self.end:
query_params["{}__lte".format(self.field_name)] = self.end
return query_params
def _do_django_query(self, queryset):
query_params = self.query_params
if query_params:
return queryset.filter(**query_params)
else:
return queryset.none()
min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)
def get_start(start_date_str):
if not start_date_str:
return None
return parse(start_date_str)
def get_end(end_date_str):
if not end_date_str:
return None
return parse(end_date_str)
def parse(datetime_string):
return make_time_aware(parser.parse(datetime_string))
def make_time_aware(datetime_data):
if not datetime_data.tzinfo:
datetime_data = datetime_data.replace(tzinfo=pytz.utc)
return datetime_data
def _has_no_time_info(value):
return value.hour == 0 and \
value.minute == 0 and \
value.second == 0 and \
value.microsecond == 0
| none | 1 | 2.30712 | 2 |
|
built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py | Huawei-Ascend/modelzoo | 12 | 699 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Import all torch operators."""
import torch.nn.functional as F
import torch.nn as nn
import torch
from vega.search_space.networks.network_factory import NetworkFactory
from vega.search_space.networks.net_utils import NetTypes
from vega.search_space.networks.pytorch.utils.anchor_utils.anchor_target import AnchorTarget
from vega.search_space.networks.pytorch.utils.bbox_utils.anchor_generator import AnchorGenerator
from vega.core.common.config import Config
from functools import partial
import numpy as np
from six.moves import map, zip
from vega.search_space.networks.pytorch.losses.reduce_loss import weighted_loss
@NetworkFactory.register(NetTypes.Operator)
class RpnClsLossInput(nn.Module):
"""Rpn input."""
def __init__(self):
super(RpnClsLossInput, self).__init__()
def forward(self, x):
"""Get cls score and bbox preds."""
cls_scores = x[0]
bbox_preds = x[1]
return cls_scores, bbox_preds
@NetworkFactory.register(NetTypes.Operator)
class RpnLossInput(nn.Module):
"""Rpn loss input."""
def __init__(self):
super(RpnLossInput, self).__init__()
def forward(self, x):
"""Get cls score."""
cls_scores = x[2][0]
bbox_preds = x[2][1]
gt_bboxes = x[0]['gt_bboxes'].cuda()
img_metas = [x[0]['img_meta']]
gt_bboxes_ignore = x[0]['gt_bboxes_ignore'].cuda()
return cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore
@NetworkFactory.register(NetTypes.Operator)
class AnchorTargetOp(nn.Module):
"""Anchor Target."""
def __init__(self, target_means=None, target_stds=None, num_classes=2, use_sigmoid_cls=False, cfg=None,
sampling=True):
self.target_means = target_means or (.0, .0, .0, .0)
self.target_stds = target_stds or (1.0, 1.0, 1.0, 1.0)
self.label_channels = num_classes if use_sigmoid_cls else 1
self.cfg = Config({'assigner': {'name': 'MaxIoUAllNegAssigner', 'pos_iou_thr': 0.7,
'neg_iou_thr': tuple([-1, 0.3]), 'min_pos_iou': 0.3, 'ignore_iof_thr': 0.5},
'sampler': {'name': 'RandomSampler', 'num': 256, 'pos_fraction': 0.5, 'neg_pos_ub': -1,
'add_gt_as_proposals': False}, 'allowed_border': 0, 'pos_weight': -1,
'debug': False})
self.sampling = sampling
super(AnchorTargetOp, self).__init__()
def forward(self, x):
"""Create X=(anchor_list,valid_flag_list,gt_bboxes,img_metas,)."""
anchor_list, valid_flag_list, original_anchors, gt_bboxes, img_metas, gt_bboxes_ignore = x
# out=(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos,num_total_neg).
return AnchorTarget(anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means,
self.target_stds,
self.cfg, gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=None,
label_channels=self.label_channels,
sampling=self.sampling)
@NetworkFactory.register(NetTypes.Operator)
class Anchors(nn.Module):
"""Get anchors according to feature map sizes."""
def __init__(self, anchor_base_sizes_cfg=None, anchor_scales=None, anchor_ratios=None, anchor_strides=None):
self.anchor_base_sizes_cfg = anchor_base_sizes_cfg
self.anchor_scales = anchor_scales or [8, 16, 32]
self.anchor_ratios = anchor_ratios or [0.5, 1.0, 2.0]
self.anchor_strides = anchor_strides or [4, 8, 16, 32, 64]
self.anchor_base_sizes = list(
self.anchor_strides) if self.anchor_base_sizes_cfg is None else self.anchor_base_sizes_cfg
super(Anchors, self).__init__()
def forward(self, x):
"""Create anchor."""
cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore = x
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
anchor_generators = []
for anchor_base in self.anchor_base_sizes:
anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
multi_level_anchors = []
for i in range(num_levels):
anchors = anchor_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i])
multi_level_anchors.append(anchors)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = anchor_generators[i].valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w))
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list, multi_level_anchors, gt_bboxes, img_metas, gt_bboxes_ignore
def multi_apply(func, *args, **kwargs):
"""Multi apply.
:param func: function
:param args: args of function
:return: result
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
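# --- Editor's note (illustrative, not part of the original module) ------------------
# multi_apply maps `func` over per-level argument lists and transposes the results, so
# N feature levels each returning (loss_cls, loss_bbox) come back as two parallel lists:
#   multi_apply(lambda a, b: (a + b, a * b), [1, 2], [10, 20]) -> ([11, 22], [10, 40])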
@NetworkFactory.register(NetTypes.Operator)
class RpnClsLoss(nn.Module):
"""Rpn Class Loss."""
def __init__(self, out_channels=2):
super(RpnClsLoss, self).__init__()
self.loss_cls = CustomCrossEntropyLoss()
self.loss_bbox = CustomSmoothL1Loss()
self.out_channels = out_channels
def forward(self, x):
"""Get x."""
(cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_pos, num_total_neg,
num_total_samples) = x
losses_cls, losses_bbox = multi_apply(self.loss, cls_score, bbox_pred, labels, label_weights, bbox_targets,
bbox_weights, num_total_samples=num_total_samples)
return losses_cls, losses_bbox
def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples):
"""Get loss."""
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.out_channels)
loss_cls = self.loss_cls(cls_score, labels, label_weights, avg_factor=num_total_samples)
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
loss_bbox = self.loss_bbox(bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples)
return loss_cls, loss_bbox
@NetworkFactory.register(NetTypes.Operator)
class CustomCrossEntropyLoss(nn.Module):
"""Cross Entropy Loss."""
def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
loss_weight=1.0):
"""Init Cross Entropy loss.
:param desc: config dict
"""
super(CustomCrossEntropyLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
if self.use_sigmoid:
self.loss_function = binary_cross_entropy
elif self.use_mask:
self.loss_function = mask_cross_entropy
else:
self.loss_function = cross_entropy
def forward(self, cls_score, label, weight, avg_factor, reduction_override=None, **kwargs):
"""Forward compute."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction,
avg_factor=avg_factor, **kwargs)
return loss_cls
@NetworkFactory.register(NetTypes.Operator)
class CustomSmoothL1Loss(nn.Module):
"""Smooth L1 Loss."""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
"""Init smooth l1 loss."""
super(CustomSmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
"""Forward compute.
:param pred: predict
:param target: target
:param weight: weight
:param avg_factor: avg factor
:param reduction_override: reduce override
:return: loss
"""
reduction = (
reduction_override if reduction_override else self.reduction)
if target.numel() > 0:
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
else:
return torch.FloatTensor([0.0]).cuda()
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth l1 loss.
:param pred: predict
:param target: target
:param beta: beta
:return: loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
return loss
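# --- Editor's illustrative sketch (not part of the original module; helper name is the
# editor's) ---------------------------------------------------------------------------
# Element-wise Smooth L1 as implemented above, restated without torch so the two
# branches are easy to check by hand (quadratic inside |diff| < beta, linear outside):
def _smooth_l1_scalar(pred, target, beta=1.0):
    diff = abs(pred - target)
    return 0.5 * diff * diff / beta if diff < beta else diff - 0.5 * beta
# _smooth_l1_scalar(2.0, 0.5) == 1.0    # diff = 1.5 >= beta -> 1.5 - 0.5
# _smooth_l1_scalar(0.6, 0.5) == 0.005  # diff = 0.1 <  beta -> 0.5 * 0.01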
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
"""Cross entropy losses.
:param pred: predict result
:param label: gt label
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
loss = F.cross_entropy(pred, label, reduction='none')
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_binary_labels(labels, label_weights, label_channels):
"""Expand binary labels.
:param labels: labels
:param label_weights: label weights
:param label_channels: label channels
:return: binary label and label weights
"""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
"""Binary cross entropy loss.
:param pred: predict result
:param label: gt label
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight, reduction='none')
loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
"""Mask cross entropy loss.
:param pred: predict result
:param target: target
:param label: gt label
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Weight reduce loss.
:param loss: losses
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
if reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
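# --- Editor's note (illustrative, not part of the original module) ------------------
# With reduction='mean' and an avg_factor, the (optionally weighted) per-element losses
# are summed and divided by avg_factor (here the number of sampled anchors passed in as
# num_total_samples) rather than by the element count. With reduction='none' the
# element-wise loss is returned unchanged, and reduction='sum' with avg_factor raises.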
def reduce_loss(loss, reduction):
"""Reduce loss compute.
:param loss: losses
    :param reduction: reduce function
:return: loss
"""
reduction_function = F._Reduction.get_enum(reduction)
if reduction_function == 0:
return loss
elif reduction_function == 1:
return loss.mean()
elif reduction_function == 2:
return loss.sum()
| # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Import all torch operators."""
import torch.nn.functional as F
import torch.nn as nn
import torch
from vega.search_space.networks.network_factory import NetworkFactory
from vega.search_space.networks.net_utils import NetTypes
from vega.search_space.networks.pytorch.utils.anchor_utils.anchor_target import AnchorTarget
from vega.search_space.networks.pytorch.utils.bbox_utils.anchor_generator import AnchorGenerator
from vega.core.common.config import Config
from functools import partial
import numpy as np
from six.moves import map, zip
from vega.search_space.networks.pytorch.losses.reduce_loss import weighted_loss
@NetworkFactory.register(NetTypes.Operator)
class RpnClsLossInput(nn.Module):
"""Rpn input."""
def __init__(self):
super(RpnClsLossInput, self).__init__()
def forward(self, x):
"""Get cls score and bbox preds."""
cls_scores = x[0]
bbox_preds = x[1]
return cls_scores, bbox_preds
@NetworkFactory.register(NetTypes.Operator)
class RpnLossInput(nn.Module):
"""Rpn loss input."""
def __init__(self):
super(RpnLossInput, self).__init__()
def forward(self, x):
"""Get cls score."""
cls_scores = x[2][0]
bbox_preds = x[2][1]
gt_bboxes = x[0]['gt_bboxes'].cuda()
img_metas = [x[0]['img_meta']]
gt_bboxes_ignore = x[0]['gt_bboxes_ignore'].cuda()
return cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore
@NetworkFactory.register(NetTypes.Operator)
class AnchorTargetOp(nn.Module):
"""Anchor Target."""
def __init__(self, target_means=None, target_stds=None, num_classes=2, use_sigmoid_cls=False, cfg=None,
sampling=True):
self.target_means = target_means or (.0, .0, .0, .0)
self.target_stds = target_stds or (1.0, 1.0, 1.0, 1.0)
self.label_channels = num_classes if use_sigmoid_cls else 1
self.cfg = Config({'assigner': {'name': 'MaxIoUAllNegAssigner', 'pos_iou_thr': 0.7,
'neg_iou_thr': tuple([-1, 0.3]), 'min_pos_iou': 0.3, 'ignore_iof_thr': 0.5},
'sampler': {'name': 'RandomSampler', 'num': 256, 'pos_fraction': 0.5, 'neg_pos_ub': -1,
'add_gt_as_proposals': False}, 'allowed_border': 0, 'pos_weight': -1,
'debug': False})
self.sampling = sampling
super(AnchorTargetOp, self).__init__()
def forward(self, x):
"""Create X=(anchor_list,valid_flag_list,gt_bboxes,img_metas,)."""
anchor_list, valid_flag_list, original_anchors, gt_bboxes, img_metas, gt_bboxes_ignore = x
# out=(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos,num_total_neg).
return AnchorTarget(anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means,
self.target_stds,
self.cfg, gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=None,
label_channels=self.label_channels,
sampling=self.sampling)
@NetworkFactory.register(NetTypes.Operator)
class Anchors(nn.Module):
"""Get anchors according to feature map sizes."""
def __init__(self, anchor_base_sizes_cfg=None, anchor_scales=None, anchor_ratios=None, anchor_strides=None):
self.anchor_base_sizes_cfg = anchor_base_sizes_cfg
self.anchor_scales = anchor_scales or [8, 16, 32]
self.anchor_ratios = anchor_ratios or [0.5, 1.0, 2.0]
self.anchor_strides = anchor_strides or [4, 8, 16, 32, 64]
self.anchor_base_sizes = list(
self.anchor_strides) if self.anchor_base_sizes_cfg is None else self.anchor_base_sizes_cfg
super(Anchors, self).__init__()
def forward(self, x):
"""Create anchor."""
cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore = x
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
anchor_generators = []
for anchor_base in self.anchor_base_sizes:
anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
multi_level_anchors = []
for i in range(num_levels):
anchors = anchor_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i])
multi_level_anchors.append(anchors)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = anchor_generators[i].valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w))
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list, multi_level_anchors, gt_bboxes, img_metas, gt_bboxes_ignore
def multi_apply(func, *args, **kwargs):
"""Multi apply.
:param func: function
:param args: args of function
:return: result
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
@NetworkFactory.register(NetTypes.Operator)
class RpnClsLoss(nn.Module):
"""Rpn Class Loss."""
def __init__(self, out_channels=2):
super(RpnClsLoss, self).__init__()
self.loss_cls = CustomCrossEntropyLoss()
self.loss_bbox = CustomSmoothL1Loss()
self.out_channels = out_channels
def forward(self, x):
"""Get x."""
(cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_pos, num_total_neg,
num_total_samples) = x
losses_cls, losses_bbox = multi_apply(self.loss, cls_score, bbox_pred, labels, label_weights, bbox_targets,
bbox_weights, num_total_samples=num_total_samples)
return losses_cls, losses_bbox
def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples):
"""Get loss."""
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.out_channels)
loss_cls = self.loss_cls(cls_score, labels, label_weights, avg_factor=num_total_samples)
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
loss_bbox = self.loss_bbox(bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples)
return loss_cls, loss_bbox
@NetworkFactory.register(NetTypes.Operator)
class CustomCrossEntropyLoss(nn.Module):
"""Cross Entropy Loss."""
def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
loss_weight=1.0):
"""Init Cross Entropy loss.
:param desc: config dict
"""
super(CustomCrossEntropyLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
if self.use_sigmoid:
self.loss_function = binary_cross_entropy
elif self.use_mask:
self.loss_function = mask_cross_entropy
else:
self.loss_function = cross_entropy
def forward(self, cls_score, label, weight, avg_factor, reduction_override=None, **kwargs):
"""Forward compute."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction,
avg_factor=avg_factor, **kwargs)
return loss_cls
@NetworkFactory.register(NetTypes.Operator)
class CustomSmoothL1Loss(nn.Module):
"""Smooth L1 Loss."""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
"""Init smooth l1 loss."""
super(CustomSmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
"""Forward compute.
:param pred: predict
:param target: target
:param weight: weight
:param avg_factor: avg factor
:param reduction_override: reduce override
:return: loss
"""
reduction = (
reduction_override if reduction_override else self.reduction)
if target.numel() > 0:
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
else:
return torch.FloatTensor([0.0]).cuda()
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth l1 loss.
:param pred: predict
:param target: target
:param beta: beta
:return: loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
return loss
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
"""Cross entropy losses.
:param pred: predict result
:param label: gt label
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
loss = F.cross_entropy(pred, label, reduction='none')
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_binary_labels(labels, label_weights, label_channels):
"""Expand binary labels.
:param labels: labels
:param label_weights: label weights
:param label_channels: label channels
:return: binary label and label weights
"""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
"""Binary cross entropy loss.
:param pred: predict result
:param label: gt label
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight, reduction='none')
loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
"""Mask cross entropy loss.
:param pred: predict result
:param target: target
:param label: gt label
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Weight reduce loss.
:param loss: losses
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
if reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def reduce_loss(loss, reduction):
"""Reduce loss compute.
:param loss: losses
    :param reduction: reduce function
:return: loss
"""
reduction_function = F._Reduction.get_enum(reduction)
if reduction_function == 0:
return loss
elif reduction_function == 1:
return loss.mean()
elif reduction_function == 2:
return loss.sum() | en | 0.683285 | # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. Import all torch operators. Rpn input. Get cls score and bbox preds. Rpn loss input. Get cls score. Anchor Target. Create X=(anchor_list,valid_flag_list,gt_bboxes,img_metas,). # out=(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos,num_total_neg). Get anchors according to feature map sizes. Create anchor. Multi apply. :param func: function :param args: args of function :return: result Rpn Class Loss. Get x. Get loss. Cross Entropy Loss. Init Cross Entropy loss. :param desc: config dict Forward compute. Smooth L1 Loss. Init smooth l1 loss. Forward compute. :param pred: predict :param target: target :param weight: weight :param avg_factor: avg factor :param reduction_override: reduce override :return: loss Smooth l1 loss. :param pred: predict :param target: target :param beta: beta :return: loss Cross entropy losses. :param pred: predict result :param label: gt label :param weight: weight :param reduction: reduce function :param avg_factor: avg factor :return: loss Expand binary labels. :param labels: labels :param label_weights: label weights :param label_channels: label channels :return: binary label and label weights Binary cross entropy loss. :param pred: predict result :param label: gt label :param weight: weight :param reduction: reduce function :param avg_factor: avg factor :return: loss Mask cross entropy loss. :param pred: predict result :param target: target :param label: gt label :param reduction: reduce function :param avg_factor: avg factor :return: loss Weight reduce loss. :param loss: losses :param weight: weight :param reduction: reduce function :param avg_factor: avg factor :return: loss Reduce loss compute. :param loss: losses :param reduction: reduce funtion :return: loss | 1.837565 | 2 |