content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
import os
# NOTE: assumed module-level default; the key may also be configured elsewhere in the module.
TRANSCRIPTIC_KEY = None
def get_key():
"""
Gets the private key used to access Transcriptic's services.
Returns
-------
str
"""
if TRANSCRIPTIC_KEY is not None:
return TRANSCRIPTIC_KEY
return os.environ['TRANSCRIPTIC_KEY'] | 76213bc4b2a07d24b2f183eaa0fca18dcfec508b | 2,700 |
from typing import Any
def linear_search_while(lst: list, value: Any) -> int:
"""Return the index of the first occurrence of value in lst, or return
-1 if value is not in lst.
    >>> linear_search_while([2, 5, 1, -3], 5)
    1
    >>> linear_search_while([2, 4, 2], 2)
    0
    >>> linear_search_while([2, 5, 1, -3], 4)
    -1
    >>> linear_search_while([], 5)
-1
"""
i = 0 # The index of the next item in lst to examine.
# Keep going until we reach the end of lst or until we find value.
while i != len(lst) and lst[i] != value:
i = i + 1
# If we fell off the end of the list, we didn't find value.
if i == len(lst):
return -1
else:
return i | c90c39148b5c30fbb4f6732e322d03632ad63b39 | 2,701 |
def get_pagerduty_secret_name():
"""
Get name of the PagerDuty secret for currently used addon.
Returns:
string: name of the secret
"""
return config.DEPLOYMENT["addon_name"] + constants.MANAGED_PAGERDUTY_SECRET_SUFFIX | be731e1dcebc3f8a225e249def332abd0d8ea71b | 2,702 |
from collections import defaultdict
from typing import Dict
def check_docs(
doc_path: str, recurse: bool = True, max_threads: int = 10, delay: float = 0
) -> Dict[str, Dict[str, UrlResult]]:
"""
Check multiple HTML files in `doc_path`.
Parameters
----------
doc_path : str
        Path to the documentation folder containing the HTML files to check.
recurse: bool
If True, recurse subfolders, default is True
max_threads: int, optional
The maximum number of async threads to run
delay: float, optional
Seconds delay between requests
Returns
-------
Dict[str, Dict[str, UrlResult]]
        Dictionary of pages checked. The results for each page
        are a dictionary of checked links for that page.
"""
page_results: Dict[str, Dict[str, UrlResult]] = defaultdict(dict)
link_results: Dict[str, UrlResult] = {}
links_to_check = _get_links_from_files(doc_path, recurse)
print(f"Checking links {len(links_to_check)}...")
checked_links = check_uris(links_to_check, max_threads, delay)
print("\ndone")
for result in checked_links:
link_results[result.url] = result
src_pages = links_to_check[result.url]
for src_page in src_pages:
page_results[src_page][result.url] = result
_print_url_results(page_results)
return page_results | c3cc03a61f633143d03f8bfa1063242de8797bfa | 2,703 |
import os
from graphviz import Digraph
def create_graph(filepath, nodes_data, legend=False):
"""Visualizes the energy system as graph.
Creates, using the library Graphviz, a graph containing all
components and connections from "nodes_data" and returns this as a
PNG file.
----
Keyword arguments:
filepath : obj:'str'
-- path, where the PNG-result shall be saved
nodes_data : obj:'dict'
-- dictionary containing data from excel scenario file.
legend : obj:'bool'
-- specifies, whether a legend will be added to the graph or
not
----
@ Christian Klemm - christian.klemm@fh-muenster.de, 14.04.2020
"""
def linebreaks(text):
"""Adds linebreaks a given string.
Function which adds a line break to strings every ten
characters. Up to four strings are added.
----
Keyword arguments:
text : obj:'str'
-- string to which line breaks will be added
----
@ Christian Klemm - christian.klemm@fh-muenster.de, 14.04.2020
"""
text_length = len(text)
if text_length > 10:
text = str(text[0:9] + "-\n" + text[9:])
if text_length > 20:
text = str(text[0:21] + "-\n" + text[21:])
if text_length > 30:
text = str(text[0:33] + "-\n" + text[33:])
if text_length > 40:
text = str(text[0:45] + "-\n" + text[45:])
return text
# Defines the location of Graphviz as path necessary for windows
os.environ["PATH"] += \
os.pathsep + 'C:\\Program Files (x86)\\Graphviz2.38\\bin'
# Creates the Directed-Graph
dot = Digraph(format='png')
# Creates a Legend if Legend = True
if legend:
component = ['Bus', 'Source', 'Sink', 'Transformer\nLinks', 'Storage']
shape = {'Bus': ['ellipse'], 'Source': ['trapezium'],
'Sink': ['invtrapezium'], 'Transformer\nLinks': ['box'],
'Storage': ['box']}
for i in component:
dot.node(i, shape=shape[i][0], fontsize="10", fixedsize='shape',
width='1.1', height='0.6',
style='dashed' if i == 'Storage' else '')
components = ["buses", "sources", "demand", "transformers", "storages",
"links"]
shapes = {'sources': ['trapezium'], 'demand': ['invtrapezium'],
'transformers': ['box'], 'storages': ['box'],
'links': ['box']}
bus = {'buses': ['label'], 'sources': ['output'], 'demand': ['input'],
'transformers': ['input'], 'storages': ['bus'], 'links': ['bus_1']}
for i in components:
for j, b in nodes_data[i].iterrows():
if b['active']:
# sets component label
label = b['label']
if i == 'buses':
if b['shortage']:
label = b['label'] + '_shortage'
elif b['excess']:
label = b['label'] + '_excess'
label = linebreaks(label)
if i != 'buses':
dot.node(label, shape=shapes[i][0], fontsize="10",
fixedsize='shape', width='1.1', height='0.6',
style='dashed' if i == 'storages' else '')
else:
if b['shortage']:
dot.node(label, shape='trapezium', fontsize="10",
fixedsize='shape', width='1.1', height='0.6')
if b['excess'] and not b['shortage']:
dot.node(label, shape='invtrapezium', fontsize="10",
fixedsize='shape', width='1.1', height='0.6')
# creates bus nodes
dot.node(b[bus[i][0]], shape='ellipse', fontsize="10")
if i == 'links':
dot.node(b['bus_2'], shape='ellipse')
# creates edges
if i == 'demand' or i == 'storages' or i == 'links' \
or (i == 'buses' and b['excess']
and not b['shortage']):
dot.edge(b[bus[i][0]], label)
if i == 'sources' or i == 'storages' \
or (i == 'buses' and b['shortage']):
dot.edge(label, b[bus[i][0]])
if i == 'links':
dot.edge(label, b['bus_2'])
if b['(un)directed'] == 'undirected':
dot.edge(b['bus_2'], label)
dot.edge(label, b['bus_1'])
elif i == 'transformers':
dot.node(b['output'], shape='ellipse', fontsize="10")
dot.edge(b[bus[i][0]], label)
dot.edge(label, b['output'])
if b['output2'] != "None":
dot.node(b['output2'], shape='ellipse', fontsize="10")
dot.edge(label, b['output2'])
if b['transformer type'] == "HeatPump":
# adds "_low_temp_source" to the label
low_temp_source = label + '_low_temp_source'
# Linebreaks, so that the labels fit the boxes
low_temp_source = linebreaks(low_temp_source)
# Adds a second input and a heat source (node and edge)
# for heat pumps
dot.node(label + '_low_temp_bus',
shape='ellipse',
fontsize="10")
dot.edge(label + '_low_temp_bus', label)
dot.node(low_temp_source, shape='trapezium',
fontsize="10",
fixedsize='shape', width='1.1', height='0.6')
dot.edge(low_temp_source,
label + '_low_temp_bus')
elif i == 'buses':
if b['excess'] and b['shortage']:
label = b['label'] + '_excess'
label = linebreaks(label)
dot.node(label, shape='invtrapezium', fontsize="10",
fixedsize='shape', width='1.1', height='0.6')
dot.node(b[bus[i][0]], shape='ellipse', fontsize="10")
dot.edge(b[bus[i][0]], label)
dot.render(filepath + '/graph.gv', view=True) | 85ef4f541750845ec9147d5faf89c1f89dfa7f31 | 2,704 |
import shlex
import subprocess
def exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
"""
Run an arbitrary command locally
Args:
cmd (str): command to run
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): Timeout for the command, defaults to 600 seconds.
ignore_error (bool): True if ignore non zero return code and do not
raise the exception.
Raises:
CommandFailed: In case the command execution fails
Returns:
(CompletedProcess) A CompletedProcess object of the command that was executed
CompletedProcess attributes:
args: The list or str args passed to run().
            returncode (int): The exit code of the process, negative for signals.
stdout (str): The standard output (None if not captured).
stderr (str): The standard error (None if not captured).
"""
masked_cmd = mask_secrets(cmd, secrets)
log.info(f"Executing command: {masked_cmd}")
if isinstance(cmd, str):
cmd = shlex.split(cmd)
completed_process = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
timeout=timeout,
**kwargs,
)
masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)
if len(completed_process.stdout) > 0:
log.debug(f"Command stdout: {masked_stdout}")
else:
log.debug("Command stdout is empty")
masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)
if len(completed_process.stderr) > 0:
log.warning(f"Command stderr: {masked_stderr}")
else:
log.debug("Command stderr is empty")
log.debug(f"Command return code: {completed_process.returncode}")
if completed_process.returncode and not ignore_error:
raise CommandFailed(
f"Error during execution of command: {masked_cmd}."
f"\nError is {masked_stderr}"
)
return completed_process | be6defec9e131f1d22992cb6dae955fd2372c091 | 2,705 |
import numpy as np
def timeParser(dstr):
"""
parse clock time string into array
"""
hh, mm, ss = dstr.split(':')
return np.array([hh, mm, ss]).astype(int) | 3b4f72ceaf4f2e9bd5fc93664d896537ad0f9884 | 2,706 |
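# Usage sketch for timeParser (assumes numpy imported as np, as in the snippet above).
# The string is split on ':' and each field is cast to an integer.
hms = timeParser('13:05:42')
print(hms)  # -> [13  5 42], i.e. hours, minutes, seconds as a numpy int array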
import struct
def get_43_ai_core_data(input_file=None):
"""Function for getting datas from aicore: ov/cnt/total_cyc/ov_cyc/pmu_cnt/stream_id."""
result_data = []
with open(input_file, 'rb') as ai_core_file:
while True:
line_ = ai_core_file.read(128)
if line_:
if not line_.strip():
continue
else:
break
format_ = "BBHHHIIqqqqqqqqqqIIIIIIII"
result_ = [hex(i) for i in struct.unpack(format_, line_)]
byte01 = bin(int(result_[0].replace('0x', ''), 16)).replace('0b', '').zfill(8)
ov = byte01[-4]
cnt = byte01[0:4]
total_cyc = int(result_[7].replace('0x', ''), 16)
ov_cyc = int(result_[8].replace('0x', ''), 16)
pmu_cnt = tuple(int(i.replace('0x', ''), 16) for i in result_[9:17])
stream_id = int(result_[17].replace('0x', ''), 16)
result_data.append((ov, cnt, total_cyc, ov_cyc, stream_id, pmu_cnt))
return result_data | 03c9a62a4fd2a2041489cbcb19e2e8e4788e6b0d | 2,707 |
from copy import deepcopy
from typing import Type
def get_configuration_class_with_attributes(
klass: Type[AlgorithmConfiguration],
) -> Type[AlgorithmConfiguration]:
"""Get AlgorithmConfiguration with set attributes.
Args:
klass: a class to be used to extract attributes from.
Returns:
a class with the attributes set.
"""
configuration_class = deepcopy(AlgorithmConfiguration)
setattr(configuration_class, "algorithm_type", klass.algorithm_type)
setattr(configuration_class, "algorithm_name", klass.algorithm_name)
setattr(configuration_class, "algorithm_application", klass.__name__)
setattr(configuration_class, "algorithm_version", klass.algorithm_version)
return configuration_class | ae8ea9b30781854269eb97da6889d1a61ae29935 | 2,708 |
def get_token_symbol(token_address: str):
"""
Gets the token symbol
If not have the external method `symbol` to get the score symbol,
it will raise JSONRPCException.
"""
call = CallBuilder()\
.from_(wallet.get_address())\
.to(token_address)\
.method("symbol")\
.build()
return icon_service.call(call) | 9c84fbcb893345f701d8ce0d24509dd595139d8c | 2,709 |
import numpy as np
def iceil(x):
    r"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such
that `i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {numpy.ndarray, scalar}
The ceiling of each element in `x`, with `int` dtype.
"""
return np.ceil(x).astype(int) | bdc893fe00f073393240b1a861e79c9c4667abc4 | 2,710 |
from typing import Optional
import yaml
def get_repo_version(filename: str, repo: str) -> Optional[str]:
"""Return the version (i.e., rev) of a repo
Args:
filename (str): .pre-commit-config.yaml
repo (str): repo URL
Returns:
Optional[str]: the version of the repo
"""
with open(filename, "r") as stream:
pre_commit_data = yaml.safe_load(stream)
pre_config_repo = next(
(item for item in pre_commit_data["repos"] if item["repo"] == repo), None
)
if pre_config_repo:
return pre_config_repo["rev"]
return None | 821653bdeb60a86fce83fb3a05609996231ec5d4 | 2,711 |
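# Usage sketch for get_repo_version. Assuming a hypothetical .pre-commit-config.yaml containing
#   repos:
#     - repo: https://github.com/psf/black
#       rev: 22.3.0
#       hooks:
#         - id: black
# the call returns the pinned revision string (file path and repo URL are illustrative only):
version = get_repo_version(".pre-commit-config.yaml", "https://github.com/psf/black")
print(version)  # -> '22.3.0', or None if the repo is not listed in the config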
from typing import Iterator
from typing import Tuple
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def plot_grad_flow(named_parameters: Iterator[Tuple[str, torch.nn.Parameter]]) -> plt.Figure:
"""
Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: Plug this function into the Trainer class after loss.backward() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow
"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and ("bias" not in n):
layers.append(n.replace('.weight', ''))
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
fig, ax = plt.subplots()
ax.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
ax.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
ax.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
ax.set_xticks(range(0, len(ave_grads), 1))
ax.set_xticklabels(layers, rotation=45)
ax.set_xlim(left=0, right=len(ave_grads))
ax.set_ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
ax.set_xlabel("Layers")
ax.set_ylabel("average gradient")
ax.set_title("Gradient flow")
ax.grid(True)
ax.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
return fig | cc0bd23b8cea9359e003cff7c414ce80fcbf5b1b | 2,712 |
from typing import Dict
from typing import Any
def mk_cli_context_settings(
mk_db: CliCtxDbBase.MkFnT,
) -> Dict[str, Any]:
"""Create initial click context parameters for this cli application.
This is currently used as input for autocompletion.
Example:
`@click.group(context_settings=mk_cli_context_settings())`
See `init_cli_ctx` which depends on this.
"""
obj_d = mk_cli_db_obj_d(mk_db)
return dict(
obj=obj_d,
        # It is also possible to customize cli default values from here.
# <https://click.palletsprojects.com/en/7.x/commands/#overriding-defaults>
# default_map
) | 09f232936ef3c09a5c00edada04a31a64058aaad | 2,713 |
import requests
import ast
from requests.exceptions import HTTPError
def get_file_action(header: 'dict[str,str]') -> str:
"""Gets action file form main repo
Args:
header (dict[str,str]): Header with auth token
Raises:
        get_aciton_file_e: Raised when no action file was collected
Returns:
str: The content of the action file
"""
response = requests.get("https://api.github.com/repos/vovsike/ImageBuilderAPIScript/contents/action_raw.yaml", headers=header)
try:
response.raise_for_status()
except HTTPError as get_aciton_file_e:
print("Error getting action file")
raise get_aciton_file_e
content = ast.literal_eval(response.content.decode("utf-8")).get("content")
return content | d1c6eb349aea156e2180f5218e247e86a8a60f3c | 2,714 |
from typing import Union
from typing import Callable
import warnings
from functools import partial, wraps
def sensor(raw_input_shape: StandardizedTensorShape, f: SensorFunction = None,
sensor_id: str = None, history: int = None) \
-> Union[Callable[[SensorFunction], SensorLambda], SensorLambda]:
"""Decorator for creating sensors from functions.
Usage:
@sensor((5, 8))
def my_sensor(env, frame):
sensor_reading = np.random.uniform(0, 1, (5, 8))
return sensor_reading
kernel.add_module(my_sensor)
"""
if f is None:
kwargs = {}
if sensor_id is not None:
kwargs.update(sensor_id=sensor_id)
return partial(sensor, raw_input_shape, **kwargs)
if sensor_id is None:
sensor_id = get_default_sensor_id(f)
if sensor_id in _SENSOR_MAP:
sensor_obj = _SENSOR_MAP[sensor_id]
if isinstance(sensor_obj, SensorHistory):
wrapped = sensor_obj.wrapped
else:
wrapped = sensor_obj
if wrapped.f != f or wrapped.raw_input_shape != raw_input_shape:
warnings.warn("Redefining sensor %s with function %s and shape %s.\n"
"Original function: %s\nOriginal shape: %s" %
(sensor_id, f, raw_input_shape, wrapped.f, wrapped.raw_input_shape))
else:
return sensor_obj
sensor_obj = wraps(f)(SensorLambda(sensor_id, raw_input_shape, f))
if history is not None:
sensor_obj = SensorHistory(sensor_obj, history)
_SENSOR_MAP[sensor_id] = sensor_obj
return sensor_obj | 0bfa60c70cc43cd0929a8db840469d8fd6ffbac7 | 2,715 |
def convert_event(ui_event):
"""Converts ui.event into ecs.event
This maps keyboard entries into something that the system can handle
TODO: Add a movement system
"""
if isinstance(ui_event, KeyboardEvent):
vim_movement_mapper = {
# Cardinal
KeyboardEvent("h"): vector.LEFT,
KeyboardEvent("j"): vector.DOWN,
KeyboardEvent("k"): vector.UP,
KeyboardEvent("l"): vector.RIGHT,
# Diagonals
KeyboardEvent("y"): vector.UP_LEFT,
KeyboardEvent("u"): vector.UP_RIGHT,
KeyboardEvent("b"): vector.DOWN_LEFT,
KeyboardEvent("n"): vector.DOWN_RIGHT,
# No movement
KeyboardEvent("."): vector.NONE,
}
movement = vim_movement_mapper.get(ui_event)
if movement:
ecs_event = Event("MOVE", settings.player, movement)
return ecs_event
if ui_event == KeyboardEvent('return', 13, meta=True):
tdl.console_set_fullscreen(not tdl.console_is_fullscreen())
return None
if ui_event == KeyboardEvent("escape"):
exit(0) | ecf396823f3bc48fda696abd96a13e34b82a6b06 | 2,716 |
def collection(collection, _pod=None):
"""Retrieves a collection from the pod."""
return _pod.get_collection(collection) | 6d95c9afbcdbb2fe81f71b9d4f17be50aec1aea4 | 2,717 |
def appif(cfg):
"""
Return interface belonging to application
"""
return get_interface_of_network(appnet(cfg)['name']) | e13f7cee8f4785e82bc558500d5d4938a3c728f2 | 2,718 |
def f(x):
"""
    Try to have the NN approximate the
    XOR function.
"""
if x[0] == x[1]:
return 0.
else:
return 1. | 8111e53f0ff0dfdd75f08d845e5176bc287a65e1 | 2,719 |
import pandas
import json
def dataframe_to_list(df: pandas.DataFrame) -> list:
"""
Use caution with datetime columns, as they may not be de/serialized as desired
"""
return json.loads(df.to_json(orient="records")) | 244f76f1970364f13ddf6bb53a6280962d0ae45a | 2,720 |
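# Usage sketch for dataframe_to_list (uses the `import pandas` from the snippet above):
df = pandas.DataFrame({"a": [1, 2], "b": ["x", "y"]})
print(dataframe_to_list(df))  # -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]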
def decimal_to_binary(integer,nbits=8,grouped=0):
"""Converts integer to binary string of length nbits, sign bit and
then m.s.b. on the left. Negative numbers are twos-complements, i.e.,
bitwise complement + 1."""
# Just remember that minus sign and ignore it
if integer < 0:
negative = True
integer = abs(integer+1)
else:
negative = False
    # build up the string
result = ''
# part of number left to process
remaining_integer = integer
while (remaining_integer > 0) & (nbits > 0):
lsb = remaining_integer % 2
if negative:
lsb = 1-lsb
result = ''.join((str(lsb),result))
remaining_integer = remaining_integer >> 1
nbits -= 1
while nbits > 0:
if negative:
result = ''.join(('1',result))
else:
result = ''.join(('0',result))
nbits -= 1
if grouped:
temp = result
result = ""
for bit in range(len(temp)):
if bit and (bit % grouped) == 0:
result += ' '
result += temp[bit]
return result | 89cef0feaad6d1c25dd67b97a0caf2212ea4a55d | 2,721 |
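# A few worked examples for decimal_to_binary (sign bit first, twos-complement for negatives):
print(decimal_to_binary(5))               # -> '00000101'
print(decimal_to_binary(-5))              # -> '11111011' (twos-complement of 5 in 8 bits)
print(decimal_to_binary(5, nbits=4))      # -> '0101'
print(decimal_to_binary(200, grouped=4))  # -> '1100 1000' (a space every 4 bits)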
def line_integrals(state, uloc, vloc, kind="same"):
"""
calculate line integrals along all islands
Arguments:
kind: 'same' calculates only line integral contributions of an island with itself,
while 'full' calculates all possible pairings between all islands.
"""
vs = state.variables
nisle = state.dimensions["isle"]
ipx, ipy = runtime_state.proc_idx
if ipx == 0:
i = slice(1, -2)
ip1 = slice(2, -1)
else:
i = slice(2, -2)
ip1 = slice(3, -1)
if ipy == 0:
j = slice(1, -2)
jp1 = slice(2, -1)
else:
j = slice(2, -2)
jp1 = slice(3, -1)
east = (
vloc[i, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
+ uloc[i, jp1, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, jp1, npx.newaxis]
)
west = (
-vloc[ip1, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
- uloc[i, j, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, j, npx.newaxis]
)
north = (
vloc[i, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
- uloc[i, j, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, j, npx.newaxis]
)
south = (
-vloc[ip1, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
+ uloc[i, jp1, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, jp1, npx.newaxis]
)
if kind == "same":
east = npx.sum(east * vs.line_dir_east_mask[i, j], axis=(0, 1))
west = npx.sum(west * vs.line_dir_west_mask[i, j], axis=(0, 1))
north = npx.sum(north * vs.line_dir_north_mask[i, j], axis=(0, 1))
south = npx.sum(south * vs.line_dir_south_mask[i, j], axis=(0, 1))
return global_sum(east + west + north + south)
elif kind == "full":
isle_int = npx.empty((nisle, nisle))
def loop_body(isle, isle_int):
east_isle = npx.sum(
east[..., isle, npx.newaxis] * vs.line_dir_east_mask[i, j],
axis=(0, 1),
)
west_isle = npx.sum(
west[..., isle, npx.newaxis] * vs.line_dir_west_mask[i, j],
axis=(0, 1),
)
north_isle = npx.sum(
north[..., isle, npx.newaxis] * vs.line_dir_north_mask[i, j],
axis=(0, 1),
)
south_isle = npx.sum(
south[..., isle, npx.newaxis] * vs.line_dir_south_mask[i, j],
axis=(0, 1),
)
isle_int = update(isle_int, at[:, isle], east_isle + west_isle + north_isle + south_isle)
return isle_int
isle_int = for_loop(0, nisle, loop_body, isle_int)
return global_sum(isle_int)
else:
raise ValueError('"kind" argument must be "same" or "full"') | 4a8b32246a9a60d9a42368d7643bc6ddea1c44d0 | 2,722 |
import numpy as np
import plotly.graph_objs as go
def _BBANDS(kwargs):
    """
    Bollinger Bands
    Technical parameters
    -------
    Uses a 21-day period and a 2x multiplier
    """
df = kwargs.get('df')
limit_start = kwargs.get('limit_start')
limit_end = kwargs.get('limit_end')
ndays = 21
inds = indicators(
'BBANDS', df, timeperiod=ndays).loc[limit_start:limit_end, :]
traces = []
for c in inds.columns:
name = 'price_{}_{}'.format(c, ndays)
trace = go.Scatter(
x=np.arange(inds.shape[0]),
y=inds[c],
name=name,
)
traces.append(trace)
return traces | ee19ee06b5fb6a306f6d43285a616e61584c65a8 | 2,723 |
from typing import Optional
from typing import Iterable
from typing import Tuple
def make_colors(color: OpColor, fill_color: OpColor, colors: Optional[Iterable[OpColor]]) -> Tuple[OpColor, ...]:
"""Creates final colors tuple."""
if colors is None:
return conform_color(color), conform_color(fill_color), *DEFAULT_COLORS[2:]
colors = [conform_color(c) for c, _ in zip(colors, range(len(DEFAULT_COLORS)))]
colors.extend(DEFAULT_COLORS[len(colors):])
return tuple(colors) | 8bba1bef72543fb4bd497ada924d23ebf2692f7c | 2,724 |
def print_qa(questions,
answers_gt,
answers_gt_original,
answers_pred,
era,
similarity=dirac,
path=''):
"""
In:
questions - list of questions
answers_gt - list of answers (after modifications like truncation)
answers_gt_original - list of answers (before modifications)
answers_pred - list of predicted answers
era - current era
        similarity - measure of similarity between gt_original and prediction;
                     by default the dirac measure
        path - path for the output (if empty then stdout is used);
               by default an empty path
Out:
the similarity score
"""
if len(questions) != len(answers_gt):
        raise AssertionError('Different questions and answers_gt lengths.')
if len(questions) != len(answers_pred):
        raise AssertionError('Different questions and answers_pred lengths.')
output = ['-' * 50, 'Era {0}'.format(era)]
score = 0.0
for k, q in list(enumerate(questions)):
a_gt = answers_gt[k]
a_gt_original = answers_gt_original[k]
a_p = answers_pred[k]
        score += similarity(a_p, a_gt_original)
if isinstance(q[0], unicode_fn):
tmp = unicode_fn('question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n')
else:
tmp = 'question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n'
output.append(tmp.format(q, a_gt, a_gt_original, a_p))
score = (score / len(questions)) * 100.0
output.append('Score: {0}'.format(score))
if path == '':
print('%s' % '\n'.join(map(str, output)))
else:
list2file(path, output)
return score | 01b44361066668462868abed00f49811e0648d11 | 2,725 |
def recast_to_supercell(z, z_min, z_max):
"""Gets the position of the particle at ``z`` within the simulation
    supercell with boundaries ``z_min`` and ``z_max``. If the particle is
outside the supercell, it returns the position of its closest image.
:param z:
:param z_min:
:param z_max:
:return:
"""
sc_size = (z_max - z_min)
return z_min + (z - z_min) % sc_size | 2d144a656a92eaf3a4d259cf5ad2eadb6cfdf970 | 2,726 |
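# Worked examples for recast_to_supercell with a supercell spanning [0, 10):
print(recast_to_supercell(3.0, 0.0, 10.0))   # -> 3.0 (already inside the cell)
print(recast_to_supercell(12.5, 0.0, 10.0))  # -> 2.5 (wrapped back by one cell length)
print(recast_to_supercell(-1.0, 0.0, 10.0))  # -> 9.0 (closest image seen from the left boundary)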
def list_services(request):
""" Should probably move this to an Ajax JSON request like the probe. """
if request.method == "POST":
action = request.POST.get("action")
sid = request.POST.get("id")
logger.debug(f"-- action: {action} sid: {sid}")
if action == "delete":
logger.debug(f"-- deleting: {sid}")
response = tycho.delete({"name": sid})
sleep(2)
logger.debug(f"-- delete response: status: {response}")
return HttpResponseRedirect("/apps/") | bd4d9b9dd9300c127b97fb838aa5085979904701 | 2,727 |
from typing import Union
from pathlib import Path
import dask
import numpy as np
import pandas as pd
import xarray as xr
def open_sat_data(
zarr_path: Union[Path, str],
convert_to_uint8: bool = True,
) -> xr.DataArray:
"""Lazily opens the Zarr store.
Args:
zarr_path: Cloud URL or local path pattern. If GCP URL, must start with 'gs://'
"""
_log.debug("Opening satellite data: %s", zarr_path)
# Silence the warning about large chunks.
# Alternatively, we could set this to True, but that slows down loading a Satellite batch
# from 8 seconds to 50 seconds!
dask.config.set({"array.slicing.split_large_chunks": False})
# Open the data
dataset = xr.open_dataset(zarr_path, engine="zarr", chunks="auto")
# Flip coordinates to top-left first
dataset = dataset.reindex(y=dataset.y[::-1])
dataset = dataset.reindex(x=dataset.x[::-1])
# Rename
# These renamings will no longer be necessary when the Zarr uses the 'correct' names,
# see https://github.com/openclimatefix/Satip/issues/66
if "variable" in dataset:
dataset = dataset.rename({"variable": "channel"})
elif "channel" not in dataset:
# This is HRV version 3, which doesn't have a channels dim. So add one.
dataset = dataset.expand_dims(dim={"channel": ["HRV"]}, axis=1)
# Rename coords to be more explicit about exactly what some coordinates hold:
# Note that `rename` renames *both* the coordinates and dimensions, and keeps
# the connection between the dims and coordinates, so we don't have to manually
# use `data_array.set_index()`.
dataset = dataset.rename(
{
"time": "time_utc",
"y": "y_geostationary",
"x": "x_geostationary",
}
)
data_array = dataset["data"]
del dataset
# Ensure the y and x coords are in the right order (top-left first):
assert data_array.y_geostationary[0] > data_array.y_geostationary[-1]
assert data_array.x_geostationary[0] < data_array.x_geostationary[-1]
assert data_array.y_osgb[0, 0] > data_array.y_osgb[-1, 0]
assert data_array.x_osgb[0, 0] < data_array.x_osgb[0, -1]
if convert_to_uint8:
data_array = data_array.clip(min=0, max=1023)
data_array.data = (data_array.astype(np.float32).data / 4.0).round().astype(np.uint8)
# Sanity checks!
assert data_array.dims == ("time_utc", "channel", "y_geostationary", "x_geostationary")
datetime_index = pd.DatetimeIndex(data_array.time_utc)
assert datetime_index.is_unique
assert datetime_index.is_monotonic_increasing
# Satellite datetimes can sometimes be 04, 09, minutes past the hour, or other slight offsets.
# These slight offsets will break downstream code, which expects satellite data to be at
# exactly 5 minutes past the hour.
assert (datetime_index == datetime_index.round("5T")).all()
return data_array | ccc79f02bb086b552f69d2e8f9440f02ef121b95 | 2,728 |
from typing import List
from typing import Optional
import io
import csv
from datetime import date
async def get_pedigree(
internal_family_ids: List[int] = Query(None),
response_type: ContentType = ContentType.JSON,
replace_with_participant_external_ids: bool = True,
replace_with_family_external_ids: bool = True,
include_header: bool = True,
empty_participant_value: Optional[str] = None,
connection: Connection = get_project_readonly_connection,
include_participants_not_in_families: bool = False,
):
"""
Generate tab-separated Pedigree file for ALL families
unless internal_family_ids is specified.
Allow replacement of internal participant and family IDs
with their external counterparts.
"""
family_layer = FamilyLayer(connection)
assert connection.project
pedigree_dicts = await family_layer.get_pedigree(
project=connection.project,
family_ids=internal_family_ids,
replace_with_participant_external_ids=replace_with_participant_external_ids,
replace_with_family_external_ids=replace_with_family_external_ids,
empty_participant_value=empty_participant_value,
include_participants_not_in_families=include_participants_not_in_families,
)
if response_type in (ContentType.CSV, ContentType.TSV):
delim = '\t' if response_type == ContentType.TSV else ','
output = io.StringIO()
writer = csv.writer(output, delimiter=delim)
if include_header:
writer.writerow(PedRow.row_header())
keys = [
'family_id',
'individual_id',
'paternal_id',
'maternal_id',
'sex',
'affected',
]
pedigree_rows = [[(row[k] or '') for k in keys] for row in pedigree_dicts]
writer.writerows(pedigree_rows)
basefn = f'{connection.project}-{date.today().isoformat()}'
if internal_family_ids:
        basefn += '-' + '-'.join(str(fm) for fm in internal_family_ids)
extension = 'ped' if response_type == ContentType.TSV else 'csv'
return StreamingResponse(
iter(output.getvalue()),
media_type=f'text/{response_type}',
headers={'Content-Disposition': f'filename={basefn}.{extension}'},
)
return pedigree_dicts | 5ecf064d82a6391d3ed025aeac2bf070710d5ebe | 2,729 |
def lang_string_set_to_xml(obj: model.LangStringSet, tag: str) -> etree.Element:
"""
serialization of objects of class LangStringSet to XML
:param obj: object of class LangStringSet
:param tag: tag name of the returned XML element (incl. namespace)
:return: serialized ElementTree object
"""
et_lss = _generate_element(name=tag)
for language in obj:
et_lss.append(_generate_element(name=NS_AAS + "langString",
text=obj[language],
attributes={"lang": language}))
return et_lss | f49a1d73f1fd4354c245427bc1277600c67a5d99 | 2,730 |
def grasp_from_contacts(contact1,contact2):
"""Helper: if you have two contacts, this returns an AntipodalGrasp"""
d = vectorops.unit(vectorops.sub(contact2.x,contact1.x))
grasp = AntipodalGrasp(vectorops.interpolate(contact1.x,contact2.x,0.5),d)
grasp.finger_width = vectorops.distance(contact1.x,contact2.x)
grasp.contact1 = contact1
grasp.contact2 = contact2
return grasp | 945ff950a59b1442efc6abdce68861957b0a60a7 | 2,731 |
import random
def choose_move(data: dict) -> str:
"""
data: Dictionary of all Game Board data as received from the Battlesnake Engine.
For a full example of 'data', see https://docs.battlesnake.com/references/api/sample-move-request
return: A String, the single move to make. One of "up", "down", "left" or "right".
Use the information in 'data' to decide your next move. The 'data' variable can be interacted
with as a Python Dictionary, and contains all of the information about the Battlesnake board
for each move of the game.
"""
my_head = data["you"]["head"] # A dictionary of x/y coordinates like {"x": 0, "y": 0}
my_body = data["you"]["body"] # A list of x/y coordinate dictionaries like [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
# TODO: uncomment the lines below so you can see what this data looks like in your output!
print(f"~~~ Turn: {data['turn']} Game Mode: {data['game']['ruleset']['name']} ~~~")
print(f"All board data this turn: {data}")
print(f"My Battlesnakes head this turn is: {my_head}")
print(f"My Battlesnakes body this turn is: {my_body}")
possible_moves = ["up", "down", "left", "right"]
    # Don't allow your Battlesnake to move back in on its own neck
possible_moves = avoid_my_neck(my_head, my_body, possible_moves)
# TODO: Using information from 'data', find the edges of the board and don't let your Battlesnake move beyond them
board_height = data["board"]["height"]
board_width = data["board"]["width"]
if my_head["x"] == 0:
possible_moves = remove("left", possible_moves)
if my_head["y"] == 0:
possible_moves = remove("down", possible_moves)
if my_head["x"] == (board_width - 1):
possible_moves = remove("right", possible_moves)
if my_head["y"] == (board_height - 1):
possible_moves = remove("up", possible_moves)
# TODO Using information from 'data', don't let your Battlesnake pick a move that would hit its own body
for square in my_body:
if square["x"] == my_head["x"] and (square["y"] - my_head["y"]) == 1:
possible_moves = remove("up", possible_moves)
elif square["x"] == my_head["x"] and (square["y"] - my_head["y"]) == -1:
possible_moves = remove("down", possible_moves)
elif (square["x"] - my_head["x"]) == 1 and square["y"] == my_head["y"]:
possible_moves = remove("right", possible_moves)
elif (square["x"] - my_head["x"]) == -1 and square["y"] == my_head["y"]:
possible_moves = remove("left", possible_moves)
# TODO: Using information from 'data', don't let your Battlesnake pick a move that would collide with another Battlesnake
opponents = data["board"]["snakes"][1:]
for opp in opponents:
if {"x": my_head["x"], "y": (my_head["y"] + 1)} in opp["body"]:
possible_moves = remove("up", possible_moves)
if {"x": my_head["x"], "y": (my_head["y"] - 1)} in opp["body"]:
possible_moves = remove("down", possible_moves)
if {"x": (my_head["x"] + 1), "y": my_head["y"]} in opp["body"]:
possible_moves = remove("right", possible_moves)
if {"x": (my_head["x"] - 1), "y": my_head["y"]} in opp["body"]:
possible_moves = remove("left", possible_moves)
# TODO: Using information from 'data', make your Battlesnake move towards a piece of food on the board
food = data["board"]["food"]
health = data["you"]["health"]
length = data["you"]["length"]
if health <= 20 and length < 11:
closeFood = closestFood(my_head, food)
if closeFood[0] == 1 and safe(opponents, closeFood[1]):
move = directionToMove(my_head, closeFood[1])
if move in possible_moves:
return move
else:
point = closeFood[1]
# moves towards the closest piece of food
if point["x"] > my_head["x"]:
possible_moves = remove("left", possible_moves)
if point["x"] < my_head["x"]:
possible_moves = remove("right", possible_moves)
if point["y"] > my_head["y"]:
possible_moves = remove("down", possible_moves)
if point["y"] < my_head["y"]:
possible_moves = remove("up", possible_moves)
# Choose a random direction from the remaining possible_moves to move in, and then return that move
# TODO: Explore new strategies for picking a move that are better than random
# makes sure not to collide with itself in the future
if ({"x": my_head["x"], "y": (my_head["y"] + 2)} in my_body):
possible_moves = remove("up", possible_moves)
if ({"x": my_head["x"], "y": (my_head["y"] - 2)} in my_body):
possible_moves = remove("down", possible_moves)
if ({"x": (my_head["x"] + 2), "y": my_head["y"]} in my_body):
possible_moves = remove("right", possible_moves)
if ({"x": (my_head["x"] - 2), "y": my_head["y"]} in my_body):
possible_moves = remove("left", possible_moves)
if len(possible_moves) > 1:
# checks for head to heads
if ("up" in possible_moves) and not safe(opponents, {"x": my_head["x"], "y": (my_head["y"] + 1)}):
possible_moves = remove("up", possible_moves)
if ("down" in possible_moves) and not safe(opponents, {"x": my_head["x"], "y": (my_head["y"] - 1)}):
possible_moves = remove("down", possible_moves)
if ("right" in possible_moves) and not safe(opponents, {"x": (my_head["x"] + 1), "y": my_head["y"]}):
possible_moves = remove("right", possible_moves)
if ("left" in possible_moves) and not safe(opponents, {"x": (my_head["x"] - 1), "y": my_head["y"]}):
possible_moves = remove("left", possible_moves)
# if len(possible_moves) > 1:
# # prevents getting stuck in a corner
# awayFromCorners(my_head, possible_moves, board_height, board_width)
move = random.choice(possible_moves)
print(f"{data['game']['id']} MOVE {data['turn']}: {move} picked from all valid options in {possible_moves}")
return move | 62d663720c4592c6e97215cd6e4ec772f3038ae2 | 2,732 |
import os
from urllib.request import urlretrieve
import pandas as pd
def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL,
force_download=False):
"""Download and cache the fremont data
Parameters
----------
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force redownload of data
Returns
-------
data : pandas.DataFrame
The fremont bridge data
"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
    # Before:
    # data = pd.read_csv(filename, index_col='Date', parse_dates=True)
    # (parse the Date strings and make them the index)
    # After: ~20x faster
    # look at http://strftime.org/
    data = pd.read_csv(filename, index_col='Date')
try:
#data.index = pd.to_datetime(data.index, format='%m/%d/%Y %H:%M:%S %p')
        data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')  # %I for 12-hour clock with AM/PM
except TypeError:
        data.index = pd.to_datetime(data.index)  # inferring the format automatically is much slower (at least 10x)
data.columns = ['East', 'West']
data['Total'] = data['West'] + data['East']
return data | 28a2abdf390f42964aebe058206c77f3e51a0d88 | 2,733 |
def boolean_dumper(dumper, value):
"""
Dump booleans as yes or no strings.
"""
value = u'yes' if value else u'no'
style = None
return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style) | 40a6a270d1ad1a289947c064c7f85edb1d589bb7 | 2,734 |
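# Usage sketch with PyYAML: register the representer so that booleans are dumped as yes/no.
import yaml
yaml.add_representer(bool, boolean_dumper)
print(yaml.dump({'enabled': True, 'debug': False}))
# -> debug: no
#    enabled: yes   (keys are sorted alphabetically by default)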
def preprocess_data_4_catboost(data_df, output_path=None):
"""
preprocess data for working with gradient boosting techniques
specifically with the catboost library. since this is going to use
the preprocessing built into the catboost library there are slightly
different steps to be done
"""
"""
train_data = Pool(
data=FeaturesData(
num_feature_data=np.array([[1, 4, 5, 6],
[4, 5, 6, 7],
[30, 40, 50, 60]],
dtype=np.float32),
cat_feature_data=np.array([[b"a", b"b"],
[b"a", b"b"],
[b"c", b"d"]],
dtype=object)
),
label=[1, 1, -1]
)
"""
new_df_w_labels = data_df.copy()
for idx, odds_string in data_df.ODDS.iteritems():
# skip data qual errors and abnormalities
if not isinstance(odds_string, str):
continue
divied_list = _preprocess_odds_string(odds_string)
for school_or_perc in divied_list:
if school_or_perc in SCHOOLS_REVERSED.keys():
school_idx = divied_list.index(school_or_perc)
# the percent is always the next index after the school
perc = divied_list[school_idx + 1]
# print "School: {};Odds: {}".format(school_or_perc,perc)
# use the standardized name
standard_school_name = SCHOOLS_REVERSED[school_or_perc]
# insert the specific name value for the correct row
new_df_w_labels.at[idx, standard_school_name] = _parse_str_nums(perc)
new_df_w_labels = _reduce_majors_dimensionality(new_df_w_labels)
# drop unused columns
data_after_drop = new_df_w_labels.drop(['ODDS', 'INTERNATIONAL', 'JOBTITLE'], axis=1, inplace=False)
# change categorical data into numeric
categorical_cols = ['UNIVERSITY', 'MAJOR', 'GENDER', 'RACE']
# a dataframe of ONLY the features
features_only_df = data_after_drop.drop(TARGET_LABELS, axis=1, inplace=False)
# determine the columns that are features by subtracting from labels
feature_cols = set(data_after_drop.columns) - set(TARGET_LABELS)
# a dataframe with ONLY labels
labels = data_after_drop.drop(feature_cols, axis=1, inplace=False)
multi_data_set_dict = {}
for school in labels.columns:
df_for_school = features_only_df.join(pd.DataFrame({school: labels[school]}))
# a holder dictionary that contains the features numpy ndarray for features and numpy ndarray for school label
school_dict = {}
# drop the NaNs from the dataset in any feature column or label. otherwise model training will fail
df_for_school.dropna(inplace=True)
# store the features as a numpy ndarray to be fed directly to model training
numerical_features_np_array = df_for_school.drop([school] + categorical_cols, axis=1, inplace=False).values
categorical_features_np_array = df_for_school[categorical_cols].values
# store the labels for a particular school as a numpy ndarray to be fed directly to model training
labels_as_list = df_for_school.drop(feature_cols, axis=1, inplace=False)[school].tolist()
datasetpool = Pool(
data=FeaturesData(
num_feature_data=np.array(numerical_features_np_array,
dtype=np.float32),
cat_feature_data=np.array(categorical_features_np_array,
dtype=object)
),
label=labels_as_list
)
multi_data_set_dict[school] = datasetpool
return multi_data_set_dict | 9bc60ca096963fe6fb8a30e19442d870694f1339 | 2,735 |
def conv_current_to_electrons_second(current):
"""
Convert a current in Amps to a number of
electrons per second.
"""
return int(current / const.electron_charge) | 76051a529c230b54a6d07f282c97b48d4ea59758 | 2,736 |
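# Quick sanity check, assuming `const.electron_charge` holds the (positive) elementary
# charge, roughly 1.602e-19 C: a current of 1 A then corresponds to about
# 1 / 1.602e-19, i.e. ~6.2e18 electrons per second.
print(conv_current_to_electrons_second(1.0))  # -> ~6.2e18 (as an int)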
import json
def get_users():
"""
Use urllib3 to make a REST call to get list of Okta
Users for a given Okta Application
"""
request_url = f"{OKTA_URL}/apps/{OKTA_APP_ID}/users"
okta_users_request = HTTP.request(
'GET',
request_url,
headers={'Content-Type': 'application/json', 'Authorization': OKTA_AUTH},
retries=False,
)
LOGGER.info(f"Retrieved Okta Users Information from {request_url}")
users = json.loads(okta_users_request.data.decode('utf-8'))
return users | b94816de46d843a3a80a53c569d52b17e142d4e9 | 2,737 |
def n_sample_per_class_train_set(df, n_samples=3, class_column="category"):
"""
returns a subset of the provided df that contains n_samples instances of each class
:param df: panda dataframe that contains hidden_reps with class labels
:param n_samples: number of samples per class
:param class_column: column with class labels in the df
:return: subset of the original df that contains maximum n_samples instances of each class
"""
assert class_column in df.columns
classes = list(set(df[class_column]))
class_count_dict = dict([(c, 0) for c in classes])
selection_array = []
for i, c in zip(df.index, df[class_column]):
if class_count_dict[c] >= n_samples:
selection_array.append(False)
continue
else:
selection_array.append(True)
class_count_dict[c] += 1
print(len(class_count_dict), len(selection_array))
assert len(selection_array) == len(df.index)
return df.copy()[selection_array] | 107d9845b8e5efb3da09f13d11ae796fc560b874 | 2,738 |
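# Usage sketch for n_sample_per_class_train_set on a tiny, hypothetical dataframe
# (the function also prints a short debug line):
import pandas as pd
toy = pd.DataFrame({"hidden_rep": [0.1, 0.2, 0.3, 0.4, 0.5],
                    "category": ["a", "a", "a", "b", "b"]})
subset = n_sample_per_class_train_set(toy, n_samples=2, class_column="category")
print(subset["category"].value_counts().to_dict())  # -> {'a': 2, 'b': 2}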
def clip_count(cand_d, ref_ds):
"""Count the clip count for each ngram considering all references."""
count = 0
for m in cand_d.keys():
m_w = cand_d[m]
m_max = 0
for ref in ref_ds:
if m in ref:
m_max = max(m_max, ref[m])
m_w = min(m_w, m_max)
count += m_w
return count | f33ad8c5a9de8e136ea97684de3bd64779471bb6 | 2,739 |
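# Worked example of clipped n-gram counting as used in BLEU (unigram counts as plain dicts):
cand = {"the": 3, "cat": 1}                          # candidate uses "the" three times
refs = [{"the": 2, "cat": 1}, {"the": 1, "dog": 1}]
print(clip_count(cand, refs))  # -> 3: "the" is clipped to the max reference count 2, "cat" adds 1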
import docutils.nodes
from docutils.statemachine import StringList
def container_wrapper(directive, literal_node, caption, classes):
"""adapted from
https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/code.py
"""
container_node = docutils.nodes.container(
'', literal_block=True, classes=classes) # ['literal-block-wrapper']
parsed = docutils.nodes.Element()
directive.state.nested_parse(StringList([caption], source=''),
directive.content_offset, parsed)
if isinstance(parsed[0], docutils.nodes.system_message):
msg = 'Invalid caption: %s' % parsed[0].astext()
raise ValueError(msg)
elif isinstance(parsed[0], docutils.nodes.Element):
caption_node = docutils.nodes.caption(parsed[0].rawsource, '',
*parsed[0].children)
caption_node.source = literal_node.source
caption_node.line = literal_node.line
container_node += caption_node
container_node += literal_node
return container_node
else:
raise RuntimeError | 17e9db3f494174a721cd80c179514bbc3db773c1 | 2,740 |
def get_current_and_next_quarters(request, num):
"""
Returns the current and next num uw_sws.models.Term objects in a list
for the current quarter refered in the user session. Returns the next
num -1 quarters along with the current one.
"""
term = get_current_quarter(request)
quarters = [term]
for x in range(1, num):
term = get_term_after(term)
quarters.append(term)
return quarters | 26f5d268148d3f0395d1d41739d51b1f06a0bd6a | 2,741 |
from typing import Tuple
from typing import Dict
import numpy as np
import pandas as pd
def _create_metadata_from_dat_df(
csv_df: pd.DataFrame,
) -> Tuple[Dict[int, tuple], Pitch]:
"""Creates meta information from the CSV file as parsed by pd.read_csv().
Parameters
----------
csv_df: DataFrame
Containing all data from the positions CSV file as DataFrame.
Returns
-------
    periods: Dict[int, tuple]
        Dictionary with start and end frames:
``periods[segment] = (startframe, endframe)``.
pitch: Pitch
Playing Pitch object.
"""
# create pitch
pi_len = csv_df["pitch_dimension_long_side"].values[0]
pi_wid = csv_df["pitch_dimension_short_side"].values[0]
pitch = Pitch.from_template(
"statsperform",
length=pi_len,
width=pi_wid,
sport="football",
)
# create periods for segments, coded as jumps in the frame sequence
periods = {}
frame_values = csv_df["frame_count"].unique()
seg_idx = np.where(np.diff(frame_values, prepend=frame_values[0]) > 1)
seg_idx = np.insert(seg_idx, 0, 0)
seg_idx = np.append(seg_idx, len(frame_values))
for segment in range(len(seg_idx) - 1):
start = int(frame_values[seg_idx[segment]])
end = int(frame_values[seg_idx[segment + 1] - 1])
periods[segment] = (start, end)
return periods, pitch | d53e66ff343c2391058e2717dde5bbe7c11a2c44 | 2,742 |
def main():
"""
Main function used in script, primarily used as a handle
to get the output into stdout.
"""
# There are no args, but parse them just so help works
print(process_files_json(), end="")
return None | 495af3e19cdd823ee6853f516b17df0c489f34f9 | 2,743 |
import torch
import os
import time
import datetime
import sys
from torchvision.utils import save_image
def train_generator(train_loader, test_loader, num_epoch=500,
lr=0.0001, beta1=0.9, beta2=0.999):
"""Train a generator on its own.
Args:
train_loader: (DataLoader) a DataLoader wrapping the training dataset
test_loader: (DataLoader) a DataLoader wrapping the test dataset
num_epoch: (int) number of epochs performed during training
lr: (float) learning rate of the discriminator and generator Adam optimizers
beta1: (float) beta1 coefficient of the discriminator and generator Adam optimizers
beta2: (float) beta1 coefficient of the discriminator and generator Adam optimizers
Returns:
generator: (nn.Module) the trained generator
"""
cuda = True if torch.cuda.is_available() else False
print(f"Using cuda device: {cuda}") # check if GPU is used
# Tensor type (put everything on GPU if possible)
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Output folder
if not os.path.exists("./images/generator"):
os.makedirs("./images/generator")
# Loss function
criterion = torch.nn.L1Loss() # A loss for a voxel-wise comparison of images like torch.nn.L1Loss
# Initialize the generator
generator = GeneratorUNet()
if cuda:
generator = generator.cuda()
criterion.cuda()
# Optimizer
optimizer = torch.optim.Adam(generator.parameters(),
lr=lr, betas=(beta1, beta2))
def sample_images(epoch):
"""Saves a generated sample from the validation set"""
imgs = next(iter(test_loader))
real_A = imgs["T1"].type(Tensor)
real_B = imgs["T2"].type(Tensor)
fake_B = generator(real_A)
img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2)
save_image(img_sample, f"./images/generator/epoch-{epoch}.png",
nrow=5, normalize=True)
# ----------
# Training
# ----------
prev_time = time.time()
for epoch in range(num_epoch):
for i, batch in enumerate(train_loader):
# Inputs T1-w and T2-w
real_t1 = batch["T1"].type(Tensor)
real_t2 = batch["T2"].type(Tensor)
# Remove stored gradients
optimizer.zero_grad()
# Generate fake T2 images from the true T1 images
fake_t2 = generator(real_t1)
# Compute the corresponding loss
loss = criterion(fake_t2, real_t2)
# Compute the gradient and perform one optimization step
loss.backward()
optimizer.step()
# --------------
# Log Progress
# --------------
# Determine approximate time left
batches_done = epoch * len(train_loader) + i
batches_left = num_epoch * len(train_loader) - batches_done
time_left = datetime.timedelta(
seconds=batches_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
sys.stdout.write(
"\r[Epoch %d/%d] [Batch %d/%d] [Loss: %f] ETA: %s"
% (
epoch + 1,
num_epoch,
i,
len(train_loader),
loss.item(),
time_left,
)
)
# Save images at the end of each epoch
sample_images(epoch)
return generator | 714b6f9a9e21cda960430a2458fdc939fb48f36b | 2,744 |
def expand(doc, doc_url="param://", params=None):
"""
    ASSUMING YOU ALREADY PULLED THE doc FROM doc_url, YOU CAN STILL USE THE
    EXPANDING FEATURE
    USE mo_json_config.expand({}) TO ASSUME CURRENT WORKING DIRECTORY
:param doc: THE DATA STRUCTURE FROM JSON SOURCE
:param doc_url: THE URL THIS doc CAME FROM (DEFAULT USES params AS A DOCUMENT SOURCE)
:param params: EXTRA PARAMETERS NOT FOUND IN THE doc_url PARAMETERS (WILL SUPERSEDE PARAMETERS FROM doc_url)
:return: EXPANDED JSON-SERIALIZABLE STRUCTURE
"""
if doc_url.find("://") == -1:
        Log.error("{{url}} must have a protocol (eg http://) declared", url=doc_url)
url = URL(doc_url)
url.query = set_default(url.query, params)
phase1 = _replace_ref(doc, url) # BLANK URL ONLY WORKS IF url IS ABSOLUTE
phase2 = _replace_locals(phase1, [phase1])
return wrap(phase2) | d8a8045cb6afea089f1241dc1a47bf6c95fc3628 | 2,745 |
def get_model_creator(hparams):
"""Get the right model class depending on configuration."""
    if hparams.architecture == 'peng':
        # vanilla lstm, seq2seq
        model_creator = model.Model
return model_creator | 84c65108e6be1a723184778db564b75ea333d52f | 2,746 |
import unittest
def extra():
"""Tests faint.extra. That is, less central faint code, possibly
requiring extensions (e.g. tesseract or GraphViz dot).
"""
return unittest.defaultTestLoader.discover("py_tests/test_extra",
top_level_dir="py_tests/") | c6fc2694144e852edaef219d95bc384b5b394d7d | 2,747 |
def cmd_te_activate(abs_filename):
"""最前面に持ってくる(テキストエディタ向け)
ファイルが含まれるVisual Studioを探し出して最前面に持ってくる。
abs_filename- ファイル名の絶対パス
(Ex.) c:/project/my_app/src/main.cpp
"""
return _te_main2(cmd_activate,abs_filename) | 2a4922c89360f049c50cc2b2b81509c48d32968e | 2,748 |
def search(search_domain, fmt=None):
"""Handle redirect from form submit."""
domain = tools.parse_post_data(search_domain)
if domain is None:
return handle_invalid_domain(search_domain)
if fmt is None:
if features.enable_async_search():
return flask.redirect('/search?ed={}'.format(search_domain))
else:
return html_render(domain)
elif fmt == 'json':
return json_render(domain)
elif fmt == 'csv':
return csv_render(domain)
else:
flask.abort(400, 'Unknown export format: {}'.format(fmt)) | 4be9a751fad12fae67a34b7c4bda2b2be3d7ff89 | 2,749 |
def get_graph_from_particle_positions(
particle_positions, box_lengths, cutoff_distance, store_positions=False
):
"""Returns a networkx graph of connections between neighboring particles
Args:
particle_positions (ndarray or dataframe): Shape
(`n_particles`, `n_dimensions`). Each of the `n_particles`
rows is a length `n_dimensions` particle position vector.
Positions must be in range [0, `box_lengths[d]`) for each
dimension `d`.
box_lengths (ndarray): Shape (`n_dimensions`,) array of box lengths for
each box dimension.
cutoff_distance (float): Maximum length between particle pairs to
consider them connected
store_positions (bool, optional): If True, store position vector data
within each node in the graph. Defaults to False.
Returns:
networkx Graph: Graph of connections between all particle pairs with
distance below cutoff_distance
"""
distances = pairwise_distances(particle_positions, box_lengths)
graph = get_within_cutoff_graph(distances, cutoff_distance)
if store_positions is True:
for particle_id, particle_position in zip(
graph.nodes, particle_positions
):
for i, x_i in enumerate(particle_position):
graph.nodes[particle_id][f"x{i}"] = x_i
return graph | a51ecbfd83fe08ae07f4e6952f2796eba9f79f0a | 2,750 |
import numpy as np
def cca(x,y):
""" canonical correlation analysis cca
wx, wy, r = cca(x,y) returns wx, wy two matrices which columns [:,i] correspond to the canonical weights (normalized eigenvectors) and a vector r containing the canonical correlations, all sorted in decreasing order. cca assumes as input matrices x,y of size l*m (time*nvar), and l*n, that are centered (no mean along 1st axis) within the function. cca returns an error if either x,y are not full rank."""
mx = x.shape[1]
my = y.shape[1]
l = x.shape[0] #needs to be the same for y
if l != y.shape[0]:
raise ValueError('Time dimension is not same length for x,y')
xrank = np.linalg.matrix_rank(x)
yrank = np.linalg.matrix_rank(y)
if mx > xrank:
raise ValueError('Matrix x is not full rank.')
if my > yrank:
raise ValueError("Matrix y is not full rank.")
#no mean
x = x - np.outer(x.mean(axis=0),np.ones(l)).transpose()
y = y - np.outer(y.mean(axis=0),np.ones(l)).transpose()
#covariance estimators
Sxy = np.dot(x.transpose(),y) / l
Sxx = np.dot(x.transpose(),x) / l
Syy = np.dot(y.transpose(),y) / l
B1 = np.dot(np.linalg.inv(Sxx),Sxy)
B2 = np.dot(np.linalg.inv(Syy),Sxy.transpose())
evalx, eigvx = np.linalg.eig(np.dot(B1,B2))
evaly, eigvy = np.linalg.eig(np.dot(B2,B1))
#normalize eigenvectors
eigvx = eigvx / np.outer(np.ones((mx,1)),np.sqrt((eigvx**2).sum(axis=0)))
eigvy = eigvy / np.outer(np.ones((my,1)),np.sqrt((eigvy**2).sum(axis=0)))
# eigenvalues should be the same in evalx and evaly
rx = np.sqrt(abs(evalx)) #correlation
ry = np.sqrt(abs(evaly))
#sort
ordargx = np.argsort(rx)[-1:-mx-1:-1] #decreasing order
ordargy = np.argsort(ry)[-1:-mx-1:-1]
rx = rx[ordargx]
ry = ry[ordargy]
eigvx = eigvx[:,ordargx]
eigvy = eigvy[:,ordargy]
if mx >= my:
r = rx
else:
r = ry
return eigvx, eigvy, r | f0d734fc927789d6ecca0685a85f727e48b334df | 2,751 |
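# Usage sketch for cca on random data (shapes are illustrative; the function centers the data itself):
rng = np.random.default_rng(0)
x = rng.standard_normal((200, 3))   # l = 200 samples, m = 3 variables
y = rng.standard_normal((200, 4))   # l = 200 samples, n = 4 variables
wx, wy, r = cca(x, y)
print(wx.shape, wy.shape, r.shape)  # -> (3, 3) (4, 3) (3,): min(m, n) canonical pairs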
from pathlib import Path
def get_assay_table_path(dataset: TemplateDataset, configuration: dict) -> Path:
"""Retrieve the assay table file name that determined as a valid assay based on configuration.
Specifically, defined in subsection 'ISA meta'
:param dataset: A dataset object including a metadata component with an attached ISA archive data asset
:type dataset: TemplateDataset
:param configuration: Standard assay parsed config
:type configuration: dict
:return: Path to the found assay table
:rtype: Path
"""
# retrieve study assay subtable from I_file
df = dataset.metadata.isa_investigation_subtables["STUDY ASSAYS"]
# get valid tuples of measurement and technology types from configuration
valid_measurements_and_technology_types: list[tuple[str, str]] = [
(entry["measurement"], entry["technology"])
for entry in configuration["Valid Study Assay Technology And Measurement Types"]
]
# check for matching rows based on configuration tuple
# one and only one row should match
# not very efficient, but table should never be too large for this to be of concern
matches: list[Path] = list()
for valid_combination in valid_measurements_and_technology_types:
log.debug(f"Searching subtable for {valid_combination}")
match_row = df.loc[
(
df[["Study Assay Measurement Type", "Study Assay Technology Type"]]
== valid_combination
).all(axis="columns")
]
match_file = [Path(val) for val in match_row["Study Assay File Name"].values]
matches.extend(match_file)
# guard, one and only one should match
assert (
len(matches) == 1
), f"One and only one should match, instead got these matches: {matches}"
# load assay table
assay_file_path = matches[0]
[assay_path] = [
f for f in dataset.metadata.fetch_isa_files() if f.name == assay_file_path.name
]
return assay_path | 57efa60ec4e3b6d16bfa39a28b9057aa864c2506 | 2,752 |
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def train_models(vae, emulator, em_lr, vae_lr, signal_train, dataset, val_dataset,
epochs, vae_lr_factor, em_lr_factor, vae_min_lr, em_min_lr, vae_lr_patience, em_lr_patience,
lr_max_factor, es_patience, es_max_factor):
"""
Function that train the models simultaneously
:param vae: Keras model object, the VAE
:param emulator: Keras model object, the emulator
:param em_lr: float, initial emulator learning rate
:param vae_lr: float, initial VAE learning rate
:param signal_train: numpy array of training signals
:param dataset: batches from training dataset
:param val_dataset: batches from validation dataset
:param epochs: max number of epochs to train for, early stopping may stop it before
:param vae_lr_factor: factor * old LR (learning rate) is the new LR for the VAE
:param em_lr_factor: factor * old LR (learning rate) is the new LR for the emulator
:param vae_min_lr: minimum allowed LR for VAE
:param em_min_lr: minimum allowed LR for emulator
:param vae_lr_patience: max number of epochs loss has not decreased for the VAE before reducing LR
:param em_lr_patience: max number of epochs loss has not decreased for the emulator before reducing LR
:param lr_max_factor: max_factor * current loss is the max acceptable loss, a larger loss means that the counter
is added to, when it reaches the 'patience', the LR is reduced
:param es_patience: max number of epochs loss has not decreased before early stopping
:param es_max_factor: max_factor * current loss is the max acceptable loss, a larger loss for either the VAE or the
emulator means that the counter is added to, when it reaches the 'patience', early stopping is applied
:return tuple, four lists of losses as they change with epoch for the VAE (training loss and validation loss)
and emulator (training and validation) in that order
"""
# initialize lists of training losses and validation losses
vae_loss = []
vae_loss_val = []
em_loss = []
em_loss_val = []
# Did the model loss plateau?
plateau_vae = False
plateau_em = False
vae_reduced_lr = 0 # epochs since last time lr was reduced
em_reduced_lr = 0 # epochs since last time lr was reduced
# compile the models
compile_VAE(vae, vae_lr)
compile_emulator(emulator, em_lr, signal_train)
@tf.function
def run_train_step(batch):
"""
Function that trains the VAE and emulator for one batch. Returns the losses
for that specific batch.
"""
params = batch[0]
signal = batch[1]
amp_raw = batch[2] # amplitudes, raw because we need to reshape
amplitudes = tf.expand_dims(amp_raw, axis=1) # reshape amplitudes
signal_amplitudes = tf.concat((signal, amplitudes), axis=1) # both signal and amplitude
with tf.GradientTape() as tape:
vae_pred = vae(signal) # apply VAE to signal
vae_batch_loss = vae.losses # get the loss
# back-propagate losses for the VAE
vae_gradients = tape.gradient(vae_batch_loss, vae.trainable_weights)
vae.optimizer.apply_gradients(zip(vae_gradients, vae.trainable_weights))
# same procedure for emulator
with tf.GradientTape() as tape:
em_pred = emulator(params)
loss_function = em_loss_fcn(signal_train)
em_batch_loss = loss_function(signal_amplitudes, em_pred)
em_gradients = tape.gradient(em_batch_loss, emulator.trainable_weights)
emulator.optimizer.apply_gradients(zip(em_gradients, emulator.trainable_weights))
return vae_batch_loss, em_batch_loss
# the training loop
for i in range(epochs):
epoch = int(i + 1)
print("\nEpoch {}/{}".format(epoch, epochs))
# reduce lr if necessary
if plateau_vae and vae_reduced_lr >= 5:
reduce_lr(vae, vae_lr_factor, vae_min_lr)
vae_reduced_lr = 0
if plateau_em and em_reduced_lr >= 5:
reduce_lr(emulator, em_lr_factor, em_min_lr)
em_reduced_lr = 0
vae_batch_losses = []
val_vae_batch_losses = []
em_batch_losses = []
val_em_batch_losses = []
# loop through the batches and train the models on each batch
for batch in dataset:
vae_batch_loss, em_batch_loss = run_train_step(batch)
vae_batch_losses.append(vae_batch_loss) # append VAE train loss for this batch
em_batch_losses.append(em_batch_loss) # append emulator train loss for this batch
# loop through the validation batches, we are not training on them but
# just evaluating and tracking the performance
for batch in val_dataset:
param_val = batch[0]
signal_val = batch[1]
amp_val = tf.expand_dims(batch[2], axis=1)
val_signal_amplitudes = tf.concat((signal_val, amp_val), axis=1)
val_em_batch_loss = emulator.test_on_batch(param_val, val_signal_amplitudes)
val_vae_batch_loss = vae.test_on_batch(signal_val, signal_val)
val_vae_batch_losses.append(val_vae_batch_loss)
val_em_batch_losses.append(val_em_batch_loss)
vae_loss_epoch = K.mean(tf.convert_to_tensor(vae_batch_losses)) # average VAE train loss over this epoch
em_loss_epoch = K.mean(tf.convert_to_tensor(em_batch_losses)) # average emulator train loss
print('VAE train loss: {:.4f}'.format(vae_loss_epoch))
print('Emulator train loss: {:.4f}'.format(em_loss_epoch))
# in case a loss is NaN
        # this is unusual, but not a big deal, just restart the training
# (otherwise the loss just stays NaN)
if np.isnan(vae_loss_epoch) or np.isnan(em_loss_epoch):
print("Loss is NaN, restart training")
break
# save each epoch loss to a list with all epochs
vae_loss.append(vae_loss_epoch)
em_loss.append(em_loss_epoch)
        vae_loss_epoch_val = np.mean(val_vae_batch_losses)  # average VAE validation loss over this epoch
        em_loss_epoch_val = np.mean(val_em_batch_losses)  # average emulator validation loss
vae_loss_val.append(vae_loss_epoch_val)
em_loss_val.append(em_loss_epoch_val)
print('VAE val loss: {:.4f}'.format(vae_loss_epoch_val))
print('Emulator val loss: {:.4f}'.format(em_loss_epoch_val))
# save weights
if epoch == 1: # save first epoch
vae.save('checkpoints/best_vae')
emulator.save('checkpoints/best_em')
elif em_loss_val[-1] < np.min(em_loss_val[:-1]): # performance is better than prev epoch
vae.save('checkpoints/best_vae')
emulator.save('checkpoints/best_em')
# early stopping?
keep_going = early_stop(es_patience, es_max_factor, vae_loss_val, em_loss_val)
if not keep_going:
break
# check if loss stopped decreasing
plateau_vae = plateau_check("vae", vae_lr_patience, lr_max_factor, vae_loss_val, em_loss_val)
plateau_em = plateau_check("emulator", em_lr_patience, lr_max_factor, vae_loss_val, em_loss_val)
vae_reduced_lr += 1
em_reduced_lr += 1
return vae_loss, vae_loss_val, em_loss, em_loss_val | a3301f178aade90cb5a5b441ccf7607e2f13c776 | 2,753 |
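# A minimal, self-contained sketch of the manual tf.GradientTape training pattern used above,
# applied to a toy Keras model on random data; the model, shapes, optimizer and learning rate
# are illustrative placeholders, not the VAE/emulator setup from train_models.
import numpy as np
import tensorflow as tf
toy_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(1),
])
toy_model.build((None, 8))
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = tf.keras.losses.MeanSquaredError()
@tf.function
def toy_train_step(x, y):
    # forward pass and loss inside the tape, then back-propagate manually
    with tf.GradientTape() as tape:
        pred = toy_model(x, training=True)
        loss = loss_fn(y, pred)
    grads = tape.gradient(loss, toy_model.trainable_weights)
    optimizer.apply_gradients(zip(grads, toy_model.trainable_weights))
    return loss
x = np.random.rand(32, 8).astype("float32")
y = np.random.rand(32, 1).astype("float32")
print(float(toy_train_step(x, y)))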
def get_next_term(cfg):
"""
Gets the next term to be added.
Args:
cfg: Expression config
"""
term = {}
if np.random.choice(['quantity', 'number'], p=[cfg.ratio, 1 - cfg.ratio]) == 'quantity':
idx = np.random.choice(range(len(cfg.quants)))
if cfg.reuse:
term['expression'] = cfg.quants[idx]
term['numerical'] = cfg.vals[idx]
term['estimation_difficulty'] = cfg.diffs[idx]
term['quantity_ids'] = [cfg.quantity_ids[idx]]
term['categories'] = [cfg.categories[idx]]
else:
term['expression'] = cfg.quants.pop(idx)
term['numerical'] = cfg.vals.pop(idx)
term['estimation_difficulty'] = cfg.diffs.pop(idx)
term['quantity_ids'] = [cfg.quantity_ids.pop(idx)]
term['categories'] = [cfg.categories.pop(idx)]
else:
if len(cfg.numbers) != 200:
# Where we're not using the default uniform sampling over numbers
idx = int(np.random.lognormal(3, 8) + abs(np.random.normal(0, 50))) + 1
term['expression'] = str(idx)
term['numerical'] = str(idx)
term['estimation_difficulty'] = 0
term['quantity_ids'] = []
term['categories'] = []
else:
idx = np.random.choice(range(len(cfg.numbers)))
term['expression'] = str(idx)
term['numerical'] = str(idx)
term['estimation_difficulty'] = 0
term['quantity_ids'] = []
term['categories'] = []
return term | edaf22a93ce1a0c51f4193c3ea022202c8bbaaef | 2,754 |
def demand_share_per_timestep_constraint_rule(backend_model, group_name, carrier, timestep, what):
"""
Enforces shares of demand of a carrier to be met by the given groups
of technologies at the given locations, in each timestep.
The share is relative to ``demand`` technologies only.
.. container:: scrolling-wrapper
.. math::
\\sum_{loc::tech::carrier \\in given\\_group} carrier_{prod}(loc::tech::carrier, timestep) \\leq
share \\times \\sum_{loc::tech:carrier \\in loc\\_techs\\_demand \\in given\\_locations}
carrier_{con}(loc::tech::carrier, timestep) for timestep \\in timesteps
"""
share = get_param(backend_model, 'group_demand_share_per_timestep_{}'.format(what), (carrier, group_name))
if share is None:
return return_noconstraint('demand_share_per_timestep', group_name)
else:
lhs_loc_tech_carriers, rhs_loc_tech_carriers = get_demand_share_lhs_and_rhs_loc_tech_carriers(
backend_model, group_name, carrier
)
lhs = sum(
backend_model.carrier_prod[loc_tech_carrier, timestep]
for loc_tech_carrier in lhs_loc_tech_carriers
)
rhs = share * -1 * sum(
backend_model.carrier_con[loc_tech_carrier, timestep]
for loc_tech_carrier in rhs_loc_tech_carriers
)
return equalizer(lhs, rhs, what) | 65cfc120a9a7a5f26b4057a21b7a38a32a335955 | 2,755 |
def b2str(data):
"""Convert bytes into string type."""
try:
return data.decode("utf-8")
except UnicodeDecodeError:
pass
try:
return data.decode("utf-8-sig")
except UnicodeDecodeError:
pass
try:
return data.decode("ascii")
except UnicodeDecodeError:
return data.decode("latin-1") | 05cbe6c8072e1bf24cc9ba7f8c8447d0fa7cbf7f | 2,756 |
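# Usage sketch for b2str with toy byte strings, showing the decode fallbacks:
print(b2str("héllo".encode("utf-8")))  # decodes as UTF-8
print(b2str(b"caf\xe9"))               # not valid UTF-8/ASCII, falls back to latin-1 -> 'café'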
def plotalphaerror(alphaarr,errorarr,errorlagarr):
""" This will plot the error with respect then alpha parameter for the
constraint.
"""
sns.set_style('whitegrid')
sns.set_context('notebook')
Nlag=errorlagarr.shape[-1]
nlagplot=4.
nrows=1+int(sp.ceil(float(Nlag)/(2*nlagplot)))
fig=plt.figure(figsize=(8,4*nrows),facecolor='w')
gs=gridspec.GridSpec(nrows,2)
axmain=plt.subplot(gs[0,:])
axlist=[plt.subplot(gs[int(sp.floor(float(i)/2.)+1),int(sp.mod(i,2))]) for i in range(2*(nrows-1))]
axmain.plot(alphaarr,errorarr)
axmain.set_xscale('log')
axmain.set_yscale('log')
axmain.set_title('Error From All Lags Added',fontsize=fs)
axmain.set_ylabel('Error',fontsize=fs)
axmain.set_xlabel(r'$\gamma$',fontsize=fs)
for iaxn,iax in enumerate(axlist):
strlist=[]
handlist=[]
for ilag in range(int(nlagplot)):
curlag=int(iaxn*nlagplot+ilag)
if curlag>=Nlag:
break
handlist.append(iax.plot(alphaarr,errorlagarr[:,curlag])[0])
strlist.append('Lag {0}'.format(curlag))
iax.set_xscale('log')
iax.set_yscale('log')
iax.set_title('Error From Lags',fontsize=fs)
iax.set_ylabel('Error',fontsize=fs)
iax.set_xlabel(r'$\gamma$',fontsize=fs)
iax.legend(handlist,strlist,loc='upper right',fontsize='large')
plt.tight_layout()
return(fig,axlist,axmain) | e87c771212a3e39b1f4d7a1a74fc18c1d2e85f87 | 2,757 |
def fill_space(space, dim, size, minval, maxval, factor):
"""Fill a dim-dimensional discrete space of ℕ^{size} with
some random hyperplane with values ranging from minval to
maxval. Returns a ℕ^{size} array. Changes space in-place."""
offsets=[np.array([0]*dim)]
return ndim_diamond_square_rec(space, dim, size, offsets, minval, maxval, factor) | 7744cb465438b40019f3edae9db04143b16d19b1 | 2,758 |
def fracorder_lowshelving_eastty(w1, w2, G1, G2, rB=None):
"""
Parameters
----------
w1: float
Lower corner frequency.
w2: float
Upper corner frequency.
G1: float
Target level at lower corner frequency in dB.
G2: float
Target level at upper corner frequency in dB.
rB: float
Gain per octave.
Returns
-------
z: array_like
Complex zeros in the Laplace domain.
p: array_like
Complex poles in the Laplace domain.
k: float
Gain.
"""
Gd = G1 - G2
n_eff = effective_order(w1, w2, Gd, rB)
n_int, n_frac = np.divmod(n_eff, 1)
n_int = int(n_int)
z = np.array([])
p = np.array([])
# Second-order sections (complex conjugate pole/zero pairs)
if n_int > 0:
alpha = complex_zp_angles(n_int, n_frac)
alpha = np.concatenate((alpha, -alpha))
z = w1 * np.exp(1j * alpha)
p = w2 * np.exp(1j * alpha)
# First-order section (real pole/zero)
if n_eff % 2 != 0:
s_lower, s_upper = real_zp(n_int, n_frac, w1, w2)
if n_int % 2 == 0:
z_real = s_lower
p_real = s_upper
elif n_int % 2 == 1:
z_real = s_upper
p_real = s_lower
z = np.append(z, z_real)
p = np.append(p, p_real)
return z, p, 1 | 379a87b024ff993c0abf0b75a482a8ab66a67546 | 2,759 |
def get_cookie_date(date):
"""
Return a date string in a format suitable for cookies (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date)
:param date: datetime object
:return: date string in cookie format
"""
return date.strftime("%a, %d %b %Y %H:%M:%S GMT") | f2b4d6decab72cf1f25754bc7e290f62eae92156 | 2,760 |
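# Usage sketch for get_cookie_date with an assumed expiry date:
from datetime import datetime, timedelta
expires = datetime(2024, 1, 1, 12, 0, 0) + timedelta(days=30)
print(get_cookie_date(expires))  # "Wed, 31 Jan 2024 12:00:00 GMT"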
def fill_block_with_call(newblock, callee, label_next, inputs, outputs):
"""Fill *newblock* to call *callee* with arguments listed in *inputs*.
    The returned values are unwrapped into variables in *outputs*.
The block would then jump to *label_next*.
"""
scope = newblock.scope
loc = newblock.loc
fn = ir.Const(value=callee, loc=loc)
fnvar = scope.make_temp(loc=loc)
newblock.append(ir.Assign(target=fnvar, value=fn, loc=loc))
# call
args = [scope.get_exact(name) for name in inputs]
callexpr = ir.Expr.call(func=fnvar, args=args, kws=(), loc=loc)
callres = scope.make_temp(loc=loc)
newblock.append(ir.Assign(target=callres, value=callexpr, loc=loc))
# unpack return value
for i, out in enumerate(outputs):
target = scope.get_exact(out)
getitem = ir.Expr.static_getitem(value=callres, index=i,
index_var=None, loc=loc)
newblock.append(ir.Assign(target=target, value=getitem, loc=loc))
# jump to next block
newblock.append(ir.Jump(target=label_next, loc=loc))
return newblock | 76e9edbeca59a75d9854e6ffa2d02658da7511ab | 2,761 |
from typing import Dict
from typing import List
import sys
import json
def terraform_write_variables(configs: Dict, variables_to_exclude: List) -> str:
"""Write out given config object as a Terraform variables JSON file.
Persist variables to Terraform state directory. These variables are used
on apply / plan, and are required for deprovisioning.
"""
det_version = configs.get("det_version")
if not det_version or not isinstance(det_version, str):
print("ERROR: Determined version missing or invalid")
sys.exit(1)
# Add GCP-friendly version key to configs. We persist this since it's used
# across the cluster lifecycle: to name resources on provisioning, and to
# filter for the master and dynamic agents on deprovisioning.
configs["det_version_key"] = det_version.replace(".", "-")[0:8]
# Track the default zone in configuration variables. This is needed
# during deprovisioning.
if "zone" not in configs:
configs["zone"] = f"{configs['region']}-b"
vars_file_path = get_terraform_vars_file_path(configs)
tf_vars = {k: configs[k] for k in configs if k not in variables_to_exclude}
with open(vars_file_path, "w") as f:
json.dump(tf_vars, f)
return vars_file_path | f8ab21569328e0f1a9a30a177ad30ec2ad44b62f | 2,762 |
def data_context_service_interface_pointuuid_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get(uuid, upper_frequency, lower_frequency): # noqa: E501
"""data_context_service_interface_pointuuid_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get
returns tapi.photonic.media.FrequencyConstraint # noqa: E501
:param uuid: Id of service-interface-point
:type uuid: str
:param upper_frequency: Id of available-spectrum
:type upper_frequency: int
:param lower_frequency: Id of available-spectrum
:type lower_frequency: int
:rtype: TapiPhotonicMediaFrequencyConstraint
"""
return 'do some magic!' | 412f0b48a050e9201e6d52450a04a6bef0f4a0f3 | 2,763 |
def image_to_string(filename):
"""Generate a string representation of the image at the given path, for embedding in code."""
image = pyglet.image.load(filename)
data = image.get_data('LA', 16)
s = ''
for x in data:
s += "\\x%02x" % (ord(x))
return s | 19dea26d51dd29449759c5e1a3b4c9fc098702f3 | 2,764 |
def mpf_connectome(
mc, num_sampled, max_depth, args_dict, clt_start=10, sr=0.01, mean_estimate=False
):
"""Perform mpf statistical calculations on the mouse connectome."""
args_dict["max_depth"] = max_depth
args_dict["total_samples"] = num_sampled[0]
args_dict["static_verbose"] = False
args_dict["clt_start"] = clt_start
args_dict["mean_estimate"] = mean_estimate
if max_depth > 1:
sr = None
if mean_estimate is True:
sr = None
cp = CombProb(
mc.num_a,
num_sampled[0],
mc.num_senders,
mc.num_b,
num_sampled[1],
MatrixConnectivity.static_expected_connections,
verbose=True,
subsample_rate=sr,
**args_dict
)
result = {
"expected": cp.expected_connections(),
"total": cp.get_all_prob(),
"each_expected": {k: cp.expected_total(k) for k in range(num_sampled[0] + 1)},
}
return result | 6ae8ddb7c3355ddbf072a5bf97f57fe4e13b500e | 2,765 |
def valuedict(keys, value, default):
"""
Build value dictionary from a list of keys and a value.
Parameters
----------
keys: list
The list of keys
value: {dict, int, float, str, None}
A value or the already formed dictionary
default: {int, float, str}
A default value to set if no value
Returns
-------
dict
A dictionary
Notes
-----
This standalone and generic function is only required by plotters.
"""
if isinstance(value, dict):
return {key: value.get(key, default) for key in keys}
else:
return dict.fromkeys(keys, value or default) | 44283bac3be75c3569e87a890f507f7cff4161b6 | 2,766 |
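# Usage sketch for valuedict with toy keys and values:
keys = ["a", "b", "c"]
print(valuedict(keys, {"a": 1}, 0))  # {'a': 1, 'b': 0, 'c': 0}
print(valuedict(keys, 5, 0))         # {'a': 5, 'b': 5, 'c': 5}
print(valuedict(keys, None, 2))      # {'a': 2, 'b': 2, 'c': 2}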
async def chunks(request):
"""A handler that sends chunks at a slow pace.
The browser will download the page over the range of 2 seconds,
but only displays it when done. This e.g. allows streaming large
files without using large amounts of memory.
"""
async def iter():
yield "<html><head></head><body>"
yield "Here are some chunks dripping in:<br>"
for i in range(20):
await asgineer.sleep(0.1)
yield "CHUNK <br>"
yield "</body></html>"
return 200, {"content-type": "text/html"}, iter() | 590fe83c5ee53c603b973063e0e077ab87a220ae | 2,767 |
import os
import subprocess
import json
def start_vitess():
"""This is the main start function."""
topology = vttest_pb2.VTTestTopology()
keyspace = topology.keyspaces.add(name='user')
keyspace.shards.add(name='-80')
keyspace.shards.add(name='80-')
keyspace = topology.keyspaces.add(name='lookup')
keyspace.shards.add(name='0')
vttop = os.environ['VTTOP']
args = [os.path.join(vttop, 'py/vttest/run_local_database.py'),
'--port', '12345',
'--proto_topo', text_format.MessageToString(topology,
as_one_line=True),
'--web_dir', os.path.join(vttop, 'web/vtctld'),
'--schema_dir', os.path.join(vttop, 'examples/demo/schema')]
sp = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# This load will make us wait for vitess to come up.
json.loads(sp.stdout.readline())
return sp | 3643c780118aaa72485b059a34891d938aff7328 | 2,768 |
def get_zero_ranges(*args):
"""
get_zero_ranges(zranges, range) -> bool
Return set of ranges with zero initialized bytes. The returned set
includes only big zero initialized ranges (at least >1KB). Some zero
initialized byte ranges may be not included. Only zero bytes that use
the sparse storage method (STT_MM) are reported.
@param zranges: pointer to the return value. cannot be NULL (C++:
rangeset_t *)
@param range: the range of addresses to verify. can be NULL - means
all ranges (C++: const range_t *)
@return: true if the result is a non-empty set
"""
return _ida_bytes.get_zero_ranges(*args) | bd95dbb237ca0b2934e8653b1198d10d25abc553 | 2,769 |
def fista_step(L, Wd, X, alpha, last_Z):
"""
Calculates the next sparse code for the FISTA algorithm
Dimension notation:
B - Number of samples. Usually number of patches in image times batch size
K - Number of atoms in dictionary
d - Dimensionality of atoms in dictionary
:param X: Input - Signal to find sparse coding against. Dimensions: d X B
:param Wd: Dictionary - Tensor of atoms we want to get a sparse linear combination of. Dimensions: d X K
:param alpha: Float. Sparsity weight
:param L: Float. Largest eigenvalue in Wd
:param last_Z: Sparse code from previous step. Dimensions: K x B
:return: Z: linear coefficients for Sparse Code solution. Dimensions: K x B
"""
quantization_distance = Wd.mm(last_Z) - X.to(Wd.device)
normalized_dictionary = Wd.t() / L
normalized_quantization_projection = normalized_dictionary.mm(quantization_distance)
cur_Z = last_Z - normalized_quantization_projection
cur_Z = shrink_function(cur_Z, alpha / L)
return cur_Z | 6e237a01e631d08efcc425068c646b792d984cdd | 2,770 |
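# Standalone sketch of the proximal-gradient (ISTA-style) update that fista_step implements,
# assuming shrink_function is the usual elementwise soft-thresholding operator; the dictionary,
# signals and sparsity weight below are toy placeholders.
import torch
def soft_threshold(Z, theta):
    # assumed form of shrink_function: elementwise soft-thresholding
    return torch.sign(Z) * torch.clamp(torch.abs(Z) - theta, min=0.0)
d, K, B = 16, 32, 4
Wd = torch.randn(d, K)
X = torch.randn(d, B)
alpha = 0.1
L = torch.linalg.eigvalsh(Wd.t() @ Wd).max().item()  # largest eigenvalue of Wd^T Wd
Z = torch.zeros(K, B)
for _ in range(50):
    residual = Wd @ Z - X  # quantization distance
    Z = soft_threshold(Z - (Wd.t() @ residual) / L, alpha / L)
print(Z.abs().gt(0).float().mean())  # fraction of non-zero coefficients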
def get_and_validate_certs_for_replacement(
default_cert_location,
default_key_location,
default_ca_location,
new_cert_location,
new_key_location,
new_ca_location):
"""Validates the new certificates for replacement.
This function validates the new specified certificates for replacement,
based on the new certificates specified and the current ones. E.g. if
    only a new certificate and key were specified, then it will validate them
with the current CA.
"""
cert_filename, key_filename = get_cert_and_key_filenames(
new_cert_location, new_key_location,
default_cert_location, default_key_location)
ca_filename = get_ca_filename(new_ca_location, default_ca_location)
validate_certificates(cert_filename, key_filename, ca_filename)
return cert_filename, key_filename, ca_filename | 9a4a3b46609fc1e5e7cc525b84397b9adba86b32 | 2,771 |
def build_model(cfg):
"""
    Build the whole model, defined by `cfg.model.name`.
"""
name = cfg.model.name
return META_ARCH_REGISTRY.get(name)(cfg) | b106eca0f110007cb852dce9760e5e0ee08940a8 | 2,772 |
def download_n_parse_3k(url):
"""
Gets the article's metadata
Args:
url: The article's URL
"""
article3k = Article(url)
try:
article3k.download()
article3k.parse()
except Exception:
print(f"Download or Parse:\t{url}")
return
return article3k.text | fa63fc7c03b63c5e08004d61488074be538c714b | 2,773 |
def crop_to_square(img, target_size=None):
"""
Takes numpy array img and converts it to a square by trimming
:param img: np.array representing image
:param target_size: optionally specify target size. If None, will return min(l, w) x min(l, w)
:return: np.array
"""
l, w = img.shape
img_copy = img.copy()
if l > w:
delta = l - w
cropped_img = img_copy[delta // 2: -delta + delta // 2, :]
elif l < w:
delta = w - l
cropped_img = img_copy[:, delta // 2: -delta + delta // 2]
else:
cropped_img = img_copy
if target_size:
current_size = cropped_img.shape[0] # should be a square
center = max(target_size, current_size) // 2
offset_min = center - min(target_size, current_size) // 2
offset_max = offset_min + min(target_size, current_size)
if target_size > current_size:
new_image = np.zeros((target_size, target_size))
new_image[offset_min:offset_max, offset_min:offset_max] = cropped_img
cropped_img = new_image.copy()
else:
cropped_img = cropped_img[offset_min:offset_max, offset_min:offset_max]
return np.asarray(cropped_img, dtype=np.float32) | 2ad566c7d0a0c719ff207bc06d33e70208a7a03f | 2,774 |
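# Quick shape check for crop_to_square on a toy array:
import numpy as np
img = np.random.rand(120, 100).astype(np.float32)
print(crop_to_square(img).shape)                   # (100, 100)
print(crop_to_square(img, target_size=64).shape)   # (64, 64), center crop
print(crop_to_square(img, target_size=128).shape)  # (128, 128), zero padded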
def reboot(name, path=None):
"""
Reboot a container.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt 'minion' lxc.reboot myvm
"""
ret = {"result": True, "changes": {}, "comment": "{0} rebooted".format(name)}
does_exist = exists(name, path=path)
if does_exist and (state(name, path=path) == "running"):
try:
stop(name, path=path)
except (SaltInvocationError, CommandExecutionError) as exc:
ret["comment"] = "Unable to stop container: {0}".format(exc)
ret["result"] = False
return ret
if does_exist and (state(name, path=path) != "running"):
try:
start(name, path=path)
except (SaltInvocationError, CommandExecutionError) as exc:
ret["comment"] = "Unable to stop container: {0}".format(exc)
ret["result"] = False
return ret
ret["changes"][name] = "rebooted"
return ret | 2519f9ad5434dbb9ff0a48f5280483829584ebb1 | 2,775 |
import networkx
def find(domain):
""" Finds connected domains within a domain.
A domain is defined to be a connected region of lattice
points, subject to periodic boundary conditions.
Parameters
----------
domain : :py:class:`~fieldkit.mesh.Domain`
The set of nodes to seek connected domains in.
Returns
-------
tuple
A tuple of all :py:class:`~fieldkit.mesh.Domain` objects
identified within the `domain`. At most, there is only
one domain returned, but many can be identified if the points
in the `domain` are highly disconnected.
Notes
-----
The connected domains are determined using a graph-based approach,
which requires the `networkx` package. Performance is generally good,
but the algorithm may struggle for large numbers of nodes or domains.
"""
comps = networkx.connected_components(domain.graph)
return tuple([Domain(domain.mesh,list(c)) for c in comps]) | 3ea2128f84104686be88d359cda3df2554013f41 | 2,776 |
def url_to_license(url):
"""Given a URL, return the license as a license/version tuple"""
(scheme, netloc, path, *remainder) = urlparse(url)
path_parts = path.split('/')
if len(path_parts) < 4:
raise LicenseException("Did not get 4 path segments, probably not a CC license URL")
license = path_parts[2].upper() # First is '', because it starts with a leading /
version = path_parts[3]
# Handle the PD licenses as special-cases
if license == 'ZERO':
license = 'CC0'
version = '1.0'
if license == 'MARK':
license = 'PDM'
version = '1.0'
if license not in LICENSE_LIST:
raise LicenseException("License fragment %s was not a valid license", license)
return (license, version) | e6ae2d67f1dbd02c0fe0885231dbac4ae112b0d3 | 2,777 |
def dsystem_dt(request):
"""Test systems for test_discrete"""
# SISO state space systems with either fixed or unspecified sampling times
sys = rss(3, 1, 1)
# MIMO state space systems with either fixed or unspecified sampling times
A = [[-3., 4., 2.], [-1., -3., 0.], [2., 5., 3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.]]
C = [[4., 2., -3.], [1., 4., 3.]]
D = [[-2., 4.], [0., 1.]]
dt = request.param
systems = {'sssiso': StateSpace(sys.A, sys.B, sys.C, sys.D, dt),
'ssmimo': StateSpace(A, B, C, D, dt),
'tf': TransferFunction([2, 1], [2, 1, 1], dt)}
return systems | faaf22165fc147955b69b1d983fbc37dafb34772 | 2,778 |
import types
def update_attributes(dsFolder: types.GirderModel, data: dict):
"""Upsert or delete attributes"""
crud.verify_dataset(dsFolder)
validated: AttributeUpdateArgs = crud.get_validated_model(AttributeUpdateArgs, **data)
attributes_dict = fromMeta(dsFolder, 'attributes', {})
for attribute_id in validated.delete:
attributes_dict.pop(str(attribute_id), None)
for attribute in validated.upsert:
attributes_dict[str(attribute.key)] = attribute.dict(exclude_none=True)
    upserted_len = len(validated.upsert)
    deleted_len = len(validated.delete)
if upserted_len or deleted_len:
update_metadata(dsFolder, {'attributes': attributes_dict})
return {
"updated": upserted_len,
"deleted": deleted_len,
} | d58dfecf68822d4b45688ea16ec39e97e999d458 | 2,779 |
def machado_et_al_2009_matrix_protanomaly(severity):
"""Retrieve a matrix for simulating anomalous color vision.
:param cvd_type: One of "protanomaly", "deuteranomaly", or "tritanomaly".
:param severity: A value between 0 and 100.
:returns: A 3x3 CVD simulation matrix as computed by Machado et al
(2009).
These matrices were downloaded from:
http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html
which is supplementary data from :cite:`Machado-CVD`.
If severity is a multiple of 10, then simply returns the matrix from that
webpage. For other severities, performs linear interpolation.
"""
MACHADO_ET_AL_MATRIX_protanomaly = np.array(
(
(
[1.000000, 0.000000, -0.000000],
[0.000000, 1.000000, 0.000000],
[-0.000000, -0.000000, 1.000000],
),
(
[0.856167, 0.182038, -0.038205],
[0.029342, 0.955115, 0.015544],
[-0.002880, -0.001563, 1.004443],
),
(
[0.734766, 0.334872, -0.069637],
[0.051840, 0.919198, 0.028963],
[-0.004928, -0.004209, 1.009137],
),
(
[0.630323, 0.465641, -0.095964],
[0.069181, 0.890046, 0.040773],
[-0.006308, -0.007724, 1.014032],
),
(
[0.539009, 0.579343, -0.118352],
[0.082546, 0.866121, 0.051332],
[-0.007136, -0.011959, 1.019095],
),
(
[0.458064, 0.679578, -0.137642],
[0.092785, 0.846313, 0.060902],
[-0.007494, -0.016807, 1.024301],
),
(
[0.385450, 0.769005, -0.154455],
[0.100526, 0.829802, 0.069673],
[-0.007442, -0.022190, 1.029632],
),
(
[0.319627, 0.849633, -0.169261],
[0.106241, 0.815969, 0.077790],
[-0.007025, -0.028051, 1.035076],
),
(
[0.259411, 0.923008, -0.182420],
[0.110296, 0.804340, 0.085364],
[-0.006276, -0.034346, 1.040622],
),
(
[0.203876, 0.990338, -0.194214],
[0.112975, 0.794542, 0.092483],
[-0.005222, -0.041043, 1.046265],
),
(
[0.152286, 1.052583, -0.204868],
[0.114503, 0.786281, 0.099216],
[-0.003882, -0.048116, 1.051998],
),
),
dtype=np.float64,
)
assert 0 <= severity <= 100
fraction = severity % 10
low = int(severity - fraction) // 10
high = low + 1
# assert low <= severity <= high
low_matrix = MACHADO_ET_AL_MATRIX_protanomaly[low]
if severity == 100:
# Don't try interpolating between 100 and 110, there is no 110...
return low_matrix
high_matrix = MACHADO_ET_AL_MATRIX_protanomaly[high]
return (1 - fraction / 10.0) * low_matrix + fraction / 10.0 * high_matrix | a99a07a7f447fc741ee1a4bd239bda5ed8079e6b | 2,780 |
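# Usage sketch applying the returned 3x3 matrix per pixel to a toy image array
# (assumed to hold linear-RGB values in [0, 1]):
import numpy as np
m = machado_et_al_2009_matrix_protanomaly(55)   # linearly interpolated between severity 50 and 60
image = np.random.rand(4, 4, 3)                  # stand-in for a linear-RGB image
simulated = np.einsum("ij,hwj->hwi", m, image)   # apply the matrix to every pixel
print(m.shape, simulated.shape)                  # (3, 3) (4, 4, 3)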
def derivative_compliance(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam):
""" calculates the derivative of the compliance function.
Args:
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
E (:obj:`float`): Elastic modulus.
v (:obj:`float`): Poisson's ratio.
rho (:obj:`float`): Density.
alpha (:obj:`float`): Damping coefficient proportional to mass.
beta (:obj:`float`): Damping coefficient proportional to stiffness.
omega_par (:obj:`float`): 2 * pi * frequency
p_par (:obj:`float`): Penalization power to stiffness.
q_par (:obj:`float`): Penalization power to mass.
x_min_m (:obj:`float`): Minimum relative densities to mass.
x_min_k (:obj:`float`): Minimum relative densities to stiffness.
xval (:obj:`numpy.array`): Indicates where there is mass.
disp_vector (:obj:`numpy.array`): Displacement vector.
lam (:obj:`float`): Lambda parameter.
Returns:
Derivative of the compliance function.
"""
deriv_f = np.empty((len(connect), 1))
dofs = 2
ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2],
dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T
for el in range(len(connect)):
Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho)
ind = ind_dofs[el, :]
dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke
dCe = alpha * Me + beta * dKe
if xval[el]>0.1:
dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me
else:
dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me
dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe
deriv_f[el, 0] = (-lam *(disp_vector[ind].reshape(1, 8)@dKed@disp_vector[ind].reshape(8, 1)))[0,0].real
return deriv_f | 2d6e9467c410c3960fb149cb20a306cff85374e5 | 2,781 |
def test_status_string(app, authed_client, status_code, status):
"""The status string should populate itself based on status code."""
@app.route('/test_endpoint')
def test_endpoint():
return flask.jsonify('test'), status_code
response = authed_client.get('/test_endpoint')
assert response.get_json() == {'response': 'test', 'status': status} | 2af688a5f0db42ae2ad13eae86a09a04b3745b07 | 2,782 |
def lanc(numwt, haf):
"""Generates a numwt + 1 + numwt lanczos cosine low pass filter with -6dB
(1/4 power, 1/2 amplitude) point at haf
Parameters
----------
numwt : int
number of points
haf : float
        frequency (in 'cpi') of the -6dB point; 'cpi' is cycles per interval.
        For hourly data, cpi is cph (cycles per hour).
Examples
--------
>>> from datetime import datetime
>>> import matplotlib.pyplot as plt
>>> t = np.arange(500) # Time in hours.
>>> h = 2.5 * np.sin(2 * np.pi * t / 12.42)
>>> h += 1.5 * np.sin(2 * np.pi * t / 12.0)
>>> h += 0.3 * np.random.randn(len(t))
>>> wt = lanc(96+1+96, 1./40)
>>> low = np.convolve(wt, h, mode='same')
>>> high = h - low
>>> fig, (ax0, ax1) = plt.subplots(nrows=2)
>>> _ = ax0.plot(high, label='high')
>>> _ = ax1.plot(low, label='low')
>>> _ = ax0.legend(numpoints=1)
>>> _ = ax1.legend(numpoints=1)
"""
summ = 0
numwt += 1
wt = np.zeros(numwt)
# Filter weights.
ii = np.arange(numwt)
wt = 0.5 * (1.0 + np.cos(np.pi * ii * 1. / numwt))
ii = np.arange(1, numwt)
xx = np.pi * 2 * haf * ii
wt[1:numwt + 1] = wt[1:numwt + 1] * np.sin(xx) / xx
summ = wt[1:numwt + 1].sum()
xx = wt.sum() + summ
wt /= xx
return np.r_[wt[::-1], wt[1:numwt + 1]] | bcfc9dac83fa517a7a564b1d58aba4e4d901b828 | 2,783 |
from ete3 import NCBITaxa
def normalize_target_taxa(target_taxa):
"""
    Receives a list of taxon IDs and/or taxon names and returns a set of expanded taxid numbers
"""
ncbi = NCBITaxa()
expanded_taxa = set()
for taxon in target_taxa:
taxid = ""
try:
taxid = int(taxon)
except ValueError:
taxid = ncbi.get_name_translator([taxon])[taxon][0]
else:
taxon = ncbi.get_taxid_translator([taxid])[taxid]
species = ncbi.get_descendant_taxa(taxid, collapse_subspecies=False)
for sp in species:
expanded_taxa.add(sp)
return expanded_taxa | cc88ba730495fea54bbab86eb83521d88da692fc | 2,784 |
import types
def filled(a, value=None):
"""a as a contiguous numeric array with any masked areas replaced by value
if value is None or the special element "masked", get_fill_value(a)
is used instead.
If a is already a contiguous numeric array, a itself is returned.
filled(a) can be used to be sure that the result is numeric when
passing an object a to other software ignorant of MA, in particular to
numeric itself.
"""
if isinstance(a, MaskedArray):
return a.filled(value)
elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, types.DictType):
return numeric.array(a, 'O')
else:
return numeric.array(a) | aabc4772cc4f318d794ef509f22a54116c14764c | 2,785 |
import configparser
def get_generic_global(section, prop):
"""Generic getter for getting a property"""
if section is None:
raise GlobalPropertyError("Section cannot be null!")
elif prop is None:
raise GlobalPropertyError("Property cannot be null!")
global_conf = configparser.ConfigParser()
global_conf.read(DTF_GLOBAL_CONFIG)
try:
return global_conf.get(section, prop)
except configparser.NoSectionError:
raise GlobalPropertyError("Section not found: %s" % section)
except configparser.NoOptionError:
raise GlobalPropertyError("Property not found: %s" % prop) | 09e87d4f915e0591193a57505ad9a8e5d28d271e | 2,786 |
def get_mspec_descriptors(mod, mod_lim=20, freq_lim=8000, n_mod_bin=20, n_freq_bin=20):
"""
Parameters
----------
mod : 2D Numpy array
Modulation spectrogram
mod_lim : int
Upper limit of modulation frequency. The default is 20.
freq_lim : int
Upper limit of frequency. The default is 8000.
n_mod_bin : int, optional
Number of modulation frequency bins. The default is 20.
n_freq_bin : int, optional
Number of frequency bins. The default is 20.
Returns
-------
Modulation spectrogram descriptors: 1D numpy array
"""
n_fea = 8 #Number of features to compute
mod = 10**(mod/10) #Convert energies in dB to original values
n_mod_bin = n_mod_bin #Number of modulation frequency bins
n_freq_bin = n_freq_bin #Number of conventional frequency bins
mod = np.reshape(mod,(n_freq_bin, n_mod_bin)) #Reshape psd matrix
ds_mod = np.empty((n_mod_bin,n_fea))*np.nan #Initialize a matrix to store descriptors in all bins
ds_freq = np.empty((n_freq_bin,n_fea))*np.nan
def get_subband_descriptors(psd, freq_range):
#Initialize a matrix to store features
ft=np.empty((8))*np.nan
lo,hi = freq_range[0], freq_range[-1]#Smallest and largest value of freq_range
#Centroid
ft[0] = np.sum(psd*freq_range)/np.sum(psd)
#Entropy
ft[1]=-np.sum(psd*np.log(psd))/np.log(hi-lo)
#Spread
ft[2]=np.sqrt(np.sum(np.square(freq_range-ft[0])*psd)/np.sum(psd))
#skewness
ft[3]=np.sum(np.power(freq_range-ft[0],3)*psd)/(np.sum(psd)*ft[2]**3)
#kurtosis
ft[4]=np.sum(np.power(freq_range-ft[0],4)*psd)/(np.sum(psd)*ft[2]**4)
#flatness
arth_mn=np.mean(psd)/(hi-lo)
geo_mn=np.power(np.exp(np.sum(np.log(psd))),(1/(hi-lo)))
ft[5]=geo_mn/arth_mn
#crest
ft[6]=np.max(psd)/(np.sum(psd)/(hi-lo))
#flux
ft[7]=np.sum(np.abs(np.diff(psd)))
return ft
#Loop through all modulation frequency bands
freq_bin_width = freq_lim/n_freq_bin
mod_bin_width = mod_lim/n_mod_bin
freq = np.arange(0,freq_lim,freq_bin_width)+freq_bin_width/2 #List of center values of frequency bins
mod_freq = np.arange(0,mod_lim,mod_bin_width)+mod_bin_width/2 #List of center values of modulation frequency bins
#Calculate features for each modulation frequency bin
for mod_band in np.arange(n_mod_bin):
ds_mod[mod_band,:] = get_subband_descriptors(mod[:,mod_band], freq)
#Calculate features for each conventional frequency bin
for freq_band in np.arange(n_freq_bin):
ds_freq[freq_band,:] = get_subband_descriptors(mod[freq_band,:], mod_freq)
return np.concatenate((np.reshape(ds_mod, (8*n_mod_bin)), np.reshape(ds_freq, (8*n_freq_bin))),axis=None) | 0a5fd0739d46ec6fa4539d96ed406dfcf540f2d2 | 2,787 |
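# Usage sketch for get_mspec_descriptors with a toy 20x20 modulation spectrogram in dB:
import numpy as np
rng = np.random.default_rng(0)
mod_db = 10 * np.log10(rng.uniform(1e-3, 1.0, size=(20, 20)))  # toy modulation spectrogram in dB
features = get_mspec_descriptors(mod_db)
print(features.shape)  # (320,) = 8 descriptors x (20 modulation bins + 20 frequency bins)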
def mustachify(
file,
mustache_file="mustache.png",
rotation=True,
perspective=False, # TODO add perspective transformation
modelsize="small",
):
"""
Pastes a mustache on each face in the image file
:param file: image file name or file object to load
:param mustache_file: file pointer to mustache png
:return: PIL image object with mustache on each face
"""
if modelsize not in ("small", "large"):
raise ValueError("Landmarks model should be \"small\" or \"large\"")
# load file to img
img_array = load_image_file(file)
# get landmarks of all faces
locations = face_locations(img_array, number_of_times_to_upsample=1)
landmarks = face_landmarks(img_array, face_locations=None, model=modelsize)
# create PIL object for img and drawing
img = Image.fromarray(img_array)
draw = ImageDraw.Draw(img)
# load mustache
mustache = Image.open(mustache_file)
# loop over each face
for landmark in landmarks:
mask = rotate(img=mustache, landmark=landmark)
mask = scale(img=mask, landmark=landmark, scale=1.3)
mask = removePadding(mask)
if modelsize=="small":
nose = landmark["nose_tip"][0]
elif modelsize=="large":
nose = landmark["nose_tip"][2]
midpoint = (round(mask.size[0]/2), round(mask.size[1]/2.8))
position = (nose[0] - midpoint[0], nose[1] - midpoint[1])
img.paste(mask, position, mask)
return img | f1ae14f813b15a685c5c8e2d8c0a8e38629284d5 | 2,788 |
import subprocess
def get_available_gpus():
"""Return a list of available GPUs with their names"""
cmd = 'nvidia-smi --query-gpu=name --format=csv,noheader'
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
if process.returncode == 0:
return stdout.decode().splitlines()
return [] | 9ef81b08aad25e0109604cdd538b5c9e6902c4d1 | 2,789 |
def register_image_array(img, img_name, img_desc, project_id, sample_id, usr, pwd, host, port=4064):
"""
This function imports a 5D (time-points, channels, x, y, z) numpy array of an image
to an omero server using the OMERO Python bindings
Example:
register_image_array(hypercube, "tomo_0", "this is a tomogram",
"project_x", "sample_y", "joe_usr", "joe_pwd", "192.168.2.2")
Args:
        img (numpy.array): 5D numpy array (time-points, channels, x, y, z) to register
        img_name (string): name for the new image in OMERO
        img_desc (string): description for the new image
project_id (string): the corresponding project ID in openBIS server
sample_id (string): the corresponding sample ID in openBIS server
usr (string): username for the OMERO server
pwd (string): password for the OMERO server
host (string): OMERO server address
port (int): OMERO server port
Returns:
int: newly generated omero ID for registered image array
"""
img_id = -1
save_flag = 0
conn = omero_connect(usr, pwd, host, str(port))
for project in conn.getObjects("Project"):
if project.getName() == project_id:
for dataset in project.listChildren():
if dataset.getName() == sample_id:
img_id = create_array(conn, img, img_name, img_desc, dataset)
save_flag = 1
break
if save_flag == 1:
break
return int(img_id) | 2dec56554280a45e2421192e95896683743939aa | 2,790 |
def count_symbols (val):
""" Counts the number of symbols in a string.
    A symbol is defined as any character that is not a lowercase letter, an uppercase letter, or a digit.
Args:
val (str): The string to count symbols in.
Returns:
int: The number of symbols in the string.
"""
return sum(1 for c in val if is_symbol(c)) | 6c96454cf6a508942719458ba2c28644ffaa6d7d | 2,791 |
import math
def arctan(dy, dx):
""" Returns the arctan of angle between 0 and 2*pi """
arc_tan = math.atan2(dy, dx)
if arc_tan < 0:
arc_tan = arc_tan + 2 * np.pi
return arc_tan | e3b8d35f97b25983a9ba05f306a45c2852ae2b42 | 2,792 |
def meshsize(mesh: dolfin.Mesh,
kind: str = "cell") -> dolfin.MeshFunction:
"""Return the local meshsize `h` as a `MeshFunction` on cells or facets of `mesh`.
The local meshsize is defined as the length of the longest edge of the cell/facet.
kind: "cell" or "facet"
"""
if kind not in ("cell", "facet"):
raise ValueError(f"`kind` must be 'cell' or 'facet', got {type(kind)} with value {kind}")
dim = mesh.topology().dim()
if kind == "cell":
entities = dolfin.cells(mesh)
fdim = dim
else: # kind == "facet":
entities = dolfin.facets(mesh)
fdim = dim - 1
f = dolfin.MeshFunction("double", mesh, fdim)
f.set_all(0.0)
if kind == "cell":
for cell in entities:
f[cell] = cell.h()
else: # facets have no `.h`
def vertices_as_array(entity):
return [vtx.point().array() for vtx in dolfin.vertices(entity)]
def euclidean_distance(vtxpair):
assert len(vtxpair) == 2
dx = vtxpair[0] - vtxpair[1]
return np.sqrt(np.sum(dx**2))
for entity in entities:
edges = dolfin.edges(entity)
vtxpairs = [vertices_as_array(edge) for edge in edges]
edge_lengths = [euclidean_distance(vtxpair) for vtxpair in vtxpairs]
f[entity] = max(edge_lengths)
return f | 8cd66227ab46acb85c9ea8907e0164eee142a9ae | 2,793 |
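# Usage sketch for meshsize, assuming a FEniCS/DOLFIN installation:
import dolfin
mesh = dolfin.UnitSquareMesh(8, 8)
h = meshsize(mesh, kind="cell")
print(h.array().max())  # largest local meshsize over all cells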
def dep_graph_parser_parenthesis(edge_str):
"""Given a string representing a dependency edge in the 'parenthesis'
format, return a tuple of (parent_index, edge_label, child_index).
Args:
edge_str: a string representation of an edge in the dependency tree, in
the format edge_label(parent_word-parent_index, child_word-child_index)
Returns:
tuple of (parent_index, edge_label, child_index)
"""
tokens = edge_str.split("(")
label = tokens[0]
tokens = tokens[1].split(", ")
parent = int(tokens[0].split("-")[-1]) - 1
child = int(",".join(tokens[1:]).split("-")[-1][:-1]) - 1
return (parent, label, child) | a3f96ebec6fdcb00f3f64ea02e91147df16df196 | 2,794 |
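# Example parses of Stanford-style dependency edge strings:
print(dep_graph_parser_parenthesis("nsubj(ran-2, dog-1)"))  # (1, 'nsubj', 0)
print(dep_graph_parser_parenthesis("det(dog-3, the-2)"))    # (2, 'det', 1)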
def measure_crypts_props_no_paneth(crypt_objs, label_mask, edu_objs, df, row, col, fld):
"""Measure crypt level properties for all crypts in image
Args:
crypt_objs (array): labeled cell objects (e.g. nuclei segmentation)
label_mask (array): labeled crypt objects
edu_objs (list): ids of cell objects positive for EdU
df (dataframe): dataframe of crypt measurements in this well
- add results to dataframe
row (char): row of current well
col (int): column of current well
fld (int): field of current frame
Returns:
dataframe: dataframe with measurements from this field added
"""
# list of crypt labels
crypt_labels = nonzero_unique(label_mask)
for l in crypt_labels:
# measure properties for one crypt
crypt_mask = get_object(label_mask, l)
objs = mask_objects(crypt_objs, crypt_mask, mask_val=l)
crypt_props = measure.regionprops(crypt_mask)[0]
# add properties to dataframe
df['num_cells'].append(len(nonzero_unique(objs)))
df['num_edu'].append(count_stained_objs(objs, edu_objs))
df['nuc_area'].append(crypt_props.area)
df['eccentricity'].append(crypt_props.eccentricity)
df['solidity'].append(crypt_props.solidity)
df['row'].append(row)
df['col'].append(col)
df['fld'].append(fld)
return df | 28e689a3812cceb5d3165b92d286965b397f6ef1 | 2,795 |
import math
def intersection_angle(m1, m2):
"""
Computes intersection angle between two slopes.
"""
return math.degrees(math.atan((m2-m1) / (1+m1*m2))) | 244192d3d1fe74130d64350606e765d8f2d4831b | 2,796 |
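# Quick check of intersection_angle with a simple slope pair:
print(intersection_angle(0.0, 1.0))  # 45.0: a horizontal line versus a line of slope 1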
import click
from pathlib import Path
def create_zappa_project(
project_name, stack_name, session, client, username, email, password
):
"""Create the Zappa project."""
aws_rds_host = get_aws_rds_host(stack_name, session)
with open('.env', 'a') as file:
file.write('AWS_RDS_HOST={}\n'.format(aws_rds_host))
aws_lambda_host = deploy_zappa(project_name, client)
with open('.env', 'a') as file:
file.write('AWS_LAMBDA_HOST={}\n'.format(aws_lambda_host))
update_zappa(project_name, client)
click.echo(
'Run initial Django migration for Zappa deployment...', nl=False
)
client.containers.run(
'{}_web:latest'.format(project_name),
'/bin/bash -c "source ve/bin/activate && zappa manage dev migrate"',
remove=True,
volumes={
Path.cwd(): {'bind': '/var/task', 'mode': 'rw'},
'{}/.aws'.format(Path.home()): {
'bind': '/root/.aws',
'mode': 'ro'
}
}
)
click.secho(' done', fg='green')
click.echo(
'Create Django superuser {} for Zappa...'.format(username), nl=False
)
try:
django_command = '''from django.contrib.auth import get_user_model; \
User = get_user_model(); \
User.objects.create_superuser(\\"{}\\", \\"{}\\", \\"{}\\")'''.format(
username, email, password
)
bash_command = 'source ve/bin/activate \
&& zappa invoke --raw dev "{}"'.format(django_command)
zappa_command = "/bin/bash -c '{}'".format(bash_command)
client.containers.run(
'{}_web:latest'.format(project_name),
zappa_command,
remove=True,
volumes={
Path.cwd(): {'bind': '/var/task', 'mode': 'rw'},
'{}/.aws'.format(Path.home()): {
'bind': '/root/.aws',
'mode': 'ro'
}
}
)
click.secho(' done', fg='green')
except docker.errors.ContainerError:
pass
click.echo('Running collectstatic for Zappa deployment...', nl=False)
client.containers.run(
'{}_web:latest'.format(project_name),
'/bin/bash -c "source ve/bin/activate \
&& python manage.py collectstatic --noinput"',
environment={'DJANGO_ENV': 'aws-dev'},
remove=True,
volumes={
Path.cwd(): {'bind': '/var/task', 'mode': 'rw'},
'{}/.aws'.format(Path.home()): {
'bind': '/root/.aws',
'mode': 'ro'
}
}
)
click.secho(' done', fg='green')
return(aws_lambda_host) | 0b5d9dd0f74604b96cb39ef131c46480d5a0b84f | 2,797 |
import os
def erase_create_HDF(filename):
"""Create and return a new HDS5 file with the given filename, erase the file if existing.
See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
for more information on using HDF5 as a data structure.
open for writing, truncate if exists
https://h5py.readthedocs.io/en/stable/high/file.html#opening-creating-files
Args:
| filename (string): name of the file to be created
Returns:
| HDF5 file.
Raises:
| No exception is raised.
Author: CJ Willers
"""
if os.path.isfile(filename):
os.remove(filename)
f = h5py.File(filename,'w')
return f | 04539ee0f5a8de817265285c3396c85eeda907b4 | 2,798 |
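# Usage sketch for erase_create_HDF, writing a small dataset into a freshly created file
# (hypothetical filename):
import numpy as np
f = erase_create_HDF('example.hdf5')
f.create_dataset('signal', data=np.arange(10))
f.close()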
def cast2dtype(segm):
"""Cast the segmentation mask to the best dtype to save storage.
"""
max_id = np.amax(np.unique(segm))
m_type = getSegType(int(max_id))
return segm.astype(m_type) | e877502f8a7d6c8e212338d2edf74bafc051e051 | 2,799 |