content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def get_error_signature(error_type, n_top, **kwargs):
"""Generates a signature for the specified settings of pose error
calculation.
:param error_type: Type of error.
:param n_top: Top N pose estimates (with the highest score) to be evaluated
for each object class in each image.
:return: Generated signature.
"""
error_sign = "error:" + error_type + "_ntop:" + str(n_top)
if error_type == "vsd":
if kwargs["vsd_tau"] == float("inf"):
vsd_tau_str = "inf"
else:
vsd_tau_str = "{:.3f}".format(kwargs["vsd_tau"])
error_sign += "_delta:{:.3f}_tau:{}".format(kwargs["vsd_delta"], vsd_tau_str)
return error_sign | 82036a650862a7b3a6b55493458ff3b7dc6cd2ff | 1,100 |
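A minimal usage sketch for the signature helper above, assuming the function is in scope; the VSD keyword values are purely illustrative:
sig = get_error_signature("vsd", 1, vsd_delta=15.0, vsd_tau=20.0)
print(sig)  # error:vsd_ntop:1_delta:15.000_tau:20.000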
import re
def clean_text_from_multiple_consecutive_whitespaces(text):
"""Cleans the text from multiple consecutive whitespaces, by replacing these with a single whitespace."""
multi_space_regex = re.compile(r"\s+", re.IGNORECASE)
return re.sub(multi_space_regex, ' ', text) | f25b27da070d6a984012a4cb5b1ae4a477713033 | 1,101 |
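A quick usage sketch, assuming the function above is in scope:
sample = "This  text\thas\n\n inconsistent   spacing."
print(clean_text_from_multiple_consecutive_whitespaces(sample))  # -> "This text has inconsistent spacing."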
import re
def run(filename):
"""
MUST HAVE FUNCTION!
Begins the plugin processing
Returns a list of endpoints
"""
run_results = set()
r_rule = re.compile(r"(Route\(\"[^,)]+)", flags=re.IGNORECASE)
for line in filename:
try:
route_match = r_rule.search(line)
if route_match:
run_results.add(route_match.group(1)[7:-1])
except Exception:
            # Print the offending line to BurpSuite's extension Output tab
print("Error! Couldn't parse: %s" % line)
return list(run_results) | e5ad233e3c3e07769b2f8f61657fa712b1f151c4 | 1,102 |
import re
from unittest.mock import patch
async def setup_script(hass, notify_q, notify_q2, now, source, config=None):
"""Initialize and load the given pyscript."""
conf_dir = hass.config.path(FOLDER)
file_contents = {f"{conf_dir}/hello.py": source}
Function.hass = None
mock_open = MockOpen()
for key, value in file_contents.items():
mock_open[key].read_data = value
def isfile_side_effect(arg):
return arg in file_contents
def glob_side_effect(path, recursive=None):
result = []
path_re = path.replace("*", "[^/]*").replace(".", "\\.")
path_re = path_re.replace("[^/]*[^/]*/", ".*")
for this_path in file_contents:
if re.match(path_re, this_path):
result.append(this_path)
return result
if not config:
config = {DOMAIN: {CONF_ALLOW_ALL_IMPORTS: True}}
with patch("custom_components.pyscript.os.path.isdir", return_value=True), patch(
"custom_components.pyscript.glob.iglob"
) as mock_glob, patch("custom_components.pyscript.global_ctx.open", mock_open), patch(
"custom_components.pyscript.trigger.dt_now", return_value=now
), patch(
"custom_components.pyscript.open", mock_open
), patch(
"homeassistant.config.load_yaml_config_file", return_value=config
), patch(
"custom_components.pyscript.install_requirements", return_value=None,
), patch(
"custom_components.pyscript.watchdog_start", return_value=None
), patch(
"custom_components.pyscript.os.path.getmtime", return_value=1000
), patch(
"custom_components.pyscript.global_ctx.os.path.getmtime", return_value=1000
), patch(
"custom_components.pyscript.os.path.isfile"
) as mock_isfile:
mock_isfile.side_effect = isfile_side_effect
mock_glob.side_effect = glob_side_effect
assert await async_setup_component(hass, "pyscript", config)
#
# I'm not sure how to run the mock all the time, so just force the dt_now()
# trigger function to return the given list of times in now.
#
def return_next_time():
nonlocal now
if isinstance(now, list):
if len(now) > 1:
return now.pop(0)
return now[0]
return now
trigger.__dict__["dt_now"] = return_next_time
if notify_q or notify_q2:
async def state_changed(event):
var_name = event.data["entity_id"]
if var_name == "pyscript.done":
value = event.data["new_state"].state
if notify_q:
await notify_q.put(value)
if var_name == "pyscript.done2":
value = event.data["new_state"].state
if notify_q2:
await notify_q2.put(value)
hass.bus.async_listen(EVENT_STATE_CHANGED, state_changed) | d1d194af7686cbf5bb5e61ebc692d7fd7e9aae71 | 1,103 |
def get_assignment_grade_summaries(course_id):
""" return a list of a course's assignments with a grade summary for each
https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_assignments """
assignments = api.get_list('courses/{}/analytics/assignments'.format(course_id))
return [] if 'errors' in assignments else assignments | 69dddeee4389ee457201c3d1195537f869d0ea57 | 1,104 |
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return sorted(
[
desc for (_, desc) in XModuleDescriptor.load_classes()
] + XBLOCK_CLASSES,
key=str
) | e19b7957b3a65495e1d0fb7c33b4b2748bc1473f | 1,105 |
import numpy as np
def e3p0(tof,p1,p2,p3,p4,p5,p6,p7,p8,p9,p10):
"""
Background function for TOF spectra
Parameters
----------
tof : array-like
The time-of-flight spectrum
p1 : float
constant background
p2 : float
multiplier on 1st exponential
p3 : float
multiplier on time-of-flight in 1st exponent
p4 : float
constant added to 1st exponent
p5-p10 : float
(see equation in notes)
Returns
-------
    e3p0 : array-like
        The background function, an array the same length as tof (see notes)
Notes
-----
.. math:: f(t) = p1 + p2e^{p3t+p4} + p5e^{p6t+p7} + p8e^{p9t+p10}
"""
return p1 + p2*np.exp(p3*tof+p4) + p5*np.exp(p6*tof+p7) + p8*np.exp(p9*tof+p10) | 0ad3aad94c4e8b5f48a6ec4458329d0de6eda612 | 1,106 |
import numpy as np
def dice_coefficient(pred, gt):
"""
Computes dice coefficients between two masks
:param pred: predicted masks - [0 ,1]
:param gt: ground truth masks - [0 ,1]
:return: dice coefficient
"""
d = (2 * np.sum(pred * gt) + 1) / ((np.sum(pred) + np.sum(gt)) + 1)
return d | d1d97b749ce365c6181a2b17c41d946195339c96 | 1,107 |
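A small worked example for the Dice coefficient above, assuming the function is in scope:
import numpy as np
pred = np.array([[1, 1, 0], [0, 1, 0]])
gt = np.array([[1, 0, 0], [0, 1, 1]])
# overlap = 2, so dice = (2*2 + 1) / (3 + 3 + 1) = 5/7
print(dice_coefficient(pred, gt))  # 0.714...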
def get_keep_score(source_counts, prediction_counts, target_counts):
"""Compute the keep score (Equation 5 in the paper)."""
source_and_prediction_counts = source_counts & prediction_counts
source_and_target_counts = source_counts & target_counts
true_positives = sum((source_and_prediction_counts & source_and_target_counts).values())
selected = sum(source_and_prediction_counts.values())
relevant = sum(source_and_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant) | ce2d94f3ffc353a3a9843f5c0b6a846608efe962 | 1,108 |
def run_decode_generator(gc, env):
"""Run the decode table generator"""
    if env is None:
return (1, ['no env!'])
xedsrc = env.escape_string(env['src_dir'])
build_dir = env.escape_string(env['build_dir'])
debug = ""
other_args = " ".join(env['generator_options'])
gen_extra_args = "--gendir %s --xeddir %s %s %s" % (build_dir,
xedsrc, debug,
other_args)
if env['gen_ild_storage']:
gen_extra_args += ' --gen-ild-storage'
if env['compress_operands']:
gen_extra_args += " --compress-operands"
cmd = env.expand_string(gc.decode_command(xedsrc,
gen_extra_args,
env.on_windows()))
if mbuild.verbose(2):
mbuild.msgb("DEC-GEN", cmd)
(retval, output, error_output) = mbuild.run_command(cmd,
separate_stderr=True)
oo = env.build_dir_join('DEC-OUT.txt')
oe = env.build_dir_join('DEC-ERR.txt')
_write_file(oo, output)
_write_file(oe, error_output)
if retval == 0:
list_of_files = read_file_list(gc.dec_output_file)
mbuild.hash_files(list_of_files,
env.build_dir_join(".mbuild.hash.xeddecgen"))
mbuild.msgb("DEC-GEN", "Return code: " + str(retval))
return (retval, error_output ) | 256ea26ee5c699fb0253c9508da4fe7d245065e6 | 1,109 |
def dict_comparator(first_dict, second_dict):
"""
    Checks whether the two dictionaries contain the same set of key-value pairs.
    Returns True if they match, otherwise False.
"""
if set(first_dict.keys()) != set(second_dict.keys()):
return False
for key, value in first_dict.items():
if value != second_dict[key]:
return False
return True | 47f28e8810b8437cc0e3bfca6ccba6734c988890 | 1,110 |
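Usage sketch for dict_comparator, assuming it is in scope:
assert dict_comparator({"a": 1, "b": 2}, {"b": 2, "a": 1}) is True   # same pairs, order ignored
assert dict_comparator({"a": 1}, {"a": 2}) is False                  # same key, different value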
def word_check(seq1,seq2,word):
"""Returns False and aborts if seq2 contains a substring of seq1 of length word. Returns True otherwise"""
for i in range(len(seq1)-word+1):
if seq2.find(seq1[i:i+word])>-1: return seq2.find(seq1[i:i+word])
return -1 | 86b4cad571fdbf55073f30f9c5fd9a5e25da46d7 | 1,111 |
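Illustrative calls for word_check (note it returns an index in seq2, or -1), assuming the function is in scope:
print(word_check("GATTACA", "TTAC", 4))  # 0  -> the 4-mer "TTAC" occurs at index 0 of seq2
print(word_check("GATTACA", "GGGG", 4))  # -1 -> no shared 4-mer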
def plasma_parameter(N_particles, N_grid, dx):
"""
Estimates the plasma parameter as the number of particles per step.
Parameters
----------
N_particles : int, float
Number of physical particles
N_grid : int
Number of grid cells
dx : float
grid step size
"""
return (N_particles / N_grid) * dx | 51d3b96ccba2689db461fd6117cb5c2961dc3812 | 1,112 |
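A numeric sanity check for plasma_parameter, assuming the function is in scope; the values are made up:
print(plasma_parameter(10_000, 100, 0.5))  # (10000 / 100) * 0.5 = 50.0 particles per cell-width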
import torch
def get_ious_and_iou_loss(inputs,
targets,
weight=None,
loss_type="iou",
reduction="none"):
"""
Compute iou loss of type ['iou', 'giou', 'linear_iou']
Args:
inputs (tensor): pred values
targets (tensor): target values
weight (tensor): loss weight
box_mode (str): 'xx' or 'lr', 'lr' is currently supported.
loss_type (str): 'giou' or 'iou' or 'linear_iou'
reduction (str): reduction manner
Returns:
loss (tensor): computed iou loss.
"""
# box_mode = "lr"
inputs = torch.cat((-inputs[..., :1], inputs[..., 1:]), dim=-1)
targets = torch.cat((-targets[..., :1], targets[..., 1:]), dim=-1)
eps = torch.finfo(torch.float32).eps
inputs_area = (inputs[..., 1] - inputs[..., 0]).clamp_(min=0)
targets_area = (targets[..., 1] - targets[..., 0]).clamp_(min=0)
w_intersect = (torch.min(inputs[..., 1], targets[..., 1])
- torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
area_intersect = w_intersect
area_union = targets_area + inputs_area - area_intersect
ious = area_intersect / area_union.clamp(min=eps)
if loss_type == "iou":
loss = -ious.clamp(min=eps).log()
elif loss_type == "linear_iou":
loss = 1 - ious
elif loss_type == "giou":
g_w_intersect = torch.max(inputs[..., 1], targets[..., 1]) \
- torch.min(inputs[..., 0], targets[..., 0])
        ac_union = g_w_intersect
        gious = ious - (ac_union - area_union) / ac_union.clamp(min=eps)
loss = 1 - gious
else:
raise NotImplementedError
if weight is not None:
loss = loss * weight.view(loss.size())
if reduction == "mean":
loss = loss.sum() / max(weight.sum().item(), eps)
else:
if reduction == "mean":
loss = loss.mean()
if reduction == "sum":
loss = loss.sum()
return ious, loss | 302fb70c888caf33cf0077b553cd0d055ff4003a | 1,113 |
def load_chembl():
"""Downloads a small subset of the ChEMBL dataset.
Returns
-------
ic50_train: sparse matrix
sparse train matrix
ic50_test: sparse matrix
sparse test matrix
feat: sparse matrix
sparse row features
"""
# load bioactivity and features
ic50 = load_one("chembl-IC50-346targets.mm")
feat = load_one("chembl-IC50-compound-feat.mm")
## creating train and test sets
ic50_train, ic50_test = make_train_test(ic50, 0.2)
return (ic50_train, ic50_test, feat) | f9c5017ab7892f7fbf6c3ee1a1dd9da0e322f66f | 1,114 |
import re
def validate_name_dynamotable(table_name):
"""Validate if table name matches DynamoDB naming standards."""
if not isinstance(table_name, str):
        raise ValueError('Input argument \"table_name\" must be a string')
if table_name.__len__() < 3 or table_name.__len__() > (255 - 5):
# note: deduct 5 chars to allow postfix space (e.g. for .lock)
return (False, 'TableName should be of length: [3-255]')
if not re.match(r'^[a-zA-Z0-9]', table_name):
        return (False, 'TableName should start with a letter or number')
if re.search(r'[-\._]{2}', table_name):
return (False, 'TableName can\'t contain two special characters [-, ., _] in a row')
if not re.match(r'^[-a-zA-Z0-9\._]*$', table_name):
return (False, re.sub(' +', ' ', 'TableName contains invalid character. \
Allowed characters: [a-z, A-Z, 0-9, \'.\', \'-\', \'_\']'))
return (True, 'Success') | 139391e3ece6cacae24d5bd72fd0fd77b65ecc41 | 1,115 |
def delete_item(item_id):
"""
The method deletes item with the provided id.
:param item_id: id of the item to be deleted
:return: http response
"""
try:
if DATA_CONTROLLER.delete_bucketlist_item(item_id):
return make_response("", 200)
else:
return make_response("", 404)
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response | 95e0bb38d30cbda6d617bb5f396dba4cfd4ef328 | 1,116 |
def import_from_text_file(filename, defaultExt, readDataFcn, verbose=False):
"""
Opens a given text file and reads data using the specified function
Parameters
----------
filename : str
the path of a file
defaultExt : str
the default extension of the file
readDataFcn : callable
the function to read data from the file. Takes the file as its only parameter.
verbose : bool (optional)
if True prints messages on console (default is False)
Returns
-------
unknown
the output of the readDataFcn
"""
return _open_file(filename, defaultExt, 'r', readDataFcn, verbose) | 4f5602e09d02446ce9770656e4b42df5dd018ccd | 1,117 |
def is_template_definition(metric_name):
"""Return if the given metric name is a template definition by
convention."""
fields = metric_name.split('/')
return fields[0].lower() == TEMPLATE_DEFINITION_PREFIX | da5fb191cf451b542a656c352d64258be74f7710 | 1,118 |
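Usage sketch for is_template_definition; TEMPLATE_DEFINITION_PREFIX is a module-level constant not shown here, so an assumed value is used for illustration (this only works if the function is defined alongside it):
TEMPLATE_DEFINITION_PREFIX = "template"  # assumed value, for illustration only
print(is_template_definition("Template/cpu_usage"))  # True
print(is_template_definition("host/cpu_usage"))      # False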
import pyabf
def _cm_ramp_points_and_voltages(abf):
"""
Return [points, voltages] if the sweep contains a ramp suitable for
capacitance calculation using a matching doward and upward ramp.
points is a list of 3 numbers depicting index values important to this
ramp. The first number is the index at the start of the downward ramp, the
second is the index of its nadir, and the third is the index where it
returns to the original level.
voltages is a list of 2 numbers: voltage before and during the ramp.
"""
assert isinstance(abf, pyabf.ABF)
if abf.sweepUnitsY != "pA":
raise Exception("must be in voltage clamp configuration")
for i, p1 in enumerate(abf.sweepEpochs.p1s):
if i == 0:
continue
# ensure this sweep and the last are both ramps
if abf.sweepEpochs.types[i] != "Ramp":
continue
if abf.sweepEpochs.types[i-1] != "Ramp":
continue
# ensure the levels are different
if abf.sweepEpochs.levels[i] == abf.sweepEpochs.levels[i-1]:
continue
ptStart = abf.sweepEpochs.p1s[i-1]
ptTransition = abf.sweepEpochs.p1s[i]
ptEnd = abf.sweepEpochs.p2s[i]
points = [ptStart, ptTransition, ptEnd]
voltageBefore = abf.sweepEpochs.levels[i-1]
voltageDuring = abf.sweepEpochs.levels[i]
voltages = [voltageBefore, voltageDuring]
return [points, voltages]
return None | c73b5f5cbc44c0794b332f6010864e9f25fcff0c | 1,119 |
def single_model_embeddings_specify(single_model_embeddings):
"""Returns an instance of MultiTaskLSTMCRF initialized with the default configuration file,
loaded embeddings and single specified model."""
single_model_embeddings.specify()
return single_model_embeddings | fe23c571ca29dbbf87cbccdbfc1e11aaaf784c01 | 1,120 |
import bz2
import gzip
import json
def load_json(filename):
"""
Load a JSON file that may be .bz2 or .gz compressed
"""
if '.bz2' in filename:
with bz2.open(filename, 'rt') as infile:
return json.load(infile)
elif '.gz' in filename:
with gzip.open(filename, 'rt') as infile:
return json.load(infile)
else:
with open(filename, 'rt') as infile:
return json.load(infile) | 1b985db386e85c3b8e87911d89a7652133bfee7b | 1,121 |
def get_future_contracts(underlying_symbol, date=None):
"""
获取某期货品种在策略当前日期的可交易合约标的列表
:param security 期货合约品种,如 ‘AG’(白银)
:return 某期货品种在策略当前日期的可交易合约标的列表
"""
assert underlying_symbol, "underlying_symbol is required"
dt = to_date_str(date)
return JQDataClient.instance().get_future_contracts(underlying_symbol=underlying_symbol, dt=dt) | 9945c897c643e410f8a127da5b77525d6e3ba28c | 1,122 |
import urllib3
import requests
import pandas as pd
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
def rodeo_query(fc, pallet): # 3.5-4 seconds for 150 elem
"""
Get pd DataFrame with info from rodeo about pallet/tote in TS Out.
:param fc: str
:param pallet: Pallet or Tote are accepted.
:return: df or "No data was found" if status_code = 200, "There was an error while connecting to {url}"
otherwise.
"""
url = f"https://rodeo-dub.amazon.com/{fc}/Search?_enabledColumns=on&enabledColumns=ASIN_TITLES&enabledColumns" \
f"=FC_SKU&enabledColumns=OUTER_SCANNABLE_ID&&searchKey={pallet} "
urllib3.disable_warnings() # prevent warnings for unverified request
print(COLOR + "Downloading manifested pallet's content from Rodeo.")
with requests.Session() as req:
resp = req.get(url,
timeout=30,
verify=False,
allow_redirects=True,
auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL))
if resp.status_code == 200:
data = pd.read_html(resp.text, flavor=None, header=0, parse_dates=["Need To Ship By Date"])
if data is not None and len(data[0]) > 0:
df = pd.concat(data, sort=False)
df = df.drop(columns='Unnamed: 0')
return df
else:
return f"No data was found at {url}\nPlease check that {pallet} is correct.\nIf the error persists, " \
f"please check Rodeo status for your FC: {fc}."
else:
# return resp.raise_for_status() # to see error
return f"There was an error while connecting to {url}" | 926a9f42b5ed82128d4e5fae4adc2c74dab3e567 | 1,123 |
import itertools
def plan_to_joint_configuration(robot, qgoal, pname='BiRRT', max_iters=20,
max_ppiters=40, try_swap=False):
"""
Plan a trajectory to the given `qgoal` configuration.
Parameters
----------
robot: orpy.Robot
The OpenRAVE robot
qgoal: array_like
The goal configuration
pname: str
Name of the planning algorithm. Available options are: `BasicRRT`, `BiRRT`
max_iters: float
Maximum iterations for the planning stage
max_ppiters: float
Maximum iterations for the post-processing stage. It will use a parabolic
        smoother which short-cuts the trajectory and then smooths it
try_swap: bool
If set, will compute the direct and reversed trajectory. The minimum
duration trajectory is used.
Returns
-------
traj: orpy.Trajectory
Planned trajectory. If plan fails, this function returns `None`.
"""
qstart = robot.GetActiveDOFValues()
env = robot.GetEnv()
planner = orpy.RaveCreatePlanner(env, pname)
params = orpy.Planner.PlannerParameters()
params.SetMaxIterations(max_iters)
if max_ppiters > 0:
params.SetPostProcessing('ParabolicSmoother',
'<_nmaxiterations>{0}</_nmaxiterations>'.format(max_ppiters))
else:
params.SetPostProcessing('', '')
# Plan trajectory
best_traj = None
min_duration = float('inf')
reversed_is_better = False
count = 0
for qa, qb in itertools.permutations([qstart, qgoal], 2):
count += 1
with robot:
robot.SetActiveDOFValues(qa)
params.SetGoalConfig(qb)
params.SetRobotActiveJoints(robot)
initsuccess = planner.InitPlan(robot, params)
if initsuccess:
traj = orpy.RaveCreateTrajectory(env, '')
status = planner.PlanPath(traj) # Plan the trajectory
if status == orpy.PlannerStatus.HasSolution:
duration = traj.GetDuration()
if duration < min_duration:
min_duration = duration
best_traj = orpy.RaveCreateTrajectory(env, traj.GetXMLId())
best_traj.Clone(traj, 0)
if count == 2:
reversed_is_better = True
if not try_swap:
break
# Check if we need to reverse the trajectory
if reversed_is_better:
best_traj = orpy.planningutils.ReverseTrajectory(best_traj)
return best_traj | 78bf727bede2d886ba93825e3a0cd8ccaa99f57e | 1,124 |
def _get_texinfo(data):
"""Return the texture information of a texture data.
Arguments:
* data: the texture data as an array.
Returns:
* texinfo: a dictionary with the information related to the texture data.
"""
assert data.ndim == 3
size = data.shape[:2]
if size[0] == 1:
ndim = 1
elif size[0] > 1:
ndim = 2
ncomponents = data.shape[2]
return dict(size=size, ndim=ndim, ncomponents=ncomponents) | 9dfa3b88e65b4c7b7eaa60149f4f24381b36e762 | 1,125 |
def set_featured_notebooks(notebook_ids): # noqa: E501
"""set_featured_notebooks
:param notebook_ids: Array of notebook IDs to be featured.
:type notebook_ids: List[str]
:rtype: None
"""
update_multiple(ApiNotebook, [], "featured", False)
if notebook_ids:
update_multiple(ApiNotebook, notebook_ids, "featured", True)
return None, 200 | 7add2e120bf803cf8fa36c0fa56c854654c447fa | 1,126 |
def speed_to_cadences(bicycle, speed, digits=None):
"""
Return cadences in hertz (revolutions per second).
Speed is measured in kilometers per hour.
Assume the following bicycle attributes are non-null and non-empty:
- front_cogs
- rear_cogs
- crank_length
- rear_wheel
Raise a ``ValueError``, if that is not the case.
EXAMPLES::
>>> w = Wheel(diameter=600)
>>> b = Bicycle(front_cogs=[40], rear_cogs=[20, 30], crank_length=100, rear_wheel=w)
>>> speed_to_cadences(b, 18.1, digits=1)
{(40, 30): 2.0, (40, 20): 1.3}
"""
b = bicycle
attrs = ['front_cogs', 'rear_cogs', 'crank_length', 'rear_wheel']
check_attrs(b, *attrs)
check_attrs(b.rear_wheel, 'diameter')
gr = gain_ratios(b)
result = {}
for (k, g) in gr.items():
result[k] = speed/(2*pi*b.crank_length*g*(3600/1e6))
if digits is not None:
result = {k: round(v, digits) for k, v in result.items()}
return result | 358343831e341f49facd8b2c0af940ee765083aa | 1,127 |
import hashlib
def _gen_version(fields):
"""Looks at BotGroupConfig fields and derives a digest that summarizes them.
This digest is going to be sent to the bot in /handshake, and bot would
include it in its state (and thus send it with each /poll). If server detects
that the bot is using older version of the config, it would ask the bot
to restart.
Args:
fields: dict with BotGroupConfig fields (without 'version').
Returns:
A string that going to be used as 'version' field of BotGroupConfig tuple.
"""
# Just hash JSON representation (with sorted keys). Assumes it is stable
# enough. Add a prefix and trim a bit, to clarify that is it not git hash or
# anything like that, but just a dumb hash of the actual config.
digest = hashlib.sha256(utils.encode_to_json(fields)).hexdigest()
return 'hash:' + digest[:14] | a4bd4420ce548f8a0c40f3120c119f89c158a371 | 1,128 |
import sys
def getopt(clf, ret_val, isbool=False):
""" Command Line Option input parser"""
found = []
def getCLO(flag):
iindx = sys.argv.index(flag)
sys.argv.pop(iindx)
return sys.argv.pop(iindx)
if isbool: return (clf in sys.argv)
while clf in sys.argv: found.append(getCLO(clf))
if found: ret_val = [found, found[0]][int(len(found) == 1)]
return ret_val | 9f7738bb308d2875f2c229dd7e33f7aae6981733 | 1,129 |
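A sketch of how getopt reads flags from sys.argv, using a simulated command line (assumes the function is in scope):
import sys
sys.argv = ["tool.py", "--mode", "fast", "--verbose"]  # simulated CLI invocation
verbose = getopt("--verbose", False, isbool=True)      # True  (flag present, argv untouched)
mode = getopt("--mode", "slow")                        # "fast" (flag and value popped from argv)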
def pay_and_save_financing(req: request, request_json, account_id):
"""Set up the financing statement, pay if there is an account id, and save the data."""
# Charge a fee.
token: dict = g.jwt_oidc_token_info
statement = FinancingStatement.create_from_json(request_json, account_id, token.get('username', None))
invoice_id = None
registration = statement.registration[0]
pay_trans_type, fee_quantity = resource_utils.get_payment_type_financing(registration)
pay_ref = None
if not is_reg_staff_account(account_id):
pay_account_id: str = account_id if not is_sbc_office_account(account_id) else None
payment = Payment(jwt=jwt.get_token_auth_header(),
account_id=pay_account_id,
details=resource_utils.get_payment_details_financing(registration))
pay_ref = payment.create_payment(pay_trans_type, fee_quantity, None, registration.client_reference_id)
else:
payment_info = resource_utils.build_staff_registration_payment(req, pay_trans_type, fee_quantity)
payment = Payment(jwt=jwt.get_token_auth_header(),
account_id=None,
details=resource_utils.get_payment_details_financing(registration))
pay_ref = payment.create_payment_staff_registration(payment_info, registration.client_reference_id)
invoice_id = pay_ref['invoiceId']
registration.pay_invoice_id = int(invoice_id)
registration.pay_path = pay_ref['receipt']
# Try to save the financing statement: failure throws an exception.
try:
statement.save()
except Exception as db_exception: # noqa: B902; handle all db related errors.
current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, 'financing', repr(db_exception)))
if account_id and invoice_id is not None:
current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, 'financing', invoice_id))
try:
payment.cancel_payment(invoice_id)
except SBCPaymentException as cancel_exception:
current_app.logger.error(PAY_REFUND_ERROR.format(account_id, 'financing', invoice_id,
repr(cancel_exception)))
raise db_exception
return statement | 0de221d2e6acb090e2e2a135b1280f2daa73b63c | 1,130 |
def resolve_cmds_path(cmds, singlesrv_mode):
"""Resolve the cmds path if in single server mode.
Args:
cmds: A list of sender/receiver commands.
singlesrv_mode: A bool on whether running in single server mode.
Returns:
The commands that path has been resolved if needed
(in single server mode).
"""
if not singlesrv_mode:
return cmds
r_cmds = []
for cmd in cmds:
r_cmds.append(_resolve_binary_path_for_timed_cmd(cmd))
return r_cmds | 6d7e673a48c657a446785716fc09f47d4f87d81d | 1,131 |
import base64
def _encode_base64(data: str) -> str:
"""Base 64 encodes a string."""
ebytes = base64.b64encode(data.encode("utf-8"))
estring = str(ebytes, "utf-8")
return estring | 5304972fec4cc54d9fa652cbd977b7c069d228d5 | 1,132 |
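Quick check of the base64 helper above, assuming it is in scope:
print(_encode_base64("hello"))  # 'aGVsbG8='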
from typing import Mapping
from typing import Any
def workflow_spec(
dag: DAG,
workflow: Workflow,
) -> Mapping[str, Any]:
"""
Return a minimal representation of a WorkflowSpec for the supplied DAG and metadata.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#workflowspec
Parameters
----------
dag
The DAG to generate the spec for
workflow
The configuration for this workflow
Raises
------
ValueError
If any of the extra_spec_options collides with a property used by the runtime.
"""
validate_parameters(inputs=dag.inputs, params=workflow.params)
spec = {
"entrypoint": BASE_DAG_NAME,
"templates": _templates(
node=dag,
container_image=workflow.container_image,
container_command=workflow.container_entrypoint_to_dag_cli,
params=workflow.params,
),
}
if workflow.params:
spec["arguments"] = _workflow_spec_arguments(workflow.params)
spec = with_extra_spec_options(
original=spec,
extra_options=workflow.extra_spec_options,
context="the Workflow spec",
)
return spec | d13b7242f3158ea7528141ca65a17df01968c14e | 1,133 |
def redirect(request):
"""
Handling what happens when the groupcode is submitted by user and handles input from user's when they are answering
questions.
:param request:
:return: The methods returns the student view page which is the actual game to the user if they entered a correct
groupcode, it will also return messages when user's are answering questions in the quiz telling them if the answers
are correct or not
"""
"""handling what happens when the groupcode is entered and submitted aswell as the question logic"""
global score
global num
map_check = False
# Below is to check if whether the button is for groupcode or answer to question
# process the group code passed from the landing page
if request.method == 'POST' and 'submit-groupcode' in request.POST:
# Get inputted groupcode from the user
groupcode = str(request.POST.get('groupCode'))
# if the group code exists, load the treasure hunt page with the correct questions
if Gamecode.objects.filter(groupcode=groupcode).exists():
            # Below: load the question and fetch question information
questionNum = Gamecode.objects.get(groupcode=groupcode)
mapCheck = questionNum.map
routeID = questionNum.routeID_id
num = questionNum.questionNum
score = questionNum.score
# Get question by using the question number the group is currently on
info = Questions.objects.filter(node_num=int(num),routeID=routeID)
# Add group code into user's session
request.session['groupcode'] = groupcode
# Add score into user's session
request.session['score'] = score
# Add routeID into user's session
request.session['routeID'] = routeID
            # Show the correct map for the user to go to when they join the game after a question is answered but
            # the map check is not yet done
if num >1:
print(num)
#set map value to the previous question
num -=1
print(num)
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
#Return number to the correct question number
num +=1
else:
latest_question = Questions.objects.get(node_num=num , routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
return render(request, 'app/studentview.html',{"groupcode":groupcode, "data":info, "id":id, "score":score,"map_check":mapCheck,"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name})
# otherwise show an error message
else:
print("Wrong")
messages.error(request, 'The game code does not exist')
return render(request, 'app/index.html')
# if an answer to question is submitted, check if it is correct
if request.method == 'POST' and 'submit-question' in request.POST:
# Get routeID from user's session
routeID = request.session['routeID']
# Get groupcode from user's session
groupcode = request.session['groupcode']
# Get text from the input answer box
data = str(request.POST.get('answer'))
# Retrieve the current question the group is on from the database
questionNum = Gamecode.objects.get(groupcode=groupcode)
# if answer is correct for the current node, move onto the next question if it exists,
# otherwise show they have finished the quiz
if Questions.objects.filter(answers__icontains=data.strip(), node_num=int(num), routeID=routeID).exists():
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
map_check = "True"
# Add 1 to the counter so the questions moves on to the next one
num += 1
# Check whether if the user is on the last question
if Questions.objects.filter(node_num=int(num), routeID=routeID).exists():
score += 3
questionNum.map = map_check
questionNum.questionNum = num
questionNum.score = score
questionNum.save()
print(location)
info = Questions.objects.filter(node_num=num, routeID=routeID)
messages.success(request, 'Correct!') #Generate message saying correct
return render(request, 'app/studentview.html',{"groupcode":groupcode,"data":info,"id":id,
"score":score, "map_check":map_check,
"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name})
# Case when the user is on the last question
else:
# To make sure user stays on the last question
num -=1
questionNum.questionNum = num
questionNum.map = map_check
questionNum.save()
info = Questions.objects.filter(node_num=num,routeID=routeID)
# Generate message when user finish the quiz
messages.success(request, 'You have finished the quiz, well done!')
# Return the information back to user's view
return render(request, 'app/studentview.html', {"groupcode":groupcode,"data":info,"id":id,
"score":score, "map_check":map_check,
"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name,"Finished":"True"})
# Case when user gets the answer wrong
else:
info = Questions.objects.filter(node_num=num, routeID=routeID)
# Return incorrect message
messages.error(request, 'That is the wrong answer, please try again')
# Return the information back to user's view
return render(request, 'app/studentview.html', {"groupcode": groupcode, "data": info, "id": id,"score":score})
# Case when user refreshes the page during the game
if 'groupcode' in request.session:
# Retrieve information about the questions
groupcode = request.session['groupcode']
routeID = request.session['routeID']
questionNum = Gamecode.objects.get(groupcode=groupcode)
num = questionNum.questionNum
mapcheck = questionNum.map
# Get question from the database using num counter
info = Questions.objects.filter(node_num=int(num), routeID=routeID)
if num > 1:
print(num)
# set map value to the previous question
num -= 1
print(num)
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
# Return number to the correct question number
num += 1
else:
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
# Return the information back to user's view
return render(request, 'app/studentview.html',
{"groupcode": groupcode, "data": info, "id": id, "score": score, "map_check": mapcheck,
"location": location, "longtitude": longtitude,
"latitude": latitude, "answer": place_name})
else:
# Redirect user back to start page
return render(request, 'app/index.html') | 5f906dae9bde9533b5b09bd5540b8458a766e583 | 1,134 |
import torch
def build_test_fn(policy, optim, log_dir, model_name, train_collector, save_train_buffer, obs_shape, stack_num, env_id,
num_episodes):
""" Build custom test function for maze world environment """
def custom_test_fn(epoch, env_step):
# Save agent
print(f"Epoch = {epoch}")
torch.save({'model': policy.state_dict(), 'optim': optim.state_dict()},
log_dir + model_name + f'_epoch{epoch}.pth')
if save_train_buffer:
train_collector.buffer.save_hdf5(f'{log_dir}/epoch{epoch}_train_buffer.hdf5')
# Record agent`s performance in video
policy.eval()
test_env = envpool.make_gym(env_id, num_envs=1, seed=0, episodic_life=False, reward_clip=True, stack_num=4,
gray_scale=False, img_height=160, img_width=160)
collector = ts.data.Collector(policy, test_env, exploration_noise=True)
record.collect_and_record(collector, n_episode=num_episodes // 2, obs_shape=obs_shape, stack_num=stack_num,
log_dir=log_dir, epoch=epoch, starting_episode=0)
collector = ts.data.Collector(policy, test_env, exploration_noise=False)
record.collect_and_record(collector, n_episode=num_episodes // 2, obs_shape=obs_shape, stack_num=stack_num,
log_dir=log_dir, epoch=epoch, starting_episode=num_episodes // 2)
return custom_test_fn | 6fefef23e1a502ce30f556ea9350d933e7303dfd | 1,135 |
def check_score(encoding, min_qual, qual_str):
"""Return True if the average quality score is at least min_qual
"""
qscores = [encoding[q] for q in qual_str]
return sum(qscores) >= min_qual * len(qscores) | 427dd8617d5ab425e3b7989923a271599fc7371a | 1,136 |
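A usage sketch for check_score with an assumed Phred+33 encoding map (the real encoding dict comes from elsewhere), assuming the function is in scope:
phred33 = {chr(q + 33): q for q in range(94)}  # assumed Phred+33 mapping, for illustration
print(check_score(phred33, 30, "IIII"))  # True  ('I' is Q40, mean 40 >= 30)
print(check_score(phred33, 30, "!!!!"))  # False ('!' is Q0)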
import functools
import warnings
def add_unsafe_warning(func, fig):
"""
Generate warning if not supported by Paxplot
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if fig._show_unsafe_warning:
warnings.warn(
f'The function you have called ({func.__name__}) is not '
'officially supported by Paxplot, but it may still work. '
'Report issues to '
'https://github.com/kravitsjacob/paxplot/issues',
Warning
)
return func(*args, **kwargs)
return wrapper | 8bca3fbc514315cd4c761b2e8f7f1168e01af7a9 | 1,137 |
from typing import List
from typing import Optional
def update_dask_partitions_shuffle(
ddf: dd.DataFrame,
table: str,
secondary_indices: List[str],
metadata_version: int,
partition_on: List[str],
store_factory: StoreFactoryType,
df_serializer: DataFrameSerializer,
dataset_uuid: str,
num_buckets: int,
sort_partitions_by: Optional[str],
bucket_by: List[str],
) -> da.Array:
"""
Perform a dataset update with dask reshuffling to control partitioning.
The shuffle operation will perform the following steps
1. Pack payload data
Payload data is serialized and compressed into a single byte value using
``distributed.protocol.serialize_bytes``, see also ``pack_payload``.
2. Apply bucketing
Hash the column subset ``bucket_by`` and distribute the hashes in
``num_buckets`` bins/buckets. Internally every bucket is identified by an
integer and we will create one physical file for every bucket ID. The
bucket ID is not exposed to the user and is dropped after the shuffle,
before the store. This is done since we do not want to guarantee at the
moment, that the hash function remains stable.
3. Perform shuffle (dask.DataFrame.groupby.apply)
The groupby key will be the combination of ``partition_on`` fields and the
hash bucket ID. This will create a physical file for every unique tuple
in ``partition_on + bucket_ID``. The function which is applied to the
dataframe will perform all necessary subtask for storage of the dataset
(partition_on, index calc, etc.).
4. Unpack data (within the apply-function)
After the shuffle, the first step is to unpack the payload data since
the follow up tasks will require the full dataframe.
5. Pre storage processing and parquet serialization
We apply important pre storage processing like sorting data, applying
final partitioning (at this time there should be only one group in the
payload data but using the ``MetaPartition.partition_on`` guarantees the
appropriate data structures kartothek expects are created.).
After the preprocessing is done, the data is serialized and stored as
parquet. The applied function will return an (empty) MetaPartition with
indices and metadata which will then be used to commit the dataset.
Returns
-------
A dask.Array holding relevant MetaPartition objects as values
"""
if ddf.npartitions == 0:
return ddf
group_cols = partition_on.copy()
if num_buckets is None:
raise ValueError("``num_buckets`` must not be None when shuffling data.")
meta = ddf._meta
meta[_KTK_HASH_BUCKET] = np.uint64(0)
ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
group_cols.append(_KTK_HASH_BUCKET)
packed_meta = ddf._meta[group_cols]
packed_meta[_PAYLOAD_COL] = b""
unpacked_meta = ddf._meta
ddf = pack_payload(ddf, group_key=group_cols)
ddf = ddf.groupby(by=group_cols)
ddf = ddf.apply(
partial(
_store_partition,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
table=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
unpacked_meta=unpacked_meta,
),
meta=("MetaPartition", "object"),
)
return ddf | d7f050247d89997ec76d00cb6e2e7ed25a7b24fb | 1,138 |
def edit_paycheck(paycheck_id):
""" Edit a paycheck """
paycheck = Paycheck.query.get(paycheck_id)
form = PaycheckForm(obj=paycheck)
return render_template('pay/edit_paycheck.jinja', form=form, paycheck_id=paycheck_id) | 9e8a22af102bc818c35c32d49b5a26c348b0f221 | 1,139 |
def is_meeting_approved(meeting):
"""Returns True if the meeting is approved"""
if meeting.session_set.first().status.slug == 'apprw':
return False
else:
return True | 0dca106890d195f613477334d2bb6187c1587e15 | 1,140 |
def F(
u,
v,
kappa,
rho,
cp,
convection,
source,
r,
neumann_bcs,
robin_bcs,
my_dx,
my_ds,
stabilization,
):
"""
Compute
.. math::
F(u) =
\\int_\\Omega \\kappa r
\\langle\\nabla u, \\nabla \\frac{v}{\\rho c_p}\\rangle
\\, 2\\pi \\, \\text{d}x
+ \\int_\\Omega \\langle c, \\nabla u\\rangle v
\\, 2\\pi r\\,\\text{d}x
- \\int_\\Omega \\frac{1}{\\rho c_p} f v
\\, 2\\pi r \\,\\text{d}x\\\\
- \\int_\\Gamma r \\kappa \\langle n, \\nabla T\\rangle v
\\frac{1}{\\rho c_p} 2\\pi \\,\\text{d}s
- \\int_\\Gamma r \\kappa \\alpha (u - u_0) v
\\frac{1}{\\rho c_p} \\, 2\\pi \\,\\text{d}s,
used for time-stepping
.. math::
u' = F(u).
"""
rho_cp = rho * cp
F0 = kappa * r * dot(grad(u), grad(v / rho_cp)) * 2 * pi * my_dx
# F -= dot(b, grad(u)) * v * 2*pi*r * dx_workpiece(0)
if convection is not None:
c = as_vector([convection[0], convection[1]])
F0 += dot(c, grad(u)) * v * 2 * pi * r * my_dx
# Joule heat
F0 -= source * v / rho_cp * 2 * pi * r * my_dx
# Neumann boundary conditions
for k, n_grad_T in neumann_bcs.items():
F0 -= r * kappa * n_grad_T * v / rho_cp * 2 * pi * my_ds(k)
# Robin boundary conditions
for k, value in robin_bcs.items():
alpha, u0 = value
F0 -= r * kappa * alpha * (u - u0) * v / rho_cp * 2 * pi * my_ds(k)
if stabilization == "supg":
# Add SUPG stabilization.
assert convection is not None
# TODO u_t?
R = (
-div(kappa * r * grad(u)) / rho_cp * 2 * pi
+ dot(c, grad(u)) * 2 * pi * r
- source / rho_cp * 2 * pi * r
)
mesh = v.function_space().mesh()
element_degree = v.ufl_element().degree()
tau = stab.supg(mesh, convection, kappa, element_degree)
F0 += R * tau * dot(convection, grad(v)) * my_dx
else:
assert stabilization is None
return F0 | 45fd3f2933aaddf0135478801a5b3d95988f0032 | 1,141 |
import requests
def check_radarr():
"""
    Connects to an instance of Radarr and returns a tuple describing the instance's status.
Returns:
(str) an instance of the Status enum value representing the status of the service
(str) a short descriptive string representing the status of the service
"""
try:
req = requests.get('{}/api/system/status?apikey={}'.format(paths['Radarr'], keys['Radarr']), timeout=0.2)
req.raise_for_status()
except (requests.ConnectionError, requests.HTTPError, requests.Timeout):
return Status.ERROR.value, "NoAPI"
try:
data = req.json()
except ValueError:
return Status.ERROR.value, "BadJSON"
if data['version']:
return Status.ACTIVE.value, "Online"
else:
return Status.ERROR.value, "BadAPI" | f078ba526e0fb23dad323db92e9f6ac861da4bf0 | 1,142 |
def reversebits5(max_bits, num):
""" Like reversebits4, plus optimizations regarding leading zeros in
original value. """
rev_num = 0
shifts = 0
while num != 0 and shifts < max_bits:
rev_num |= num & 1
num >>= 1
rev_num <<= 1
shifts += 1
rev_num >>= 1
rev_num <<= (max_bits - shifts)
return rev_num | ada43721780d512cda73c30d0279216b709501fc | 1,143 |
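A bit-level example for reversebits5, assuming the function is in scope:
print(bin(reversebits5(8, 0b00000110)))  # '0b1100000' -> 0b01100000, the 8-bit reversal of 0b00000110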
def rescale(img, thresholds):
"""
Linear stretch of image between two threshold values.
"""
return img.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0]) | 76d5f56384f408e57161848ded85142e68296258 | 1,144 |
import numpy as np
from sklearn.utils import check_random_state
def X_n120() -> np.ndarray:
"""
Fixture that generates a Numpy array with 120 observations. Each
observation contains two float values.
:return: a Numpy array.
"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
return X | 6c746c759c1113611ee19acfae5326de615821a8 | 1,145 |
import json
import os
def get_shp(shp_str):
"""
Return a shapely geometry in WGS84 lon/lat
input: shp_str - a string corresponding to an iso-3166-1 or -2 administrative area for admin-level 1 (countries) and -2 (states/provinces) respectively
"""
if len(shp_str.split('-'))>1:
load_fts = json.load(open(os.path.join(os.getcwd(),'data','ne_10m_admin_1_states_provinces.geojson'),'r'))
select_fts = [ft for ft in load_fts['features'] if ft['properties']['iso_3166_2']==shp_str]
else:
load_fts = json.load(open(os.path.join(os.getcwd(),'data','ne_10m_admin_0_countries.geojson'),'r'))
select_fts = [ft for ft in load_fts['features'] if ft['properties']['ISO_A2']==shp_str]
all_shps = [geometry.shape(ft['geometry']) for ft in select_fts]
return reduce2mp(flatten_polys(all_shps)) | 70916481e379f4d7502abef2e0d7625e52800336 | 1,146 |
def int_to_bytes(n: uint64, length: uint64) -> bytes:
"""
Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
"""
return n.to_bytes(length, ENDIANNESS) | 135260207a65cff99770b8edc1cae3848f888434 | 1,147 |
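A minimal sketch of int_to_bytes; ENDIANNESS and the uint64 alias are module-level definitions not shown here, so an assumed value is used (this only works alongside the real module definitions):
ENDIANNESS = "little"  # assumed; the real constant is defined elsewhere in the spec module
print(int_to_bytes(16, 8).hex())  # '1000000000000000'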
import string
def upper_to_title(text, force_title=False):
"""Inconsistently, NiH has fields as all upper case.
Convert to titlecase"""
if text == text.upper() or force_title:
text = string.capwords(text.lower())
return text | 939515204b841c5443c5767da20712dff684d286 | 1,148 |
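Usage sketch for upper_to_title, assuming the function is in scope:
print(upper_to_title("NATIONAL INSTITUTES OF HEALTH"))  # 'National Institutes Of Health'
print(upper_to_title("Mixed case stays as-is"))         # unchanged unless force_title=True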
def pairwise_negative(true, pred):
"""Return p_num, p_den, r_num, r_den over noncoreferent item pairs
    As used in calculating BLANC (see Luo, Pradhan, Recasens and Hovy (2014)).
>>> pairwise_negative({1: {'a', 'b', 'c'}, 2: {'d'}},
... {1: {'b', 'c'}, 2: {'d', 'e'}})
(2, 4, 2, 3)
"""
true_pairs = _positive_pairs(values(true))
pred_pairs = _positive_pairs(values(pred))
n_pos_agreements = len(true_pairs & pred_pairs)
true_mapping = sets_to_mapping(true)
pred_mapping = sets_to_mapping(pred)
extra_mentions = keys(true_mapping) ^ keys(pred_mapping)
disagreements = {p for p in true_pairs ^ pred_pairs
if p[0] not in extra_mentions
and p[1] not in extra_mentions}
n_common_mentions = len(keys(true_mapping) & keys(pred_mapping))
n_neg_agreements = (_triangle(n_common_mentions) - n_pos_agreements -
len(disagreements))
# Total number of negatives in each of pred and true:
p_den = _triangle(len(pred_mapping)) - len(pred_pairs)
r_den = _triangle(len(true_mapping)) - len(true_pairs)
return n_neg_agreements, p_den, n_neg_agreements, r_den | 64ff19dc861abe9fbc68412a61906b54439aa864 | 1,149 |
def reptile_resurgence_links(tar_url, max_layer, max_container="", a_elem="a", res_links=[], next_url="", callback=None):
"""
    Multi-level crawler link mining: recursively extract links from the target URL, layer by layer.
    Parameters: target URL | maximum number of layers | crawl container selector | selector for the <a> tags to follow | internal use, accumulated result list | internal use, next target URL
"""
if next_url != "" and next_url[:4] in 'http':
res_links.append(next_url)
if max_layer <= 0:
return res_links
rep = init_reptile(tar_url)
document = rep['document']
    # Focus the crawl on a specific region of the page; with this approach it is recommended to crawl only one layer
container_tags = document.find(max_container).items()
for tag1 in container_tags:
children_tags = tag1.children(a_elem).items()
for tag2 in children_tags:
            # A callback can be invoked here, which helps reduce the number of requests
            if callback is not None:
callback(comp_http_url(tar_url, tag2.attr('href')))
reptile_resurgence_links(
tar_url, max_layer - 1,
max_container=max_container,
res_links=res_links,
next_url=comp_http_url(tar_url, tag2.attr('href'))
)
    # After crawling, every discovered link has been collected
return res_links | a0fe56f4d1b0cd67b1918bf1db54cda6400fc2ca | 1,150 |
from typing import Tuple
def random_uniform(seed_tensor: Tensor,
shape: Tuple[int, ...],
low: float = 0.0,
high: float = 1.0,
dtype: dtypes.dtype = dtypes.float32):
"""
Randomly sample from a uniform distribution with minimum value `low` and maximum value `high`.
Note: not compatible with `IPUModel`.
Args:
seed_tensor (Tensor):
Used to seed the probability distribution. Must have data type uint32 and shape (2,).
shape (Tuple[int, ...]):
Shape of output tensor
low (float, optional):
Minimum value. Defaults to 0.0.
high (float, optional):
Maximum value. Defaults to 1.0.
dtype (dtypes.dtype, optional):
Data type of output tensor. Defaults to dtypes.float32.
Returns:
Tensor: tensor with elements sampled from a uniform distribution.
"""
ctx = get_current_context()
g = ctx.graph
pb_g = g._pb_graph
check_in_graph(g, seed_tensor)
settings = ctx._get_op_settings('random_uniform')
opid = _ir.OperatorIdentifier("ai.onnx", "RandomUniform", 1,
_ir.NumInputs(1, 1), 1)
op = pb_g.createConnectedOp_RandomUniformOp(
{0: seed_tensor.id},
{0: g._create_tensor_id("random_uniform_out")},
shape_=shape,
low_=low,
high_=high,
dataType_=convert_optional_dtype(dtype),
opid=opid,
settings=settings,
)
return Tensor._from_pb_tensor(op.outTensor(0)) | 6ec691faa5e35788df8f1b0be839727163271ee8 | 1,151 |
from shapely import wkt
import geopandas as gpd
def pipelines_as_gdf():
"""
Return pipelines as geodataframes
"""
def wkt_loads(x):
try:
return wkt.loads(x)
except Exception:
return None
df_fossil_pipelines = load_fossil_pipelines().query("route==route")
# Manual transform to line string:
# Input 43.5995, 16.3946: 43.6098, 16.5395:
# Output: LINESTRING (30 10, 10 30, 40 40)
df_fossil_pipelines['route'] = 'LINESTRING (' + df_fossil_pipelines['route'].str.replace(',', '').str.replace(':', ',') + ')'
df_fossil_pipelines['route'] = df_fossil_pipelines['route'].apply(wkt_loads)
return gpd.GeoDataFrame(df_fossil_pipelines, geometry=df_fossil_pipelines['route']) | b8f8817111e061db4160f524f13b005f6e8d8a3f | 1,152 |
def historico(
historia="",sintomas="",medicamentos=""
):
"""Histótia: Adicionar os relatos de doenças anteriores do paciente,\n incluindo sintomas antigos e histórico de doenças familiares
\n Sintomas: Descrever os atuais sintomas do paciente
\n Medicamentos: Remédios e tratamentos usados durante o tratamento geral do paciente."""
historia = str(
input(
"Digite o histórico de vida do paciente: "
)
)
sintomas = str(
input( "Digite os sintomas do paciente: " )
)
medicamentos = str(
input("Digite o medicamento a ser usado e a dosagem: " )
)
return historia, sintomas, medicamentos | a5bdb6cc6d13c73845650ec8fcd1d18fc1e4feb2 | 1,153 |
def plot_beam_ts(obs, title=None, pix_flag_list=[], reg_interest=None,
plot_show=False, plot_save=False, write_header=None,
orientation=ORIENTATION):
"""
plot time series for the pipeline reduction
:param obs: Obs or ObsArray or list or tuple or dict, can be the object
containing the data to plot, or list/tuple of objects, or dict in the
form of {key: obs} or {key: (obs, kwargs)} or {key: (obs, obs_yerr)} or
{key: (obs, obs_yerr, kwargs)} or {key: [obs, kwargs]}, in which case
the dict key will be the label in legend, obs and obs_yerr is Obs or
ObsArray objects, and kwargs is passed to FigArray.scatter() if the dict
iterm is tuple or FigArray.plot() if it's list, the items in the
tuple/list determined based on type, and if obs_yerr is present,
FigArray.errorbar() will also be called with kwargs
:type obs: Union[Obs, ObsArray, list, tuple, dict]
:param str title: str, title of the figure, will use the first available
obs_id if left None
:param list pix_flag_list: list, [[spat, spec], ...] or [[row, col], ...] of
the flagged pixels, shown in grey shade
:param dict reg_interest: dict, indicating the region of array to plot,
passed to ArrayMap.take_where(); will plot all the input pixels if
left None
:param bool plot_show: bool, flag whether to show the figure with plt.show()
:param bool plot_save: bool, flag whether to save the figure
:param str write_header: str, path to the file header to write the figure to,
the figure will be saved as {write_header}.png, only matters if
plot_save=True; will use the first available obs_id if left None
:param str orientation: str, the orientation of the figure, passed to
FigArray.init_with_array_map
:return: FigArray, object of the figure
:rtype: FigArray
"""
if isinstance(obs, (Obs, ObsArray, np.ndarray)):
obs0 = obs
elif isinstance(obs, dict):
obs0 = list(obs.values())[0]
if isinstance(obs0, (list, tuple)):
obs0 = obs0[0]
else:
obs0 = obs[0]
array_map = ObsArray(obs0).array_map_
if title is None:
title = obs0.obs_id_
if write_header is None:
write_header = obs0.obs_id_
if isinstance(obs0, (Obs, ObsArray)) and (not obs0.ts_.empty_flag_):
obs_t_len = obs0.t_end_time_ - obs0.t_start_time_
x_size = max((obs_t_len / units.hour).to(1).value / 2,
FigArray.x_size_)
else:
x_size = FigArray.x_size_
fig = FigArray.init_by_array_map(array_map if reg_interest is None else
array_map.take_where(**reg_interest),
orientation=orientation, x_size=x_size)
if isinstance(obs, (Obs, ObsArray, np.ndarray)):
fig.scatter(obs)
elif isinstance(obs, dict):
for key in obs:
if isinstance(obs[key], (list, tuple)):
plot_func = fig.scatter if isinstance(obs[key], tuple) else \
fig.plot
if len(obs[key]) > 1:
if isinstance(obs[key][1], (Obs, ObsArray)):
kwargs = obs[key][2] if len(obs[key]) > 2 else {}
plot_func(obs[key][0], **kwargs)
fig.errorbar(obs[key][0], yerr=obs[key][1], label=key,
**kwargs)
else:
plot_func(obs[key][0], label=key, **obs[key][1])
else:
plot_func(obs[key][0], label=key)
else:
fig.scatter(obs[key], label=key)
fig.legend(loc="upper left")
if fig.twin_axs_list_ is not None:
fig.legend(twin_axes=True, loc="lower right")
else:
for obs_i in obs:
fig.scatter(obs_i)
fig.imshow_flag(pix_flag_list=pix_flag_list)
fig.set_labels(obs0, orientation=orientation)
fig.set_title(title)
if plot_save:
fig.savefig("%s.png" % write_header)
if plot_show:
plt.show()
return fig | eb9cd76d3e6b2a722c2d26dd7371cd2ed1716264 | 1,154 |
import sys
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
args, varargs, varkw, defaults = getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg,subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg,str):
return arg in arg2value
return arg in assigned_tuple_params
if ismethod(func) and func.im_self is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.im_self,) + positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
for arg, value in zip(args, positional):
assign(arg, value)
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
if varkw:
if num_pos:
# XXX: We should use num_pos, but Python also uses num_total:
raise TypeError('%s() takes exactly 0 arguments '
'(%d given)' % (f_name, num_total))
else:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
if defaults: # fill in any missing values with the defaults
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
try:
unicode
except NameError:
pass
else:
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value | a6343d9f5fef418a2ba4a977357c4d59ba77984c | 1,155 |
def get_qos():
"""Gets Qos policy stats, CLI view"""
return render_template('qos.html', interfaces=QueryDbFor.query_interfaces(device),
interface_qos=QueryDbFor.query_qos(device)) | 5e2557738aca2d67561961e10cd7229345cfd96c | 1,156 |
from typing import List
from typing import Callable
from typing import Any
def create_multiaction(action_name: str, subactions: List[str], description: str = '') -> Callable[[Context, Any], Any]:
"""Creates and registers an action that only executes the subactions in order.
Dependencies and allowation rules are inferred from subactions.
Subactions must be defined first, because the function uses registered definitions!
    Arguments
--------
action_name
Name of the new action that acts as a key
subactions
The subactions in the execution order.
The subactions must be registered before the multiaction.
description
Human readable action description.
Returns
-------
function
The combination of subaction functions.
"""
registerations = [registered_actions[sa] for sa in subactions]
affects_database = any([r.affects_database for r in registerations])
baseactions = {
baseaction for r in registerations for baseaction in r.baseactions}
dependencies = {
dep for r in registerations for dep in r.dependencies} - baseactions
def func(*args, **kwargs):
returns = [r.function(*args, **kwargs) for r in registerations]
return returns
func.__doc__ = description
ActionRegisteration(func, action_name, affects_database,
dependencies, baseactions)
return func | 920f6696608e120e1218618dd96daa691e95e383 | 1,157 |
import numpy
def phase_amp_seq_to_complex():
"""
This constructs the function to convert from phase/magnitude format data,
assuming that data type is simple with two bands, to complex64 data.
Returns
-------
callable
"""
def converter(data):
if not isinstance(data, numpy.ndarray):
raise TypeError(
_requires_array_text.format(type(data)))
        if len(data.shape) != 3 or data.shape[2] != 2:
raise ValueError(_requires_3darray_text.format(data.shape))
if data.dtype.name not in ['uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError(
'Requires a numpy.ndarray of unsigned integer type.')
bit_depth = data.dtype.itemsize*8
out = numpy.zeros(data.shape[:2] + (1, ), dtype=numpy.complex64)
mag = data[:, :, 0]
theta = data[:, :, 1]*(2*numpy.pi/(1 << bit_depth))
out[:, :, 0].real = mag*numpy.cos(theta)
out[:, :, 0].imag = mag*numpy.sin(theta)
return out
return converter | ee6a88df5c226115a05a4e57501df73837f98ef5 | 1,158 |
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
def Regress_model(x_train,y_train,x_test=None,y_test=None,degree=2,test_size=0.1):
"""[summary]
DESCRIPTION :-
Regressin Model selection.
This Model will compare all the different Regression models, and will return model with highest Rsq value.
It also shows performance graph comaring the models.
PARAMETERS :-
x_train,x_test,y_train,y_test = are the data after tain test split
test_size = 10 % of original data is used for testing
degree = degree of polinomial regresoin (default = 2)
Returns:
Model with heighest Rsq.
Along with model compaing plot.
"""
print('Regression Model Selection...')
if x_test is None or y_test is None:
x_train,x_test,y_train,y_test = train_test_split(x_train,y_train,random_state=0,test_size=test_size)
print('\nLinear Regression ...')
lr=LinearRegression()
lr.fit(x_train,y_train)
y_pred_lir = lr.predict(x_test)
lr_pred=r2_score(y_test, y_pred_lir)
print('Rsq :',lr_pred )
print('\nPolinomial Regression ...')
polr=PolynomialFeatures(degree)
x_polr=polr.fit_transform(x_train)
polr.fit(x_polr,y_train)
lr.fit(x_polr,y_train)
y_pred_poly=lr.predict(polr.fit_transform(x_test))
    poly_pred=r2_score(y_test,y_pred_poly)
print('Rsq :',poly_pred )
print('\nSVM Model ...')
regressor = SVR(kernel = 'rbf')
regressor.fit(x_train, y_train)
y_pred=regressor.predict(x_test)
svr_pred=r2_score(y_test,y_pred)
print('Rsq :',svr_pred)
    print('\nDecision Tree ...')
d_tree=DecisionTreeRegressor(random_state=1)
d_tree.fit(x_train,y_train)
y_pred=d_tree.predict(x_test)
d_tree_acc=r2_score(y_test,y_pred)
print('Rsq : ',d_tree_acc)
print('\nRandom Forest ...')
rand = RandomForestRegressor(n_estimators = 100, random_state = 1)
rand.fit(x_train,y_train)
y_pred=rand.predict(x_test)
ran_for_acc=r2_score(y_test,y_pred)
print('Rsq :',ran_for_acc)
l=[lr_pred,poly_pred,svr_pred,d_tree_acc,ran_for_acc]
x_label=['Lin_Reg','Poly_Reg','Svm','Des_Tr','Rand_For']
ma=l.index(max(l))
if ma==0:
model=lr
elif(ma==1):
model=polr
elif(ma==2):
model=regressor
elif(ma==3):
model=d_tree
else:
model=rand
xx=np.arange(0,5)
plt.plot(xx,l)
plt.ylabel('Rsq')
plt.title('Regression Models')
    plt.xticks(xx,x_label)
plt.show()
return model | 9987edf48821d3c1a7164b0d5675da96b6319aa6 | 1,159 |
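# Hedged usage sketch for Regress_model; the synthetic dataset below is an
# illustrative assumption and only exercises the model-selection routine end to end.
from sklearn.datasets import make_regression
X, y = make_regression(n_samples=200, n_features=4, noise=10.0, random_state=0)
best_model = Regress_model(X, y, degree=2, test_size=0.2)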
import nonebot
async def get_group_list_all():
"""
    Get all groups, whether authorized or not; the return value is the raw type (a list).
"""
bot = nonebot.get_bot()
self_ids = bot._wsr_api_clients.keys()
for sid in self_ids:
group_list = await bot.get_group_list(self_id=sid)
return group_list | 2eae159381c48451fb776c8c46c15212aa431689 | 1,160 |
import emcee
import numpy as np
import os
def emcee_schools_model(data, draws, chains):
"""Schools model in emcee."""
chains = 10 * chains # emcee is sad with too few walkers
y = data["y"]
sigma = data["sigma"]
J = data["J"] # pylint: disable=invalid-name
ndim = J + 2
pos = np.random.normal(size=(chains, ndim))
pos[:, 1] = np.absolute(pos[:, 1]) # pylint: disable=unsupported-assignment-operation
if emcee_version() < 3:
sampler = emcee.EnsembleSampler(chains, ndim, _emcee_lnprob, args=(y, sigma))
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws)
else:
here = os.path.dirname(os.path.abspath(__file__))
data_directory = os.path.join(here, "saved_models")
filepath = os.path.join(data_directory, "reader_testfile.h5")
backend = emcee.backends.HDFBackend(filepath) # pylint: disable=no-member
backend.reset(chains, ndim)
# pylint: disable=unexpected-keyword-arg
sampler = emcee.EnsembleSampler(
chains, ndim, _emcee_lnprob, args=(y, sigma), backend=backend
)
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws, store=True)
return sampler | 06553352a4106ab8e6850950f4aa5b97d4d7c0b7 | 1,161 |
import numpy as np
def _agefromarr(arr, agelist):
"""Measures the mean age map of a timeslice array.
:param arr: A timeslice instance's data array.
:param agelist: List of age sampling points of array.
:return:
    :agemap: Light- or mass-weighted (depending on weight_type in the timecube()) mean age of the slice_obj at each spaxel, in years.
"""
arr = np.sum(arr, axis=3) # Remove metallicities
arrshape = np.shape(arr)
arw = np.expand_dims(np.log10(agelist), 0)
arw = np.expand_dims(arw, 0)
    arw = np.pad(arw, ((0,arrshape[0]-1),(0,arrshape[1]-1),(0,0)), 'maximum')
return 10**(np.sum(arw*arr, axis=2)/np.sum(arr, axis=2)) | ba1d31e94661022b8bed05cc36085ed0dbc38c94 | 1,162 |
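# Minimal sketch for _agefromarr, assuming the timeslice array has shape
# (ny, nx, n_ages, n_metallicities); the random weights are illustrative.
agelist = np.array([1e8, 1e9, 1e10])
weights = np.random.rand(2, 2, agelist.size, 4)
age_map = _agefromarr(weights, agelist)
print(age_map.shape)  # (2, 2) map of mean ages in years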
def _build_timecode(time, fps, drop_frame=False, additional_metadata=None):
"""
Makes a timecode xml element tree.
.. warning:: The drop_frame parameter is currently ignored and
auto-determined by rate. This is because the underlying otio timecode
conversion assumes DFTC based on rate.
:param time: The :class: `opentime.RationalTime` for the timecode.
:param fps: The framerate for the timecode.
:param drop_frame: If True, generates drop-frame timecode.
:param additional_metadata: A dictionary with other metadata items like
``field``, ``reel``, ``source``, and ``format``. It is assumed this
dictionary is of the form generated by :func:`_xml_tree_to_dict` when
the file was read originally.
:return: The ``timecode`` element.
"""
if additional_metadata:
# Only allow legal child items for the timecode element
filtered = {
k: v for k, v in additional_metadata.items()
if k in ("field", "reel", "source", "format")
}
tc_element = _dict_to_xml_tree(filtered, "timecode")
else:
tc_element = cElementTree.Element("timecode")
tc_element.append(_build_rate(fps))
rate_is_not_ntsc = (tc_element.find('./rate/ntsc').text == "FALSE")
if drop_frame and rate_is_not_ntsc:
tc_fps = fps * (1000 / 1001.0)
else:
tc_fps = fps
# Get the time values
tc_time = opentime.RationalTime(time.value_rescaled_to(fps), tc_fps)
tc_string = opentime.to_timecode(tc_time, tc_fps, drop_frame)
_append_new_sub_element(tc_element, "string", text=tc_string)
frame_number = int(round(time.value))
_append_new_sub_element(
tc_element, "frame", text="{:.0f}".format(frame_number)
)
drop_frame = (";" in tc_string)
display_format = "DF" if drop_frame else "NDF"
_append_new_sub_element(tc_element, "displayformat", text=display_format)
return tc_element | 9d85287795cf17c1d665b3620d59679183943bd7 | 1,163 |
def transform(nodes, fxn, *args, **kwargs):
"""
Apply an arbitrary function to an array of node coordinates.
Parameters
----------
nodes : numpy.ndarray
An N x M array of individual node coordinates (i.e., the
x-coords or the y-coords only)
fxn : callable
The transformation to be applied to the whole ``nodes`` array
args, kwargs
Additional positional and keyword arguments that are passed to
``fxn``. The final call will be ``fxn(nodes, *args, **kwargs)``.
Returns
-------
transformed : numpy.ndarray
The transformed array.
"""
return fxn(nodes, *args, **kwargs) | edc487b7f1b83f750f868ee446ecf2676365a214 | 1,164 |
import glob
import numpy as np
from netCDF4 import Dataset
def create_input(
basedir, pertdir, latout=False, longwave=False, slc=slice(0, None, None)
):
"""Extract variables from a given directory and places into dictionaries.
It assumes that base and pert are different directories and only one
experiment output is present in each directory.
Slicing into time chunks is allowed and providing the filenames
follow CMIP6 convention they should be concatenated in the correct
order.
Variables required are rsdt, rsus, rsds, clt, rsdscs, rsuscs, rsut, rsutcs
An error will be raised if variables are not detected.
Parameters
----------
basedir : str
Directory containing control climate simulation variables
pertdir : str
Directory containing perturbed climate simulation variables
latout : bool, default=False
if True, include array of latitude points in the output.
longwave : bool, default=False
if True, do the longwave calculation using cloud radiative effect, in
addition to the shortwave calculation using APRP.
slc: `slice`, optional
Slice of indices to use from each dataset if not all of them.
Returns
-------
    base : dict of array_like
        variables needed for APRP from the control simulation
    pert : dict of array_like
        variables needed for APRP from the perturbed simulation
    lat : array_like, optional
        latitude points relating to axis 1 of the arrays (only returned if ``latout`` is True)
"""
base = {}
pert = {}
if longwave:
varlist = [
"rsdt",
"rsus",
"rsds",
"clt",
"rsdscs",
"rsuscs",
"rsut",
"rsutcs",
"rlut",
"rlutcs",
]
else:
varlist = ["rsdt", "rsus", "rsds", "clt", "rsdscs", "rsuscs", "rsut", "rsutcs"]
def _extract_files(filenames, var, directory):
if len(filenames) == 0:
raise RuntimeError(
f"No variables of name {var} found in directory {directory}"
)
for i, filename in enumerate(filenames):
ncfile = Dataset(filename)
invar = ncfile.variables[var][slc, ...]
lat = ncfile.variables["lat"][:]
ncfile.close()
if i == 0:
outvar = invar
else:
# This works for me with CMIP6 netcdfs, but we don't have a small
# example to test with
outvar = np.append(outvar, invar, axis=0) # pragma: nocover
return outvar, lat
for var in varlist:
filenames = sorted(glob.glob(f"{basedir}/{var}_*.nc"))
base[var], lat = _extract_files(filenames, var, basedir)
filenames = sorted(glob.glob(f"{pertdir}/{var}_*.nc"))
pert[var], lat = _extract_files(filenames, var, pertdir)
if latout:
return base, pert, lat
return base, pert | 1bfda243bf6e11eee38cfb4311767c78f79589c2 | 1,165 |
import logging
from Bio import Entrez
def get_tax_proteins(tax_id, tax_prot_dict, prot_id_dict, gbk_dict, cache_dir, args):
"""Get the proteins linked to a tax id in NCBI, and link the tax id with the local db protein ids
:param tax_id: str, NCBI tax db id
:param tax_prot_dict: {ncbi tax id: {local db protein ids}}
:param prot_id_dict: dict {protein ncbi id: prot acc}
:param gbk_dict: dict, {prot acc: local db id}
:param cache_dir: Path, path to cache dir
:param args: cmd-line args parser
Return dict {tax_id: {local db protein ids}} and bool (True=success, False=failed)
"""
logger = logging.getLogger(__name__)
try:
with entrez_retry(
args.retries,
Entrez.elink,
id=tax_id,
db="Protein",
dbfrom="Taxonomy",
linkname="taxonomy_protein",
) as handle:
tax_links = Entrez.read(handle, validate=False)
except (AttributeError, TypeError, RuntimeError) as err:
logger.warning(f"Failed to link NCBI tax id to NCBI Protein db for tax id {tax_id}\n{err}")
return tax_prot_dict, False
try:
tax_prot_dict[tax_id]
except KeyError:
tax_prot_dict[tax_id] = set()
for result in tax_links:
for item in result['LinkSetDb']:
links = item['Link']
for link in links:
linked_prot_id = link['Id']
# check if from the local database
try:
prot_ver_acc = prot_id_dict[linked_prot_id]
except KeyError:
continue
try:
prot_local_db_id = gbk_dict[prot_ver_acc]
except KeyError:
                    logger.error(
                        "Did not previously retrieve data from the local "
                        f"db for {prot_ver_acc}\n"
                        "Caching and skipping protein"
                    )
                    with open((cache_dir/"failed_local_db_retrieval.out"), "a") as fh:
                        fh.write(f"{prot_ver_acc}\n")
continue
tax_prot_dict[tax_id].add(prot_local_db_id)
return tax_prot_dict, True | d3aaa32adbc1ad66e0a6bb2c615ffc0f33df9f00 | 1,166 |
from sklearn.feature_extraction.text import CountVectorizer
def define_features_vectorizer(columns, training_data, testing_data = None, ngramrange=(1,1)):
"""
Define the features for classification using CountVectorizer.
Parameters
----------
    columns: String or list of strings if using multiple columns
            Names of columns of df that are used for training the classifier
training_data: Pandas dataframe
The dataframe containing the training data for the classifier
testing_data: Pandas dataframe
The dataframe containing the testing data for the classifier
ngramrange: tuple (min_n, max_n), with min_n, max_n integer values
range for ngrams used for vectorization
Returns
-------
vectorizer: sklearn CountVectorizer
CountVectorizer fit and transformed for training data
training_features: sparse matrix
Document-term matrix for training data
testing_features: sparse matrix
Document-term matrix for testing data
"""
    # initialise CountVectorizer and fit-transform it to the data
vectorizer = CountVectorizer(ngram_range = ngramrange)
vectorizer.fit_transform(training_data[columns].values)
    # build matrices for training_features and testing_features
training_features=vectorizer.transform(training_data[columns].values)
if testing_data is not None:
testing_features=vectorizer.transform(testing_data[columns].values)
else:
testing_features = None
return vectorizer, training_features, testing_features | 9c29847620ba392004efdeac80f607bc86db2780 | 1,167 |
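# Hedged usage sketch for define_features_vectorizer; the "text" column and the
# toy DataFrames are illustrative assumptions.
import pandas as pd
train_df = pd.DataFrame({"text": ["good movie", "bad movie", "great plot"]})
test_df = pd.DataFrame({"text": ["good plot"]})
vectorizer, X_train, X_test = define_features_vectorizer("text", train_df, test_df)
print(X_train.shape, X_test.shape)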
import tensorflow as tf
def resolve_4d_input_blob(
hparams: tf.contrib.training.HParams,
runtime_bs: tf.Tensor,
features: dict,
feature_columns: list,
info_log: bool = False,
) -> tf.Tensor:
"""Convert a dict feature input to a 4D input cube with dimension (NHWC).
this function is experimental.
Arguments:
hparams {tf.contrib.training.HParams} -- hyper parameters
runtime_bs {tf.Tensor} -- the batch_size in runtime
features {dict} -- the dict of feature
feature_columns {list} -- the list of feature columns
Keyword Arguments:
info_log {bool} -- True to enable debugging info logging (default: {False})
Returns:
tf.Tensor -- The created 4D input Tensor
"""
# initialize vars
frame_shape = resolve_simple_number_array(hparams, "frameShape")
batch_input_shape_tensor = tf.convert_to_tensor([runtime_bs] + frame_shape)
padding = resolve_simple_number_array(hparams, "padding")
# Process time-series and non-time-series features one by one
feature_list = []
for key in sorted(features):
cur_feature = features[key]
is_array, _ = check_array_feature(cur_feature)
# build ts feature planes
if is_array:
# padding
if sum(padding) > 0:
padding_tensor = tf.constant([[0, 0], padding])
cur_feature = tf.pad(
cur_feature, padding_tensor, mode="CONSTANT", constant_values=0
)
# reshape
cur_feature = tf.reshape(cur_feature, batch_input_shape_tensor)
# cast to float
if cur_feature.dtype != tf.float32:
cur_feature = tf.cast(cur_feature, dtype=tf.float32)
# add to list with added channel dim (NHWC)
feature_list.append(cur_feature[:, :, :, tf.newaxis])
# log ts feature
if info_log:
tf.logging.info("{}: {}".format(key, cur_feature))
# build non-ts feature planes (Numerical Features)
# note that we treat SparseTensor and Tensor with dtype=string as categorical features
elif type(cur_feature) is tf.Tensor and cur_feature.dtype.name != "string":
# tiling
cur_feature = tf.tile(
cur_feature[:, tf.newaxis], [1, frame_shape[0] * frame_shape[1]]
)
# reshape
cur_feature = tf.reshape(cur_feature, batch_input_shape_tensor)
# cast to float
if cur_feature.dtype != tf.float32:
cur_feature = tf.cast(cur_feature, dtype=tf.float32)
# add to list with added channel dim (NHWC)
feature_list.append(cur_feature[:, :, :, tf.newaxis])
# log numerical feature
if info_log:
tf.logging.info("{}: {}".format(key, cur_feature))
# build non-ts feature planes (Categorical Features)
else:
cur_feature = tfc.input_layer(
{key: cur_feature}, find_feature_column(key, feature_columns)
)
# padding
cur_feature = tf.tile(
cur_feature[:, :, tf.newaxis], [1, 1, frame_shape[0] * frame_shape[1]]
)
# split
cur_features = tf.split(
cur_feature, axis=1, num_or_size_splits=cur_feature.shape[1]
)
# process each feature plane
for entry in cur_features:
# reshape
entry = tf.reshape(entry, batch_input_shape_tensor)
# cast to float
if entry.dtype != tf.float32:
entry = tf.cast(entry, dtype=tf.float32)
# add to list with added channel dim (NHWC)
feature_list.append(entry[:, :, :, tf.newaxis])
# log categorical feature plane
if info_log:
tf.logging.info("{}: {}".format(key, entry))
# channel stacking
data = tf.concat(feature_list, -1)
# interpolation
interp = resolve_simple_number_array(hparams, "interp")
if interp is not None and interp != frame_shape:
data = tf.image.resize_images(data, tf.convert_to_tensor(interp))
return data | d822d1b829454056d4e1bde878d2c25475d049a1 | 1,168 |
import os
def GetPostgreSQLLoginInfo():
"""
* Get database login information from pem file
"""
    passfile = '/mnt/data/other/pem/sinnud_pg.dat'
    if not os.path.isfile(passfile):
        return (False, None)
    with open(passfile, 'r') as f:
        passinfo = f.read().strip()
    (host, user, dbname, password, port) = passinfo.split()
    return (True, (host, user, dbname, password, port)) | 8a37306f05443b6b92b07cf104a6564ad3a71625 | 1,169
from dash import dcc, html  # assumed: Dash >= 2 provides the html/dcc components used below
import plotly.graph_objs as go
def show_graph_unique_not_callback(n_clicks, input_box):
""" Function which is called by a wrapped function in another module. It takes
user input in a text box, returns a graph if the query produces a hit in Solr.
Returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (unique occurrences) of all terms which have results
from Solr """
# Store the layout with the appropriate title and y axis labels for the graph
layout_unique = go.Layout(
title = 'Percentage of papers containing chosen entity mention(s) per Month',
xaxis = {'title': 'Publication date', 'tickformat': '%b %y', 'tick0': '2007-04-30',
'dtick': 'M2', 'range': ['2007-03-25', '2018-01-25'], 'titlefont': {'size': 20}, 'tickfont': {'size': 15}},
yaxis = {'title': 'Percentage of papers with entity mention', 'ticksuffix': '%', 'titlefont': {'size': 19}, 'tickfont': {'size': 18}},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text'],
'size': 15
},
showlegend=True,
legend = {'font': {'size': 18}, 'x': 0, 'y': -0.5, 'orientation': 'h'}
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences unique.
input_list = input_box.lower().split(',')
data_list_unique = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
input_val = input_val.strip()
# If the search phrase doesn't start with the wikipedia url, it is a
# noun phrase which has to be converted to a URL
if not input_val.startswith('http://en.wikipedia.org/wiki'):
input_val = convert_phrase_to_url(input_val)
freq_df_total, freq_df_unique = get_aggregated_data(input_val)
if freq_df_unique is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences (unique) goes on the y-axis.
data_list_unique.append(go.Bar(
x = freq_df_unique.index,
y = freq_df_unique.percentage_occurrences,
text = input_val.strip(), # hover text
opacity = 0.7,
name = input_val.strip() # legend text
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_unique == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
# return html.Br()
return not_found_message(notfound_list)
# One or more of the Solr queries returned a result
else:
graph_unique_terms = {'data': data_list_unique, 'layout': layout_unique}
if notfound_list != []:
terms_not_found = not_found_message(notfound_list)
#return terms_not_found, html.Br(),
return terms_not_found, dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
return html.Br(), dcc.Graph(id='uniquefreq', figure= graph_unique_terms) | a53112cf76acd2a02ccd1a251b0a439ea8b06c77 | 1,170 |
def _add_string_datatype(graph, length):
"""Add a custom string datatype to the graph refering.
Args:
graph (Graph): The graph to add the datatype to
        length (int): The maximum length of the string
Returns:
URIRef: The iri of the new datatype
"""
iri = rdflib_cuba[f"_datatypes/STRING-{length}"]
triple = (iri, RDF.type, RDFS.Datatype)
if graph is None or triple in graph:
return iri
graph.add(triple)
# length_triple = (iri, rdflib_cuba._length, Literal(int(length)))
# graph.add(length_triple)
return iri | 65534a58257157c9cf8943b8f4ba3c3a39d8a5b2 | 1,171 |
def get_selected_shipping_country(request):
"""Returns the selected shipping country for the passed request.
    This could either be an explicitly selected country of the current
user or the default country of the shop.
"""
customer = customer_utils.get_customer(request)
if customer:
if customer.selected_shipping_address:
return customer.selected_shipping_address.country
elif customer.selected_country:
return customer.selected_country
return lfs.core.utils.get_default_shop(request).get_default_country() | 330ccf01ed261a3b669589ab3e550147f6086b0b | 1,172 |
def func_item_iterator_next(*args):
"""
func_item_iterator_next(fii, testf, ud) -> bool
"""
return _ida_funcs.func_item_iterator_next(*args) | ce4bb7516354c36fbb3548882ff45c64c8090381 | 1,173 |
def find_score_maxclip(tp_support, tn_support, clip_factor=ut.PHI + 1):
"""
    Returns the score above which true positives are clipped.
Args:
tp_support (ndarray):
tn_support (ndarray):
Returns:
float: clip_score
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.score_normalization import * # NOQA
>>> tp_support = np.array([100, 200, 50000])
>>> tn_support = np.array([10, 30, 110])
>>> clip_score = find_score_maxclip(tp_support, tn_support)
>>> result = str(clip_score)
>>> print(result)
287.983738762
"""
max_true_positive_score = tp_support.max()
max_true_negative_score = tn_support.max()
if clip_factor is None:
clip_score = max_true_positive_score
else:
overshoot_factor = max_true_positive_score / max_true_negative_score
if overshoot_factor > clip_factor:
clip_score = max_true_negative_score * clip_factor
else:
clip_score = max_true_positive_score
return clip_score | 5b51bc8a09f8e7c93e9196bab8e0566c32d31ad9 | 1,174 |
import json
def create_sponsor():
"""
Creates a new sponsor.
---
tags:
- sponsor
summary: Create sponsor
operationId: create_sponsor
requestBody:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Sponsor'
- type: object
multipart/form-data:
schema:
type: object
properties:
sponsor:
deprecated: true
allOf:
- $ref: '#/components/schemas/Sponsor'
- type: object
description: >
Deprecated,
do not use `multipart/form-data`,
use `application/json`.
properties:
encoding:
sponsor:
contentType: application/json
description: Created sponsor Object
required: true
responses:
201:
description: OK
400:
description: Bad request.
409:
description: Sorry, that sponsor already exists.
5XX:
description: Unexpected error.
"""
if "multipart/form-data" in request.content_type:
try:
data = json.loads(request.form.get("sponsor"))
except JSONDecodeError:
raise BadRequest("Invalid JSON sent in sponsor form part.")
elif request.content_type == "application/json":
data = request.get_json()
else:
raise UnsupportedMediaType()
if not data:
raise BadRequest()
try:
sponsor = Sponsor.createOne(**data)
sponsor.save()
except NotUniqueError:
raise Conflict("Sorry, that sponsor already exists.")
except ValidationError:
raise BadRequest()
res = {
"status": "success",
"message": "sponsor was created!"
}
res = make_response(res)
if "multipart/form-data" in request.content_type:
res.headers["Deprecation"] = (
"The use of multipart/form-data is deprecated. ")
if "socials" in data:
res.headers["Deprecation"] = (
"The socials field is deprecated use sponsor_website instead")
return res, 201 | 70d78a05046b2a9e845176838ae85b7c6aef01f5 | 1,175 |
import shutil
def download_or_copy(uri, target_dir, fs=None) -> str:
"""Downloads or copies a file to a directory.
Downloads or copies URI into target_dir.
Args:
uri: URI of file
target_dir: local directory to download or copy file to
fs: if supplied, use fs instead of automatically chosen FileSystem for
uri
Returns:
the local path of file
"""
local_path = download_if_needed(uri, target_dir, fs=fs)
shutil.copy(local_path, target_dir)
return local_path | ac7871adc2784a77246bbe9d1d5ae9c3d8b8443e | 1,176 |
import os
from sys import version
def startup():
""" Starts everything up """
settings = {
'telegram_token': os.environ.get('TELEGRAM_TOKEN'),
'telegram_chat_id': os.environ.get('TELEGRAM_CHAT_ID'),
'gotify_url': os.environ.get('GOTIFY_URL'),
'gotify_token': os.environ.get('GOTIFY_TOKEN'),
'port': int(os.environ.get('PORT', '8899')),
'host': os.environ.get('ADDRESS', '*'),
'telegram_template': os.environ.get('TELEGRAM_TEMPLATE', 'html.j2'),
'gotify_template': os.environ.get('GOTIFY_TEMPLATE', 'markdown.md.j2'),
'null_template': os.environ.get('NULL_TEMPLATE', 'text.j2'),
'exclude_labels': os.environ.get('EXCLUDE_LABELS'),
'notifiers': [],
}
if settings['telegram_token'] and settings['telegram_chat_id']:
settings['notifiers'].append('telegram')
if settings['gotify_url'] and settings['gotify_token']:
settings['notifiers'].append('gotify')
log.info(f"Starting {__package__} {version}, listening on {settings['host']}:{settings['port']}")
return settings | 3b28beaf34144dca6ccad886a22a0dbda3732b79 | 1,177 |
from typing import Dict
from typing import Any
import yaml
def as_yaml(config: Dict[str, Any], **yaml_args: Any) -> str:
"""Use PyYAML library to write YAML file"""
return yaml.dump(config, **yaml_args) | 28c792504d7a6ccd7dbf040d516343e44e072b16 | 1,178 |
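# Minimal sketch for as_yaml: dump a nested configuration dictionary while
# forwarding PyYAML keyword options.
print(as_yaml({"db": {"host": "localhost", "port": 5432}}, default_flow_style=False))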
import os
import linecache
def retrieve(filen,start,end):
"""Retrieve a block of text from a file.
Given the name of a file 'filen' and a pair of start and
end line numbers, extract and return the text from the
file.
This uses the linecache module - beware of problems with
consuming too much memory if the cache isn't cleared."""
text = ""
# Check for consistency and validity of lines
if start < 0 and end < 0 or end < start:
return ""
# Fetch from a file if possible
if os.path.isfile(filen):
try:
for i in range(start,end+1):
text = text+str(linecache.getline(filen,i))
return text
        except Exception:
            print("Exception raised in retrieve method:")
            print("\tSource file = \"" + str(filen) + "\"")
            print("\tStart line = " + str(start))
            print("\tEnd line = " + str(end))
            print("\tCurrent line = " + str(i))
            raise
# Otherwise return nothing
return "" | 1ead50be72c542551b2843e8a7fd59b98106f2ce | 1,179 |
import tensorflow as tf
def L1_Charbonnier_loss(predict, real):
"""
    Loss function.
    Args:
        predict: predicted result
        real: ground truth
    Returns:
        the loss value
"""
eps = 1e-6
diff = tf.add(predict, -real)
error = tf.sqrt(diff * diff + eps)
loss = tf.reduce_mean(error)
return loss | 61b0183bf78914dc405290fc89e2ec875b9adfd7 | 1,180 |
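# Minimal sketch for L1_Charbonnier_loss, assuming TensorFlow 2.x eager execution;
# the sample tensors are illustrative.
pred = tf.constant([[0.5, 0.2], [0.1, 0.9]])
real = tf.constant([[0.4, 0.2], [0.3, 0.8]])
print(L1_Charbonnier_loss(pred, real).numpy())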
def correction_byte_table_h() -> dict[int, int]:
"""Table of the number of correction bytes per block for the correction
level H.
Returns:
dict[int, int]: Dictionary of the form {version: number of correction
bytes}
"""
table = {
1: 17, 2: 28, 3: 22, 4: 16, 5: 22, 6: 28, 7: 26, 8: 26, 9: 24, 10: 28,
11: 24, 12: 28, 13: 22, 14: 24, 15: 24, 16: 30, 17: 28, 18: 28, 19: 26,
20: 28, 21: 30, 22: 24, 23: 30, 24: 30, 25: 30, 26: 30, 27: 30, 28: 30,
29: 30, 30: 30, 31: 30, 32: 30, 33: 30, 34: 30, 35: 30, 36: 30, 37: 30,
38: 30, 39: 30, 40: 30
}
return table | 982f775172ed0fa148f0d618e4c521fc42e3883e | 1,181 |
def stash_rename(node_id, new_name):
"""Renames a node."""
return stash_invoke('rename', node_id, new_name) | 1fd1dd27bcab8db64e2fb39bf4301a3eb3d48035 | 1,182 |
from datetime import datetime
import numpy as np
import quantities as pq
def get_fake_value(attr): # attr = (name, type, [dim, [dtype]])
""" returns default value for a given attribute based on description.py """
if attr[1] == pq.Quantity or attr[1] == np.ndarray:
size = []
for i in range(int(attr[2])):
size.append(np.random.randint(100) + 1)
to_set = np.random.random(size) * pq.millisecond # let it be ms
if attr[0] == 't_start': to_set = 0.0 * pq.millisecond
if attr[0] == 't_stop': to_set = 1.0 * pq.millisecond
if attr[0] == 'sampling_rate': to_set = 10000.0 * pq.Hz
if attr[1] == np.ndarray:
to_set = np.array(to_set, dtype=attr[3])
if attr[1] == str:
to_set = str(np.random.randint(100000))
if attr[1] == int:
to_set = np.random.randint(100)
if attr[1] == datetime:
to_set = datetime.now()
return to_set | 6a732c90946b58cc7be834193692c36c56bd83fc | 1,183 |
def find_x(old_time,omega,new_time):
"""
Compute x at the beginning of new time array.
"""
interp_omega=spline(old_time,omega)
x=interp_omega(new_time[0])**(2./3)
return x | 450af49dca9c8a66dc0b9a37abddb23afd9a9749 | 1,184 |
import struct
def _platformio_library_impl(ctx):
"""Collects all transitive dependencies and emits the zip output.
Outputs a zip file containing the library in the directory structure expected
by PlatformIO.
Args:
ctx: The Skylark context.
"""
name = ctx.label.name
# Copy the header file to the desired destination.
header_file = ctx.actions.declare_file(
_HEADER_FILENAME.format(dirname=name, filename=name))
inputs = [ctx.file.hdr]
outputs = [header_file]
commands = [_COPY_COMMAND.format(
source=ctx.file.hdr.path, destination=header_file.path)]
# Copy all the additional header and source files.
for additional_files in [ctx.attr.add_hdrs, ctx.attr.add_srcs]:
for target in additional_files:
if len(target.files.to_list()) != 1:
fail("each target listed under add_hdrs or add_srcs must expand to " +
"exactly one file, this expands to %d: %s" %
(len(target.files), target.files))
# The name of the label is the relative path to the file, this enables us
# to prepend "lib/" to the path. For PlatformIO, all the library files
# must be under lib/...
additional_file_name = target.label.name
additional_file_source = [f for f in target.files.to_list()][0]
additional_file_destination = ctx.actions.declare_file(
_ADDITIONAL_FILENAME.format(dirname=name, filename=additional_file_name))
inputs.append(additional_file_source)
outputs.append(additional_file_destination)
commands.append(_COPY_COMMAND.format(
source=additional_file_source.path,
destination=additional_file_destination.path))
# The src argument is optional, some C++ libraries might only have the header.
if ctx.attr.src != None:
source_file = ctx.actions.declare_file(
_SOURCE_FILENAME.format(dirname=name, filename=name))
inputs.append(ctx.file.src)
outputs.append(source_file)
commands.append(_COPY_COMMAND.format(
source=ctx.file.src.path, destination=source_file.path))
# Zip the entire content of the library folder.
outputs.append(ctx.outputs.zip)
commands.append(_ZIP_COMMAND.format(
output_dir=ctx.outputs.zip.dirname, zip_filename=ctx.outputs.zip.basename))
ctx.actions.run_shell(
inputs=inputs,
outputs=outputs,
command="\n".join(commands),
)
  # Collect the zip files produced by all transitive dependencies.
transitive_zip_files=depset([ctx.outputs.zip])
for dep in ctx.attr.deps:
transitive_zip_files = depset(transitive=[
transitive_zip_files, dep.transitive_zip_files
])
return struct(
transitive_zip_files=transitive_zip_files,
) | e048afd34e1228490f879e10bf42105197053bd8 | 1,185 |
import paddle
def repeat_interleave(x, arg):
"""Use numpy to implement repeat operations"""
return paddle.to_tensor(x.numpy().repeat(arg)) | 9677d48626460241751dde2dfe1ca70d31bab6e2 | 1,186 |
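# Minimal sketch for repeat_interleave: repeat each element of a 1-D tensor twice.
x = paddle.to_tensor([1, 2, 3])
print(repeat_interleave(x, 2))  # values [1, 1, 2, 2, 3, 3]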
import numpy as np
def quantize_arr(arr, min_val=None, max_val=None, dtype=np.uint8):
"""Quantization based on real_value = scale * (quantized_value - zero_point).
"""
    if (min_val is None) or (max_val is None):
min_val, max_val = np.min(arr), np.max(arr)
scale, zero_point = choose_quant_params(min_val, max_val, dtype=dtype)
transformed_arr = zero_point + arr / scale
# print(transformed_arr)
if dtype == np.uint8:
clamped_arr = np.clip(transformed_arr, 0, 255)
quantized = clamped_arr.astype(np.uint8)
elif dtype == np.uint32:
clamped_arr = np.clip(transformed_arr, 0, 2 ** 31)
quantized = clamped_arr.astype(np.uint32)
else:
raise ValueError('dtype={} is not supported'.format(dtype))
# print(clamped_arr)
min_val = min_val.astype(np.float32)
max_val = max_val.astype(np.float32)
return quantized, min_val, max_val | 55f36d84708b32accd7077dc59fa9321f074cd5a | 1,187 |
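# Minimal sketch for quantize_arr, assuming choose_quant_params (referenced above)
# is available in the same module; the sample values are illustrative.
values = np.linspace(-1.0, 1.0, 5).astype(np.float32)
quantized, lo, hi = quantize_arr(values)
print(quantized, lo, hi)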
import numpy as np
def EST_NOISE(images):
"""Implementation of EST_NOISE in Chapter 2 of Trucco and Verri."""
num = images.shape[0]
m_e_bar = sum(images)/num
m_sigma = np.sqrt(sum((images - m_e_bar)**2) / (num - 1))
return m_sigma | 8f8d68b25a88cc800b1a6685407072c29c47db7d | 1,188 |
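# Minimal sketch for EST_NOISE: estimate per-pixel noise from a synthetic stack of
# frames; the true standard deviation of 2.0 should be recovered approximately.
frames = np.random.normal(loc=100.0, scale=2.0, size=(50, 8, 8))
sigma_map = EST_NOISE(frames)
print(sigma_map.mean())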
def continue_cad_funcionario(request):
""" Continuação do Cadastro do Funcionário.
"""
usuario = request.user
try:
funcionario = Funcionario.objects.get(usuario=usuario)
except Exception:
raise Http404()
if funcionario and request.method == "POST":
form = FuncionarioForm(request.POST)
if form.is_valid():
form.save()
return redirect("funcionario")
else:
form = FuncionarioForm()
return render(request, "continue_cad_funcionario.html", {"form": form})
# if request.method == "POST":
# form = FuncionarioForm(request.POST)
# if form.is_valid():
# #'nome', 'rua', 'cpf', 'rg', 'fone', 'bloqueado', 'usuario_fun'
# nome = form.cleaned_data['nome']
# rua = form.cleaned_data['rua']
# cpf = form.cleaned_data['cpf']
# rg = form.cleaned_data['rg']
# fone = form.cleaned_data['fone']
# bloqueado = form.cleaned_data['bloqueado']
# usuario_fun = form.cleaned_data['usuario_fun']
# novo = Funcionario(
# nome=nome, rua=rua, cpf=cpf,
# rg=rg, fone=fone, bloqueado=bloqueado,
# suario_fun=usuario_fun
# )
# novo.save()
# return redirect("funcionario")
# else:
# form = FuncionarioForm()
# return render(request, "continue_cad_funcionario.html", {"form": form}) | 626ffeefdeb98f6921b5b76832fd2b622f6d3a26 | 1,189 |
import re
def remove_words(i_list, string):
"""
    Remove the input list of words from the string.
    i_list: list of words to be removed
    string: string on which the operation is to be performed
"""
regexStr = re.compile(r'\b%s\b' %
r'\b|\b'.join(map(re.escape, i_list)))
o_string = regexStr.sub("", string)
return o_string | 59ac5c2660459f7d2def0d7958a002977a6ca643 | 1,190 |
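# Minimal sketch for remove_words: strip two stop words using whole-word matching.
print(remove_words(["the", "a"], "the cat sat on a mat"))  # " cat sat on  mat"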
from datetime import datetime, timedelta
def save_user_time():
"""
Creates a DateTime object with correct save time
Checks if that save time is now
"""
save_time = datetime.utcnow().replace(hour=18, minute=0, second=0, microsecond=0)
return (save_time == (datetime.utcnow() - timedelta(hours=4))) | 3a16e5de6d912487ca0c46c48d039cf7d44a4991 | 1,191 |
def manage_rating_mails(request, orders_sent=[], template_name="manage/marketing/rating_mails.html"):
"""Displays the manage view for rating mails
"""
return render(request, template_name, {}) | 4afb288233bd4e1fe1b288e6872b522b993ae434 | 1,192 |
from typing import Optional
import time
from datetime import datetime
def cancel(request_url: str,
wait: Optional[bool] = False,
poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
verbose: Optional[bool] = False) -> int:
"""
Cancel the request at the given URL.
This method returns immediately by default since the API processes
this request asynchronously. If you would prefer to wait for it
to be completed, set the 'wait' parameter to True. You can adjust
the polling time using the 'poll_interval' parameter.
Args:
request_url: the URL string of the request to be canceled
wait: set to True to block until the cancellation request
has been completed (may wait for several minutes)
poll_interval: seconds to wait between polling
calls, defaults to STANDARD_POLLING_SLEEP_TIME.
verbose: if True then output poll times and other
progress, defaults to False
Returns:
1 on success
Raises:
pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
"""
# do request
req = AuroraXRequest(method="delete",
url=request_url,
null_response=True)
req.execute()
# return immediately if we don't want to wait
if (wait is False):
return 1
# get status
status = get_status(request_url)
# wait for request to be cancelled
while (status["search_result"]["data_uri"] is None and status["search_result"]["error_condition"] is False):
time.sleep(poll_interval)
if (verbose is True):
print("[%s] Checking for cancellation status ..." % (datetime.datetime.now()))
status = get_status(request_url)
# return
if (verbose is True):
print("[%s] The request has been cancelled" % (datetime.datetime.now()))
return 1 | 9f9534c3114c42cfbc08330566244d8981659cdb | 1,193 |
def selected_cases(self):
"""Get a list of all grid cases selected in the project tree
Returns:
A list of :class:`rips.generated.generated_classes.Case`
"""
case_infos = self._project_stub.GetSelectedCases(Empty())
cases = []
for case_info in case_infos.data:
cases.append(self.case(case_info.id))
return cases | 27485e7d2244167c0e9766972856f2cb221f8813 | 1,194 |
import requests
import json
def create_whatsapp_group(org, subject):
"""
Creates a Whatsapp group using the subject
"""
result = requests.post(
urljoin(org.engage_url, "v1/groups"),
headers=build_turn_headers(org.engage_token),
data=json.dumps({"subject": subject}),
)
result.raise_for_status()
return json.loads(result.content)["groups"][0]["id"] | b867046be5623a7e3857ae6ef0069d909db323c1 | 1,195 |
import numpy as np
def compute_MVBS_index_binning(ds_Sv, range_sample_num=100, ping_num=100):
"""Compute Mean Volume Backscattering Strength (MVBS)
based on intervals of ``range_sample`` and ping number (``ping_num``) specified in index number.
Output of this function differs from that of ``compute_MVBS``, which computes
bin-averaged Sv according to intervals of range (``echo_range``) and ``ping_time`` specified
in physical units.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing ``Sv`` and ``echo_range`` [m]
range_sample_num : int
number of samples to average along the ``range_sample`` dimension, default to 100
ping_num : int
number of pings to average, default to 100
Returns
-------
A dataset containing bin-averaged Sv
"""
da_sv = 10 ** (ds_Sv["Sv"] / 10) # average should be done in linear domain
da = 10 * np.log10(
da_sv.coarsen(ping_time=ping_num, range_sample=range_sample_num, boundary="pad").mean(
skipna=True
)
)
# Attach attributes and coarsened echo_range
da.name = "Sv"
ds_MVBS = da.to_dataset()
ds_MVBS.coords["range_sample"] = (
"range_sample",
np.arange(ds_MVBS["range_sample"].size),
{"long_name": "Along-range sample number, base 0"},
) # reset range_sample to start from 0
ds_MVBS["echo_range"] = (
ds_Sv["echo_range"]
.coarsen( # binned echo_range (use first value in each average bin)
ping_time=ping_num, range_sample=range_sample_num, boundary="pad"
)
.min(skipna=True)
)
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_num} pings "
"comment: ping_time is the interval start) "
f"range_sample: mean (interval: {range_sample_num} samples along range "
"comment: range_sample is the interval start)"
),
"comment": "MVBS binned on the basis of range_sample and ping number specified as index numbers", # noqa
"binning_mode": "sample number",
"range_sample_interval": f"{range_sample_num} samples along range",
"ping_interval": f"{ping_num} pings",
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "preprocess.compute_MVBS_index_binning"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
return ds_MVBS | c5b163ec0f9b2807580c586a3e40ca81d0bcd6cb | 1,196 |
def set_image_exposure_time(exp_time):
"""
Send the command to set the exposure time per frame to SAMI.
Parameters
----------
exp_time (float) : the exposure time in seconds.
Returns
-------
message (string) : DONE if successful.
"""
message = send_command("dhe set obs.exptime {:f}".format(exp_time))
return message | c01a12607b6554f29c63229eefe87c1bf81bc7e0 | 1,197 |
import platform
import subprocess
def ping(host):
""" Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the
host name is valid.
    Based on https://bit.ly/2TmgeX2, but implemented with subprocess.
:param str host: A host name or ip
:return boolean: True if ping is replied correctly
"""
    # Option for the number of packets as a function of the operating system
param = '-n' if platform.system().lower() == 'windows' else '-c'
# Building the command. Ex: "ping -c 1 example.com"
command = ['ping', param, '1', host]
return subprocess.call(command) == 0 | 0c50e5042fbac4a29c96f07411040f511fab366a | 1,198 |
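# Minimal sketch for ping; the host name is an illustrative assumption.
if ping("example.com"):
    print("host is reachable")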
def stack_exists(client, stack_name):
""" Checks that stack was specified is existing """
cfn_stacks = client.list_stacks()
for cfn_stack in cfn_stacks["StackSummaries"]:
if cfn_stack['StackName'] == stack_name and "COMPLETE" in cfn_stack['StackStatus'] and "DELETE" not in cfn_stack['StackStatus']:
return True
return False | 8e9476b57300cb030ba5292f83060bb5ae652d19 | 1,199 |
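# Hedged sketch for stack_exists, assuming a boto3 CloudFormation client and an
# illustrative stack name; note that list_stacks paginates, so older stacks may
# require iterating over additional pages.
import boto3
cfn = boto3.client("cloudformation")
print(stack_exists(cfn, "my-app-prod"))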