content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import numpy as np
from scipy.stats import truncnorm
# Tensor/mstype are assumed to come from MindSpore, based on the usage below
from mindspore import Tensor
from mindspore.common import dtype as mstype
def conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
"""conv init: variance-scaling initializer using a truncated normal distribution"""
fan_in = in_channel * kernel_size * kernel_size
scale = 1.0
scale /= max(1., fan_in)
stddev = (scale ** 0.5) / .87962566103423978
mu, sigma = 0, stddev
weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
return Tensor(weight, dtype=mstype.float32) | 925339a12e4f2e04c403ad8148145df0497da0da | 449 |
def vgg8(**kwargs):
"""VGG 8-layer model (configuration "S")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(cfg['S'], **kwargs)
return model | 61d9f2e98c68691c1ed26631220f447eef28ba11 | 450 |
def get_char_from_ascii(key_num):
"""Function that converts a character to an ascii code
Parameters
----------
key_num : int
ASCII code of the character
Returns
-------
char : character
character converted from ascii
"""
return chr(key_num) | 79f6a5627805909a005d5921f4e9fe738fb09936 | 451 |
def start():
"""
view for data entry for optimisation
"""
form = LocationForm()
if form.validate_on_submit():
return optimise(form.data)
return flask.render_template("start.html",
title="Start", form=form) | caba5a4d20d544ed480bda4d4e4d4377880bbd40 | 453 |
def c_flag(opt, test_not=False):
""" convert a test parameter into t if true for the Fortran build system """
if test_not:
if opt: return "FALSE"
else: return "TRUE"
else:
if opt: return "TRUE"
else: return "FALSE" | cf78668ae19287822fba9946fa472187848e0084 | 455 |
def false_function():
"""Sample function to test unit testing."""
return False | 7823ac0f533c97544a8f73f73715bebb8e5b45cc | 456 |
def broker_task_send(task_uuid, request, broker_point, reply_to=None):
"""Command to publish `primitives.Request` to customer
Args:
task_uuid(str): task identification
request: Serialized request
broker_point(gromozeka.BrokerPoint):
reply_to(gromozeka.BrokerPoint):
Returns:
Command:
"""
return Command(command=BROKER_TASK_SEND,
args={'task_uuid': task_uuid, 'request': request, 'broker_point': broker_point,
'reply_to': reply_to}).as_tuple() | 52b389982676f65547f10a2cd45ac225e6486673 | 457 |
import numpy
def process_axis_labels(datadesc, blobs, offset=0):
"""Convert the raw axis label descriptions.
Similar to LiveDataPanel._process_axis_labels, but is flexible in datadesc.
"""
CLASSIC = {'define': 'classic'}
labels = {}
titles = {}
for size, axis in zip(reversed(datadesc['shape']), AXES):
# if the 'labels' key does not exist or does not have the right
# axis key set default to 'classic'.
label = datadesc.get(
'labels', {'x': CLASSIC, 'y': CLASSIC}).get(axis, CLASSIC)
if label['define'] == 'range':
start = label.get('start', 0)
size = label.get('length', 1)
step = label.get('step', 1)
end = start + step * size
labels[axis] = numpy.arange(start, end, step)
elif label['define'] == 'array':
index = label.get('index', 0)
labels[axis] = numpy.frombuffer(blobs[index],
label.get('dtype', '<i4'))
else:
labels[axis] = numpy.array(range(size))
labels[axis] += offset if axis == 'x' else 0
titles[axis] = label.get('title')
return labels, titles | d0f880c69160b2a620affe7b1cfe8c7dda12d807 | 458 |
def _to_ranks_by_group(dat, group, formula, exclude_cols=[]):
"""
Convert predictors to ranks separately for each group for use in rank Lmer. Any columns not in the model formula or in exclude_cols will not be converted to ranks. Used by models.Lmer
Args:
dat (pd.DataFrame): dataframe of data
group (string): string name of column to group data on
formula (string): Lmer flavored model formula with random effects
exclude_cols (list): optional columns that are part of the formula to exclude from rank conversion.
Returns:
pandas.core.frame.DataFrame: ranked data
"""
if not isinstance(group, str) or group not in dat.columns:
raise TypeError(
"group must be a valid column name in the dataframe. Currently only 1 grouping variable is supported."
)
if isinstance(exclude_cols, str):
exclude_cols = [exclude_cols]
original_col_order = list(dat.columns)
formula = formula.replace(" ", "")
to_rank = formula.split("~")[-1].split("(")[0].split("+")[:-1]
# add dv to be ranked
to_rank.append(formula.split("~")[0])
to_rank = [c for c in to_rank if c not in exclude_cols]
other_cols = [c for c in dat.columns if c not in to_rank]
dat = pd.concat(
[dat[other_cols], dat.groupby(group).apply(lambda g: g[to_rank].rank())], axis=1
)
return dat[original_col_order] | 6cff465b0a1877d6594953dda75913dfb36a67ad | 459 |
def list_scans():
"""
:return: A JSON containing a list of:
- Scan resource URL (eg. /scans/1)
- Scan target
- Scan status
"""
data = []
for scan_id, scan_info in SCANS.iteritems():
if scan_info is None:
continue
target_urls = scan_info.target_urls
status = scan_info.w3af_core.status.get_simplified_status()
errors = True if scan_info.exception is not None else False
data.append({'id': scan_id,
'href': '/scans/%s' % scan_id,
'target_urls': target_urls,
'status': status,
'errors': errors})
return jsonify({'items': data}) | 60d5eb5c33c09ac6e35ffae2c10b6aca566c6027 | 461 |
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
**Examples**
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (1 + x**2, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor') | c13e503a631d3bfc5ead05dc8de8cc5243614241 | 462 |
import pickle
import torch
import torch.distributed as dist
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = dist.get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list | 9e89ed2f299f5de8dec55d5529478177d45c21fa | 463 |
import torch
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=()):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net | 2ddeda15b84bca0b83a7b5b516c83f991cec44c7 | 464 |
def remove_from_group(group_name, nodes=None, nodes_by_col='SUID', edges=None, edges_by_col='SUID', network=None,
base_url=DEFAULT_BASE_URL):
"""Remove the specified nodes and edges from the specified group.
Args:
group_name (str): Specifies the name used to identify the group
nodes (list or str or int or None): List of nodes or keyword: selected, unselected or all. If node list:
``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name
or SUID). Node names should be found in the ``SUID`` column of the ``node table`` unless
specified in ``nodes_by_col``. If list is None, default is currently selected nodes.
nodes_by_col (str): name of node table column corresponding to provided nodes list. Default is 'SUID'.
edges (str or list or int or None): List of edges or keyword: selected, unselected or all. If edge list:
``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar edge name
or SUID). Edge names should be found in the ``SUID`` column of the ``edge table`` unless
specified in ``edges_by_col``. If list is None, default is currently selected edges.
edges_by_col (str): name of edge table column corresponding to provided edges list. Default is 'SUID'.
network (SUID or str or None): Name or SUID of a network. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: {}
Raises:
CyError: if network name or SUID doesn't exist
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> remove_from_group('Group 1', ['GDS1', 'SIP4', 'PDC1'], nodes_by_col='COMMON') # remove nodes by common name & all their edges
{}
>>> remove_from_group('Group 1', 'GDS1, SIP4, PDC1', nodes_by_col='COMMON') # remove nodes by common name & all their edges
{}
>>> remove_from_group('Group 1', [76545, 75499, 80299]) # remove nodes by SUID & all their edges
{}
>>> remove_from_group('Group 1', 80299) # remove node by SUID & all its edges
{}
>>> remove_from_group('Group 1') # remove all selected nodes and edges
{}
>>> remove_from_group('Group 1', nodes=[], edges=[78565, 79565]) # remove edges but not any nodes
{}
>>> remove_from_group('Group 1', nodes='unselected', edges='unselected') # remove all unselected nodes and edges
{}
"""
if isinstance(nodes, str) and nodes in {'all', 'selected', 'unselected'}: nodes_by_col = None
node_list = prep_post_query_lists(nodes, nodes_by_col)
if isinstance(edges, str) and edges in {'all', 'selected', 'unselected'}: edges_by_col = None
edge_list = prep_post_query_lists(edges, edges_by_col)
net_suid = networks.get_network_suid(network, base_url=base_url)
res = commands.commands_post(
f'group remove groupName="{group_name}" nodeList="{node_list}" edgeList="{edge_list}" network="SUID:{net_suid}"',
base_url=base_url)
return res | 0f7ae3b161aa1b189be14973ddaa7a7a4fef4bbf | 465 |
def filter_bank_2high(t, Nj, Nj_1, ac=2.0, bc=2.0):
"""
computes the filter bank for control points N_j, Nj_1 given the variable t
:param t: data points on the real line R arranged in numpy array
:param Nj: control point, Nj > Nj_1, integer
:param Nj_1: control point, Nj > Nj_1, integer
:param ac: between (1, 2]. Default 2.0
:param bc: bc < 2. Default 2.0
:return: (ha, hb1, hb2) low-pass filter ha and high-pass filters hb1 and hb2 at t,
all in numpy array format
"""
# a_hat
a_cR = (1 + Nj_1) / ac
a_epsR = Nj_1 - a_cR
a_cL = -a_cR
a_epsL = a_epsR
# b_hat_1
b1_cL = a_cR
b1_epsL = a_epsR
b1_cR = (Nj_1 + Nj) / bc
b1_epsR = Nj - b1_cR
# b_hat_2
b2_cL = b1_cR
b2_epsL = b1_epsR
b2_cR = 2 * Nj
b2_epsR = 1
# supp(ha) = [0, 1 / 4]
ha = hmask(t, a_cL, a_epsL, a_cR, a_epsR)
# supp(hb1) = [1 / 8, 1 / 2]
hb1 = hmask(t, b1_cL, b1_epsL, b1_cR, b1_epsR)
# supp(hb2) = [1 / 4, 1 / 2]
hb2 = hmask(t, b2_cL, b2_epsL, b2_cR, b2_epsR)
return ha, hb1, hb2 | a197cbd99ea4d2ce6fcf9c277cff3e634b539049 | 466 |
def as_public():
"""Return requests session without authentication"""
return BaseUrlSession() | d55cc3616c6910e88d99083cf4e530987c1d8d6c | 468 |
import numpy as np
def transform_real_2_sim(real_position):
"""
Transforms a position from the 'real' coordinate system to the 'sim' coordinate system.
:param real_position: dictionary with 'x', 'y' and 'z' keys to floating point values
:return: position in sim space as dictionary with 'x', 'y' and 'z' keys to floating point values
"""
real_pos = np.array([real_position["x"], real_position["y"], 1])
sim_pos_np = np.dot(REAL_2_SIM_TRANSFORM, real_pos)
sim_pos = {"x": sim_pos_np[0], "y": 0.9010001, "z": sim_pos_np[1]}
return sim_pos | 29b83be1f6f4e49f777e085db651e4f31d47c2e0 | 469 |
import torch
def generate_tgt_mask(sz):
"""Generate a square mask for the sequence. The masked positions
are filled with float('-inf'). Unmasked positions are filled with
float(0.0).
This function is a slight modification of the version in the PyTorch
repository.
Parameters
----------
sz : int
The length of the target sequence.
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
)
return mask | 3fce5eb1cb852ca162fda58407c2cf81c1bdc849 | 470 |
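As an illustrative check (assuming the `generate_tgt_mask` above is in scope), the mask for a length-3 target keeps the lower triangle at 0.0 and blocks future positions with -inf:
mask = generate_tgt_mask(3)
print(mask)
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])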
def SceneAddPipeline(builder, pipeline):
"""This method is deprecated. Please switch to AddPipeline."""
return AddPipeline(builder, pipeline) | f220a53ad13923b1f00d208f59e575926e5b7fa2 | 471 |
def SynthesizeUserId(email):
"""Return a synthetic user ID from an email address.
Note that this is not the same user ID found in the production system.
Args:
email: An email address.
Returns:
A string userid derived from the email address.
"""
user_id_digest = _MD5_FUNC(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
return user_id | fb3f81e37decaa941857ac575a5fd034f92a2324 | 472 |
import torch
from tqdm import trange
def compute_jacobian(fn, x0: torch.Tensor, bs: int):
"""
Computes the Jacobian matrix of the given function at x0, using vector-Jacobian products
"""
input_shape = x0.shape
assert len(input_shape) == 3
dim = x0.numel()
eye = torch.eye(dim, dtype=x0.dtype, device=x0.device)
# Forward pass
x0rep = x0.detach()[None].repeat([bs] + [1] * len(input_shape)) # repeat along batch axis
x0rep.requires_grad = True
z0rep = fn(x0rep)
zshape = z0rep.shape[1:]
assert zshape.numel() == dim
# Compute batches of rows of the Jacobian
rows = []
for row_start in trange(0, dim, bs, desc='jacobian', leave=False):
# Pre-pad with extra rows to ensure that batch size stays constant
row_end = min(row_start + bs, dim)
num_rows = row_end - row_start
if num_rows != bs:
assert num_rows < bs
pre_pad_rows = bs - num_rows
else:
pre_pad_rows = 0
assert row_start - pre_pad_rows >= 0
# vector-Jacobian product with rows of an identity matrix
g, = torch.autograd.grad(
z0rep, x0rep,
grad_outputs=eye[row_start - pre_pad_rows:row_end].reshape(row_end - row_start + pre_pad_rows, *zshape),
retain_graph=True
)
assert g.shape == x0rep.shape
rows.append(g.view(g.shape[0], -1)[pre_pad_rows:, :])
jacobian = torch.cat(rows, dim=0)
assert jacobian.shape == (dim, dim)
return jacobian | c184fd03abea440e27bdd27bb1105778e7bde4b6 | 474 |
import math
def pixel_distance(A, B):
"""
In 9th grade I sat in geometry class wondering "when then hell am I
ever going to use this?"...today is that day.
Return the distance between two pixels
"""
(col_A, row_A) = A
(col_B, row_B) = B
return math.sqrt(math.pow(col_B - col_A, 2) + math.pow(row_B - row_A, 2)) | 64853c44400428c8040ae47d1cc2cca17aed0a5f | 475 |
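A quick sanity check (illustrative values, assuming the function above is in scope) using a 3-4-5 right triangle:
print(pixel_distance((0, 0), (3, 4)))  # 5.0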
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
"""
Word-level n-grams in a string
By default, whitespace is assumed to be a word boundary.
>>> ng.word_ngrams('This is not a test!')
[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]
If the sequence's length is less than or equal to n, the n-grams are
simply the sequence itself.
>>> ng.word_ngrams('Test!')
[('Test!')]
Args:
s: a string
Returns:
list: tuples of word-level n-grams
"""
tokens = token_fn(s)
return __ngrams(tokens, n=min(len(tokens), n)) | 8be360785e38b8f427c509d63f5ecba3b6b2c020 | 476 |
def phosites_detail(text):
"""
create detail view output of phosphosites by accession.
:param text: string of phos group ID
:return: template
"""
results = browse_queries.browse_detail(text,'Phosphosite')
table = browse_queries.phos_kin_query(text)
# pass tables, results and style indicator to template for rendering, plus
# variables for title info (related and text of acc no)
return render_template('search_results.html', title="Phosphosite",
style='double', results=results, table=table,
related="Kinases", text=text) | 5f1b67fadda3eb1dfe86e7e996e65197ff1eca3a | 477 |
import numpy as np
def convert_to_np_arrays(X):
"""
Converts the input arrays to dense numpy arrays to allow the methods to work properly
"""
try:
X = X.todense()
except:
pass
X = np.array(X)
if len(X.shape) > 2:
X = reduce_shape(X)
return X | 68fdf6fd87df160e96acec5abb8af310886fccc2 | 478 |
import numpy as np
def reduce_arr(arr):
"""
Return which elements on which axis are unique
Args:
arr (np.ndarray) : input array which to reduce to unique value
Returns:
reduced array(np.ndarray) : array with reduced data.
data_axis (list) : the axes that have changing data.
"""
ndim = len(arr.shape)
data_axis = []
slice_array = ()
for i in range(ndim):
mn = np.min(arr, axis=i)
mx = np.max(arr, axis=i)
eq = np.all(mn == mx)
if not eq:
data_axis.append(ndim - i - 1)
slice_array += (slice(None),)
else:
slice_array += (0,)
red_ar = arr[slice_array]
return red_ar, data_axis | d207876b820c7d7b30f8af6c302181620b00bf25 | 480 |
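A minimal usage sketch (illustrative input, assuming the `reduce_arr` above is in scope): with identical rows the data only varies along one axis, so the array collapses to a single row.
import numpy as np
arr = np.array([[0, 1, 2],
                [0, 1, 2]])  # rows are identical
red, axes = reduce_arr(arr)
print(red, axes)             # [0 1 2] [0]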
import torch
def nll_lorentzian(preds, target, gamma):
"""
Isotropic lorentzian loss function
:param preds: prediction values from NN of size [batch, particles, timesteps, (x,y,v_x,v_y)]
:param target: target data of size [batch, particles, timesteps, (x,y,v_x,v_y)]
:param gamma: The tensor for the FWHM of the distribution of size [batch, particles, timesteps, (x,y,v_x,v_y)]
:return: value of the loss function normalised by (batch * number of atoms)
"""
gammasquared = gamma ** 2
neg_log_p = torch.log(1+((preds - target) ** 2 / (gammasquared)))
neg_log_p += torch.log(gamma)
return neg_log_p.sum() / (target.size(0) * target.size(1)) | 24d6ea2c4b40bc0f8c27eebf0e402261d865836e | 481 |
from pathlib import Path
def get_archive():
"""Ensure that the archive file exists and return its path.
This is a function so the path can be made configurable in the future.
Returns:
:obj:`str`: The full local path to the archive file.
"""
filename = '/config/archive.txt'
archfile = Path(filename)
if not archfile.exists():
archfile.touch()
return filename | 78abc493d7f256ebf53ec2cfeb9ab4f1d42b5c02 | 482 |
from typing import Sequence
from typing import Callable
from typing import List
def _filter_unique_configs(
configs: Sequence[ProblemConfig],
filter_fn: Callable[[ProblemConfig], bool] = lambda _: True,
) -> List[ProblemConfig]: # pytype: disable=annotation-type-mismatch
"""Filters a list of problem_config to their unique occurrences for testing.
Args:
configs: list of ProblemConfig.
filter_fn: optional function to apply only to subset meeting this condition.
Returns:
List of unique occurrences for testing.
"""
observed_configs = set()
new_configs = []
for problem_config in configs:
if filter_fn(problem_config):
if problem_config not in observed_configs:
new_configs.append(problem_config)
observed_configs.add(problem_config)
return new_configs | 98481fa9991726f3ba4253fb132f7f7e3cb2a420 | 483 |
def convert_units(str):
""" Convert some string with binary prefix to int bytes"""
unit = ''.join(ele for ele in str if not ele.isdigit()).strip().lower()
return int(''.join(ele for ele in str if ele.isdigit()))*{
"b": 1,
"B": 1,
"k": 2**10,
"kb": 2**10,
"m": 2**20,
"mb": 2**20,
"g": 2**30,
"gb": 2**30,
"t": 2**40,
"tb": 2**40
}.get(unit, 1) | a9de044090bfd4311a27dbbf373361e7d88a1e06 | 484 |
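Illustrative calls (assuming the function above is in scope); note the unit suffix is lower-cased before lookup:
print(convert_units("512"))   # 512      (no suffix -> bytes)
print(convert_units("4kb"))   # 4096
print(convert_units("2m"))    # 2097152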
def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set:
"""
Match the requested symbol reverse piecewise (split on ``::``) against the candidates.
This allows you to under-specify the base namespace so that ``"MyClass"`` can match ``my_namespace::MyClass``
Args:
candidates: set of possible matches for symbol
symbol: the symbol to match against
sep: the separator between identifier elements
Returns:
set of matches
"""
piecewise_list = set()
for item in candidates:
split_symbol = symbol.split(sep)
split_item = item.split(sep)
split_symbol.reverse()
split_item.reverse()
min_length = len(split_symbol)
split_item = split_item[:min_length]
if split_symbol == split_item:
piecewise_list.add(item)
return piecewise_list | 1c6d7240365ef22f753aa4195cfb5e879fc453e0 | 485 |
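A minimal sketch of the reverse piecewise matching (hypothetical candidate names, assuming the function above is in scope):
candidates = {"my_namespace::MyClass", "other_ns::OtherClass"}
print(match_piecewise(candidates, "MyClass"))                # {'my_namespace::MyClass'}
print(match_piecewise(candidates, "my_namespace::MyClass"))  # {'my_namespace::MyClass'}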
from distutils.version import LooseVersion
def is_kube_version_supported(kube_version, min_version=None, max_version=None):
"""Check if the k8s version is supported by the application.
:param kube_version: the running or target k8s version
:param min_version (optional): minimum k8s version supported by the app
:param max_version (optional): maximum k8s version supported by the app
:returns bool: True if k8s version is supported
"""
if ((min_version is not None and LooseVersion(kube_version) < LooseVersion(min_version)) or
(max_version is not None and LooseVersion(kube_version) > LooseVersion(max_version))):
return False
return True | f08a5e5eb9ac9928e2e08ddddd6d30db90e8c868 | 486 |
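Illustrative usage (assuming the function above and LooseVersion are in scope; version numbers are made up):
print(is_kube_version_supported("1.21.3", min_version="1.19.0", max_version="1.22.0"))  # True
print(is_kube_version_supported("1.24.0", max_version="1.22.0"))                        # False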
def chebi(name=None, identifier=None):
"""Build a ChEBI abundance node.
:rtype: Abundance
"""
return Abundance(namespace='CHEBI', name=name, identifier=identifier) | bbe8cf217a545f2818d7957b01d7bbaf0a2cc6d2 | 487 |
def get_group(request):
"""returns all the groups in database
"""
group_id = request.matchdict.get('id', -1)
group = Group.query.filter_by(id=group_id).first()
return [
{
'id': group.id,
'name': group.name,
'thumbnail_full_path':
group.thumbnail.full_path if group.thumbnail else None,
'created_by_id': group.created_by.id,
'created_by_name': group.created_by.name,
'users_count': len(group.users),
}
] | 6d53d8969ecebb882c6626b128a72f24dafa6997 | 488 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def create_histogram(path_to_image, target_path=''):
"""
creates a histogram of a given image and either shows or saves a plot
Args:
path_to_image: path to the image
target_path: if given, saves a plot, otherwise (if empty) shows the plot
Returns:
the histogram plot
"""
image = cv2.imread(path_to_image)
depth = image.shape[2]
for z in range(depth):
im = image[:, :, z]
mi = im.min()
ma = im.max()
if mi < 0 or ma > 255:
print("range error: min=" + str(mi) + " max=" + ma)
exit()
# V1
# plt.hist(im.ravel(), 256, [0, 256])
# V2
# calculate mean value from RGB channels and flatten to 1D array
vals = im.flatten()
# plot histogram with 255 bins
# b, bins, patches = plt.hist(vals, 255, stacked=True, density=True)
counts, bins = np.histogram(vals, 255)
counts = (counts - min(counts)) / (max(counts) - min(counts))
plt.hist(bins[:-1], bins, weights=counts)
plt.xlim([0, 255])
# plt.show()
#
plt.title(path_to_image)
plt.xlabel('pixel value')
plt.ylabel('count')
if target_path == '':
plt.show()
else:
plt.savefig(target_path + 'histo')
plt.clf()
return plt | cb68b5f8bd55120d4f720020b092af02b727a6ba | 489 |
def task_6_list_all_supplier_countries(cur) -> list:
"""
List all supplier countries
Args:
cur: psycopg cursor
Returns: 29 records
"""
cur.execute("""SELECT country FROM suppliers""")
return cur.fetchall() | a3d8af1eb2948ebc01e408265d20b0055f1a0504 | 490 |
def _energy_to_length_factor(e_unit, l_unit):
"""
Convert the units of Planck's constant and speed of light
:param e_unit:
:type e_unit: str
:param l_unit:
:type l_unit: str
:return: c,h
"""
dest_h_u = ug.parse_units('%s s' % e_unit)
dest_c_u = ug.parse_units('%s/s' % l_unit)
if dest_h_u.dimensionality != _h_unit.dimensionality:
raise ValueError("e_unit should be a valid energy unit")
if dest_c_u.dimensionality != _c_unit.dimensionality:
raise ValueError('l_unit should be a valid length unit')
h = ug.convert(sc.h, _h_unit, dest_h_u)
c = ug.convert(sc.c, _c_unit, dest_c_u)
return c, h | 23e1dbfac7265ff1df4cf62e3609f91d5e327a35 | 491 |
def kev_to_wavelength(kev):
"""Calculate the wavelength from kev"""
lamda = 12.3984 / kev #keV to Angstrom
return lamda | cfb3126e56bc0890dd8cf2caa50a240b380dad56 | 492 |
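A worked example (illustrative, assuming the function above is in scope): Cu K-alpha radiation at about 8.05 keV corresponds to roughly 1.54 Angstrom.
print(kev_to_wavelength(8.05))  # ~1.5402  (12.3984 / 8.05)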
def _convert_rde_to_1_0_format(rde_data: dict) -> dict:
"""Convert defined entity to RDE 1.0.
:param DefEntity rde_data: Defined entity dictionary
:return: converted defined entity
:rtype: dict
"""
new_rde = common_models.DefEntity(**rde_data)
new_native_entity: AbstractNativeEntity = rde_utils.convert_runtime_rde_to_input_rde_version_format( # noqa: E501
new_rde.entity, rde_constants.RDEVersion.RDE_1_0_0)
new_rde.entity = new_native_entity
new_rde.entityType = common_models.EntityType.NATIVE_ENTITY_TYPE_1_0_0.value.get_id() # noqa: E501
return new_rde.to_dict() | 931bc93c7326a4640892a3876885fcc19430bbe1 | 493 |
def additive_symbols(tokens, base_url):
"""``additive-symbols`` descriptor validation."""
results = []
for part in split_on_comma(tokens):
result = pad(remove_whitespace(part), base_url)
if result is None:
return
if results and results[-1][0] <= result[0]:
return
results.append(result)
return tuple(results) | 346eae19c5d4d936d0ad7f2cdba2191943cc7bca | 494 |
def _index_list(key_or_list, direction=None):
"""Helper to generate a list of (key, direction) pairs.
Takes such a list, or a single key, or a single key and direction.
"""
if direction is not None:
return [(key_or_list, direction)]
else:
if isinstance(key_or_list, string_type):
return [(key_or_list, ASCENDING)]
elif not isinstance(key_or_list, (list, tuple)):
raise TypeError("if no direction is specified, "
"key_or_list must be an instance of list")
return key_or_list | e32ddb70a10d52e1f2595cac9cb99c0381b9a3e4 | 496 |
def CalculateOSNames(os_name, os_variants):
"""Calculates all the names an OS can be called, according to its variants.
@type os_name: string
@param os_name: base name of the os
@type os_variants: list or None
@param os_variants: list of supported variants
@rtype: list
@return: list of valid names
"""
if os_variants:
return ["%s+%s" % (os_name, v) for v in os_variants]
else:
return [os_name] | 5689ed7da55cec929045e95344c60e7a06af711d | 498 |
def c4x(c: Circuit, c0: int, c1: int, c2: int, c3: int, t: int) -> Circuit:
"""A macro of 4-controlled X gate"""
return c.h[t].c4z(c0, c1, c2, c3, t).h[t] | 89b5d790a70448a1d46452554ab234e113e63c59 | 500 |
def pad(data, pad_id):
""" Pad all lists in data to the same length. """
width = max(len(d) for d in data)
return [d + [pad_id] * (width - len(d)) for d in data] | a0951f4332879600d25c061cf1c553126d6df8d2 | 501 |
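Illustrative usage (assuming the `pad` above is in scope), padding ragged token-id lists with pad_id=0:
print(pad([[1, 2, 3], [4], [5, 6]], 0))  # [[1, 2, 3], [4, 0, 0], [5, 6, 0]]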
import cv2
import numpy as np
from netharn import util
def draw_boxes_on_image(img, boxes, color='blue', thickness=1,
box_format=None):
"""
Example:
>>> from netharn import util
>>> img = np.zeros((10, 10, 3), dtype=np.uint8)
>>> color = 'blue'
>>> thickness = 1
>>> boxes = util.Boxes([[1, 1, 8, 8]], 'tlbr')
>>> img2 = draw_boxes_on_image(img, boxes, color, thickness)
>>> # xdoc: +REQUIRES(--show)
>>> from netharn.util import mplutil
>>> mplutil.autompl() # xdoc: +SKIP
>>> mplutil.figure(doclf=True, fnum=1)
>>> mplutil.imshow(img2)
"""
if not isinstance(boxes, util.Boxes):
if box_format is None:
raise ValueError('specify box_format')
boxes = util.Boxes(boxes, box_format)
color = tuple(util.Color(color).as255('bgr'))
tlbr = boxes.to_tlbr().data
img2 = img.copy()
for x1, y1, x2, y2 in tlbr:
# pt1 = (int(round(x1)), int(round(y1)))
# pt2 = (int(round(x2)), int(round(y2)))
pt1 = (int(x1), int(y1))
pt2 = (int(x2), int(y2))
img2 = cv2.rectangle(img2, pt1, pt2, color, thickness=thickness)
return img2 | 3c4a3b547d39bac940ea9f6999a98f2db62f938b | 502 |
from random import randint
def _select_random_features(feature_list, amount):
"""Selects a given amount of random features from the feature list"""
set_size = len(feature_list) -1
random_features = []
for i in range(amount):
while(True):
random_feature = feature_list[randint(0, set_size)]
if(random_feature in random_features):
continue
else:
random_features.append(random_feature)
break
return random_features | e281bfa75e153aa195119f84777f41db9d5e806c | 503 |
def matrixop_inp_matr():
"""
Returns the matrix entered by the user from the keyboard.
Returns
-------
a : [[float, float, ...],
[float, float, ...],
...]
The matrix entered by the user
"""
while True:
try:
m = int(input('How many rows will the matrix have? '))
except:
print('You did not enter a number')
else:
if m > 0:
break
else:
print('You did not enter a natural number')
while True:
try:
n = int(input('How many columns will the matrix have? '))
except:
print('You did not enter a number')
else:
if n > 0:
break
else:
print('You did not enter a natural number')
print("Введите элементы матрицы (заполнение идёт по строкам)")
a = []
for i in range(m):
a.append([])
for j in range(n):
while True:
try:
print(f'Enter element a[{i+1}][{j+1}]')
elem = eval(input())
except:
print('You did not enter a number')
else:
break
a[i].append(elem)
return a | c373af0c7493ff32f919d903644b2031cc51162c | 504 |
def dropannotation(annotation_list):
"""
Drop out the annotation contained in annotation_list
"""
target = ""
for c in annotation_list:
if not c == "#":
target += c
else:
return target
return target | 9f4a695eaf80f79dce943f2f91926d9c823483b6 | 506 |
def do_associate_latest_edit(parser, token):
"""
AssociateLatestEdit
"""
try:
tag, node = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires one argument" % token.contents.split()[0]
return AssociateLatestEdit(node) | 75cf36f1cccd2191636f3cb603503c6655ae0c67 | 507 |
def open_monitoring_db(dbhost, dbuser, dbpass, database):
"""
Open MySQL monitoring DB
"""
try:
conn = MySQLdb.connect(host=dbhost, user=dbuser,
passwd=dbpass, db=database)
except MySQLdb.Error, err:
print "Error %d: %s" % (err.args[0], err.args[1])
sys.exit(1)
return conn | db33430d20c7c72c7428d85f161d1c186404dc05 | 508 |
import numpy as np
import matplotlib.pyplot as plt
def matdiff(matrix1,matrix2,figsize=None,cmap=None):
"""
display the difference between two real matrices, alongside this plot this difference
on a log- colour scale (if diff!=0)
"""
if not figsize:
figsize = defaults['figsize']
if not cmap:
cmap = defaults['cmap']
_matdiff = matrix1-matrix2
f, (ax1, ax2) = plt.subplots(1,2,figsize=(2*figsize[0],figsize[1]))
imreal = ax1.imshow(_matdiff,interpolation='nearest',cmap=cmap)
f.colorbar(imreal,ax=ax1)
# trying to plot the log-scale diff will fail if the difference is zero everywhere
if not np.all(_matdiff==np.zeros(_matdiff.shape)):
imimag = ax2.imshow(np.log10(np.abs(_matdiff)),interpolation='nearest',cmap=cmap)
f.colorbar(imimag,ax=ax2)
return f | fb11354f7388e461ac49bcac942a9b6a2b5528d4 | 509 |
from collections import defaultdict
def _tokens_by_class_of(tokens):
"""Generates lookup table of tokens in each class."""
out = defaultdict(set)
for token, token_classes in tokens.items():
for token_class in token_classes:
out[token_class].add(token)
return out | c335582785e4c8a5b82232849ccee579f5ab068f | 510 |
def load_mnist_dataset(shape=(-1, 784), path='data'):
"""Load the original mnist.
Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively.
Parameters
----------
shape : tuple
The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).
path : str
The path that the data is downloaded to.
Returns
-------
X_train, y_train, X_val, y_val, X_test, y_test: tuple
Return splitted training/validation/test set respectively.
Examples
--------
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets')
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
"""
return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/') | 856a758864ef85b4a5bea742dd90a49e997ad9b7 | 511 |
def EntryToSlaveName(entry):
"""Produces slave name from the slaves config dict."""
name = entry.get('slavename') or entry.get('hostname')
if 'subdir' in entry:
return '%s#%s' % (name, entry['subdir'])
return name | 258e68c683592c21ea8111f21ba3ab648ddb8c57 | 513 |
def is_symmetric_re(root: TreeNode) -> bool:
"""Check if a binary tree is a mirror of itself (symmetric around its center)."""
if not root:
return False
def is_mirror(t1, t2):
if not t1 and not t2:
return True
if not t1 or not t2:
return False
return t1.val == t2.val and is_mirror(t1.left, t2.right) and is_mirror(t1.right, t2.left)
return is_mirror(root, root) | b2d0450a881e0a1748575baa8d7c6ae1224fb3c0 | 515 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up paperless from a config entry."""
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | 8793312f379c510e3420e785eba6ce4db3f098c7 | 516 |
import numpy as np
def azimuthal_average(image, center=None, stddev=True, binsize=0.5, interpnan=False):
"""
Modified based on https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py
Calculate the azimuthally averaged radial profile.
Parameters:
image (numpy ndarray): 2-D image
center (list): [x, y] pixel coordinates. If None, use image center.
Note that x is horizontal and y is vertical, y, x = image.shape.
stddev (bool): if True, the stddev of the profile will also be returned.
binsize (float): size of the averaging bin. Can lead to strange results if
non-binsize factors are used to specify the center and the binsize is
too large.
interpnan (bool): Interpolate over NAN values, i.e. bins where there is no data?
Returns:
If `stddev == True`, it will return [radius, profile, stddev];
else, it will return [radius, profile].
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if center is None:
center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
r = np.hypot(x - center[0], y - center[1])
# The 'bins' as initially defined are lower/upper bounds for each bin
# so that values will be in [lower,upper)
nbins = int(np.round(r.max() / binsize) + 1)
maxbin = nbins * binsize
bins = np.linspace(0, maxbin, nbins + 1)
# We're probably more interested in the bin centers than their left or right sides...
bin_centers = (bins[1:] + bins[:-1]) / 2.0
# There are never any in bin 0, because the lowest index returned by digitize is 1
nr = np.histogram(r, bins)[0] # nr is how many pixels are within each bin
# Radial profile itself
profile = np.histogram(r, bins, weights=image)[0] / nr
if interpnan:
profile = np.interp(bin_centers, bin_centers[~np.isnan(profile)],
profile[~np.isnan(profile)])
if stddev:
# Find out which radial bin each point in the map belongs to
# recall that bins are from 1 to nbins
whichbin = np.digitize(r.ravel(), bins)
profile_std = np.array([image.ravel()[whichbin == b].std() for b in range(1, nbins + 1)])
profile_std /= np.sqrt(nr)  # standard error of the mean
return [bin_centers, profile, profile_std]
else:
return [bin_centers, profile] | 3ebadf5fa93cc93e6a5b14327392a2fdecb5d266 | 517 |
import re
def find_assign(data, varname):
"""Finds a substring that looks like an assignment.
:param data: Source to search in.
:param varname: Name of the variable for which an assignment should be
found.
"""
ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
if len(ASSIGN_RE.findall(data)) > 1:
raise PluginError('Found multiple {}-strings.'.format(varname))
if len(ASSIGN_RE.findall(data)) < 1:
raise PluginError('No version assignment ("{}") found.'
.format(varname))
return ASSIGN_RE.search(data).group(2) | 99e1d1436307dd278fbef8b7e52c4d2eedd6d657 | 518 |
import requests
def remove(token: str, server: str="http://localhost:8080/remove", params: dict=None) -> int:
"""
Removes the data associated with the token.
:param token: the token to download the data for
:type token: str
:param server: the URL of the server to upload to
:type server: str
:param params: the additional parameters to send to the server, eg login information (user/password)
:type params: dict
:return: the status code, None if failed to download
:rtype: int
"""
if params is None:
files = {}
else:
files = params.copy()
files['token'] = token
r = requests.post(server, files=files)
return r.status_code | a74f2c5f84ae064a909df717917f4589b59eacb5 | 519 |
import requests
def get_pending_surveys_batch_number(batch_no):
"""
Gets the pending shared surveys for a batch number
:param batch_no: Shared survey batch number
:type batch_no: str
:raises ApiError: Raised when party returns api error
:return: response containing the pending shared surveys
"""
bound_logger = logger.bind(batch_no=batch_no)
bound_logger.info("Attempting to retrieve share surveys by batch number")
url = f"{app.config['PARTY_URL']}/party-api/v1/pending-surveys/{batch_no}"
response = requests.get(url, auth=app.config["BASIC_AUTH"])
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
bound_logger.error("Failed to retrieve share surveys by batch number")
raise ApiError(logger, response)
bound_logger.info("Successfully retrieved share surveys by batch number")
return response | ee31c28c393e29b6cd628aefc38d2ca948c7cdaf | 520 |
def make_sign_initializer(random_sign_init):
"""Random sign intitializer for HyperBatchEnsemble layers."""
if random_sign_init > 0:
return ed.initializers.RandomSign(random_sign_init)
else:
return tf.keras.initializers.RandomNormal(
mean=1.0, stddev=-random_sign_init) | e8ea2653ef9c7c5ba447921d7def990e29a7c9b2 | 522 |
def _parallel_predict_proba(ensemble, X, idx, results):
"""
Compute predictions of SCM estimators
"""
for k in idx:
res = ensemble.estimators[k].predict(X[:, ensemble.estim_features[k]])
results = results + res
return results | b0a2d5c59318506202c9331597ab2a11eacb7a32 | 523 |
import numpy as np
def compute_FP_TP_Probs(Ycorr, Xcorr, Probs, is_tumor, evaluation_mask, Isolated_Tumor_Cells, level):
"""Generates true positive and false positive stats for the analyzed image
Args:
Probs: list of the Probabilities of the detected lesions
Xcorr: list of X-coordinates of the lesions
Ycorr: list of Y-coordinates of the lesions
is_tumor: A boolean variable which is one when the case cotains tumor
evaluation_mask: The evaluation mask
Isolated_Tumor_Cells: list of labels containing Isolated Tumor Cells
level: The level at which the evaluation mask was made
Returns:
FP_probs: A list containing the probabilities of the false positive detections
TP_probs: A list containing the probabilities of the True positive detections
NumberOfTumors: Number of Tumors in the image (excluding Isolate Tumor Cells)
detection_summary: A python dictionary object with keys that are the labels
of the lesions that should be detected (non-ITC tumors) and values
that contain detection details [confidence score, X-coordinate, Y-coordinate].
Lesions that are missed by the algorithm have an empty value.
FP_summary: A python dictionary object with keys that represent the
false positive finding number and values that contain detection
details [confidence score, X-coordinate, Y-coordinate].
"""
max_label = np.amax(evaluation_mask)
FP_probs = []
TP_probs = np.zeros((max_label,), dtype=np.float32)
detection_summary = {}
FP_summary = {}
for i in range(1, max_label + 1):
if i not in Isolated_Tumor_Cells:
label = 'Label ' + str(i)
detection_summary[label] = []
FP_counter = 0
if (is_tumor):
for i in range(0, len(Xcorr)):
# note: the x, y coordinates are switched, I make the x, y to be int, so that the array of evaluation_mask
#HittedLabel = evaluation_mask[int(Xcorr[i] / pow(2, level)), int(Ycorr[i] / pow(2, level))]
HittedLabel = evaluation_mask[int(
Ycorr[i]/pow(2, level)), int(Xcorr[i]/pow(2, level))]
print(HittedLabel)
# HittedLabel = evaluation_mask[int(Ycorr[i]/pow(2, level)), int(Xcorr[i]/pow(2, level))]
# HittedLabel = evaluation_mask[Ycorr[i]/pow(2, level), Xcorr[i]/pow(2, level)]
if HittedLabel == 0:
FP_probs.append(Probs[i])
key = 'FP ' + str(FP_counter)
FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
FP_counter += 1
elif HittedLabel not in Isolated_Tumor_Cells:
if (Probs[i] > TP_probs[HittedLabel - 1]):
label = 'Label ' + str(HittedLabel)
detection_summary[label] = [Probs[i], Xcorr[i], Ycorr[i]]
TP_probs[HittedLabel - 1] = Probs[i]
else:
for i in range(0, len(Xcorr)):
FP_probs.append(Probs[i])
key = 'FP ' + str(FP_counter)
FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
FP_counter += 1
print(FP_counter)
num_of_tumors = max_label - len(Isolated_Tumor_Cells)
# just for diagnose
print('number of isolated tumor cells =', len(Isolated_Tumor_Cells))
return FP_probs, TP_probs, num_of_tumors, detection_summary, FP_summary | 434abefeba56201d20799b1f00783bc77dcbf2c0 | 524 |
def make_sentences(text, src):
"""
Builds a list of dictionaries, one for each sentence resulting from
the sentence parser. The dictionary schema is
{"src": src, "label": 0, "sentence": sent}
Substitutions are made for the identified tokens.
Args:
text (str): text to process
src (str): identifier (file name) to include in the output
Returns:
List[Dict]
"""
no_sec = True
text = text.replace(USC_DOT, USC)
text = text.replace(PL, PL_SPACE)
text = text.replace(EO, EO_SPACE)
sents = [scrubber(sent, no_sec=no_sec) for sent in sent_tokenize(text)]
sent_list = list()
for sent in sents:
if not sent:
continue
sent_list.append({"src": src, "label": 0, "sentence": sent})
return sent_list | 5da57a55f76a3d4d29f1d0ed681b8597e958b9d0 | 525 |
def read_test_case(file_path):
"""
reads one test case from file.
returns contents of test case
Parameters
----------
file_path : str
the path of the test case file to read.
Returns
-------
list
a list of contents of the test case.
"""
file = open(file_path, "r")
number = int(file.readline().strip())
case = list()
for i in range(number):
case.append(file.readline().strip())
file.close()
return case | 6a87ff979d0b1ccf838ebef56401a48760711541 | 526 |
import torch
def accuracy4batch(model, testloader, criterion):
"""save a model checkpoint
INPUT:
model: pytorch nn model.
testloader: DataLoader. test data set
criterion: criterion. loss criterion
device: torch.device. device on which model/data is based
OUTPUT:
accuracy: float in [0:1]. proportion of correct classifications in testloader
test_loss: float. absolute error
"""
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in testloader:
inputs, labels = inputs.to(model.device), labels.to(model.device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
test_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
accuracy = accuracy/len(testloader)
return accuracy, test_loss | 2005984b94f17bf601034953bbea3dca6542143d | 528 |
def pick_an_experiment(i):
"""
Input: {
(repo_uoa) - experiment repository name (defaults to hackathon_local_repo, but can be overridden by '*')
(extra_tags) - extra tags to filter
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
repo_uoa = i.get('repo_uoa', hackathon_local_repo)
extra_tags = i.get('extra_tags')
list_exp_adict = { 'action': 'list_experiments',
'module_uoa': work['self_module_uoa'],
'repo_uoa': repo_uoa,
'extra_tags': extra_tags,
}
r=ck.access( list_exp_adict )
if r['return']>0: return r
if len(r['lst'])==0:
return {'return':1, 'error':'No experiments to choose from - please relax your filters'}
all_experiment_names = [ '{repo_uoa}:{module_uoa}:{data_uoa}'.format(**entry_dict) for entry_dict in r['lst']]
number_of_experiments = len(all_experiment_names)
select_adict = {'action': 'select_string',
'module_uoa': 'misc',
'options': all_experiment_names,
'default': str(number_of_experiments-1),
'question': 'Please select the experiment entry',
}
r=ck.access( select_adict )
if r['return']>0:
return r
else:
cid = r['selected_value']
return {'return':0, 'cid': cid} | 97671967ab153173280a142143fb4b07892c92fa | 530 |
import uuid
import pathlib
def run(args, image: str) -> str:
"""
Run docker image and mount user-provided folder with C++ files.
Parameters
----------
args : dict-like
User provided arguments parsed by argparse.ArgumentParser instance.
image : str
Name of image from which container is run
Returns
-------
str:
Name of created container. Consist of torchlambda prefix and random string
"""
def _add_components(args):
return (
'"' + ";".join(args.aws_components) + '"'
if args.aws_components
else '"core"'
)
def _compilation(args):
return '"' + args.compilation + '"' if args.compilation else ""
container_name = "torchlambda-" + str(uuid.uuid4())
source_directory = pathlib.Path(args.source).absolute()
if source_directory.is_dir():
command = "docker {} run {} -v {}:/usr/local/user_code --name {} {} {} ".format(
*general.parse_none(
args.docker,
args.docker_run,
source_directory,
container_name,
image,
_add_components(args),
)
)
command += _compilation(args)
general.run(
command,
operation="building inference AWS Lambda package.",
silent=args.silent,
)
return container_name
print("torchlambda:: Provided source files are not directory, exiting.")
exit(1) | eba0ab0e93543410ae4a2375ec80a9015168d303 | 531 |
def get_top_design_list(oProject):
"""
Returns a list of the names of the top-level designs.
Parameters
----------
oProject : pywin32 COMObject
The HFSS project in which the operation will be performed.
Returns
-------
design_list : list of str
The top-level design list.
"""
design_list = list(oProject.GetTopDesignList())
return list(map(str, design_list)) | 6610cd68a90e20fd916a2ec13b54f37a75c31050 | 532 |
from typing import Union
from typing import Dict
from typing import Any
def chi01(param_name: Union[str, None], yval: float, **kwargs) -> Dict[str, Any]:
"""Plot defaults for sweep_plotting.chi01"""
kwargs["xlabel"] = kwargs.get("xlabel") or recast_name(param_name)
kwargs["ylabel"] = kwargs.get("ylabel") or r"$\chi_{{01}}$ [{}]".format(
units.get_units()
)
kwargs["title"] = kwargs.get("title") or r"$\chi_{{01}}=${:.4f} {}".format(
yval, units.get_units()
)
return kwargs | 58e5d09152062a9307526bd953ff91832ef80321 | 533 |
def cstring(*args, **kwargs):
"""Return a colored string.
Parameters
----------
args : iterable of str
bold : bool
color : str, {'HEADER', 'LIGHTBLUE', 'LIGHTGREEN', 'WARNING', 'FAIL',
'ENDC', 'BOLD', 'UNDERLINE' 'BLACK', 'RED', 'GREEN',
'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE'}
Terminal color to use.
"""
args, kwargs = _colorize(*args, **kwargs)
cstr = " ".join(args)
return cstr | 69bc81f0e9267743297bec25b2fccc8f2da2c89d | 534 |
from . import routes # Import routes
def create_app():
"""Construct the core application."""
app = Flask(__name__, instance_relative_config=False)
app.config.from_object('config.Config')
db.init_app(app)
admin.init_app(app)
basic_auth.init_app(app)
with app.app_context():
db.create_all() # Create sql tables for our data models
admin.add_view(ArticleView(Articles, db.session))
return app | 28bfb75626a5be05aef21404956f6ef7adf8f80a | 535 |
def bytes_index(x: bytes, sub: bytes, start: int, end: int) -> int:
"""Where is the first location of a subsequence within a given slice of a bytes object?
Compiling bytes.index compiles this function, when sub is a bytes object.
This function is only intended to be executed in this compiled form.
Args:
x: The bytes object in which to search.
sub: The subsequence to look for.
start: Beginning of slice of x. Interpreted as slice notation.
end: End of slice of x. Interpreted as slice notation.
Returns:
Lowest index of match within slice of x.
Raises:
ValueError: If sub is not found.
"""
ret = bytes_find(x, sub, start, end)
if ret == -1:
raise ValueError("subsection not found")
return ret | 3a78e029a96d27fdf5b5cdea397b21f57ca939d9 | 536 |
def uniform(minimum, maximum, shape=[]):
"""uniform(minimum, maximum, shape=[]) returns array of given shape of random reals
in given range"""
if shape == []:
shape = None
return mt.uniform(minimum, maximum, shape) | 882cd915cb7dfec0e1b6857f99ecefe876ae21b1 | 538 |
from urllib.parse import unquote
def unquote_to_bytes(urlencoded_string):
"""Replace %xx escapes by their single-character equivalent,
using the “iso-8859-1” encoding to decode all 8-bit values.
"""
return bytes(
unquote(urlencoded_string, encoding='iso-8859-1'),
encoding='iso-8859-1'
) | 60216d170381e356520c283f308add08754c987d | 539 |
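Illustrative call (assuming the function above is in scope): each %xx escape decodes to its single latin-1 byte.
print(unquote_to_bytes("a%26%F7b"))  # b'a&\xf7b'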
def qualifiedName(item):
"""Return the full name of an item, including any projects that it's in.
If the item does not have a name, return ``None``.
XXX: Doesn't include folders.
"""
names = []
# Note: assumes that the presence of a single null name in the parent tree
# means that the item is not properly named.
for i in iterParents(item):
name = i.name()
if name is None:
return None
names.append(name)
return " / ".join(reversed(names)) | 2a1cdc9e15897c104f63793041ed2d4fe91e383d | 540 |
def assign_columns_of_sector_levels(df_load):
"""
Add additional columns capturing the sector length of the two sector columns
:param df_load: df with at least one sector column
:return: df with new columns for sector length
"""
df = replace_NoneType_with_empty_cells(df_load)
# load cw with column of sector levels
cw = load_sector_length_cw_melt()
# merge df assigning sector lengths
for s in ['Produced', 'Consumed']:
df = df.merge(cw, how='left', left_on=f'Sector{s}By',
right_on='Sector').drop(columns=['Sector']).rename(
columns={'SectorLength': f'Sector{s}ByLength'})
df[f'Sector{s}ByLength'] = df[f'Sector{s}ByLength'].fillna(0)
# There are cases where non-traditional sectors (non naics) have
# multiple naics assignments. If there is a non-zero value in the other
# sector length column, keep that row because sector lengths must always
# match.
# subset df into two dfs, one where one sector column length has a zero
# value and the second where both sector length columns have non-zero
# values
df1 = df[(df['SectorProducedByLength'] == 0) |
(df['SectorConsumedByLength'] == 0)]
df2 = df[(df['SectorProducedByLength'] != 0) &
(df['SectorConsumedByLength'] != 0)]
# only keep rows where the values are equal
df2e = df2[df2['SectorProducedByLength'] == df2['SectorConsumedByLength']]
# concat dfs
dfc = pd.concat([df1, df2e], ignore_index=True)
# check for duplicates. Rows might be duplicated if a sector is the same
# for multiple sector lengths
duplicate_cols = [e for e in dfc.columns if e not in [
'SectorProducedByLength', 'SectorConsumedByLength']]
duplicate_df = dfc[dfc.duplicated(subset=duplicate_cols,
keep=False)].reset_index(drop=True)
if len(duplicate_df) > 0:
log.warning('There are duplicate rows caused by ambiguous sectors.')
dfc = dfc.sort_values(['SectorProducedByLength',
'SectorConsumedByLength']).reset_index(drop=True)
return dfc | 4dd60267702f21e103cab18293975a5f62c934d2 | 541 |
def add_matrices(matrix_a, matrix_b):
"""Add two n x n matrices
"""
return [[x + y for x, y in zip(matrix_a[i], matrix_b[i])]
for i in range(len(matrix_a))] | a9f6a857892872fde584b6884e59a8b624220061 | 542 |
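Illustrative usage (assuming the function above is in scope):
print(add_matrices([[1, 2], [3, 4]], [[5, 6], [7, 8]]))  # [[6, 8], [10, 12]]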
import scipy
def no_pretrain_inner_speech(subject):
"""This function aims at training a model without pretraining by training
only on the inner speech condition of a sigle subject
:return: metric history for every of the n k-folds
:rtype: list of dictonaries
"""
###### DATA
data, events = dp.load_data(subjects=[subject], filter_action=True)
# shuffle data and labels
data, events = sklearn.utils.shuffle(data, events)
# save memory by converting from 64bit to 32bit floats
data = data.astype(np.float32)
# filter out only the inner speech condition
data, events = dp.choose_condition(data, events, 'inner speech')
# select the column containing directions (up, down, left, right)
events = events[:, 1]
# one-hot event data
events = np_utils.to_categorical(events, 4)
# zscore normalize the data
data = scipy.stats.zscore(data, axis=2)
# reshape
data = data.reshape(*data.shape, 1)
print("Data Prepared.")
###### MODEL
gpus = tf.config.list_logical_devices('GPU')
mirrored_strategy = tf.distribute.MirroredStrategy(gpus)
with mirrored_strategy.scope():
# create EEGNet (source: https://github.com/vlawhern/arl-eegmodels)
model = EEGNet(nb_classes=4, Chans=data.shape[1],
Samples=data.shape[2], dropoutRate=DROPOUT,
kernLength=KERNEL_LENGTH, F1=8, D=2, F2=16,
dropoutType='Dropout')
# adam optimizer
optimizer = tf.keras.optimizers.Adam()
# compile model
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.build(input_shape=(BATCH_SIZE, *data.shape[1:]))
path = './models/saved_models/no_pretrain_inner_speech'
model.save(path)
del model
###### KFOLD TRAINING
history_accumulator = []
for _ in range(N_CHECKS):
history = kfold_training(data, events, path, BATCH_SIZE, EPOCHS)
history_accumulator += history
print(history_accumulator)
print("Subject", subject, " Mean Accuracy:", np.mean([h['val_accuracy'][-1] for h in history_accumulator]))
return history_accumulator | daa08a8ea88838b8e66c3fc23c2b6997bfdff490 | 543 |
from typing import Any
def build_put_big_decimal_negative_decimal_request(**kwargs: Any) -> HttpRequest:
"""Put big decimal value -99999999.99.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: The default value is -99999999.99. Note that overriding this default value may
result in unsupported behavior.
:paramtype json: float
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
json = kwargs.pop("json", -99999999.99) # type: float
accept = "application/json"
# Construct URL
url = "/number/big/decimal/-99999999.99"
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, **kwargs) | e41800c8bee6a9c4874b3625ed374c9514c81fe7 | 544 |
def make_subplots(
rows=1,
cols=1,
shared_xaxes=False,
shared_yaxes=False,
start_cell="top-left",
print_grid=False,
horizontal_spacing=None,
vertical_spacing=None,
subplot_titles=None,
column_widths=None,
row_heights=None,
specs=None,
insets=None,
column_titles=None,
row_titles=None,
x_title=None,
y_title=None,
figure=None,
**kwargs,
) -> go.Figure:
"""
Return an instance of plotly.graph_objs.Figure with predefined subplots
configured in 'layout'.
Parameters
----------
rows: int (default 1)
Number of rows in the subplot grid. Must be greater than zero.
cols: int (default 1)
Number of columns in the subplot grid. Must be greater than zero.
shared_xaxes: boolean or str (default False)
Assign shared (linked) x-axes for 2D cartesian subplots
- True or 'columns': Share axes among subplots in the same column
- 'rows': Share axes among subplots in the same row
- 'all': Share axes across all subplots in the grid.
shared_yaxes: boolean or str (default False)
Assign shared (linked) y-axes for 2D cartesian subplots
- 'columns': Share axes among subplots in the same column
- True or 'rows': Share axes among subplots in the same row
- 'all': Share axes across all subplots in the grid.
start_cell: 'bottom-left' or 'top-left' (default 'top-left')
Choose the starting cell in the subplot grid used to set the
domains_grid of the subplots.
- 'top-left': Subplots are numbered with (1, 1) in the top
left corner
- 'bottom-left': Subplots are numbererd with (1, 1) in the bottom
left corner
print_grid: boolean (default True):
If True, prints a string representation of the plot grid. Grid may
also be printed using the `Figure.print_grid()` method on the
resulting figure.
horizontal_spacing: float (default 0.2 / cols)
Space between subplot columns in normalized plot coordinates. Must be
a float between 0 and 1.
Applies to all columns (use 'specs' subplot-dependents spacing)
vertical_spacing: float (default 0.3 / rows)
Space between subplot rows in normalized plot coordinates. Must be
a float between 0 and 1.
Applies to all rows (use 'specs' subplot-dependents spacing)
subplot_titles: list of str or None (default None)
Title of each subplot as a list in row-major ordering.
Empty strings ("") can be included in the list if no subplot title
is desired in that space so that the titles are properly indexed.
specs: list of lists of dict or None (default None)
Per subplot specifications of subplot type, row/column spanning, and
spacing.
ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
- Indices of the outer list correspond to subplot grid rows
starting from the top, if start_cell='top-left',
or bottom, if start_cell='bottom-left'.
The number of rows in 'specs' must be equal to 'rows'.
- Indices of the inner lists correspond to subplot grid columns
starting from the left. The number of columns in 'specs'
must be equal to 'cols'.
- Each item in the 'specs' list corresponds to one subplot
in a subplot grid. (N.B. The subplot grid has exactly 'rows'
times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
- Note that specs[0][0] has the specs of the 'start_cell' subplot.
- Each item in 'specs' is a dictionary.
The available keys are:
* type (string, default 'xy'): Subplot type. One of
- 'xy': 2D Cartesian subplot type for scatter, bar, etc.
- 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
- 'polar': Polar subplot for scatterpolar, barpolar, etc.
- 'ternary': Ternary subplot for scatterternary
- 'mapbox': Mapbox subplot for scattermapbox
- 'domain': Subplot type for traces that are individually
positioned. pie, parcoords, parcats, etc.
- trace type: A trace type which will be used to determine
the appropriate subplot type for that trace
* secondary_y (bool, default False): If True, create a secondary
y-axis positioned on the right side of the subplot. Only valid
if type='xy'.
* colspan (int, default 1): number of subplot columns
for this subplot to span.
* rowspan (int, default 1): number of subplot rows
for this subplot to span.
* l (float, default 0.0): padding left of cell
* r (float, default 0.0): padding right of cell
            * t (float, default 0.0): padding top of cell
* b (float, default 0.0): padding bottom of cell
- Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
the spacing in between the subplots.
insets: list of dict or None (default None):
Inset specifications. Insets are subplots that overlay grid subplots
- Each item in 'insets' is a dictionary.
The available keys are:
* cell (tuple, default=(1,1)): (row, col) index of the
subplot cell to overlay inset axes onto.
* type (string, default 'xy'): Subplot type
* l (float, default=0.0): padding left of inset
in fraction of cell width
* w (float or 'to_end', default='to_end') inset width
in fraction of cell width ('to_end': to cell right edge)
* b (float, default=0.0): padding bottom of inset
in fraction of cell height
* h (float or 'to_end', default='to_end') inset height
in fraction of cell height ('to_end': to cell top edge)
column_widths: list of numbers or None (default None)
        list of length `cols` of the relative widths of each column of subplots.
Values are normalized internally and used to distribute overall width
of the figure (excluding padding) among the columns.
For backward compatibility, may also be specified using the
`column_width` keyword argument.
row_heights: list of numbers or None (default None)
list of length `rows` of the relative heights of each row of subplots.
If start_cell='top-left' then row heights are applied top to bottom.
Otherwise, if start_cell='bottom-left' then row heights are applied
bottom to top.
For backward compatibility, may also be specified using the
`row_width` kwarg. If specified as `row_width`, then the width values
are applied from bottom to top regardless of the value of start_cell.
This matches the legacy behavior of the `row_width` argument.
column_titles: list of str or None (default None)
list of length `cols` of titles to place above the top subplot in
each column.
row_titles: list of str or None (default None)
list of length `rows` of titles to place on the right side of each
row of subplots. If start_cell='top-left' then row titles are
applied top to bottom. Otherwise, if start_cell='bottom-left' then
row titles are applied bottom to top.
x_title: str or None (default None)
Title to place below the bottom row of subplots,
centered horizontally
y_title: str or None (default None)
Title to place to the left of the left column of subplots,
centered vertically
figure: go.Figure or None (default None)
If None, a new go.Figure instance will be created and its axes will be
populated with those corresponding to the requested subplot geometry and
this new figure will be returned.
If a go.Figure instance, the axes will be added to the
layout of this figure and this figure will be returned. If the figure
already contains axes, they will be overwritten.
Examples
--------
Example 1:
>>> # Stack two subplots vertically, and add a scatter trace to each
>>> from plotly.subplots import make_subplots
>>> import plotly.graph_objects as go
>>> fig = make_subplots(rows=2)
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
[ (2,1) xaxis2,yaxis2 ]
>>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
or see Figure.append_trace
Example 2:
    >>> # Stack two subplots vertically with shared x-axes and add a scatter trace to each
>>> fig = make_subplots(rows=2, shared_xaxes=True)
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
[ (2,1) xaxis2,yaxis2 ]
>>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 3:
>>> # irregular subplot layout (more examples below under 'specs')
>>> fig = make_subplots(rows=2, cols=2,
... specs=[[{}, {}],
... [{'colspan': 2}, None]])
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ] [ (1,2) xaxis2,yaxis2 ]
[ (2,1) xaxis3,yaxis3 - ]
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 4:
>>> # insets
>>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
This is the format of your plot grid:
[ (1,1) xaxis1,yaxis1 ]
With insets:
[ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
>>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
Figure(...)
Example 5:
>>> # include subplot titles
>>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
This is the format of your plot grid:
[ (1,1) x1,y1 ]
[ (2,1) x2,y2 ]
>>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
Figure(...)
>>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
Figure(...)
Example 6:
Subplot with mixed subplot types
>>> fig = make_subplots(rows=2, cols=2,
... specs=[[{'type': 'xy'}, {'type': 'polar'}],
... [{'type': 'scene'}, {'type': 'ternary'}]])
>>> fig.add_traces(
... [go.Scatter(y=[2, 3, 1]),
... go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
... go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
... go.Scatterternary(a=[0.1, 0.2, 0.1],
... b=[0.2, 0.3, 0.1],
... c=[0.7, 0.5, 0.8])],
... rows=[1, 1, 2, 2],
... cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
Figure(...)
"""
return _sub.make_subplots(
rows,
cols,
shared_xaxes,
shared_yaxes,
start_cell,
print_grid,
horizontal_spacing,
vertical_spacing,
subplot_titles,
column_widths,
row_heights,
specs,
insets,
column_titles,
row_titles,
x_title,
y_title,
figure,
**kwargs,
) | 1119aba8e9b9f35b18959f765e4528e2d065a5b8 | 545 |
import gdb  # assumed: this snippet is meant to run inside GDB's embedded Python interpreter
def op_table(name):
"""Get the symbol `name' as an int8_t[]."""
    # gdb.lookup_type is assumed here; the original bogus `from re import T` could not have worked
    return gdb.parse_and_eval("&'" + name + "'").cast(gdb.lookup_type('int8_t').pointer()) | 8266128cb1bf59b9d71d7dabb7d002ff22e41192 | 546
from typing import Any, Dict
import numpy as np
def nf_masks_to_neurof_dict(binary_masks: np.ndarray, dataset_name: str) -> Dict[str, Any]:
"""
Take as input a tensor of binary mask and produces dict format for neurofinder
Args:
binary_masks: 3d ndarray (components x dimension 1 x dimension 2)
        dataset_name: name of the dataset
Returns:
dset: dict
dataset in neurofinder format to be saved in json
"""
regions = []
for m in binary_masks:
coords = [[int(x), int(y)] for x, y in zip(*np.where(m))]
regions.append({"coordinates": coords})
dset = {"regions": regions, "dataset": dataset_name}
return dset | daee76afc56e6e0da0939530a49f4a77b4c1d5f6 | 547 |
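A minimal usage sketch for the converter above, using two toy 2x2 binary masks; it only exercises the dict construction, not any downstream neurofinder tooling.
import numpy as np

# two toy binary masks stacked along the first (component) axis
masks = np.array([[[1, 0], [0, 0]],
                  [[0, 0], [0, 1]]])
dset = nf_masks_to_neurof_dict(masks, "demo_dataset")
# {'regions': [{'coordinates': [[0, 0]]}, {'coordinates': [[1, 1]]}], 'dataset': 'demo_dataset'}
print(dset)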
def get_domain_machine_command():
"""Retrieves a collection of Machines that have communicated to or from a given domain address.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
headers = ['ID', 'ComputerDNSName', 'OSPlatform', 'LastIPAddress', 'LastExternalIPAddress', 'HealthStatus',
'RiskScore', 'ExposureLevel']
domain = demisto.args().get('domain')
response = get_domain_machines_request(domain)
machines_list = get_machines_list(response)
human_readable = tableToMarkdown(f'Machines that have communicated with {domain} domain:', machines_list,
headers=headers, removeNull=True)
context_output = {
'Domain': domain,
'Machines': machines_list
}
entry_context = {
'MicrosoftATP.DomainMachine(val.Domain === obj.Domain)': context_output
}
return human_readable, entry_context, response | 0e614d15abd3e99408d4a3d7a93ecc49dba694ad | 548 |
from copy import copy
from functools import reduce
from funcy import get_in  # assumed import: funcy's get_in(coll, path) matches the call pattern below
def flatten_dict(source_dict, name_delimiter='_', inner_name=False):
"""
    Flatten a nested dict
    Parameters
    ----------
    source_dict : nested dict
    name_delimiter : str, delimiter used to join nested key names (ignored when inner_name is True)
    inner_name : bool, if True use the innermost key name as the returned dict key (default False)
    Returns
    -------
    flattened dict
Examples
--------
>>> from tidyframe import flatten_dict
>>> nest_dict = {
... 'a': 1,
... 'b': [1, 2],
... 'c': {
... 'cc1': 3,
... 'cc2': 4
... },
... 'd': {
... 'd1': 5,
... 'd2': {
... 'dd1': 6,
... 'dd2': 7
... }
... }
... }
>>> flatten_dict(nest_dict)
{'a': 1, 'b': [1, 2], 'c_cc1': 3, 'c_cc2': 4, 'd_d1': 5, 'd_d2_dd1': 6, 'd_d2_dd2': 7}
>>> flatten_dict(nest_dict, inner_name=True)
{'a': 1, 'b': [1, 2], 'cc1': 3, 'cc2': 4, 'd1': 5, 'dd1': 6, 'dd2': 7}
"""
    assert isinstance(source_dict, dict), "source_dict is not a dict"
json_name = {}
for key in source_dict.keys():
if isinstance(get_in(source_dict, [key]), dict):
val = [True, [key]]
json_name.update({key: val})
else:
val = [False, [key]]
json_name.update({key: val})
while True:
key_inner = list(filter(lambda x: json_name.get(x)[0], json_name))
if key_inner:
for x in key_inner:
dict_to_update_json_name = {}
val = json_name.get(x)[1]
for key in get_in(source_dict, val).keys():
val_in = copy(val)
val_in.append(key)
if isinstance(get_in(source_dict, val_in), dict):
dict_to_update = {
reduce(lambda x, y: x + name_delimiter + y, val_in):
[True, val_in]
}
else:
dict_to_update = {
reduce(lambda x, y: x + name_delimiter + y, val_in):
[False, val_in]
}
dict_to_update_json_name.update(dict_to_update)
json_name.update(dict_to_update_json_name)
json_name.pop(x)
else:
break
if inner_name:
return {
json_name.get(x)[1][-1]: get_in(source_dict,
json_name.get(x)[1])
for x in json_name.keys()
}
else:
return {
x: get_in(source_dict,
json_name.get(x)[1])
for x in json_name.keys()
} | fbba7666c25f5eafd47642b8308a486e25cdc6f9 | 549 |
def matrix_multiply(A, B):
""" Multiply two matrices A and B.
    :param A: the left matrix
    :param B: the right matrix
:return: A * B
"""
# define m and n for the matrix as well as l, the connecting dimension between A and B
m, l, n = len(A), len(A[0]), len(B[0])
# initialize an all zeros matrix
C = [[0.0 for _ in range(len(B[0]))] for _ in range(len(A))]
    # iterate over the rows of C
for i in range(m):
        # iterate over the columns of C
for j in range(n):
# set C[i][j] to the dot product of ith row of A and the jth column of B
C[i][j] = sum(A[i][k] * B[k][j] for k in range(l))
# return the matrix C = A @ B
return C | 3cd551ea87d9f925654a4153106c2fe87e33fa8c | 550 |
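A quick sanity check of the pure-Python multiply above on a 2x2 case (no assumptions beyond plain lists of floats):
A = [[1.0, 2.0],
     [3.0, 4.0]]
B = [[5.0, 6.0],
     [7.0, 8.0]]
# expected: [[19.0, 22.0], [43.0, 50.0]]
print(matrix_multiply(A, B))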
def hard_light(image1, image2):
"""
    Superimposes two images on top of each other using the Hard Light algorithm
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_hard_light(image2.im)) | dcfd25a83f142c2d42c38a787569001af6f6bfc9 | 551 |
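A hedged usage sketch: `hard_light` relies on PIL's C-level `chop_hard_light`, so both inputs must share the same size and mode; solid-colour images are generated here so the snippet runs without external files.
from PIL import Image

# two same-size, same-mode images (requirement of the C-level chop operation)
base = Image.new("RGB", (64, 64), (128, 64, 200))
overlay = Image.new("RGB", (64, 64), (200, 200, 50))
blended = hard_light(base, overlay)
print(blended.size, blended.mode)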
def show(*actors, **options):
"""
Create on the fly an instance of class ``Plotter`` and show the object(s) provided.
Allowed input objects types are:
``str``, ``Mesh``, ``Volume``, ``Picture``, ``Assembly``
``vtkPolyData``, ``vtkActor``, ``vtkActor2D``, ``vtkImageActor``,
``vtkAssembly`` or ``vtkVolume``.
If filename is given, its type is guessed based on its extension.
Supported formats are:
`vtu, vts, vtp, ply, obj, stl, 3ds, xml, neutral, gmsh, pcd, xyz, txt, byu,
tif, slc, vti, mhd, png, jpg`.
:param int at: number of the renderer to plot to, if more than one exists
:param list shape: Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a shape as string descriptor. E.g.:
- shape="3|1" means 3 plots on the left and 1 on the right,
- shape="4/2" means 4 plots on top of 2 at bottom.
:param int axes: set the type of axes to be shown
- 0, no axes
- 1, draw three gray grid walls
- 2, show cartesian axes from (0,0,0)
- 3, show positive range of cartesian axes from (0,0,0)
- 4, show a triad at bottom left
- 5, show a cube at bottom left
- 6, mark the corners of the bounding box
- 7, draw a 3D ruler at each side of the cartesian axes
- 8, show the ``vtkCubeAxesActor`` object
        - 9, show the bounding box outline
- 10, show three circles representing the maximum bounding box
- 11, show a large grid on the x-y plane
- 12, show polar axes
- 13, draw a simple ruler at the bottom of the window
Axis type-1 can be fully customized by passing a dictionary ``axes=dict()`` where:
Check ``addons.Axes()`` for the full list of options.
:param float azimuth/elevation/roll: move camera accordingly
:param str viewup: either ['x', 'y', 'z'] or a vector to set vertical direction
:param bool resetcam: re-adjust camera position to fit objects
:param dict camera: Camera parameters can further be specified with a dictionary
assigned to the ``camera`` keyword (E.g. `show(camera={'pos':(1,2,3), 'thickness':1000,})`)
- pos, `(list)`, the position of the camera in world coordinates
- focalPoint `(list)`, the focal point of the camera in world coordinates
- viewup `(list)`, the view up direction for the camera
- distance `(float)`, set the focal point to the specified distance from the camera position.
- clippingRange `(float)`, distance of the near and far clipping planes along the direction
of projection.
- parallelScale `(float)`,
scaling used for a parallel projection, i.e. the height of the viewport
in world-coordinate distances. The default is 1. Note that the "scale" parameter works as
an "inverse scale", larger numbers produce smaller images.
This method has no effect in perspective projection mode.
- thickness `(float)`,
set the distance between clipping planes. This method adjusts the far clipping
plane to be set a distance 'thickness' beyond the near clipping plane.
- viewAngle `(float)`,
the camera view angle, which is the angular height of the camera view
measured in degrees. The default angle is 30 degrees.
This method has no effect in parallel projection mode.
The formula for setting the angle up for perfect perspective viewing is:
angle = 2*atan((h/2)/d) where h is the height of the RenderWindow
(measured by holding a ruler up to your screen) and d is the distance
from your eyes to the screen.
:param bool interactive: pause and interact with window (True)
or continue execution (False)
:param float rate: maximum rate of `show()` in Hertz
:param int interactorStyle: set the type of interaction
- 0 = TrackballCamera [default]
- 1 = TrackballActor
- 2 = JoystickCamera
- 3 = JoystickActor
- 4 = Flight
- 5 = RubberBand2D
- 6 = RubberBand3D
- 7 = RubberBandZoom
- 8 = Context
- 9 = 3D
- 10 = Terrain
- 11 = Unicam
:param bool q: force program to quit after `show()` command returns.
:param bool new: if set to `True`, a call to ``show`` will instantiate
a new ``Plotter`` object (a new window) instead of reusing the first created.
:return: the current ``Plotter`` class instance.
.. note:: With multiple renderers, keyword ``at`` can become a `list`, e.g.
.. code-block:: python
from vedo import *
s = Sphere()
c = Cube()
p = Paraboloid()
show(s, c, at=[0, 1], shape=(3,1))
show(p, at=2, interactive=True)
#
# is equivalent to:
plt = Plotter(shape=(3,1))
s = Sphere()
c = Cube()
p = Paraboloid()
plt.show(s, at=0)
plt.show(p, at=1)
plt.show(c, at=2, interactive=True)
"""
at = options.pop("at", None)
shape = options.pop("shape", (1, 1))
N = options.pop("N", None)
pos = options.pop("pos", (0, 0))
size = options.pop("size", "auto")
screensize = options.pop("screensize", "auto")
title = options.pop("title", "")
bg = options.pop("bg", "white")
bg2 = options.pop("bg2", None)
axes = options.pop("axes", settings.defaultAxesType)
interactive = options.pop("interactive", None)
offscreen = options.pop("offscreen", False)
sharecam = options.pop("sharecam", True)
resetcam = options.pop("resetcam", True)
zoom = options.pop("zoom", None)
viewup = options.pop("viewup", "")
azimuth = options.pop("azimuth", 0)
elevation = options.pop("elevation", 0)
roll = options.pop("roll", 0)
camera = options.pop("camera", None)
interactorStyle = options.pop("interactorStyle", 0)
q = options.pop("q", False)
newPlotter = options.pop("new", False)
if len(options):
for op in options:
printc("Warning: unknown keyword in show():", op, c='y')
if len(actors) == 0:
actors = None
elif len(actors) == 1:
actors = actors[0]
else:
actors = utils.flatten(actors)
if settings.plotter_instance and not newPlotter: # Plotter exists
plt = settings.plotter_instance
else: # Plotter must be created
if utils.isSequence(at): # user passed a sequence for "at"
if not utils.isSequence(actors):
printc("show() Error: input must be a list.", c='r')
raise RuntimeError()
if len(at) != len(actors):
printc("show() Error: lists 'input' and 'at', must have equal lengths.", c='r')
raise RuntimeError()
if len(at) > 1 and (shape == (1, 1) and N is None):
N = max(at) + 1
elif at is None and (N or shape != (1, 1)):
if not utils.isSequence(actors):
printc('show() Error: N or shape is set, but input is not a sequence.', c='r')
printc(' you may need to specify e.g. at=0', c='r')
raise RuntimeError()
at = list(range(len(actors)))
plt = Plotter(
shape=shape,
N=N,
pos=pos,
size=size,
screensize=screensize,
title=title,
axes=axes,
sharecam=sharecam,
resetcam=resetcam,
interactive=interactive,
offscreen=offscreen,
bg=bg,
bg2=bg2,
)
# use _plt_to_return because plt.show() can return a k3d/panel plot
_plt_to_return = None
if utils.isSequence(at):
for i, a in enumerate(actors):
_plt_to_return = plt.show(
a,
at=i,
zoom=zoom,
resetcam=resetcam,
viewup=viewup,
azimuth=azimuth,
elevation=elevation,
roll=roll,
camera=camera,
interactive=False,
interactorStyle=interactorStyle,
bg=bg,
bg2=bg2,
axes=axes,
q=q,
)
plt.interactive = interactive
if interactive or len(at)==N \
or (isinstance(shape[0],int) and len(at)==shape[0]*shape[1]):
# note that shape can be a string
if not offscreen:
plt.interactor.Start()
else:
_plt_to_return = plt.show(
actors,
at=at,
zoom=zoom,
resetcam=resetcam,
viewup=viewup,
azimuth=azimuth,
elevation=elevation,
roll=roll,
camera=camera,
interactive=interactive,
interactorStyle=interactorStyle,
bg=bg,
bg2=bg2,
axes=axes,
q=q,
)
return _plt_to_return | e95c9bbd325e8e6da6ee4f33ca861496c3048bd3 | 552 |
from typing import List
def get_followup_question_list(intent: str) -> List[str]:
"""
Get all imported followup questions for this intent as a list
    * `intent`: the `name` parameter of the YAML section from which the follow-up questions were imported
**Returns:** None if no followup questions are known for this intent, otherwise list of followup questions for this intent
"""
return None if not qa.get(intent) else qa.get(intent).followup_questions | 00581a497d4e09670edaa0d44870a2a0d7589ada | 554 |
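A hedged sketch of the module-level `qa` registry this function assumes: each value is only required to expose a `followup_questions` attribute, so a `SimpleNamespace` stand-in is used here purely for illustration.
from types import SimpleNamespace

# hypothetical stand-in for the real `qa` mapping assumed by get_followup_question_list
qa = {
    "opening_hours": SimpleNamespace(followup_questions=["Are you open on holidays?",
                                                         "When do you close today?"]),
}
print(get_followup_question_list("opening_hours"))   # the two follow-ups above
print(get_followup_question_list("unknown_intent"))  # None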
from typing import Tuple
import numpy as np
def event_train_test_split(
    evs: np.ndarray, n_evs: int, train_split: float, random_seed: int=1
) -> Tuple[np.ndarray, np.ndarray]:
    """Randomly split an array of events into train and test subsets.
    Args:
        evs (np.ndarray): 1d array of event identifiers to split.
        n_evs (int): total number of events in `evs`.
        train_split (float): fraction of events assigned to the train set, 0 < train_split < 1.
        random_seed (int, optional): seed for the local random state. Defaults to 1.
    Returns:
        Tuple[np.ndarray, np.ndarray]: (train_events, test_events)
    """
# some basic checks
assert 0 < train_split < 1, "Variable train_split (ts) must be 0<ts<1."
assert n_evs > 1, "Need more than 1 event to split."
# set the random state locally
r = np.random.RandomState(random_seed)
# compute the number of test and train samples
train_samples = int(np.round(train_split * n_evs, 0))
test_samples = int(n_evs - train_samples)
# split the events
train_events = r.choice(evs, train_samples, replace=False)
test_events = evs[~np.isin(evs, train_events)]
# make sure they add up to the total number!
assert len(train_events) + len(test_events) == n_evs
return train_events, test_events | 69deb28d199fc8636ed45a90630a69753f4c1066 | 555 |
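A small worked example of the split, assuming `evs` is a 1d array of event IDs and `n_evs` equals its length:
import numpy as np

events = np.arange(10)
train, test = event_train_test_split(events, n_evs=len(events), train_split=0.8, random_seed=1)
print(len(train), len(test))  # 8 2
print(sorted(np.concatenate([train, test])))  # all 10 events, no overlap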
def get_cd(wcs, n=1):
"""
Get the value of the change in world coordinate per pixel across a linear axis.
Defaults to wcs.wcs.cd if present. Does not support rotated headers (e.g.,
with nonzero CDm_n where m!=n)
"""
if hasattr(wcs.wcs,'cd'):
if wcs.wcs.cd[n-1,n-1] != 0:
return wcs.wcs.cd[n-1,n-1]
else:
return wcs.wcs.get_cdelt()[n-1] | 9b31c81a1a5e87efeb201ffef7f8f65f846fe0b7 | 556 |
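A hedged example, assuming an `astropy.wcs.WCS` built in memory with only CDELT set; since no CD matrix is present, `get_cd` falls back to `get_cdelt()`.
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.crpix = [1.0, 1.0]
w.wcs.cdelt = [-0.0002777, 0.0002777]   # degrees per pixel
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
print(get_cd(w, n=1))  # -0.0002777, taken from get_cdelt() because no CD matrix is defined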
def mock_clear():
"""Clear MOCK_DATA_HEAP"""
MOCK_DATA_HEAP.clear()
return "" | 96212726db3f1e29ac4da54e62df70cb89ba1f2e | 557 |
from datetime import datetime
def cls_merge_type(classification):
""" classification type이 2가지일 때 합쳐주는 함수
Parameters
----------
classification: cls
classification 리스트
Returns
-------
list of cls
변환된 classification 리스트
"""
cls_type = {'instant' if cls.get('instant_datetime') else 'not_instant' for cls in classification }
if len(cls_type) == 2:
for cls in classification:
instant_datetime = cls.get('instant_datetime')
if instant_datetime:
year = instant_datetime.year
                start_datetime = datetime(year, 1, 1)  # set to January 1st of the given year
end_datetime = instant_datetime
cls['instant_datetime'] = None
cls['start_datetime'] = start_datetime
cls['end_datetime'] = end_datetime
return classification | e6a59f45adecdc21acb81f6890ac79cfaa93b4d6 | 558 |
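A hedged usage sketch, assuming each classification is a dict carrying `instant_datetime`, `start_datetime`, and `end_datetime` keys as the code above implies:
from datetime import datetime

classification = [
    {"instant_datetime": datetime(2020, 6, 30), "start_datetime": None, "end_datetime": None},
    {"instant_datetime": None, "start_datetime": datetime(2020, 1, 1), "end_datetime": datetime(2020, 12, 31)},
]
merged = cls_merge_type(classification)
# the instant entry is rewritten as a period from 2020-01-01 to 2020-06-30
print(merged[0]["start_datetime"], merged[0]["end_datetime"])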
def duplicate_detector(gate_orders: list[tuple[str]]) -> int:
"""Detects any schematics that have an identical combination of gates."""
difference = len(gate_orders) - len(list(set(gate_orders))) # List - list with no duplicates
return difference | e439a106abc0ff21bfe9773b3185d35b5bf05aa0 | 559 |
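A tiny check of the duplicate count (no assumptions beyond tuples of gate names):
orders = [("AND", "OR"), ("XOR", "NOT"), ("AND", "OR")]
print(duplicate_detector(orders))  # 1 -- one schematic repeats an existing gate combination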
def permutations(x):
"""Return all permutations of x"""
def fn(i):
if i == len(x): ans.append(x.copy())
for k in range(i, len(x)):
x[i], x[k] = x[k], x[i]
fn(i+1)
x[i], x[k] = x[k], x[i]
ans = []
fn(0)
return ans | 691c701e1ac17da5dabb0fc3fe607ff68ac8fcdc | 560 |
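A small usage example; note the helper mutates and restores `x` in place while collecting copies:
print(permutations([1, 2, 3]))
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]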
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from nilearn.masking import unmask  # assumed: the brain image is rebuilt via nilearn's unmask
# `weight_priors` is assumed to be a helper provided elsewhere in the gclda package
def encode(model, text, out_file=None, topic_priors=None, prior_weight=1.0):
"""
Perform text-to-image encoding.
Parameters
----------
model : :obj:`gclda.model.Model`
Model object needed for decoding.
text : :obj:`str` or :obj:`list`
Text to encode into an image.
out_file : :obj:`str`, optional
If not None, writes the encoded image to a file.
topic_priors : :obj:`numpy.ndarray` of :obj:`float`, optional
A 1d array of size (n_topics) with values for topic weighting.
If None, no weighting is done. Default is None.
prior_weight : :obj:`float`, optional
The weight by which the prior will affect the encoding.
Default is 1.
Returns
-------
img : :obj:`nibabel.Nifti1Image`
The encoded image.
topic_weights : :obj:`numpy.ndarray` of :obj:`float`
The weights of the topics used in encoding.
Notes
-----
====================== ==============================================================
Notation Meaning
====================== ==============================================================
:math:`v` Voxel
:math:`t` Topic
:math:`w` Word type
:math:`h` Input text
    :math:`p(v|t)`         Probability of voxel given topic (``p_voxel_g_topic``)
:math:`\\tau_{t}` Topic weight vector (``topic_weights``)
:math:`p(w|t)` Probability of word type given topic (``p_word_g_topic``)
:math:`\omega` 1d array from input image (``input_values``)
====================== ==============================================================
1. Compute :math:`p(v|t)`
(``p_voxel_g_topic``).
- From :obj:`gclda.model.Model.get_spatial_probs()`
2. Compute :math:`p(t|w)`
(``p_topic_g_word``).
3. Vectorize input text according to model vocabulary.
4. Reduce :math:`p(t|w)` to only include word types in input text.
5. Compute :math:`p(t|h)` (``p_topic_g_text``) by multiplying :math:`p(t|w)`
by word counts for input text.
6. Sum topic weights (:math:`\\tau_{t}`) across
words.
- :math:`\\tau_{t} = \sum_{i}{p(t|h_{i})}`
7. Compute voxel
weights.
- :math:`p(v|h) \propto p(v|t) \cdot \\tau_{t}`
8. The resulting array (``voxel_weights``) reflects arbitrarily scaled
voxel weights for the input text.
9. Unmask and reshape ``voxel_weights`` into brain image.
"""
if isinstance(text, list):
text = " ".join(text)
# Assume that words in word_labels are underscore-separated.
# Convert to space-separation for vectorization of input string.
vocabulary = [term.replace("_", " ") for term in model.dataset.word_labels]
max_len = max([len(term.split(" ")) for term in vocabulary])
vectorizer = CountVectorizer(
vocabulary=model.dataset.word_labels, ngram_range=(1, max_len)
)
word_counts = np.squeeze(vectorizer.fit_transform([text]).toarray())
keep_idx = np.where(word_counts > 0)[0]
text_counts = word_counts[keep_idx]
n_topics_per_word_token = np.sum(model.n_word_tokens_word_by_topic, axis=1)
p_topic_g_word = (
model.n_word_tokens_word_by_topic / n_topics_per_word_token[:, None]
)
p_topic_g_word = np.nan_to_num(p_topic_g_word, 0)
p_topic_g_text = p_topic_g_word[keep_idx] # p(T|W) for words in text only
prod = p_topic_g_text * text_counts[:, None] # Multiply p(T|W) by words in text
topic_weights = np.sum(prod, axis=0) # Sum across words
if topic_priors is not None:
weighted_priors = weight_priors(topic_priors, prior_weight)
topic_weights *= weighted_priors
_, p_voxel_g_topic = model.get_spatial_probs()
voxel_weights = np.dot(p_voxel_g_topic, topic_weights)
img = unmask(voxel_weights, model.dataset.mask_img)
if out_file is not None:
img.to_filename(out_file)
return img, topic_weights | 941745be7b84c3af9d7f7e62cb2d93fabc3b22c1 | 562 |
def clean_string(s: str) -> str:
"""Cleans and returns an input string
>>> clean_string(" xYz ")
'XYZ'
"""
return str(s).strip().upper() | c97281505492ded5b9167076312959c5eee41a6c | 563 |
import collections
import pandas as pd
def get_unique_region_cov_df(unique_region_dict, fuzzer_names):
"""Returns a DataFrame where the two columns are fuzzers and the number
of unique regions covered."""
fuzzers = collections.defaultdict(int)
for region in unique_region_dict:
for fuzzer in unique_region_dict[region]:
fuzzers[fuzzer] += 1
dict_to_transform = {'fuzzer': [], 'unique_regions_covered': []}
for fuzzer in fuzzer_names:
covered_num = fuzzers[fuzzer]
dict_to_transform['fuzzer'].append(fuzzer)
dict_to_transform['unique_regions_covered'].append(covered_num)
return pd.DataFrame(dict_to_transform) | 923227fb804549252bf51cd94e65180c3f8564e8 | 564 |
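A short example, assuming `unique_region_dict` maps region identifiers to the list of fuzzers that covered them:
unique_regions = {
    "region_a": ["afl", "libfuzzer"],
    "region_b": ["afl"],
}
df = get_unique_region_cov_df(unique_regions, ["afl", "libfuzzer", "honggfuzz"])
print(df)  # afl covers 2 unique regions, libfuzzer 1, honggfuzz 0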
def display_generation_hit_results(hit_info, hit_results):
"""Displays the results of a generation HIT
Parameters
----------
hit_info : GenerationHITInfo
HITInfo object storing information regarding the HIT
hit_results : GenerationResults
HIT results object storing the results of the relevant HIT
Returns
-------
bool
returns True
"""
dec_string = format_decomposition_string(hit_results.decomposition)
print(
'HIT ID: {hit_id}'
'\nAssignment ID: {assignment_id}'
'\nHIT Type: Generation'
'\n'
'\nResults'
'\n======='
'\nAnnotation ID: {annotation_id}'
'\nQuestion ID: {question_id}'
'\nQuestion Text: {question_text}'
'\nDecomposition: {decomposition}'.format(
hit_id=hit_results.hit_id,
assignment_id=hit_results.assignment_id,
annotation_id=hit_info.annotation_id,
question_id=hit_info.question_id,
question_text=hit_info.question_text,
decomposition=dec_string))
return True | 649cf05d0ee3a87032993fd05fc120162c7f8d2b | 566 |
def XOR(v1, v2):
"""
XOR operation element by element from 2 lists
:param v1: [1, 0, 1, 0, 0, 1]
:param v2: [1, 1, 0, 0, 1, 1]
:return: [0, 1, 1, 0, 1, 0]
"""
return [a ^ b for a, b in zip(v1, v2)] | e3b94b35ccf4e1dd99cc51f32c70f96c5fe99795 | 568 |