content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import functools
from collections import defaultdict
from operator import add
def gen_cand_keyword_scores(phrase_words, word_score):
"""
Computes the score for the input phrases.
    :param phrase_words: (phrase, word_list) pairs to score
    :type phrase_words: list
    :param word_score: calculated per-word scores
    :type word_score: dict
    :return: dict *{phrase: score, ...}*
"""
keyword_candidates = defaultdict(int)
for phrase, word_list in phrase_words:
if not word_list:
continue
candidate_score = functools.reduce(
add, [word_score[word] for word in word_list]
)
keyword_candidates[phrase] = candidate_score
return keyword_candidates | d219256938ab2538214cbc075451f7da5a253b06 | 1,031 |
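# Usage sketch (not part of the original snippet): the (phrase, word_list) pairs and
# per-word scores below are made-up RAKE-style inputs, and gen_cand_keyword_scores
# from the snippet above is assumed to be in scope.
phrase_words = [
    ("deep learning", ["deep", "learning"]),
    ("learning rate", ["learning", "rate"]),
]
word_score = {"deep": 1.5, "learning": 2.0, "rate": 1.0}
scores = gen_cand_keyword_scores(phrase_words, word_score)
print(dict(scores))  # {'deep learning': 3.5, 'learning rate': 3.0}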
def analyze_network(directed=False, base_url=DEFAULT_BASE_URL):
"""Calculate various network statistics.
The results are added to the Node and Edge tables and the Results Panel.
The summary statistics in the Results Panel are also returned by the function
as a list of named values.
Args:
directed (bool): If True, the network is considered a directed graph. Default is False.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: Named list of summary statistics
Raises:
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> analyze_network()
{'networkTitle': 'galFiltered.sif (undirected)', 'nodeCount': '330', 'edgeCount': '359', 'avNeighbors': '2.379032258064516', 'diameter': '27', 'radius': '14', 'avSpl': '9.127660963823953', 'cc': '0.06959203036053131', 'density': '0.009631709546819902', 'heterogeneity': '0.8534500004035027', 'centralization': '0.06375695335900727', 'ncc': '26'}
>>> analyze_network(True)
{'networkTitle': 'galFiltered.sif (directed)', 'nodeCount': '330', 'edgeCount': '359', 'avNeighbors': '2.16969696969697', 'diameter': '10', 'radius': '1', 'avSpl': '3.4919830756382395', 'cc': '0.03544266191325015', 'density': '0.003297411808050106', 'ncc': '26', 'mnp': '1', 'nsl': '0'}
"""
res = commands.commands_post(f'analyzer analyze directed={directed}', base_url=base_url)
return res | 0edd9e848e3b3060055e6845aa5fbb2792c7a1f4 | 1,032 |
def create_user():
"""
Create new user
"""
# request.get_json(): extract the JSON from the request and return it as
# a Python structure.
data = request.get_json() or {}
# Validate mandatory fields
if 'username' not in data or 'email' not in data or \
'password' not in data:
return bad_request('must include username, email and password fields')
if User.query.filter_by(username=data['username']).first():
return bad_request('please use a different username')
if User.query.filter_by(email=data['email']).first():
return bad_request('please use a different email address')
# Create user
user = User()
user.from_dict(data, new_user=True)
db.session.add(user)
db.session.commit()
# Make response
response = jsonify(user.to_dict())
# The status code for a POST request that creates a resource should be 201
response.status_code = 201
response.headers['Location'] = url_for('api.get_user', id=user.id)
return response | a416e0d5bbb6539cee3ce5174ab3cf1186680ee9 | 1,033 |
import hashlib
import base64
def hash_long_to_short(long_url):
"""
turn a long input url into a short url's url-safe 5 character hash
this is deterministic and the same long_url will always have the same hash
"""
encoded = long_url.encode("utf-8")
md5_hash = hashlib.md5(encoded).digest()
return base64.urlsafe_b64encode(md5_hash)[:SHORT_URL_HASH_LENGTH] | 050de3e30feeac46f98b152890d82dd8e416f2d0 | 1,034 |
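# Determinism check (illustrative): hash_long_to_short from the snippet above is assumed to
# be in scope, and SHORT_URL_HASH_LENGTH is assumed to be a small module-level constant (e.g. 5).
url = "https://example.com/some/very/long/path?with=query"
assert hash_long_to_short(url) == hash_long_to_short(url)  # same URL -> same short hash
print(hash_long_to_short(url))  # a url-safe base64 prefix, e.g. b'...'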
def has_prefix(sub_s):
"""
Test possibility of sub_s before doing recursion.
:param sub_s: sub_string of input word from its head.
    :return: (boolean) whether any word in DATABASE starts with sub_s.
"""
for word in DATABASE:
if word.startswith(sub_s):
            return True
    return False | 2dde507f7b0b3c56f8a5a9a582d52b784607dd5d | 1,036 |
def transform_results(search_result, user, department_filters):
"""
    Merge podcast episodes into podcasts and learning paths into user lists in the aggregations.
    Add 'is_favorite' and 'lists' fields to the '_source' attributes for learning resources.
    Args:
        search_result (dict): The results from Elasticsearch
        user (User): the user who performed the search
        department_filters (list): department filters applied to the search
Returns:
dict: The Elasticsearch response dict with transformed aggregates and source values
"""
for aggregation_key in [
"type",
"topics",
"offered_by",
"audience",
"certification",
"department_name",
"level",
"course_feature_tags",
"resource_type",
]:
if f"agg_filter_{aggregation_key}" in search_result.get("aggregations", {}):
if aggregation_key == "level":
levels = (
search_result.get("aggregations", {})
.get(f"agg_filter_{aggregation_key}", {})
.get("level", {})
.get("level", {})
)
if levels:
search_result["aggregations"]["level"] = {
"buckets": [
{
"key": bucket["key"],
"doc_count": bucket["courses"]["doc_count"],
}
for bucket in levels.get("buckets", [])
if bucket["courses"]["doc_count"] > 0
]
}
else:
search_result["aggregations"][aggregation_key] = search_result[
"aggregations"
][f"agg_filter_{aggregation_key}"][aggregation_key]
search_result["aggregations"].pop(f"agg_filter_{aggregation_key}")
types = search_result.get("aggregations", {}).get("type", {})
if types:
type_merges = dict(
zip(
(PODCAST_EPISODE_TYPE, LEARNING_PATH_TYPE),
(PODCAST_TYPE, USER_LIST_TYPE),
)
)
for child_type, parent_type in type_merges.items():
child_type_bucket = None
parent_type_bucket = None
for type_bucket in search_result["aggregations"]["type"]["buckets"]:
if type_bucket["key"] == child_type:
child_type_bucket = type_bucket
elif type_bucket["key"] == parent_type:
parent_type_bucket = type_bucket
if child_type_bucket and parent_type_bucket:
parent_type_bucket["doc_count"] = (
child_type_bucket["doc_count"] + parent_type_bucket["doc_count"]
)
search_result["aggregations"]["type"]["buckets"].remove(
child_type_bucket
)
elif child_type_bucket:
child_type_bucket["key"] = parent_type
search_result["aggregations"]["type"]["buckets"].sort(
key=lambda bucket: bucket["doc_count"], reverse=True
)
if not user.is_anonymous:
favorites = (
FavoriteItem.objects.select_related("content_type")
.filter(user=user)
.values_list("content_type__model", "object_id")
)
for hit in search_result.get("hits", {}).get("hits", []):
object_type = hit["_source"]["object_type"]
if object_type in LEARNING_RESOURCE_TYPES:
if object_type == LEARNING_PATH_TYPE:
object_type = USER_LIST_TYPE
object_id = hit["_source"]["id"]
hit["_source"]["is_favorite"] = (object_type, object_id) in favorites
hit["_source"]["lists"] = get_list_items_by_resource(
user, object_type, object_id
)
search_result = _transform_search_results_suggest(search_result)
if len(department_filters) > 0:
_transform_search_results_coursenum(search_result, department_filters)
return search_result | 93bbb9cb3effa4b0f602e42549a961f4fd53faeb | 1,037 |
import torch
from torch import Tensor
import torch.nn.functional as F
def kl_div_loss(inputs: Tensor, targets: Tensor) -> Tensor:
"""Computes the Kullback–Leibler divergence loss between two probability distributions."""
return F.kl_div(F.log_softmax(inputs, dim=-1), F.softmax(targets, dim=-1), reduction="none") | 9a45dacfe8fd529893cf7fa813869a97da562f65 | 1,038 |
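# Usage sketch: with reduction="none" the loss comes back element-wise, so a typical
# distillation setup reduces it afterwards. The logits below are random placeholders and
# kl_div_loss from the snippet above is assumed to be in scope.
import torch
student_logits = torch.randn(4, 10)
teacher_logits = torch.randn(4, 10)
elementwise = kl_div_loss(student_logits, teacher_logits)  # shape (4, 10)
loss = elementwise.sum(dim=-1).mean()  # sum over classes, mean over the batch
print(elementwise.shape, loss.item())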
from typing import List
import psycopg2
import psycopg2.extras
def get_schema_names(connection: psycopg2.extensions.connection) -> List[psycopg2.extras.RealDictRow]:
"""Function for getting the schema information from the given connection
:param psycopg2.extensions.connection connection: The connection
:return: List of rows using key-value pairs for the data
:rtype: List[psycopg2.extras.RealDictRow]
"""
with connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
query = """SELECT *
FROM information_schema.schemata"""
cursor.execute(query)
results = cursor.fetchall()
return results | 69a4e0b70ef443c2480f0fbb1e1e859bbf6f69bd | 1,039 |
def parse(string):
"""Returns a list of specs from an input string.
For creating one spec, see Spec() constructor.
"""
return SpecParser().parse(string) | 788849ebaa29b4dab5e4babcb13573acbc8b8525 | 1,040 |
def get_provider_idx(provider_type):
"""Return the index associated to the type.
"""
try:
return PROVIDERS_TYPE[provider_type]['idx']
except KeyError as error:
        raise ProviderError(
            "Provider type (%s) is not supported yet." % (provider_type, )
        ) from error | 47272903415825c870222b3531fddc11129d62c0 | 1,041 |
import collections
import tensorflow as tf
def file_based_convert_examples_to_features(
examples, slot_label_list, intent_label_list, max_seq_length, tokenizer, output_file):
"""
    Convert a set of InputExamples to a TFRecord file and write it out.
    :param examples: [(text, CRF_label, class_label), ...]
    :param slot_label_list: list of CRF (slot) label strings
    :param intent_label_list: list of trigger-word (intent) category strings
    :param max_seq_length: maximum sequence length
    :param tokenizer: tokenizer used to convert text into token ids
    :param output_file: TFRecord file
    :return:
"""
writer = tf.io.TFRecordWriter(output_file)
for ex_index, example in enumerate(examples):
def create_int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
if ex_index % 10000 == 0:
logger.info("Writing example %d of length %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list,
max_seq_length, tokenizer)
# convert to tensorflow format
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["slot_ids"] = create_int_feature(feature.slot_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features['is_value_ids'] = create_int_feature(feature.is_value_ids)
features["is_real_example"] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())  # write one example to the TFRecord file
writer.close() | b5d4a9228af4169307a8a22f4c56a0c3eb6e8f27 | 1,042 |
import pandas as pd
def create_readme(df):
    """Retrieve text from README.md and update it."""
    readme = ""
categories = pd.unique(df["category"])
categories.sort()
with open('README.md', 'r', encoding='utf-8') as read_me_file:
read_me = read_me_file.read()
splits = read_me.split('<!---->')
# Initial project description
text_intro = splits[0]
# Contribution and contacts
text_contributing = splits[3]
text_contacts = splits[4]
# TOC
toc = "\n\n- [Awesome Citizen Science Projects](#awesome-citizen-science-projects)\n"
# Add categories
for cat in range(len(categories)):
toc += f" - [{categories[cat]}](#{categories[cat]})" + "\n"
# Add contributing and contact to TOC
toc += "- [Contributing guidelines](#contributing-guidelines)\n"
toc += "- [Contacts](#contacts)\n"
# Add first part and toc to README
readme = text_intro + "<!---->" + toc + "\n<!---->\n"
# Add projects subtitle
readme += "\n## Projects\n"
# Add individual categories to README
list_blocks = ""
for cat in range(len(categories)):
block = f"\n### {categories[cat]}\n\n"
filtered = df[df["category"] == categories[cat]]
list_items = ""
for i, r in filtered.iterrows():
try:
start_date = int(r['start_date'])
            except (ValueError, TypeError):
start_date = "NA"
if not pd.isna(r['icon']):
project = f"- {r['icon']} [{r['name']}]({r['main_source']}) - {r['description']} (`{start_date}` - `{str(r['end_date'])}`)\n"
list_items = list_items + project
else:
project = f"- [{r['name']}]({r['main_source']}) - {r['description']} (`{start_date}` - `{str(r['end_date'])}`)\n"
list_items = list_items + project
list_blocks = list_blocks + block + list_items
# Add to categories to README.md
readme += list_blocks + "\n"
# Add contribution and contacts
readme += '<!---->' + text_contributing
readme += '<!---->' + text_contacts
return readme | 5e0d207baa3d5c1e1f68b6f2e1a347bffece901a | 1,043 |
async def get_leaderboard_info_by_id(
# ScoreSaber leaderboardId
leaderboardId: float
):
"""
GET /api/leaderboard/by-id/{leaderboardId}/info
"""
# request
request_url = f'{SERVER}/api/leaderboard/by-id/{leaderboardId}/info'
response_dict = await request.get(request_url)
return LeaderboardInfo.gen(response_dict) | ab081d17b462a0738c578c9caed93c7b4a1ec9a6 | 1,044 |
from math import radians, sin, cos, atan2, sqrt
def distance(lat1,lon1,lat2,lon2):
"""Input 2 points in Lat/Lon degrees.
    Calculates the great-circle central angle between them, in radians.
"""
rlat1= radians(lat1)
rlon1= radians(lon1)
rlat2= radians(lat2)
rlon2= radians(lon2)
dlat = rlat1 - rlat2
dlon = rlon1 - rlon2
a = pow(sin(dlat/2.0),2) + cos(rlat1)*cos(rlat2)*pow(sin(dlon/2.0),2)
c = 2* atan2(sqrt(a), sqrt(1-a))
return c | 2c6b1692843db3f69c750f4b2acda43d49227e7a | 1,045 |
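# The return value is the central angle in radians; multiplying by an Earth radius gives a
# distance. The 6371 km mean radius below is an assumption, not part of the snippet.
EARTH_RADIUS_KM = 6371.0
angle = distance(48.8566, 2.3522, 51.5074, -0.1278)  # Paris -> London
print(angle * EARTH_RADIUS_KM)  # roughly 344 km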
def minimumSwaps(arr):
"""
O(nlogn)
"""
len_arr = len(arr)
arr_dict = {key+1:value for key, value in enumerate(arr)}
arr_checked = [False]*len_arr
total_count = 0
for key, value in arr_dict.items():
count = 0
while key != value and arr_checked[key-1] is False:
arr_checked[value-1] = True
count += 1
value = arr_dict.get(value)
arr_checked[key-1] = True
total_count += count
return total_count | d5251297fd52f99aefce69986bd5c8c126b7e6b6 | 1,046 |
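# Quick checks of the swap counter above on two classic inputs:
print(minimumSwaps([4, 3, 1, 2]))           # 3
print(minimumSwaps([1, 3, 5, 2, 4, 6, 7]))  # 3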
def store_user_bot(user_id, intended_user, bot_id):
"""Store an uploaded bot in object storage."""
if user_id != intended_user:
raise api_util.user_mismatch_error(
message="Cannot upload bot for another user.")
if bot_id != 0:
raise util.APIError(
400, message="Sorry, only one bot allowed per user.")
uploaded_file = validate_bot_submission()
with model.engine.connect() as conn:
team = conn.execute(model.team_leader_query(user_id)).first()
if team:
user_id = intended_user = team["leader_id"]
bot_where_clause = (model.bots.c.user_id == user_id) & \
(model.bots.c.id == bot_id)
bot = conn.execute(model.bots.select(bot_where_clause)).first()
if not bot:
raise util.APIError(404, message="Bot not found.")
# Check if the user already has a bot compiling
if bot["compile_status"] == model.CompileStatus.IN_PROGRESS.value:
raise util.APIError(400, message="Cannot upload new bot until "
"previous one is compiled.")
blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id),
model.get_compilation_bucket(),
chunk_size=262144)
blob.upload_from_file(uploaded_file)
# Flag the user as compiling
update = model.bots.update() \
.where(bot_where_clause) \
.values(
compile_status=model.CompileStatus.UPLOADED.value,
update_time=sqlalchemy.sql.func.now(),
timeout_sent=False,
)
conn.execute(update)
return util.response_success({
"user_id": user_id,
"bot_id": bot["id"],
}) | 2b19e4092df3cb93fdadf5f06176ec4ec9300f63 | 1,047 |
import numpy as np
def parse_conv(weights_file, cfg_parser, section, layer_dict):
""" parse conv layer
Args:
weights_file (file object): file object of .weights file
cfg_parser (ConfigParser object): ConfigParser object of .cfg file for net
section (str): name of conv layer
layer_dict (dictionary): dict storing layer info
Returns:
dict storing layer info and weights values
"""
prev_layer_channel = layer_dict['prev_layer_channel']
count = layer_dict['count']
filters = int(cfg_parser[section]['filters'])
size = int(cfg_parser[section]['size'])
stride = int(cfg_parser[section]['stride'])
pad = int(cfg_parser[section]['pad'])
activation = cfg_parser[section]['activation']
batch_normalize = 'batch_normalize' in cfg_parser[section]
weights_shape = (size, size, prev_layer_channel, filters)
darknet_w_shape = (filters, weights_shape[2], size, size)
    weights_size = np.prod(weights_shape)
prev_layer_channel = filters
    print('conv2d', 'bn' if batch_normalize else '  ', activation, weights_shape)
bn_weight_list = []
conv_bias = []
if batch_normalize:
bn_weights = np.ndarray(
shape=(4, filters),
dtype='float32',
buffer=weights_file.read(filters * 16))
count += 4 * filters
bn_weight_list = [
bn_weights[1], # scale gamma
bn_weights[0], # shift beta
bn_weights[2], # running mean
bn_weights[3] # running var
]
else:
conv_bias = np.ndarray(
shape=(filters, ),
dtype='float32',
buffer=weights_file.read(filters * 4))
count += filters
conv_weights = np.ndarray(
shape=darknet_w_shape,
dtype='float32',
buffer=weights_file.read(weights_size * 4))
count += weights_size
# DarkNet conv_weights are serialized Caffe-style:
# (out_dim, in_dim, height, width)
# We would like to set these to Tensorflow order:
# (height, width, in_dim, out_dim)
conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
layer_dict['prev_layer_channel'] = prev_layer_channel
layer_dict['count'] = count
layer_dict['conv_weights'] = conv_weights
layer_dict['conv_bias'] = conv_bias
layer_dict['bn_weight_list'] = bn_weight_list
return layer_dict | 6e7cc1d2b4115dc44eaf2ad90240144f7157b30b | 1,049 |
import numpy as np
def generate_format_spec(num_vals, sep, dtypes, decimals=None):
"""
Generate a format specifier for generic input.
--------------------------------------------------------------
Input
num_vals : number of wild-cards
sep : separator string (could be '_', '-', '--' ...)
used to separate wild-cards
dtypes : data types of the wildcards ('str', 'float', 'int')
decimals : number of decimals (only relevant for floats)
--------------------------------------------------------------
Output
String of the form: "{0:<dtype>}<sep>{1:<dtype>}<sep>...",
where each occurrence of <dtype> is replaced by the dtype value of
the current wild-card and <sep> is replaced by the separator string.
"""
assert type(num_vals) is int
    # dictionary of identifiers for supported data types
    dident = dict([(str, 's'),
                   (int, 'd'),
                   (float, ''),        # '.1f'
                   (np.float64, '')    # '.1f'
                   ])
if decimals is not None:
assert type(decimals) is int
dident[float] = '.{}f'.format(decimals)
dident[np.float64] = '.{}f'.format(decimals)
if not hasattr(dtypes, '__iter__'):
dtypes = [dtypes,] * num_vals
elif type(dtypes) is str:
dtypes = [dtypes,] * num_vals
elif len(dtypes) < num_vals:
dtypes = [dtypes[0],] * num_vals
for dt in dtypes:
assert dt in dident.keys(), dt
# construct actual output
out = ""
for i in range(num_vals):
out += "{" + str(i) + ":" + dident[dtypes[i]] + "}"
out += sep
# remove additional separator from output
return out[:-len(sep)] | 3b65ad3b436b6c578fa2504a2ea4a475700432ce | 1,050 |
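# Example: three wild-cards separated by underscores, floats printed with two decimals.
spec = generate_format_spec(3, "_", [str, int, float], decimals=2)
print(spec)                        # {0:s}_{1:d}_{2:.2f}
print(spec.format("run", 7, 0.5))  # run_7_0.50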
from typing import Optional
def products_with_low_stock(threshold: Optional[int] = None):
"""Return queryset with stock lower than given threshold."""
if threshold is None:
threshold = settings.LOW_STOCK_THRESHOLD
stocks = (
Stock.objects.select_related("product_variant")
.values("product_variant__product_id", "warehouse_id")
.annotate(total_stock=Sum("quantity"))
)
return stocks.filter(total_stock__lte=threshold).distinct() | 29bbdd3236b42bf3cef17f84a919ab201946c084 | 1,051 |
from collections import defaultdict, deque
def robust_topological_sort(deps):
"""
A topological sorting algorithm which is robust enough to handle cyclic graphs.
First, we bucket nodes into strongly connected components (we use Tarjan's linear algorithm for that).
Then, we topologically sort these buckets grouping sibling buckets into sets.
:param deps: a dictionary representing the dependencies between nodes
:return: groups of buckets (a bucket is a strongly connected component) sorted bottom-up
>>> deps1 = {'S':{'S','X', 'A'}, 'X':{'Y', 'B'}, 'Y':{'Z'}, 'Z':{'X'}, 'A':{'B'}, 'B':{}}
>>> expected = [frozenset({frozenset({'B'})}), frozenset({frozenset({'A'}), frozenset({'Y', 'X', 'Z'})}), frozenset({frozenset({'S'})})]
>>> order = robust_topological_sort(deps1)
>>> order == expected
True
"""
    # correspondences between nodes and buckets (strongly connected components)
    n2c = {}
components = tarjan(deps)
for i, component in enumerate(components):
for v in component:
n2c[v] = i
# find the dependencies between strongly connected components
cdeps = defaultdict(set)
for head, tail in deps.items():
hc = n2c[head]
for t in tail:
tc = n2c[t]
if hc != tc:
cdeps[hc].add(tc)
# topsort buckets and translate bucket ids back into nodes
return deque(frozenset(components[c] for c in group) for group in topological_sort(cdeps)) | fb2b70f21ccb97880767e73362b46e27804c2d17 | 1,052 |
import inspect
import functools
import warnings
def deprecated(reason):
"""
This is a decorator which can be used to mark functions and classes
as deprecated. It will result in a warning being emitted
when the function is used.
From https://stackoverflow.com/a/40301488
"""
string_types = (type(b""), type(u""))
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
fmt1.format(name=func1.__name__, reason=reason),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
fmt2.format(name=func2.__name__),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason))) | 1b75306b9b712caf3cd6c8425d2344b8ca170fcb | 1,053 |
import torch
def rotate_tensor(l: torch.Tensor, n: int = 1) -> torch.Tensor:
"""Roate tensor by n positions to the right
Args:
l (torch.Tensor): input tensor
n (int, optional): positions to rotate. Defaults to 1.
Returns:
torch.Tensor: rotated tensor
"""
return torch.cat((l[n:], l[:n])) | 9cdaa7be718f0676ad85e05b01ee918459697c60 | 1,054 |
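# Example: the first n elements wrap around to the end.
import torch
t = torch.tensor([1, 2, 3, 4, 5])
print(rotate_tensor(t))     # tensor([2, 3, 4, 5, 1])
print(rotate_tensor(t, 2))  # tensor([3, 4, 5, 1, 2])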
def generate_all_fish(
n_fish,
n_replica_fish,
channel,
interaction,
k_coh,
k_ar,
alpha,
lim_neighbors,
weights = [1],
neighbor_weights=None,
fish_max_speeds=None,
clock_freqs=None,
verbose=False,
names=None
):
"""Generate both replica and regular fish
Arguments:
n_fish {int} -- Number of ideal fish to generate
n_replica_fish {int} -- Number of replica fish to generate
channel {Channel} -- Channel instance
interaction {Interaction} -- Interaction instance
k_coh {float} -- Parameter to Delight Fish
k_ar {float} -- Weighting of neighbors in Delight Fish
alpha {int} -- Goal distance from neighbor for Delight Fish
lim_neighbors {list} -- Tuple of min and max neighbors
weights {float|list} -- List of weights for replica fish learned function
        neighbor_weights {float|list} -- List of neighbor weights
fish_max_speeds {float|list} -- List of max speeds
clock_freqs {int|list} -- List of clock speeds
names {list} -- List of names for your replica fish
"""
n = n_fish + n_replica_fish
if neighbor_weights is None:
neighbor_weights = [1.0] * n
elif not isinstance(neighbor_weights, list):
neighbor_weights = [neighbor_weights] * n
if fish_max_speeds is None:
fish_max_speeds = [1.0] * n
elif not isinstance(fish_max_speeds, list):
fish_max_speeds = [fish_max_speeds] * n
if clock_freqs is None:
clock_freqs = [1] * n
elif not isinstance(clock_freqs, list):
clock_freqs = [clock_freqs] * n
if names is None:
names = ['Unnamed'] * n
all_fish = []
for i in range(n_fish):
all_fish.append(Fish(
id=i,
channel=channel,
interaction=interaction,
k_coh = k_coh,
k_ar = k_ar,
alpha = alpha,
lim_neighbors=lim_neighbors,
neighbor_weight=neighbor_weights[i],
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
verbose=verbose,
name=names[i]
))
for i in range(n_fish, n_fish + n_replica_fish):
all_fish.append(ReplicaFish(
id=i,
channel=channel,
interaction=interaction,
weights = weights,
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
name=names[i],
verbose=verbose
))
return all_fish | 3924235d7bdcf25a91dcb1ec40220b761b85f15f | 1,055 |
import numpy as np
def allclose(a, b):
""" close to machine precision """
return np.allclose(a, b, rtol=1e-14, atol=1e-14) | ad7ee29d7432947aec0030936985b456a5919eaa | 1,056 |
from hashlib import md5, sha1
def check_pwhash(pwhash, password):
"""Check a password against a given hash value. Since
many forums save md5 passwords with no salt and it's
technically impossible to convert this to an sha hash
with a salt we use this to be able to check for
plain passwords::
plain$$default
md5 passwords without salt::
md5$$c21f969b5f03d33d43e04f8f136e7682
md5 passwords with salt::
md5$123456$7faa731e3365037d264ae6c2e3c7697e
sha passwords::
sha$123456$118083bd04c79ab51944a9ef863efcd9c048dd9a
Note that the integral passwd column in the table is
only 60 chars long. If you have a very large salt
or the plaintext password is too long it will be
truncated.
>>> check_pwhash('plain$$default', 'default')
True
>>> check_pwhash('sha$$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'password')
True
>>> check_pwhash('sha$$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'wrong')
False
>>> check_pwhash('md5$xyz$bcc27016b4fdceb2bd1b369d5dc46c3f', u'example')
True
>>> check_pwhash('sha$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'password')
False
>>> check_pwhash('md42$xyz$bcc27016b4fdceb2bd1b369d5dc46c3f', 'example')
False
"""
    if isinstance(password, bytes):
        password = password.decode('utf-8')
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
if method == 'plain':
return hashval == password
elif method == 'md5':
h = md5()
elif method == 'sha':
h = sha1()
else:
return False
    h.update(salt.encode('utf-8'))
    h.update(password.encode('utf-8'))
return h.hexdigest() == hashval | 618cdc8a9f7f7d7062e1e0ae26cf81157a8dbba7 | 1,057 |
import markovify
def make_markov_model(tweets):
"""Wrapper around making Markov Chain"""
return markovify.Text(" ".join(tweets)) | 0bd98d1a2f3a5aae37591389b06d402073f1a7ec | 1,058 |
import SimpleITK as sitk
def slice_image(sitk_image, start=(0, 0, 0), end=(-1, -1, -1)):
    """Returns the `sitk_image` sliced from the `start` index (x,y,z) to the `end` index.
"""
size = sitk_image.GetSize()
assert len(start) == len(end) == len(size)
# replace -1 dim index placeholders with the size of that dimension
end = [size[i] if end[i] == -1 else end[i] for i in range(len(end))]
slice_filter = sitk.SliceImageFilter()
slice_filter.SetStart(start)
slice_filter.SetStop(end)
return slice_filter.Execute(sitk_image) | eda4477c016d1130bb185a5793409ff95b9cd44c | 1,059 |
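# Illustrative sketch on a blank SimpleITK volume (sizes chosen arbitrarily); slice_image
# from the snippet above is assumed to be in scope.
import SimpleITK as sitk
img = sitk.Image(64, 64, 32, sitk.sitkUInt8)
cropped = slice_image(img, start=(10, 10, 0), end=(50, 50, -1))
print(cropped.GetSize())  # (40, 40, 32) -- the -1 keeps the full z extent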
import vtk
def MakeGlyphs(src, reverseNormals):
"""
Glyph the normals on the surface.
You may need to adjust the parameters for maskPts, arrow and glyph for a
nice appearance.
:param: src - the surface to glyph.
:param: reverseNormals - if True the normals on the surface are reversed.
:return: The glyph object.
"""
# Sometimes the contouring algorithm can create a volume whose gradient
# vector and ordering of polygon (using the right hand rule) are
# inconsistent. vtkReverseSense cures this problem.
reverse = vtk.vtkReverseSense()
# Choose a random subset of points.
maskPts = vtk.vtkMaskPoints()
maskPts.SetOnRatio(5)
maskPts.RandomModeOn()
if reverseNormals:
reverse.SetInputData(src)
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
maskPts.SetInputConnection(reverse.GetOutputPort())
else:
maskPts.SetInputData(src)
# Source for the glyph filter
arrow = vtk.vtkArrowSource()
arrow.SetTipResolution(16)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInputConnection(maskPts.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(1)
glyph.SetColorModeToColorByVector()
glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
return glyph | 0bb28c943a2c371f5e536851208ac0d4b09cd51a | 1,060 |
def get_tags_categorys(self):
"""02返回添加文档的变量"""
tags = Tag.all()
categorys = Category.all()
return tags, categorys | 557e5182dd3dbf3571e005c4e105a20e2cdd3dd1 | 1,061 |
import pprint
import warnings
import numpy as np
import scri
import spherical_functions as sf
def single_mode_constant_rotation(**kwargs):
"""Return WaveformModes object a single nonzero mode, with phase proportional to time
The waveform output by this function will have just one nonzero mode. The behavior of that mode will be fairly
simple; it will be given by exp(i*omega*t). Note that omega can be complex, which gives damping.
Parameters
----------
s : int, optional
Spin weight of the waveform field. Default is -2.
ell, m : int, optional
The (ell, m) values of the nonzero mode in the returned waveform. Default value is (abs(s), -abs(s)).
ell_min, ell_max : int, optional
Smallest and largest ell values present in the output. Default values are abs(s) and 8.
data_type : int, optional
Default value is whichever psi_n corresponds to the input spin. It is important to choose these, rather than
`h` or `sigma` for the analytical solution to translations, which doesn't account for the direct contribution
of supertranslations (as opposed to the indirect contribution, which involves moving points around).
t_0, t_1 : float, optional
Beginning and end of time. Default values are -20. and 20.
dt : float, optional
Time step. Default value is 0.1.
omega : complex, optional
Constant of proportionality such that nonzero mode is exp(i*omega*t). Note that this can be complex, which
implies damping. Default is 0.5.
"""
s = kwargs.pop("s", -2)
ell = kwargs.pop("ell", abs(s))
m = kwargs.pop("m", -ell)
ell_min = kwargs.pop("ell_min", abs(s))
ell_max = kwargs.pop("ell_max", 8)
data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
t_0 = kwargs.pop("t_0", -20.0)
t_1 = kwargs.pop("t_1", 20.0)
dt = kwargs.pop("dt", 1.0 / 10.0)
t = np.arange(t_0, t_1 + dt, dt)
n_times = t.size
omega = complex(kwargs.pop("omega", 0.5))
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
data[:, sf.LM_index(ell, m, ell_min)] = np.exp(1j * omega * t)
if kwargs:
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
return scri.WaveformModes(
t=t,
data=data,
ell_min=ell_min,
ell_max=ell_max,
frameType=scri.Inertial,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
) | cc31bf0587ff397cb79c42863efd3d8173cddc72 | 1,063 |
def get_file(file_pattern: list, sub_type: str = None) -> list:
"""Get a subset from file patterns that belong to a sub-type.
If no sub-type is specified, return all file patterns.
Args:
file_pattern (list): The input file patterns
sub_type (str, optional): A string to search in file patterns. Defaults to None.
Raises:
ValueError: No file pattern matches the sub-type provided.
Returns:
list: A filtered sub list of file patterns.
"""
if sub_type is None:
return file_pattern
result = []
for entry in file_pattern:
if sub_type in entry:
result.append(entry)
if len(result) < 1:
raise ValueError(
"No file found for sub-type {}: {}".format(sub_type, file_pattern)
)
else:
return result | 7d39c05fa8a1f7a9370de459472ecf7070aa6569 | 1,064 |
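# Example: filtering a list of file patterns by sub-type (the pattern names are made up).
patterns = ["data/train_images_*.tif", "data/test_images_*.tif", "data/train_masks_*.tif"]
print(get_file(patterns, sub_type="train"))  # ['data/train_images_*.tif', 'data/train_masks_*.tif']
print(get_file(patterns))                    # no sub-type given: all patterns are returned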
def get_all_report_data(db):
"""
Gets all report data for pre report page
"""
query = r'SELECT * FROM report WHERE relevent=1 ORDER BY id DESC'
return db_get(db, query) | 727c4c9ec2125747237d40d7f0dd019b3d116d00 | 1,066 |
import numpy as np
def find_center_projection(mat1, mat2, flip=True, chunk_height=None,
start_row=None, denoise=True, norm=False,
use_overlap=False):
"""
Find the center-of-rotation (COR) using projection images at 0-degree
and 180-degree based on a method in Ref. [1].
Parameters
----------
mat1 : array_like
2D array. Projection image at 0-degree.
mat2 : array_like
2D array. Projection image at 180-degree.
flip : bool, optional
Flip the 180-degree projection in the left-right direction if True.
chunk_height : int or float, optional
Height of the sub-area of projection images. If a float is given, it
must be in the range of [0.0, 1.0].
start_row : int, optional
Starting row used to extract the sub-area.
denoise : bool, optional
Apply the Gaussian filter if True.
norm : bool, optional
Apply the normalization if True.
use_overlap : bool, optional
Use the combination of images in the overlap area for calculating
correlation coefficients if True.
Returns
-------
cor : float
Center-of-rotation.
References
----------
.. [1] https://doi.org/10.1364/OE.418448
"""
(nrow, ncol) = mat1.shape
if flip is True:
mat2 = np.fliplr(mat2)
win_width = ncol // 2
if chunk_height is None:
chunk_height = int(0.1 * nrow)
if isinstance(chunk_height, float):
if 0.0 < chunk_height <= 1.0:
chunk_height = int(chunk_height * nrow)
else:
chunk_height = int(0.1 * nrow)
chunk_height = np.clip(chunk_height, 1, nrow - 1)
if start_row is None:
start = nrow // 2 - chunk_height // 2
elif start_row < 0:
start = nrow + start_row - chunk_height // 2
else:
start = start_row - chunk_height // 2
stop = start + chunk_height
start = np.clip(start, 0, nrow - chunk_height - 1)
stop = np.clip(stop, chunk_height, nrow - 1)
mat1_roi = mat1[start: stop]
mat2_roi = mat2[start: stop]
(overlap, side, _) = find_overlap(mat1_roi, mat2_roi, win_width, side=None,
denoise=denoise, norm=norm,
use_overlap=use_overlap)
if side == 0:
cor = overlap / 2.0 - 1.0
else:
cor = ncol - overlap / 2.0 - 1.0
return cor | 21661a6b9ed33a220ede918954ac18a420e638ae | 1,067 |
from datetime import date
def parse_date(date_string):
    """
    Parse a 'YYYY-MM-DD' string into a datetime.date.
    """
    ymd = date_string.split('-')
    return date(int(ymd[0]), int(ymd[1]), int(ymd[2])) | 29d0f79e2428e315c072c7801d927154c3bfee57 | 1,068 |
def mark_as_widget(view):
"""
Marks @view as a widget so we can later inspect that attribute, for
example, when hiding panels in _vi_enter_normal_mode.
Used prominently by '/', '?' and ':'.
XXX: This doesn't always work as we expect. For example, changing
settings to a panel created instants before does not make those
settings visible when the panel is activated. Investigate.
We still need this so that contexts will ignore widgets, though.
However, the fact that they are widgets should suffice to disable
Vim keys for them...
"""
view.settings().set('is_vintageous_widget', True)
return view | 965555660b82f834e09ba3ffc985755d4fd7fa66 | 1,069 |
def module_name(ctx, f):
"""Given Haskell source file path, turn it into a dot-separated module name.
module_name(
ctx,
"some-workspace/some-package/src/Foo/Bar/Baz.hs",
) => "Foo.Bar.Baz"
Args:
ctx: Rule context.
f: Haskell source file.
Returns:
string: Haskell module name.
"""
return _drop_extension(_rel_path_to_module(ctx, f).replace('/', '.')) | 77a38f62211a827ac8fe9af0cc36636b11e561d5 | 1,070 |
def store(key):
"""Gets the configured default store. The default is PickleStore
:return store: Store object
"""
global __stores
if __stores is None:
__stores = {}
if key not in __stores:
__stores[key] = __configuration[STORE](key)
return __stores[key] | 76197d8cedc44e15a75c81f1bcb07d3a4e59e021 | 1,071 |
def get_label_for_line(line, leg):
"""
Can't remember what I was using this for but seems useful to keep
"""
# leg = line.figure.legends[0]
# leg = line.axes.get_legend()
for h, t in zip(leg.legendHandles, leg.texts):
if h.get_label() == line.get_label():
return t.get_text() | 4180ae7fd7fe5b98ebafa20fbdf2528205e4ec31 | 1,072 |
def _node_parent_listener(target, value, oldvalue, initiator):
"""Listen for Node.parent being modified and update path"""
if value != oldvalue:
if value is not None:
if target._root != (value._root or value):
target._update_root(value._root or value)
target._update_path(newparent=value)
else:
# This node just got orphaned. It's a new root
target._update_root(target)
target._update_path(newparent=target)
return value | 06c06b144c777f33673e2051f1d4173204720f65 | 1,073 |
import torch
import typing
def sequential_to_momentum_net(module: torch.nn.Sequential,
split_dim=1,
coupling_forward: typing.Optional[typing.List[typing.Optional[typing.Callable]]] = None,
coupling_inverse: typing.Optional[typing.List[typing.Optional[typing.Callable]]] = None,
memory_mode: MemoryModes = MemoryModes.autograd_function,
target_device: str = "",
fused_optimizer: FUSED_OPTIMIZER = None,
residual: bool = False,
beta: float = 0.9) -> ReversibleSequential:
"""
Creates a sequential MomentumNet by unrolling a nn.Sequential module and dispatching to `momentum_net()`
:param module: An existing nn.Sequential module that should be converted into a ReversibleSequential module.
:param split_dim: RevNets require two streams. This parameter specifies which dimension to split in half to
create the two streams. `None` would mean the input gets replicated for both streams. It's usually best to split
along the features, which is why the default (1) is compatible with convolutions.
:param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a
custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses
y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the
    function output. For more information, look at the examples. default = revnet coupling
:param coupling_inverse: The inverse of the coupling function. default = revnet inverse
:param memory_mode: One of `MemoryModes`'s values. Some things are only supported in one mode while others
might only be supported in another. default = autograd function (highest coverage but spotty XLA support)
:param target_device: Specifies where the parameters should be moved to before computing the forward and
backward pass. This allows efficient CPU-offloading.
default = no offloading (keep parameters on the device they're on)
:param fused_optimizer: Allows an optimizer step to run while the model is computing its backward pass. This
means that the gradients don't have to be fully instantiated anymore and can improve speed when used with
cpu-offload due to asynchronous compute. It expects a function that generates an optimizer from a list of
parameters. (like Adam.__init__) default = no fused optimizer step
:param residual: Whether to "undo" a residual stream or not. Using y = f(x0) + x0 + x1 is generally not a good idea,
so this would subtract `x0` from y allowing you to patch existing residual modules without modifying their code.
:param beta: MomentumNet beta value that controls how much of the velocity stream is kept.
:return: Instantiated MomentumNet (instance of `ReversibleSequential`)
"""
return momentum_net(*maybe_residual_to_plain(module, residual), split_dim=split_dim,
coupling_forward=coupling_forward, coupling_inverse=coupling_inverse, memory_mode=memory_mode,
target_device=target_device, beta=beta, fused_optimizer=fused_optimizer) | 269d45cf845555988c3284a88a7e3ca83fb697b5 | 1,075 |
def user_view(request, name):
"""Render the view page for users"""
# argument is the login name, not the uuid in Cassandra
user = User.find(name)
if not user:
return redirect("users:home")
ctx = {
"req_user": request.user,
"user_obj": user,
"groups": [Group.find(gname) for gname in user.groups],
}
return render(request, "users/view.html", ctx) | f7f5bc01d2b60bcca048e0b2183eefcc5f4eb907 | 1,076 |
def grelha_nr_colunas(g):
"""
    grelha_nr_colunas: grid --> positive integer
    grelha_nr_colunas(g) returns the number of columns of the grid g.
"""
return len(g[0]) | 740b06c186ad1455aecadfaf112f253fb434d5ff | 1,077 |
import numpy as np
def rmsd(array_a, array_b):
"""
Calculate the RMSD between two 1d arrays
Parameters
----------
array_a, array_b : 1d numpy arrays
The arrays to be compared
Returns
-------
rmsd : float
The Root Mean Square Deviation of the elements of the array
"""
diff = array_a - array_b
diff2 = np.square(diff)
diff2_sum = np.sum(diff2)
norm_diff2_sum = diff2_sum/len(array_a)
rmsd = np.sqrt(norm_diff2_sum)
return rmsd | 7390cebff27d73bc9268cdc23e21c2d362bca2cc | 1,078 |
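# Quick numeric check:
import numpy as np
a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 4.0])
print(rmsd(a, b))  # sqrt(1/3) ~= 0.577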
def readFile(sFile, sMode='rb'):
    """
    Reads the entire file.
    """
    with open(sFile, sMode) as oFile:
        return oFile.read() | d44e8217ae7dcab1c826ccbbe80e066d76db31b5 | 1,079 |
from math import sqrt
def VI_cgivens_d( a, b):
"""
returns cos, sin, r
"""
c = vsip_cmplx_d(0.0,0.0)
s = vsip_cmplx_d(0.0,0.0)
r = vsip_cmplx_d(0.0,0.0)
am = vsip_cmag_d(a)
bm = vsip_cmag_d(b)
if am == 0.0:
r.r = b.r; r.i=b.i;
s.r = 1.0;
else:
scale = am + bm;
alpha = vsip_cmplx_d(a.r/am, a.i/am)
scalesq = scale * scale
norm = scale * sqrt((am*am)/scalesq + (bm * bm)/scalesq)
c.r =am/norm
s.r = (alpha.r * b.r + alpha.i * b.i)/norm
s.i = (-alpha.r * b.i + alpha.i * b.r)/norm
r.r = alpha.r * norm; r.i = alpha.i * norm
return (c,s,r) | 7ed08b3c583a805cd9a7b0dfcfb80eb67a054e1e | 1,080 |
import json
def documint_request_factory(request):
"""
Create a function that issues a request to a Documint endpoint.
Status codes outside the 2xx range are treated as errors. If error
responses are JSON then `DocumintError` is raised, otherwise
`MalformedDocumintError` is raised.
If the status code indicates success, the `IResponse` is returned.
"""
def _raise_error(data, response):
if content_type(response.headers) == b'application/json':
try:
causes = json.loads(data).get(u'causes', [])
raise DocumintError(
causes=[DocumintErrorCause(cause.get(u'type'),
cause.get(u'reason'),
cause.get(u'description'))
for cause in causes])
except ValueError:
pass
raise MalformedDocumintError(data)
def _check_status(response):
if 200 <= response.code < 300:
return response
d = response.content()
d.addCallback(_raise_error, response)
return d
def _request(*a, **kw):
d = request(*a, **kw)
d.addCallback(_check_status)
return d
return _request | 9dc4dcba0df1094c394dbe8d9424f874e3ac3169 | 1,081 |
def skip_for_tf2(f):
"""Decorator that skips tests when using TensorFlow 2."""
def test_wrapper(*args, **kwargs):
"""Wraps the decorated function to determine whether to skip."""
# Extract test case instance from args.
self = args[0]
try:
# If tf.contrib doesn't exist, we are in TF 2.0.
_ = tf.contrib
_ = tf.contrib.estimator.regression_head(
loss_reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
except (AttributeError, ImportError):
self.skipTest("Skipping test in TF 2.0.")
return f(*args, **kwargs)
return test_wrapper | 02059cc9c8e6b83ab49dcd3b69d447fa3ec26324 | 1,084 |
import yaml
def clean_logfile(logfile_lines,to_remove):
"""Remove yaml fields from a list of lines.
Removes from a set of lines the yaml_fields contained in the to_remove list.
Arguments:
logfile_lines (list): list of the lines of the logfile. Generated from a file by e.g. :py:meth:`~io.IOBase.readlines`.
to_remove (list): list of keys to remove from logfile_lines
Returns:
list of lines where the removed keys have as values the `"<folded>"` string
"""
line_rev=logfile_lines #list of the lines of the logfile
    #loop on the reversed form (so as to parse by blocks)
extra_lines=20 #internal variable to be customized
line_rev.reverse()
#clean the log
cleaned_logfile=[]
removed=[]
#for line in line_rev: #line_iter:
while len(line_rev) >0:
line=line_rev.pop()
to_print=line
#check if the line contains interesting information
for remove_it in to_remove :
stream_list=[]
#line without comments
valid_line=line.split('#')[0]
spaces='nospace'
            #check that the string between the key and the colon contains only spaces
if remove_it in valid_line and ":" in valid_line:
#print "here",remove_it,remove_it in valid_line and ":" in valid_line,valid_line
starting_point=valid_line.find(remove_it)
tmp_buf=valid_line[:starting_point]
#find the closest comma to the staring point, if exists
tmp_buf=tmp_buf[::-1]
starting_comma=tmp_buf.find(',')
                # keep only the portion between the preceding comma (if any) and the key
                st = len(tmp_buf) if starting_comma < 0 else starting_comma
                tmp_buf = tmp_buf[:st]
tmp_buf=tmp_buf[::-1]
tmp_buf=tmp_buf.strip(' ')
#print "there",tmp_buf,'starting',starting_point,len(tmp_buf)
valid_line= valid_line[starting_point+len(remove_it):]
spaces= valid_line[1:valid_line.find(':')]
#if remove_it+':' in line.split('#')[0]:
if len(spaces.strip(' ')) == 0 and len(tmp_buf)==0: #this means that the key has been found
#creates a new Yaml document starting from the line
#treat the rest of the line following the key to be removed
header=''.join(line.split(':')[1:])
header=header.rstrip()+'\n'
#eliminate the anchor
header=header.lstrip(' ')
header=header.lstrip('*')
if len(header) > 0 :
stream_list.append(header)
#part to be printed, updated
to_print = line.split(':')[0] + ": <folded> \n"
#then check when the mapping will end:
while True:
#create a stream with extra_lines block
for i in range(0,min(extra_lines,len(line_rev))):
stream_list.append(line_rev.pop())
#create a stream to be parsed
stream=''.join(stream_list)
#then parse the stream until the last valid position has been found
try:
for i in yaml.parse(stream,Loader=yaml.CLoader):
endpos=i.end_mark.index
                    except Exception as e:
# print 'error',str(e),stream
#convert back the valid stream into a list
#if needed the stream can be loaded into a document
item_list=stream[:endpos].split('\n')
#if lengths are different there is no need to add lines
if len(item_list) != len(stream_list):
#last line might be shorter, therefore treat it separately
last_line=item_list.pop()
#purge the stream
for item in item_list:
stream_list.remove(item+'\n')
#extract the remaining line which should be compared with the last one
strip_size=len(last_line.rstrip())
if strip_size > 0:
first_line=stream_list.pop(0)[strip_size:]
if '*' in first_line or '&' in first_line:
first_line='' #eliminate anchors
else:
first_line=''
#then put the rest in the line to be treated
to_print.rstrip('\n')
to_print += first_line+'\n'
# the item has been found
break
stream_list.reverse()
#put back the unused part in the document
line_rev.extend(stream_list)
# mark that the key has been removed
if (remove_it not in removed):
removed.append(remove_it)
write('removed: ',remove_it)
# then print out the line
cleaned_logfile.append(to_print)
# check that everything has been removed, at least once
if (set(removed) != set(to_remove)):
write('WARNING, not all the requested items have been removed!')
write('To_remove : ',to_remove)
write('removed : ',removed)
write('Difference: ',list(set(to_remove) - set(removed) ))
return cleaned_logfile | 5e066584488230e777684fcf4e8d25784343afaf | 1,085 |
def no_red_sum(tokens):
"""Using import json is cheating, let's parse it ourselves in a sinlge pass. Hope you like stacks."""
sums = [0]
stack = []
is_red = False
for token in tokens:
if token == 'red' and not is_red and stack[-1] == '{':
is_red = True
sums[-1] = 0
stack.append('red')
elif token == '{':
sums.append(0)
stack.append('{')
elif token == '}':
last_sum = sums.pop()
sums[-1] += last_sum
if stack[-1] == 'red':
stack.pop()
is_red = False
stack.pop()
elif token == '[':
stack.append('[')
sums.append(0)
elif token == ']':
stack.pop()
last_sum = sums.pop()
sums[-1] += last_sum
elif not is_red:
sums[-1] += neg_safe_cast(token)
assert len(sums) == 1
return sums.pop() | 7945618bcc76c03b457cacf4f995e767d5b6160c | 1,086 |
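# Usage sketch: no_red_sum expects a pre-tokenized stream and an external neg_safe_cast
# helper, neither of which is shown above. The tokenizer and helper below are assumptions
# made for illustration only (Advent of Code 2015 day 12 style JSON input).
import re
def neg_safe_cast(token):
    # assumed helper: integer value of a numeric token, 0 for anything else
    try:
        return int(token)
    except ValueError:
        return 0
def tokenize(json_text):
    # assumed tokenizer: keep braces/brackets, numbers and bare words; drop commas, colons, quotes
    return [t for t in re.split(r'([{}\[\]])|[,:"]', json_text) if t]
print(no_red_sum(tokenize('[1,{"c":"red","b":2},3]')))  # 4 -- the object containing "red" is ignored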
def get_all_projects():
"""
Return a list with all the projects (open and closed).
"""
return gazu.project.all_projects() | 7279d46e9049f3ff9802dcc93b8e41b2e118c9a2 | 1,087 |
def install(opts):
"""
Install one or more resources.
"""
resources = _load(opts.resources, opts.output_dir)
if opts.all:
opts.resource_names = ALL
success = _install(resources, opts.resource_names, opts.mirror_url,
opts.destination, opts.skip_top_level)
if success:
if not opts.quiet:
print("All resources successfully installed")
return 0
else:
if not opts.quiet:
invalid = _invalid(resources, opts.resource_names)
print("Unable to install some resources: {}".format(', '.join(invalid)))
return 1 | 9487490eb9ccb13ce7f9797defacf823161a60a9 | 1,088 |
import torch
import torch.nn.functional as F
def seq2seq_att(mems, lengths, state, att_net=None):
"""
:param mems: [B, T, D_mem] This are the memories.
I call memory for this variable because I think attention is just like read something and then
make alignments with your memories.
This memory here is usually the input hidden state of the encoder.
:param lengths: [B]
:param state: [B, D_state]
I call state for this variable because it's the state I percepts at this time step.
:param att_net: This is the attention network that will be used to calculate the alignment score between
state and memories.
input of the att_net is mems and state with shape:
mems: [exB, D_mem]
state: [exB, D_state]
return of the att_net is [exB, 1]
So any function that map a vector to a scalar could work.
:return: [B, D_result]
"""
d_state = state.size(1)
if not att_net:
return state
else:
batch_list_mems = []
batch_list_state = []
for i, l in enumerate(lengths):
b_mems = mems[i, :l] # [T, D_mem]
batch_list_mems.append(b_mems)
b_state = state[i].expand(b_mems.size(0), d_state) # [T, D_state]
batch_list_state.append(b_state)
packed_sequence_mems = torch.cat(batch_list_mems, 0) # [sum(l), D_mem]
packed_sequence_state = torch.cat(batch_list_state, 0) # [sum(l), D_state]
align_score = att_net(packed_sequence_mems, packed_sequence_state) # [sum(l), 1]
# The score grouped as [(a1, a2, a3), (a1, a2), (a1, a2, a3, a4)].
# aligned_seq = packed_sequence_mems * align_score
start = 0
result_list = []
for i, l in enumerate(lengths):
end = start + l
b_mems = packed_sequence_mems[start:end, :] # [l, D_mems]
b_score = align_score[start:end, :] # [l, 1]
softed_b_score = F.softmax(b_score.transpose(0, 1)).transpose(0, 1) # [l, 1]
weighted_sum = torch.sum(b_mems * softed_b_score, dim=0, keepdim=False) # [D_mems]
result_list.append(weighted_sum)
start = end
result = torch.stack(result_list, dim=0)
return result | 992fa8329443a2505c6ff0d83e9c34e69be620d4 | 1,089 |
def convert_for_webkit(new_path, filename, reference_support_info, host=Host()):
""" Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.
Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
contents = host.filesystem.read_binary_file(filename)
converter = _W3CTestConverter(new_path, filename, reference_support_info, host)
if filename.endswith('.css'):
return converter.add_webkit_prefix_to_unprefixed_properties(contents.decode('utf-8'))
else:
converter.feed(contents.decode('utf-8'))
converter.close()
return converter.output() | 098774b42f9086b1b61dc231318731ab7eb1a998 | 1,090 |
import re
def clean_repeated_symbols(text):
"""
Filters text, replacing symbols repeated more than twice (not allowed
in most languages) with a single repetition of the symbol.
:param text: the text to be filtered
:type: str
:return: the filtered text
:type: str
"""
pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
return pattern.sub(r"\1\1", text) | bfa758994cfae716caaa715d5a990416a300f9d9 | 1,092 |
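# Example:
print(clean_repeated_symbols("soooo goooood!!!!"))  # 'soo good!!'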
import numpy as np
def sample(x, y, numSamples):
    """
    Draws numSamples samples (with replacement) from the distribution defined by
    the values x and the unnormalized weights y. Note that y is normalized in place.
    """
y /= y.sum()
return np.random.choice(x, size=numSamples, replace=True, p=y) | 4cfbb6977bcd5fa43f27de40b15beff487f1c071 | 1,093 |
import numpy as np
def make_path_strictly_increase(path):
"""
Given a warping path, remove all rows that do not
strictly increase from the row before
"""
toKeep = np.ones(path.shape[0])
i0 = 0
for i in range(1, path.shape[0]):
if np.abs(path[i0, 0] - path[i, 0]) >= 1 and np.abs(path[i0, 1] - path[i, 1]) >= 1:
i0 = i
else:
toKeep[i] = 0
return path[toKeep == 1, :] | 1a5043bdb469c9dd3f9bf57e1b9752ebd8567182 | 1,094 |
def set_group_selector(*args):
"""set_group_selector(sel_t grp, sel_t sel) -> int"""
return _idaapi.set_group_selector(*args) | 1fbf3807791bf94511f4c7da52278db2815c757e | 1,096 |
def data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_latency_characteristictraffic_property_name_get(uuid, node_uuid, node_rule_group_uuid, traffic_property_name): # noqa: E501
"""data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_latency_characteristictraffic_property_name_get
returns tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of topology
:type uuid: str
:param node_uuid: Id of node
:type node_uuid: str
:param node_rule_group_uuid: Id of node-rule-group
:type node_rule_group_uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:rtype: TapiTopologyLatencyCharacteristic
"""
return 'do some magic!' | 5f0aff58f5f5e7f72f6622fdb8a400b03f6aae15 | 1,097 |
def getPendingReviewers(db, review):
"""getPendingReviewers(db, review) -> dictionary
Returns a dictionary, like the ones returned by getReviewersAndWatchers(), but
with details about remaining unreviewed changes in the review. Changes not
assigned to a reviewer are handled the same way."""
cursor = db.cursor()
cursor.execute("""SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file
FROM reviewfiles
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'""",
(review.id,))
reviewers = {}
for user_id, changeset_id, file_id in cursor.fetchall():
reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)
return reviewers | 869a6bb752c4e7c1e40a0000b3aceb62adc28ce1 | 1,098 |
import base64
def base64_encode_string(string):
# type: (str or bytes) -> str
"""Base64 encode a string
:param str or bytes string: string to encode
:rtype: str
:return: base64-encoded string
"""
if on_python2():
return base64.b64encode(string)
else:
return str(base64.b64encode(string), 'ascii') | 0c13ca527171fecdbc5eb93376c6019c0b95e2b7 | 1,099 |
def get_error_signature(error_type, n_top, **kwargs):
"""Generates a signature for the specified settings of pose error
calculation.
:param error_type: Type of error.
:param n_top: Top N pose estimates (with the highest score) to be evaluated
for each object class in each image.
:return: Generated signature.
"""
error_sign = "error:" + error_type + "_ntop:" + str(n_top)
if error_type == "vsd":
if kwargs["vsd_tau"] == float("inf"):
vsd_tau_str = "inf"
else:
vsd_tau_str = "{:.3f}".format(kwargs["vsd_tau"])
error_sign += "_delta:{:.3f}_tau:{}".format(kwargs["vsd_delta"], vsd_tau_str)
return error_sign | 82036a650862a7b3a6b55493458ff3b7dc6cd2ff | 1,100 |
import re
def clean_text_from_multiple_consecutive_whitespaces(text):
"""Cleans the text from multiple consecutive whitespaces, by replacing these with a single whitespace."""
multi_space_regex = re.compile(r"\s+", re.IGNORECASE)
return re.sub(multi_space_regex, ' ', text) | f25b27da070d6a984012a4cb5b1ae4a477713033 | 1,101 |
import re
def run(filename):
"""
MUST HAVE FUNCTION!
Begins the plugin processing
Returns a list of endpoints
"""
run_results = set()
r_rule = re.compile(r"(Route\(\"[^,)]+)", flags=re.IGNORECASE)
for line in filename:
try:
route_match = r_rule.search(line)
if route_match:
run_results.add(route_match.group(1)[7:-1])
except Exception:
# Print the offending line the BurpSuite's extension Output tab
print("Error! Couldn't parse: %s" % line)
return list(run_results) | e5ad233e3c3e07769b2f8f61657fa712b1f151c4 | 1,102 |
import re
from unittest.mock import patch
async def setup_script(hass, notify_q, notify_q2, now, source, config=None):
"""Initialize and load the given pyscript."""
conf_dir = hass.config.path(FOLDER)
file_contents = {f"{conf_dir}/hello.py": source}
Function.hass = None
mock_open = MockOpen()
for key, value in file_contents.items():
mock_open[key].read_data = value
def isfile_side_effect(arg):
return arg in file_contents
def glob_side_effect(path, recursive=None):
result = []
path_re = path.replace("*", "[^/]*").replace(".", "\\.")
path_re = path_re.replace("[^/]*[^/]*/", ".*")
for this_path in file_contents:
if re.match(path_re, this_path):
result.append(this_path)
return result
if not config:
config = {DOMAIN: {CONF_ALLOW_ALL_IMPORTS: True}}
with patch("custom_components.pyscript.os.path.isdir", return_value=True), patch(
"custom_components.pyscript.glob.iglob"
) as mock_glob, patch("custom_components.pyscript.global_ctx.open", mock_open), patch(
"custom_components.pyscript.trigger.dt_now", return_value=now
), patch(
"custom_components.pyscript.open", mock_open
), patch(
"homeassistant.config.load_yaml_config_file", return_value=config
), patch(
"custom_components.pyscript.install_requirements", return_value=None,
), patch(
"custom_components.pyscript.watchdog_start", return_value=None
), patch(
"custom_components.pyscript.os.path.getmtime", return_value=1000
), patch(
"custom_components.pyscript.global_ctx.os.path.getmtime", return_value=1000
), patch(
"custom_components.pyscript.os.path.isfile"
) as mock_isfile:
mock_isfile.side_effect = isfile_side_effect
mock_glob.side_effect = glob_side_effect
assert await async_setup_component(hass, "pyscript", config)
#
# I'm not sure how to run the mock all the time, so just force the dt_now()
# trigger function to return the given list of times in now.
#
def return_next_time():
nonlocal now
if isinstance(now, list):
if len(now) > 1:
return now.pop(0)
return now[0]
return now
trigger.__dict__["dt_now"] = return_next_time
if notify_q or notify_q2:
async def state_changed(event):
var_name = event.data["entity_id"]
if var_name == "pyscript.done":
value = event.data["new_state"].state
if notify_q:
await notify_q.put(value)
if var_name == "pyscript.done2":
value = event.data["new_state"].state
if notify_q2:
await notify_q2.put(value)
hass.bus.async_listen(EVENT_STATE_CHANGED, state_changed) | d1d194af7686cbf5bb5e61ebc692d7fd7e9aae71 | 1,103 |
def get_assignment_grade_summaries(course_id):
""" return a list of a course's assignments with a grade summary for each
https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_assignments """
assignments = api.get_list('courses/{}/analytics/assignments'.format(course_id))
return [] if 'errors' in assignments else assignments | 69dddeee4389ee457201c3d1195537f869d0ea57 | 1,104 |
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return sorted(
[
desc for (_, desc) in XModuleDescriptor.load_classes()
] + XBLOCK_CLASSES,
key=str
) | e19b7957b3a65495e1d0fb7c33b4b2748bc1473f | 1,105 |
import numpy as np
def e3p0(tof, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10):
"""
Background function for TOF spectra
Parameters
----------
tof : array-like
The time-of-flight spectrum
p1 : float
constant background
p2 : float
multiplier on 1st exponential
p3 : float
multiplier on time-of-flight in 1st exponent
p4 : float
constant added to 1st exponent
p5-p10 : float
(see equation in notes)
Returns
-------
e3p0 : array-like
The function in the length of t (see notes)
Notes
-----
.. math:: f(t) = p1 + p2e^{p3t+p4} + p5e^{p6t+p7} + p8e^{p9t+p10}
"""
return p1 + p2*np.exp(p3*tof+p4) + p5*np.exp(p6*tof+p7) + p8*np.exp(p9*tof+p10) | 0ad3aad94c4e8b5f48a6ec4458329d0de6eda612 | 1,106 |
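# Usage sketch for e3p0 above: evaluate the triple-exponential-plus-constant
# background on a synthetic time-of-flight axis (the parameter values are made up).
tof_axis = np.linspace(0.0, 100.0, 5)
background = e3p0(tof_axis, 10.0, 5.0, -0.05, 0.0, 2.0, -0.02, 0.0, 1.0, -0.01, 0.0)
print(background)  # array of 5 background values, decaying toward p1 = 10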
import numpy as np
def dice_coefficient(pred, gt):
    """
    Computes the dice coefficient between two masks
    :param pred: predicted mask - values in [0, 1]
    :param gt: ground truth mask - values in [0, 1]
:return: dice coefficient
"""
d = (2 * np.sum(pred * gt) + 1) / ((np.sum(pred) + np.sum(gt)) + 1)
return d | d1d97b749ce365c6181a2b17c41d946195339c96 | 1,107 |
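# Worked example for dice_coefficient above (binary masks):
# intersection = 1, |pred| = 2, |gt| = 1, so d = (2*1 + 1) / (2 + 1 + 1) = 0.75.
pred_mask = np.array([[1, 1], [0, 0]])
gt_mask = np.array([[1, 0], [0, 0]])
print(dice_coefficient(pred_mask, gt_mask))  # 0.75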
def get_keep_score(source_counts, prediction_counts, target_counts):
"""Compute the keep score (Equation 5 in the paper)."""
source_and_prediction_counts = source_counts & prediction_counts
source_and_target_counts = source_counts & target_counts
true_positives = sum((source_and_prediction_counts & source_and_target_counts).values())
selected = sum(source_and_prediction_counts.values())
relevant = sum(source_and_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant) | ce2d94f3ffc353a3a9843f5c0b6a846608efe962 | 1,108 |
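# Illustration of the Counter arithmetic get_keep_score relies on (the final
# F-beta computation is delegated to the project's _get_fbeta_score helper):
from collections import Counter
source_counts = Counter("the cat sat".split())
prediction_counts = Counter("the cat".split())
target_counts = Counter("the sat".split())
# tokens kept by the prediction AND by the target, relative to the source
print((source_counts & prediction_counts) & (source_counts & target_counts))  # Counter({'the': 1})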
def dict_comparator(first_dict, second_dict):
"""
Функция проверяет на совпадение множеств пар ключ-значение для двух словарей
Возвращает True в случае совпадения, иначе False
"""
if set(first_dict.keys()) != set(second_dict.keys()):
return False
for key, value in first_dict.items():
if value != second_dict[key]:
return False
return True | 47f28e8810b8437cc0e3bfca6ccba6734c988890 | 1,110 |
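# Quick usage sketch for dict_comparator above:
assert dict_comparator({"a": 1, "b": 2}, {"b": 2, "a": 1}) is True
assert dict_comparator({"a": 1}, {"a": 2}) is False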
def word_check(seq1, seq2, word):
    """Return the index in seq2 of the first length-`word` substring of seq1 that occurs in seq2, or -1 if there is none."""
    for i in range(len(seq1) - word + 1):
        pos = seq2.find(seq1[i:i + word])
        if pos > -1:
            return pos
return -1 | 86b4cad571fdbf55073f30f9c5fd9a5e25da46d7 | 1,111 |
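# Usage sketch for word_check above: "cde" (a length-3 substring of seq1) first
# occurs in seq2 at index 2, so that index is returned; -1 means no shared word.
print(word_check("abcdef", "xxcdexx", 3))  # 2
print(word_check("abc", "xyz", 3))         # -1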
def plasma_parameter(N_particles, N_grid, dx):
"""
Estimates the plasma parameter as the number of particles per step.
Parameters
----------
N_particles : int, float
Number of physical particles
N_grid : int
Number of grid cells
dx : float
grid step size
"""
return (N_particles / N_grid) * dx | 51d3b96ccba2689db461fd6117cb5c2961dc3812 | 1,112 |
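# Worked example for plasma_parameter above:
# 10**6 particles on 100 cells with dx = 0.1 gives (10**6 / 100) * 0.1 = 1000.0.
print(plasma_parameter(1e6, 100, 0.1))  # 1000.0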
import torch
def get_ious_and_iou_loss(inputs,
targets,
weight=None,
loss_type="iou",
reduction="none"):
"""
Compute iou loss of type ['iou', 'giou', 'linear_iou']
Args:
inputs (tensor): pred values
targets (tensor): target values
weight (tensor): loss weight
        box_mode is fixed to 'lr' (left/right distances); it is not a parameter of this function.
loss_type (str): 'giou' or 'iou' or 'linear_iou'
reduction (str): reduction manner
Returns:
loss (tensor): computed iou loss.
"""
# box_mode = "lr"
inputs = torch.cat((-inputs[..., :1], inputs[..., 1:]), dim=-1)
targets = torch.cat((-targets[..., :1], targets[..., 1:]), dim=-1)
eps = torch.finfo(torch.float32).eps
inputs_area = (inputs[..., 1] - inputs[..., 0]).clamp_(min=0)
targets_area = (targets[..., 1] - targets[..., 0]).clamp_(min=0)
w_intersect = (torch.min(inputs[..., 1], targets[..., 1])
- torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
area_intersect = w_intersect
area_union = targets_area + inputs_area - area_intersect
ious = area_intersect / area_union.clamp(min=eps)
if loss_type == "iou":
loss = -ious.clamp(min=eps).log()
elif loss_type == "linear_iou":
loss = 1 - ious
elif loss_type == "giou":
g_w_intersect = torch.max(inputs[..., 1], targets[..., 1]) \
- torch.min(inputs[..., 0], targets[..., 0])
ac_uion = g_w_intersect
gious = ious - (ac_uion - area_union) / ac_uion.clamp(min=eps)
loss = 1 - gious
else:
raise NotImplementedError
if weight is not None:
loss = loss * weight.view(loss.size())
if reduction == "mean":
loss = loss.sum() / max(weight.sum().item(), eps)
else:
if reduction == "mean":
loss = loss.mean()
if reduction == "sum":
loss = loss.sum()
return ious, loss | 302fb70c888caf33cf0077b553cd0d055ff4003a | 1,113 |
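# Worked example for get_ious_and_iou_loss above, using 'lr' (left/right distance)
# intervals: pred spans [-2, 3] and target spans [-1, 4], so the intersection is 4,
# the union is 6 and the IoU is 4/6 ~= 0.667 (linear_iou loss = 1 - IoU ~= 0.333).
pred_lr = torch.tensor([[2.0, 3.0]])    # 2 to the left, 3 to the right
target_lr = torch.tensor([[1.0, 4.0]])  # 1 to the left, 4 to the right
ious, loss = get_ious_and_iou_loss(pred_lr, target_lr, loss_type="linear_iou")
print(ious, loss)  # tensor([0.6667]) tensor([0.3333])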
def load_chembl():
"""Downloads a small subset of the ChEMBL dataset.
Returns
-------
ic50_train: sparse matrix
sparse train matrix
ic50_test: sparse matrix
sparse test matrix
feat: sparse matrix
sparse row features
"""
# load bioactivity and features
ic50 = load_one("chembl-IC50-346targets.mm")
feat = load_one("chembl-IC50-compound-feat.mm")
## creating train and test sets
ic50_train, ic50_test = make_train_test(ic50, 0.2)
return (ic50_train, ic50_test, feat) | f9c5017ab7892f7fbf6c3ee1a1dd9da0e322f66f | 1,114 |
import re
def validate_name_dynamotable(table_name):
"""Validate if table name matches DynamoDB naming standards."""
    if not isinstance(table_name, str):
        raise ValueError('Input argument "table_name" must be a string')
    if len(table_name) < 3 or len(table_name) > (255 - 5):
        # note: deduct 5 chars to allow postfix space (e.g. for .lock)
        return (False, 'TableName should be of length: [3-255]')
    if not re.match(r'^[a-zA-Z0-9]', table_name):
        return (False, 'TableName should start with a letter or number')
if re.search(r'[-\._]{2}', table_name):
return (False, 'TableName can\'t contain two special characters [-, ., _] in a row')
if not re.match(r'^[-a-zA-Z0-9\._]*$', table_name):
return (False, re.sub(' +', ' ', 'TableName contains invalid character. \
Allowed characters: [a-z, A-Z, 0-9, \'.\', \'-\', \'_\']'))
return (True, 'Success') | 139391e3ece6cacae24d5bd72fd0fd77b65ecc41 | 1,115 |
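# Usage sketch for validate_name_dynamotable above:
print(validate_name_dynamotable("my_table"))   # (True, 'Success')
print(validate_name_dynamotable("ab"))         # (False, 'TableName should be of length: [3-255]')
print(validate_name_dynamotable("bad..name"))  # (False, "TableName can't contain two special characters [-, ., _] in a row")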
def delete_item(item_id):
"""
    The method deletes the item with the provided id.
:param item_id: id of the item to be deleted
:return: http response
"""
try:
if DATA_CONTROLLER.delete_bucketlist_item(item_id):
return make_response("", 200)
else:
return make_response("", 404)
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response | 95e0bb38d30cbda6d617bb5f396dba4cfd4ef328 | 1,116 |
def import_from_text_file(filename, defaultExt, readDataFcn, verbose=False):
"""
Opens a given text file and reads data using the specified function
Parameters
----------
filename : str
the path of a file
defaultExt : str
the default extension of the file
readDataFcn : callable
the function to read data from the file. Takes the file as its only parameter.
verbose : bool (optional)
if True prints messages on console (default is False)
Returns
-------
unknown
the output of the readDataFcn
"""
return _open_file(filename, defaultExt, 'r', readDataFcn, verbose) | 4f5602e09d02446ce9770656e4b42df5dd018ccd | 1,117 |
def is_template_definition(metric_name):
"""Return if the given metric name is a template definition by
convention."""
fields = metric_name.split('/')
return fields[0].lower() == TEMPLATE_DEFINITION_PREFIX | da5fb191cf451b542a656c352d64258be74f7710 | 1,118 |
def _cm_ramp_points_and_voltages(abf):
"""
Return [points, voltages] if the sweep contains a ramp suitable for
    capacitance calculation using a matching downward and upward ramp.
points is a list of 3 numbers depicting index values important to this
ramp. The first number is the index at the start of the downward ramp, the
second is the index of its nadir, and the third is the index where it
returns to the original level.
voltages is a list of 2 numbers: voltage before and during the ramp.
"""
assert isinstance(abf, pyabf.ABF)
if abf.sweepUnitsY != "pA":
raise Exception("must be in voltage clamp configuration")
for i, p1 in enumerate(abf.sweepEpochs.p1s):
if i == 0:
continue
# ensure this sweep and the last are both ramps
if abf.sweepEpochs.types[i] != "Ramp":
continue
if abf.sweepEpochs.types[i-1] != "Ramp":
continue
# ensure the levels are different
if abf.sweepEpochs.levels[i] == abf.sweepEpochs.levels[i-1]:
continue
ptStart = abf.sweepEpochs.p1s[i-1]
ptTransition = abf.sweepEpochs.p1s[i]
ptEnd = abf.sweepEpochs.p2s[i]
points = [ptStart, ptTransition, ptEnd]
voltageBefore = abf.sweepEpochs.levels[i-1]
voltageDuring = abf.sweepEpochs.levels[i]
voltages = [voltageBefore, voltageDuring]
return [points, voltages]
return None | c73b5f5cbc44c0794b332f6010864e9f25fcff0c | 1,119 |
def single_model_embeddings_specify(single_model_embeddings):
"""Returns an instance of MultiTaskLSTMCRF initialized with the default configuration file,
loaded embeddings and single specified model."""
single_model_embeddings.specify()
return single_model_embeddings | fe23c571ca29dbbf87cbccdbfc1e11aaaf784c01 | 1,120 |
import bz2
import gzip
import json
def load_json(filename):
"""
Load a JSON file that may be .bz2 or .gz compressed
"""
if '.bz2' in filename:
with bz2.open(filename, 'rt') as infile:
return json.load(infile)
elif '.gz' in filename:
with gzip.open(filename, 'rt') as infile:
return json.load(infile)
else:
with open(filename, 'rt') as infile:
return json.load(infile) | 1b985db386e85c3b8e87911d89a7652133bfee7b | 1,121 |
def get_future_contracts(underlying_symbol, date=None):
"""
获取某期货品种在策略当前日期的可交易合约标的列表
:param security 期货合约品种,如 ‘AG’(白银)
:return 某期货品种在策略当前日期的可交易合约标的列表
"""
assert underlying_symbol, "underlying_symbol is required"
dt = to_date_str(date)
return JQDataClient.instance().get_future_contracts(underlying_symbol=underlying_symbol, dt=dt) | 9945c897c643e410f8a127da5b77525d6e3ba28c | 1,122 |
import urllib3
import requests
import pandas as pd
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
def rodeo_query(fc, pallet): # 3.5-4 seconds for 150 elem
"""
Get pd DataFrame with info from rodeo about pallet/tote in TS Out.
:param fc: str
:param pallet: Pallet or Tote are accepted.
:return: df or "No data was found" if status_code = 200, "There was an error while connecting to {url}"
otherwise.
"""
url = f"https://rodeo-dub.amazon.com/{fc}/Search?_enabledColumns=on&enabledColumns=ASIN_TITLES&enabledColumns" \
f"=FC_SKU&enabledColumns=OUTER_SCANNABLE_ID&&searchKey={pallet} "
urllib3.disable_warnings() # prevent warnings for unverified request
print(COLOR + "Downloading manifested pallet's content from Rodeo.")
with requests.Session() as req:
resp = req.get(url,
timeout=30,
verify=False,
allow_redirects=True,
auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL))
if resp.status_code == 200:
data = pd.read_html(resp.text, flavor=None, header=0, parse_dates=["Need To Ship By Date"])
if data is not None and len(data[0]) > 0:
df = pd.concat(data, sort=False)
df = df.drop(columns='Unnamed: 0')
return df
else:
return f"No data was found at {url}\nPlease check that {pallet} is correct.\nIf the error persists, " \
f"please check Rodeo status for your FC: {fc}."
else:
# return resp.raise_for_status() # to see error
return f"There was an error while connecting to {url}" | 926a9f42b5ed82128d4e5fae4adc2c74dab3e567 | 1,123 |
import itertools
def plan_to_joint_configuration(robot, qgoal, pname='BiRRT', max_iters=20,
max_ppiters=40, try_swap=False):
"""
Plan a trajectory to the given `qgoal` configuration.
Parameters
----------
robot: orpy.Robot
The OpenRAVE robot
qgoal: array_like
The goal configuration
pname: str
Name of the planning algorithm. Available options are: `BasicRRT`, `BiRRT`
max_iters: float
Maximum iterations for the planning stage
max_ppiters: float
Maximum iterations for the post-processing stage. It will use a parabolic
        smoother which short-cuts the trajectory and then smooths it
try_swap: bool
If set, will compute the direct and reversed trajectory. The minimum
duration trajectory is used.
Returns
-------
traj: orpy.Trajectory
Planned trajectory. If plan fails, this function returns `None`.
"""
qstart = robot.GetActiveDOFValues()
env = robot.GetEnv()
planner = orpy.RaveCreatePlanner(env, pname)
params = orpy.Planner.PlannerParameters()
params.SetMaxIterations(max_iters)
if max_ppiters > 0:
params.SetPostProcessing('ParabolicSmoother',
'<_nmaxiterations>{0}</_nmaxiterations>'.format(max_ppiters))
else:
params.SetPostProcessing('', '')
# Plan trajectory
best_traj = None
min_duration = float('inf')
reversed_is_better = False
count = 0
for qa, qb in itertools.permutations([qstart, qgoal], 2):
count += 1
with robot:
robot.SetActiveDOFValues(qa)
params.SetGoalConfig(qb)
params.SetRobotActiveJoints(robot)
initsuccess = planner.InitPlan(robot, params)
if initsuccess:
traj = orpy.RaveCreateTrajectory(env, '')
status = planner.PlanPath(traj) # Plan the trajectory
if status == orpy.PlannerStatus.HasSolution:
duration = traj.GetDuration()
if duration < min_duration:
min_duration = duration
best_traj = orpy.RaveCreateTrajectory(env, traj.GetXMLId())
best_traj.Clone(traj, 0)
if count == 2:
reversed_is_better = True
if not try_swap:
break
# Check if we need to reverse the trajectory
if reversed_is_better:
best_traj = orpy.planningutils.ReverseTrajectory(best_traj)
return best_traj | 78bf727bede2d886ba93825e3a0cd8ccaa99f57e | 1,124 |
def _get_texinfo(data):
"""Return the texture information of a texture data.
Arguments:
* data: the texture data as an array.
Returns:
* texinfo: a dictionary with the information related to the texture data.
"""
assert data.ndim == 3
size = data.shape[:2]
if size[0] == 1:
ndim = 1
elif size[0] > 1:
ndim = 2
ncomponents = data.shape[2]
return dict(size=size, ndim=ndim, ncomponents=ncomponents) | 9dfa3b88e65b4c7b7eaa60149f4f24381b36e762 | 1,125 |
def set_featured_notebooks(notebook_ids): # noqa: E501
"""set_featured_notebooks
:param notebook_ids: Array of notebook IDs to be featured.
:type notebook_ids: List[str]
:rtype: None
"""
update_multiple(ApiNotebook, [], "featured", False)
if notebook_ids:
update_multiple(ApiNotebook, notebook_ids, "featured", True)
return None, 200 | 7add2e120bf803cf8fa36c0fa56c854654c447fa | 1,126 |
from math import pi
def speed_to_cadences(bicycle, speed, digits=None):
"""
Return cadences in hertz (revolutions per second).
Speed is measured in kilometers per hour.
Assume the following bicycle attributes are non-null and non-empty:
- front_cogs
- rear_cogs
- crank_length
- rear_wheel
Raise a ``ValueError``, if that is not the case.
EXAMPLES::
>>> w = Wheel(diameter=600)
>>> b = Bicycle(front_cogs=[40], rear_cogs=[20, 30], crank_length=100, rear_wheel=w)
>>> speed_to_cadences(b, 18.1, digits=1)
{(40, 30): 2.0, (40, 20): 1.3}
"""
b = bicycle
attrs = ['front_cogs', 'rear_cogs', 'crank_length', 'rear_wheel']
check_attrs(b, *attrs)
check_attrs(b.rear_wheel, 'diameter')
gr = gain_ratios(b)
result = {}
for (k, g) in gr.items():
result[k] = speed/(2*pi*b.crank_length*g*(3600/1e6))
if digits is not None:
result = {k: round(v, digits) for k, v in result.items()}
return result | 358343831e341f49facd8b2c0af940ee765083aa | 1,127 |
import hashlib
def _gen_version(fields):
"""Looks at BotGroupConfig fields and derives a digest that summarizes them.
This digest is going to be sent to the bot in /handshake, and bot would
include it in its state (and thus send it with each /poll). If server detects
that the bot is using older version of the config, it would ask the bot
to restart.
Args:
fields: dict with BotGroupConfig fields (without 'version').
Returns:
    A string that is going to be used as the 'version' field of the BotGroupConfig tuple.
"""
# Just hash JSON representation (with sorted keys). Assumes it is stable
# enough. Add a prefix and trim a bit, to clarify that is it not git hash or
# anything like that, but just a dumb hash of the actual config.
digest = hashlib.sha256(utils.encode_to_json(fields)).hexdigest()
return 'hash:' + digest[:14] | a4bd4420ce548f8a0c40f3120c119f89c158a371 | 1,128 |
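# Minimal sketch of the same idea using only the standard library; the project's
# utils.encode_to_json is replaced here with json.dumps(sort_keys=True), which is
# an assumption about what a "stable JSON representation" means.
import hashlib
import json

def _gen_version_sketch(fields):
    digest = hashlib.sha256(json.dumps(fields, sort_keys=True).encode("utf-8")).hexdigest()
    return 'hash:' + digest[:14]

print(_gen_version_sketch({"dimensions": {"pool": ["default"]}, "owners": []}))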
def pay_and_save_financing(req: request, request_json, account_id):
"""Set up the financing statement, pay if there is an account id, and save the data."""
# Charge a fee.
token: dict = g.jwt_oidc_token_info
statement = FinancingStatement.create_from_json(request_json, account_id, token.get('username', None))
invoice_id = None
registration = statement.registration[0]
pay_trans_type, fee_quantity = resource_utils.get_payment_type_financing(registration)
pay_ref = None
if not is_reg_staff_account(account_id):
pay_account_id: str = account_id if not is_sbc_office_account(account_id) else None
payment = Payment(jwt=jwt.get_token_auth_header(),
account_id=pay_account_id,
details=resource_utils.get_payment_details_financing(registration))
pay_ref = payment.create_payment(pay_trans_type, fee_quantity, None, registration.client_reference_id)
else:
payment_info = resource_utils.build_staff_registration_payment(req, pay_trans_type, fee_quantity)
payment = Payment(jwt=jwt.get_token_auth_header(),
account_id=None,
details=resource_utils.get_payment_details_financing(registration))
pay_ref = payment.create_payment_staff_registration(payment_info, registration.client_reference_id)
invoice_id = pay_ref['invoiceId']
registration.pay_invoice_id = int(invoice_id)
registration.pay_path = pay_ref['receipt']
# Try to save the financing statement: failure throws an exception.
try:
statement.save()
except Exception as db_exception: # noqa: B902; handle all db related errors.
current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, 'financing', repr(db_exception)))
if account_id and invoice_id is not None:
current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, 'financing', invoice_id))
try:
payment.cancel_payment(invoice_id)
except SBCPaymentException as cancel_exception:
current_app.logger.error(PAY_REFUND_ERROR.format(account_id, 'financing', invoice_id,
repr(cancel_exception)))
raise db_exception
return statement | 0de221d2e6acb090e2e2a135b1280f2daa73b63c | 1,130 |
def resolve_cmds_path(cmds, singlesrv_mode):
"""Resolve the cmds path if in single server mode.
Args:
cmds: A list of sender/receiver commands.
singlesrv_mode: A bool on whether running in single server mode.
Returns:
The commands that path has been resolved if needed
(in single server mode).
"""
if not singlesrv_mode:
return cmds
r_cmds = []
for cmd in cmds:
r_cmds.append(_resolve_binary_path_for_timed_cmd(cmd))
return r_cmds | 6d7e673a48c657a446785716fc09f47d4f87d81d | 1,131 |
import base64
def _encode_base64(data: str) -> str:
"""Base 64 encodes a string."""
ebytes = base64.b64encode(data.encode("utf-8"))
estring = str(ebytes, "utf-8")
return estring | 5304972fec4cc54d9fa652cbd977b7c069d228d5 | 1,132 |
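# Usage sketch for _encode_base64 above:
print(_encode_base64("hello"))  # aGVsbG8=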
from typing import Mapping
from typing import Any
def workflow_spec(
dag: DAG,
workflow: Workflow,
) -> Mapping[str, Any]:
"""
Return a minimal representation of a WorkflowSpec for the supplied DAG and metadata.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#workflowspec
Parameters
----------
dag
The DAG to generate the spec for
workflow
The configuration for this workflow
Raises
------
ValueError
If any of the extra_spec_options collides with a property used by the runtime.
"""
validate_parameters(inputs=dag.inputs, params=workflow.params)
spec = {
"entrypoint": BASE_DAG_NAME,
"templates": _templates(
node=dag,
container_image=workflow.container_image,
container_command=workflow.container_entrypoint_to_dag_cli,
params=workflow.params,
),
}
if workflow.params:
spec["arguments"] = _workflow_spec_arguments(workflow.params)
spec = with_extra_spec_options(
original=spec,
extra_options=workflow.extra_spec_options,
context="the Workflow spec",
)
return spec | d13b7242f3158ea7528141ca65a17df01968c14e | 1,133 |
def redirect(request):
"""
Handling what happens when the groupcode is submitted by user and handles input from user's when they are answering
questions.
:param request:
:return: The methods returns the student view page which is the actual game to the user if they entered a correct
groupcode, it will also return messages when user's are answering questions in the quiz telling them if the answers
are correct or not
"""
"""handling what happens when the groupcode is entered and submitted aswell as the question logic"""
global score
global num
map_check = False
    # Below we check whether the submitted form is the group code or an answer to a question
    # process the group code passed from the landing page
if request.method == 'POST' and 'submit-groupcode' in request.POST:
# Get inputted groupcode from the user
groupcode = str(request.POST.get('groupCode'))
# if the group code exists, load the treasure hunt page with the correct questions
if Gamecode.objects.filter(groupcode=groupcode).exists():
#Below is for question loading and getting question informations
questionNum = Gamecode.objects.get(groupcode=groupcode)
mapCheck = questionNum.map
routeID = questionNum.routeID_id
num = questionNum.questionNum
score = questionNum.score
# Get question by using the question number the group is currently on
info = Questions.objects.filter(node_num=int(num),routeID=routeID)
# Add group code into user's session
request.session['groupcode'] = groupcode
# Add score into user's session
request.session['score'] = score
# Add routeID into user's session
request.session['routeID'] = routeID
            # To show the correct map for the user to go to when they join the game after a question is answered
            # but the map check is not yet done
if num >1:
print(num)
#set map value to the previous question
num -=1
print(num)
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
#Return number to the correct question number
num +=1
else:
latest_question = Questions.objects.get(node_num=num , routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
return render(request, 'app/studentview.html',{"groupcode":groupcode, "data":info, "id":id, "score":score,"map_check":mapCheck,"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name})
# otherwise show an error message
else:
print("Wrong")
messages.error(request, 'The game code does not exist')
return render(request, 'app/index.html')
# if an answer to question is submitted, check if it is correct
if request.method == 'POST' and 'submit-question' in request.POST:
# Get routeID from user's session
routeID = request.session['routeID']
# Get groupcode from user's session
groupcode = request.session['groupcode']
# Get text from the input answer box
data = str(request.POST.get('answer'))
# Retrieve the current question the group is on from the database
questionNum = Gamecode.objects.get(groupcode=groupcode)
# if answer is correct for the current node, move onto the next question if it exists,
# otherwise show they have finished the quiz
if Questions.objects.filter(answers__icontains=data.strip(), node_num=int(num), routeID=routeID).exists():
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
map_check = "True"
# Add 1 to the counter so the questions moves on to the next one
num += 1
            # Check whether the user is on the last question
if Questions.objects.filter(node_num=int(num), routeID=routeID).exists():
score += 3
questionNum.map = map_check
questionNum.questionNum = num
questionNum.score = score
questionNum.save()
print(location)
info = Questions.objects.filter(node_num=num, routeID=routeID)
messages.success(request, 'Correct!') #Generate message saying correct
return render(request, 'app/studentview.html',{"groupcode":groupcode,"data":info,"id":id,
"score":score, "map_check":map_check,
"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name})
# Case when the user is on the last question
else:
# To make sure user stays on the last question
num -=1
questionNum.questionNum = num
questionNum.map = map_check
questionNum.save()
info = Questions.objects.filter(node_num=num,routeID=routeID)
# Generate message when user finish the quiz
messages.success(request, 'You have finished the quiz, well done!')
# Return the information back to user's view
return render(request, 'app/studentview.html', {"groupcode":groupcode,"data":info,"id":id,
"score":score, "map_check":map_check,
"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name,"Finished":"True"})
# Case when user gets the answer wrong
else:
info = Questions.objects.filter(node_num=num, routeID=routeID)
# Return incorrect message
messages.error(request, 'That is the wrong answer, please try again')
# Return the information back to user's view
return render(request, 'app/studentview.html', {"groupcode": groupcode, "data": info, "id": id,"score":score})
# Case when user refreshes the page during the game
if 'groupcode' in request.session:
# Retrieve information about the questions
groupcode = request.session['groupcode']
routeID = request.session['routeID']
questionNum = Gamecode.objects.get(groupcode=groupcode)
num = questionNum.questionNum
mapcheck = questionNum.map
# Get question from the database using num counter
info = Questions.objects.filter(node_num=int(num), routeID=routeID)
if num > 1:
print(num)
# set map value to the previous question
num -= 1
print(num)
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
# Return number to the correct question number
num += 1
else:
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
# Return the information back to user's view
return render(request, 'app/studentview.html',
{"groupcode": groupcode, "data": info, "id": id, "score": score, "map_check": mapcheck,
"location": location, "longtitude": longtitude,
"latitude": latitude, "answer": place_name})
else:
# Redirect user back to start page
return render(request, 'app/index.html') | 5f906dae9bde9533b5b09bd5540b8458a766e583 | 1,134 |
import torch
def build_test_fn(policy, optim, log_dir, model_name, train_collector, save_train_buffer, obs_shape, stack_num, env_id,
num_episodes):
""" Build custom test function for maze world environment """
def custom_test_fn(epoch, env_step):
# Save agent
print(f"Epoch = {epoch}")
torch.save({'model': policy.state_dict(), 'optim': optim.state_dict()},
log_dir + model_name + f'_epoch{epoch}.pth')
if save_train_buffer:
train_collector.buffer.save_hdf5(f'{log_dir}/epoch{epoch}_train_buffer.hdf5')
        # Record the agent's performance in video
policy.eval()
test_env = envpool.make_gym(env_id, num_envs=1, seed=0, episodic_life=False, reward_clip=True, stack_num=4,
gray_scale=False, img_height=160, img_width=160)
collector = ts.data.Collector(policy, test_env, exploration_noise=True)
record.collect_and_record(collector, n_episode=num_episodes // 2, obs_shape=obs_shape, stack_num=stack_num,
log_dir=log_dir, epoch=epoch, starting_episode=0)
collector = ts.data.Collector(policy, test_env, exploration_noise=False)
record.collect_and_record(collector, n_episode=num_episodes // 2, obs_shape=obs_shape, stack_num=stack_num,
log_dir=log_dir, epoch=epoch, starting_episode=num_episodes // 2)
return custom_test_fn | 6fefef23e1a502ce30f556ea9350d933e7303dfd | 1,135 |
def check_score(encoding, min_qual, qual_str):
"""Return True if the average quality score is at least min_qual
"""
qscores = [encoding[q] for q in qual_str]
return sum(qscores) >= min_qual * len(qscores) | 427dd8617d5ab425e3b7989923a271599fc7371a | 1,136 |
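# Usage sketch for check_score above, assuming a Phred+33 style encoding table
# (the actual encoding dict is supplied by the caller):
phred33 = {chr(q + 33): q for q in range(50)}
print(check_score(phred33, 20, "IIII"))  # True  ('I' -> Q40, average 40 >= 20)
print(check_score(phred33, 20, "!!!!"))  # False ('!' -> Q0)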
import functools
import warnings
def add_unsafe_warning(func, fig):
"""
Generate warning if not supported by Paxplot
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if fig._show_unsafe_warning:
warnings.warn(
f'The function you have called ({func.__name__}) is not '
'officially supported by Paxplot, but it may still work. '
'Report issues to '
'https://github.com/kravitsjacob/paxplot/issues',
Warning
)
return func(*args, **kwargs)
return wrapper | 8bca3fbc514315cd4c761b2e8f7f1168e01af7a9 | 1,137 |
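# Usage sketch for add_unsafe_warning above, with a stand-in figure object; the
# real Paxplot figure simply needs a _show_unsafe_warning attribute.
class _FigStub:
    _show_unsafe_warning = True

def _set_title(title):
    return title

wrapped_set_title = add_unsafe_warning(_set_title, _FigStub())
wrapped_set_title("demo")  # emits a Warning, then returns "demo"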
from typing import List
from typing import Optional
def update_dask_partitions_shuffle(
ddf: dd.DataFrame,
table: str,
secondary_indices: List[str],
metadata_version: int,
partition_on: List[str],
store_factory: StoreFactoryType,
df_serializer: DataFrameSerializer,
dataset_uuid: str,
num_buckets: int,
sort_partitions_by: Optional[str],
bucket_by: List[str],
) -> da.Array:
"""
Perform a dataset update with dask reshuffling to control partitioning.
The shuffle operation will perform the following steps
1. Pack payload data
Payload data is serialized and compressed into a single byte value using
``distributed.protocol.serialize_bytes``, see also ``pack_payload``.
2. Apply bucketing
Hash the column subset ``bucket_by`` and distribute the hashes in
``num_buckets`` bins/buckets. Internally every bucket is identified by an
integer and we will create one physical file for every bucket ID. The
bucket ID is not exposed to the user and is dropped after the shuffle,
       before the store. This is done since we do not, at the moment, want to
       guarantee that the hash function remains stable.
3. Perform shuffle (dask.DataFrame.groupby.apply)
The groupby key will be the combination of ``partition_on`` fields and the
hash bucket ID. This will create a physical file for every unique tuple
in ``partition_on + bucket_ID``. The function which is applied to the
dataframe will perform all necessary subtask for storage of the dataset
(partition_on, index calc, etc.).
4. Unpack data (within the apply-function)
After the shuffle, the first step is to unpack the payload data since
the follow up tasks will require the full dataframe.
5. Pre storage processing and parquet serialization
We apply important pre storage processing like sorting data, applying
final partitioning (at this time there should be only one group in the
       payload data, but using ``MetaPartition.partition_on`` guarantees that the
       appropriate data structures kartothek expects are created).
After the preprocessing is done, the data is serialized and stored as
parquet. The applied function will return an (empty) MetaPartition with
indices and metadata which will then be used to commit the dataset.
Returns
-------
A dask.Array holding relevant MetaPartition objects as values
"""
if ddf.npartitions == 0:
return ddf
group_cols = partition_on.copy()
if num_buckets is None:
raise ValueError("``num_buckets`` must not be None when shuffling data.")
meta = ddf._meta
meta[_KTK_HASH_BUCKET] = np.uint64(0)
ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
group_cols.append(_KTK_HASH_BUCKET)
packed_meta = ddf._meta[group_cols]
packed_meta[_PAYLOAD_COL] = b""
unpacked_meta = ddf._meta
ddf = pack_payload(ddf, group_key=group_cols)
ddf = ddf.groupby(by=group_cols)
ddf = ddf.apply(
partial(
_store_partition,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
table=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
unpacked_meta=unpacked_meta,
),
meta=("MetaPartition", "object"),
)
return ddf | d7f050247d89997ec76d00cb6e2e7ed25a7b24fb | 1,138 |
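# Illustrative sketch (not kartothek's internal _hash_bucket) of the bucketing in
# step 2 above: hash the ``bucket_by`` column subset and fold the hashes into
# ``num_buckets`` bins, so that equal keys always land in the same physical file.
import numpy as np
import pandas as pd

def _hash_bucket_sketch(df: pd.DataFrame, bucket_by, num_buckets: int) -> pd.DataFrame:
    hashes = pd.util.hash_pandas_object(df[bucket_by], index=False)
    df = df.copy()
    df["_KTK_HASH_BUCKET"] = (hashes % num_buckets).astype("uint64")
    return df

frame = pd.DataFrame({"key": ["a", "b", "a", "c"], "payload": [1, 2, 3, 4]})
print(_hash_bucket_sketch(frame, ["key"], num_buckets=2))  # rows with key "a" share a bucket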
def edit_paycheck(paycheck_id):
""" Edit a paycheck """
paycheck = Paycheck.query.get(paycheck_id)
form = PaycheckForm(obj=paycheck)
return render_template('pay/edit_paycheck.jinja', form=form, paycheck_id=paycheck_id) | 9e8a22af102bc818c35c32d49b5a26c348b0f221 | 1,139 |
def is_meeting_approved(meeting):
"""Returns True if the meeting is approved"""
if meeting.session_set.first().status.slug == 'apprw':
return False
else:
return True | 0dca106890d195f613477334d2bb6187c1587e15 | 1,140 |
import requests
def check_radarr():
"""
Connects to an instance of Radarr and returns a tuple containing the instances status.
Returns:
(str) an instance of the Status enum value representing the status of the service
(str) a short descriptive string representing the status of the service
"""
try:
req = requests.get('{}/api/system/status?apikey={}'.format(paths['Radarr'], keys['Radarr']), timeout=0.2)
req.raise_for_status()
except (requests.ConnectionError, requests.HTTPError, requests.Timeout):
return Status.ERROR.value, "NoAPI"
try:
data = req.json()
except ValueError:
return Status.ERROR.value, "BadJSON"
if data['version']:
return Status.ACTIVE.value, "Online"
else:
return Status.ERROR.value, "BadAPI" | f078ba526e0fb23dad323db92e9f6ac861da4bf0 | 1,142 |