content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import re
from w3lib.util import str_to_unicode
def remove_tags(text, which_ones=(), keep=(), encoding=None):
""" Remove HTML Tags only.
`which_ones` and `keep` are both tuples, there are four cases:
============== ============= ==========================================
``which_ones`` ``keep``      what it does
============== ============= ==========================================
**not empty**  empty         remove all tags in ``which_ones``
empty          **not empty** remove all tags except the ones in ``keep``
empty          empty         remove all tags
**not empty**  **not empty** not allowed
============== ============= ==========================================
Remove all tags:
>>> import w3lib.html
>>> doc = '<div><p><b>This is a link:</b> <a href="http://www.example.com">example</a></p></div>'
>>> w3lib.html.remove_tags(doc)
u'This is a link: example'
>>>
Keep only some tags:
>>> w3lib.html.remove_tags(doc, keep=('div',))
u'<div>This is a link: example</div>'
>>>
Remove only specific tags:
>>> w3lib.html.remove_tags(doc, which_ones=('a','b'))
u'<div><p>This is a link: example</p></div>'
>>>
You can't remove some and keep some:
>>> w3lib.html.remove_tags(doc, which_ones=('a',), keep=('p',))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/w3lib/html.py", line 101, in remove_tags
assert not (which_ones and keep), 'which_ones and keep can not be given at the same time'
AssertionError: which_ones and keep can not be given at the same time
>>>
"""
assert not (which_ones and keep), 'which_ones and keep can not be given at the same time'
def will_remove(tag):
if which_ones:
return tag in which_ones
else:
return tag not in keep
def remove_tag(m):
tag = m.group(1)
return u'' if will_remove(tag) else m.group(0)
regex = '</?([^ >/]+).*?>'
retags = re.compile(regex, re.DOTALL | re.IGNORECASE)
return retags.sub(remove_tag, str_to_unicode(text, encoding)) | 3aa9bcab068c35245df022c7b09652a016d5070f | 0 |
def count_char(char, word):
"""Counts the characters in word"""
return word.count(char)
# If you want to do it manually try a for loop | 363222f4876c5a574a84fe14214760c505e920b0 | 1 |
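A quick check of the helper above, together with the manual for-loop variant the trailing comment alludes to (the loop version is an illustrative sketch, not part of the original snippet):
def count_char_manual(char, word):
    """Manual for-loop equivalent of count_char (illustrative only)."""
    total = 0
    for c in word:
        if c == char:
            total += 1
    return total

# Both versions agree: "banana" contains three occurrences of "a".
assert count_char("a", "banana") == count_char_manual("a", "banana") == 3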
def run_on_folder_evaluate_model(folder_path, n_imgs=-1, n_annotations=10):
"""
Runs the object detector on folder_path, classifying at most n_imgs images, and asks the user to confirm whether n_annotations crops were classified correctly.
This is then used to compute the accuracy of the model.
To use all images, set n_imgs to a value <= 0.
"""
return runOnAllFramesInFolder(folder_path, "", False, True, n_imgs, n_annotations) | 0c546194a76598d645cfc3cb7dc5fa1fc854aeca | 2 |
def get_sos_model(sample_narratives):
"""Return sample sos_model
"""
return {
'name': 'energy',
'description': "A system of systems model which encapsulates "
"the future supply and demand of energy for the UK",
'scenarios': [
'population'
],
'narratives': sample_narratives,
'sector_models': [
'energy_demand',
'energy_supply'
],
'scenario_dependencies': [
{
'source': 'population',
'source_output': 'population_count',
'sink': 'energy_demand',
'sink_input': 'population'
}
],
'model_dependencies': [
{
'source': 'energy_demand',
'source_output': 'gas_demand',
'sink': 'energy_supply',
'sink_input': 'natural_gas_demand'
}
]
} | 885c251b8bbda2ebc5a950b083faed35c58f41cc | 3 |
from typing import Dict
from pathlib import Path
from typing import Tuple
import codecs
def generate_gallery_md(gallery_conf, mkdocs_conf) -> Dict[Path, Tuple[str, Dict[str, str]]]:
"""Generate the Main examples gallery reStructuredText
Start the mkdocs-gallery configuration and recursively scan the examples
directories in order to populate the examples gallery
Returns
-------
md_files_toc : Dict[str, Tuple[str, Dict[str, str]]]
A map of galleries src folders to title and galleries toc (map of title to path)
md_to_src_file : Dict[str, Path]
A map of posix absolute file path to generated markdown example -> Path of the src file relative to project root
"""
logger.info('generating gallery...') # , color='white')
# gallery_conf = parse_config(app) already done
seen_backrefs = set()
md_files_toc = dict()
md_to_src_file = dict()
# a list of pairs "gallery source" > "gallery dest" dirs
all_info = AllInformation.from_cfg(gallery_conf, mkdocs_conf)
# Gather all files except ignored ones, and sort them according to the configuration.
all_info.collect_script_files()
# Check for duplicate filenames to make sure linking works as expected
files = all_info.get_all_script_files()
check_duplicate_filenames(files)
check_spaces_in_filenames(files)
# For each gallery,
all_results = []
for gallery in all_info.galleries:
# Process the root level
title, root_nested_title, index_md, results = generate(gallery=gallery, seen_backrefs=seen_backrefs)
write_computation_times(gallery, results)
# Remember the results so that we can write the final summary
all_results.extend(results)
# Fill the md-to-srcfile dict
md_to_src_file[gallery.index_md_rel_site_root.as_posix()] = gallery.readme_file_rel_project
for res in results:
md_to_src_file[res.script.md_file_rel_site_root.as_posix()] = res.script.src_py_file_rel_project
# Create the toc entries
root_md_files = {res.script.title: res.script.md_file_rel_site_root.as_posix() for res in results}
root_md_files = dict_to_list_of_dicts(root_md_files)
if len(gallery.subsections) == 0:
# No subsections: do not nest the gallery examples further
md_files_toc[gallery.generated_dir] = (title, root_md_files)
else:
# There are subsections. Find the root gallery title if possible and nest the root contents
subsection_tocs = [{(root_nested_title or title): root_md_files}]
md_files_toc[gallery.generated_dir] = (title, subsection_tocs)
# Create an index.md with all examples
index_md_new = _new_file(gallery.index_md)
with codecs.open(str(index_md_new), 'w', encoding='utf-8') as fhindex:
# Write the README and thumbnails for the root-level examples
fhindex.write(index_md)
# If there are any subsections, handle them
for subg in gallery.subsections:
# Process the root level
sub_title, _, sub_index_md, sub_results = generate(gallery=subg, seen_backrefs=seen_backrefs)
write_computation_times(subg, sub_results)
# Remember the results so that we can write the final summary
all_results.extend(sub_results)
# Fill the md-to-srcfile dict
for res in sub_results:
md_to_src_file[res.script.md_file_rel_site_root.as_posix()] = res.script.src_py_file_rel_project
# Create the toc entries
sub_md_files = {res.script.title: res.script.md_file_rel_site_root.as_posix() for res in sub_results}
sub_md_files = dict_to_list_of_dicts(sub_md_files)
# Both append the subsection contents to the parent gallery toc
subsection_tocs.append({sub_title: sub_md_files})
# ... and also have an independent reference in case the subsection is directly referenced in the nav.
md_files_toc[subg.generated_dir] = (sub_title, sub_md_files)
# Write the README and thumbnails for the subgallery examples
fhindex.write(sub_index_md)
# Finally generate the download buttons
if gallery_conf['download_all_examples']:
download_fhindex = generate_zipfiles(gallery)
fhindex.write(download_fhindex)
# And the "generated by..." signature
if gallery_conf['show_signature']:
fhindex.write(MKD_GLR_SIG)
# Remove the .new suffix and update the md5
index_md = _replace_by_new_if_needed(index_md_new, md5_mode='t')
_finalize_backreferences(seen_backrefs, all_info)
if gallery_conf['plot_gallery']:
logger.info("computation time summary:") # , color='white')
lines, lens = _format_for_writing(all_results, kind='console')
for name, t, m in lines:
text = (' - %s: ' % (name,)).ljust(lens[0] + 10)
if t is None:
text += '(not run)'
logger.info(text)
else:
t_float = float(t.split()[0])
if t_float >= gallery_conf['min_reported_time']:
text += t.rjust(lens[1]) + ' ' + m.rjust(lens[2])
logger.info(text)
# Also create a junit.xml file if needed for rep
if gallery_conf['junit'] and gallery_conf['plot_gallery']:
write_junit_xml(all_info, all_results)
return md_files_toc, md_to_src_file | 766d6b371b6a2930c546ccc191a00c1eb3009dc1 | 4 |
from typing import Union
from typing import TextIO
from typing import List
import yaml
def load_all_yaml(stream: Union[str, TextIO], context: dict = None, template_env = None) -> List[AnyResource]:
"""Load kubernetes resource objects defined as YAML. See `from_dict` regarding how resource types are detected.
Returns a list of resource objects or raise a `LoadResourceError`.
**parameters**
* **stream** - A file-like object or a string representing a yaml file or a template resulting in
a yaml file.
* **context** - When not `None`, the stream is considered a `jinja2` template and the `context`
will be used during templating.
* **template_env** - `jinja2` template environment to be used for templating. When absent, a standard
environment is used.
**NOTE**: When using the template functionality (setting the context parameter), the dependency
module `jinja2` needs to be installed.
"""
if context is not None:
stream = _template(stream, context=context, template_env=template_env)
res = []
for obj in yaml.safe_load_all(stream):
res.append(from_dict(obj))
return res | b3be3b7eb82987849657165e67603b3c701e69cc | 5 |
from typing import Optional
from typing import Dict
def parse_gridspec(s: str, grids: Optional[Dict[str, GridSpec]] = None) -> GridSpec:
"""
"africa_10"
"epsg:6936;10;9600"
"epsg:6936;-10x10;9600x9600"
"""
if grids is None:
grids = GRIDS
named_gs = grids.get(_norm_gridspec_name(s))
if named_gs is not None:
return named_gs
return _parse_gridspec_string(s) | 4c0cc7dc8237a8232a8fb8d86109172d92678535 | 6 |
def make_quantile_normalizer(dist):
"""Returns f(a) that converts to the quantile value in each col.
dist should be an array with bins equally spaced from 0 to 1, giving
the value in each bin (i.e. cumulative prob of f(x) at f(i/len(dist))
should be stored in dist[i]) -- can generate from distribution or generate
empirically.
"""
def qn(a):
result = (quantiles(a)*len(dist)).astype('i')
return take(dist, result)
return qn | 395314821be4349d0c5a3b13058db0d498b03ab5 | 7 |
def text():
"""
Route that allows user to send json with raw text of title and body. This
route expects a payload to be sent that contains:
{'title': "some text ...",
'body': "some text ..."}
"""
# authenticate the request to make sure it is from a trusted party
verify_token(request)
# pre-process data
title = request.json['title']
body = request.json['body']
data = app.inference_wrapper.process_dict({'title':title, 'body':body})
LOG.warning(f'prediction requested for {str(data)}')
# make prediction: you can only return strings with api
# decode with np.frombuffer(request.content, dtype='<f4')
return app.inference_wrapper.get_pooled_features(data['text']).detach().numpy().tostring() | 7bf4a602d603508894c8f86b2febc6d2a6e8e3c3 | 8 |
import yaml
def rbac_show_users(tenant=None):
"""show rbac"""
tstr = " -tenant=%s " % (tenant) if tenant else ""
rc = run_command("%s user-role -op list-user-roles %s" % (
g_araalictl_path, tstr), result=True, strip=False,
debug=False)
assert rc[0] == 0, rc[1]
return yaml.load(rc[1], yaml.SafeLoader) | f52faf92498d267453ed46a8a67cf7ec73acad9f | 9 |
def RPL_ENDOFINFO(sender, recipient, message):
""" Reply Code 374 """
return "<" + sender + ">: " + message | 02fc0ef666caf7921e4f4a78a908686fd3dded17 | 10 |
import numpy as np
def combined_score(data, side_effect_weights=None):
"""
Calculate a top-level score for each episode.
This is totally ad hoc. There are infinite ways to measure the
performance / safety tradeoff; this is just one pretty simple one.
Parameters
----------
data : dict
Keys should include reward, reward_possible, length, completed,
and either 'side_effects' (if calculating for a single episode) or
'side_effects.<effect-type>' (if calculating from a log of many
episodes).
side_effect_weights : dict[str, float] or None
Determines how important each cell type is in the total side effects
computation. If None, uses 'side_effect.total' instead.
"""
reward = data['reward'] / np.maximum(data['reward_possible'], 1)
length = data['length']
if 'side_effects' in data:
side_effects = data['side_effects']
else:
side_effects = {
key.split('.')[1]: np.nan_to_num(val) for key, val in data.items()
if key.startswith('side_effects.')
}
if side_effect_weights:
total = sum([
weight * np.array(side_effects.get(key, 0))
for key, weight in side_effect_weights.items()
], np.zeros(2))
else:
total = np.array(side_effects.get('total', [0,0]))
agent_effects, inaction_effects = total.T
side_effects_frac = agent_effects / np.maximum(inaction_effects, 1)
if len(reward.shape) > len(side_effects_frac.shape): # multiagent
side_effects_frac = side_effects_frac[..., np.newaxis]
# Speed converts length ∈ [0, 1000] → [1, 0].
speed = 1 - length / 1000
# Note that the total score can easily be negative!
score = 75 * reward + 25 * speed - 200 * side_effects_frac
return side_effects_frac, score | 9d0161f67de99f10e9d4900114ecf12462fac542 | 11 |
def volatile(func):
"""Wrapper for functions that manipulate the active database."""
def inner(self, *args, **kwargs):
ret = func(self, *args, **kwargs)
self.refresh()
self.modified_db = True
return ret
return inner | bbd8107ecc6a2b36e3677254d2b26f4ef77c3eb3 | 12 |
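A minimal sketch of how the decorator above might be applied, assuming a hypothetical wrapper class that provides the refresh() method and modified_db flag the decorator relies on:
class EntryDB:
    """Hypothetical database wrapper used only to illustrate @volatile."""
    def __init__(self):
        self.entries = {}
        self.modified_db = False

    def refresh(self):
        # Re-read any cached state after a mutation; a no-op in this sketch.
        pass

    @volatile
    def add_entry(self, name, value):
        self.entries[name] = value
        return name

db = EntryDB()
db.add_entry("mail", "secret")
assert db.modified_db is True  # set by the decorator after the wrapped call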
def input_risk_tolerance():
"""
This allows the user to enter and edit their risk tolerance.
"""
if g.logged_in is True:
if g.inputs is True:
risk_tolerance_id = m_session.query(model.User).filter_by(
id=g.user.id).first().risk_profile_id
risk_tolerance = m_session.query(model.RiskProfile).filter_by(
id=risk_tolerance_id).first().name
else:
risk_tolerance = 0
return render_template(
"input_risk_tolerance.html", risk_tolerance=risk_tolerance)
else:
return redirect("/login") | 09f9ae246beb8e9a9e901e141e11c29e594cb9c7 | 13 |
from typing import Optional
import requests
async def get_stream_apps(
start: Optional[int] = 0, # pylint: disable=unsubscriptable-object
size: Optional[int] = 10, # pylint: disable=unsubscriptable-object
):
"""
Get all streaming applications.
start: Start index of the applications
size: Number of applications to fetch
"""
conf = get_config()
livyConf = conf["livy"]
url = livyConf["url"] + "/batches"
parameters = {
"from": start,
"size": size,
}
headers = createLivyCommonHeaders()
livyResp = requests.get(url,
params=parameters,
headers=headers,
timeout=get_global_request_timeout(),
)
livyRespJson = livyResp.json()
if livyResp.status_code != status.HTTP_200_OK:
raise HTTPException(
status_code=livyResp.status_code,
detail=livyRespJson
)
livyRespJson["start"] = livyRespJson["from"]
resp = LivyGetBatchResp.parse_obj(livyRespJson)
fastapi_logger.debug(resp)
return resp | 03dd724b346c9952a91763f79ceab21eab323557 | 14 |
def check_context(model, sentence, company_name):
"""
Check if the company name in the sentence is actually a company name.
:param model: the spacy model.
:param sentence: the sentence to be analysed.
:param company_name: the name of the company.
:return: True if the company name means a company/product.
"""
doc = model(sentence)
for t in doc.ents:
if t.lower_ == company_name:  # the company name appears as an entity
if t.label_ == "ORG" or t.label_ == "PRODUCT":  # check that it actually refers to the company/product
return True
return False | 993c27924844b7cd0c570a9ce5fa404ef6d29b97 | 15 |
def getItemSize(dataType):
"""
Gets the size of an object depending on its data type name
Args:
dataType (String): Data type of the object
Returns:
(Integer): Size of the object
"""
# If it's a vector 6, its size is 6
if dataType.startswith("VECTOR6"):
return 6
# If it's a vector 3, its size is 3
elif dataType.startswith("VECTOR3"):
return 3
# Else its size is only 1
return 1 | 2ab9c83bef56cd8dbe56c558d123e24c9da6eb0e | 16 |
def replace_symbol_to_no_symbol(pinyin):
"""把带声调字符替换为没有声调的字符"""
def _replace(match):
symbol = match.group(0) # 带声调的字符
# 去掉声调: a1 -> a
return RE_NUMBER.sub(r'', PHONETIC_SYMBOL_DICT[symbol])
# 替换拼音中的带声调字符
return RE_PHONETIC_SYMBOL.sub(_replace, pinyin) | a4c3d1a91fedf20016fb4c8b671326ad8cac008c | 17 |
from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.elbow import elbow
from pyclustering.cluster.kmeans import kmeans_visualizer
def elbow_kmeans_optimizer(X, k = None, kmin = 1, kmax = 5, visualize = True):
"""k-means clustering with or without automatically determined cluster numbers.
Reference: https://pyclustering.github.io/docs/0.8.2/html/d3/d70/classpyclustering_1_1cluster_1_1elbow_1_1elbow.html
# Arguments:
X (numpy array-like): Input data matrix.
kmin: Minimum number of clusters to consider. Defaults to 1.
kmax: Maximum number of clusters to consider. Defaults to 5.
visualize: Whether to perform k-means visualization or not.
# Returns:
numpy arraylike: Clusters.
numpy arraylike: Cluster centers.
"""
if k is not None:
amount_clusters = k
else:
elbow_instance = elbow(X, kmin, kmax)
elbow_instance.process()
amount_clusters = elbow_instance.get_amount()
wce = elbow_instance.get_wce()
centers = kmeans_plusplus_initializer(X, amount_clusters).initialize()
kmeans_instance = kmeans(X, centers)
kmeans_instance.process()
clusters = kmeans_instance.get_clusters()
centers = kmeans_instance.get_centers()
if visualize:
    kmeans_visualizer.show_clusters(X, clusters, centers)
return clusters, centers | 53fe501367e85c3d345d0bebdfdccff17e8b93db | 18 |
import time
def FloatDateTime():
"""Returns datetime stamp in Miro's REV_DATETIME format as a float,
e.g. 20110731.123456"""
return float(time.strftime('%Y%m%d.%H%M%S', time.localtime())) | 115aef9104124774692af1ba62a48a5423b9dc2a | 19 |
def xyz_to_rgb(xyz):
"""
Convert tuple from the CIE XYZ color space to the sRGB color space.
Conversion assumes that the XYZ input uses the D65 illuminant with a 2° observer angle.
https://en.wikipedia.org/wiki/Illuminant_D65
The inverse conversion matrix used was provided by Bruce Lindbloom:
http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
Formulas for conversion:
http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html
https://easyrgb.com/en/math.php
Information about respective color space:
sRGB (standard Red Green Blue): https://en.wikipedia.org/wiki/SRGB
CIE XYZ: https://en.wikipedia.org/wiki/CIE_1931_color_space
"""
x = xyz[0] / 100.0
y = xyz[1] / 100.0
z = xyz[2] / 100.0
r = x * 3.2404542 + y * -1.5371385 + z * -0.4985314
g = x * -0.9692660 + y * 1.8760108 + z * 0.0415560
b = x * 0.0556434 + y * -0.2040259 + z * 1.0572252
r = _pivot_xyz_to_rgb(r)
g = _pivot_xyz_to_rgb(g)
b = _pivot_xyz_to_rgb(b)
r = r * 255.0
g = g * 255.0
b = b * 255.0
return r, g, b | 0c227f7d0ead08afdd0a3dd7946d45ad0cae011b | 20 |
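The snippet assumes a _pivot_xyz_to_rgb helper that is not shown; a minimal sketch using the standard sRGB companding curve, plus a sanity check with the D65 white point, might look like this:
def _pivot_xyz_to_rgb(c):
    # Standard sRGB companding: linear channel value -> gamma-encoded value.
    if c > 0.0031308:
        return 1.055 * (c ** (1.0 / 2.4)) - 0.055
    return 12.92 * c

# The D65 white point (X=95.047, Y=100.0, Z=108.883) should map to roughly (255, 255, 255).
print(xyz_to_rgb((95.047, 100.0, 108.883)))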
import numbers
def _score(estimator, X_test, y_test, scorer, is_multimetric=False):
"""Compute the score(s) of an estimator on a given test set.
Will return a single float if is_multimetric is False and a dict of floats,
if is_multimetric is True
"""
if is_multimetric:
return _multimetric_score(estimator, X_test, y_test, scorer)
else:
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) "
"instead. (scorer=%r)"
% (str(score), type(score), scorer))
return score | 1b3c136098e625968664518940769678d978aca4 | 21 |
import functools
def asynchronous(datastore=False, obj_store=False, log_store=False):
"""Wrap request handler methods with this decorator if they will require asynchronous
access to DynamoDB datastore or S3 object store for photo storage.
If datastore=True, then a DynamoDB client is available to the handler as self._client. If
obj_store=True, then an S3 client for the photo storage bucket is available as self._obj_store.
If log_store is true, then an S3 client for the user log storage bucket is available as
self._log_store
Like tornado.web.asynchronous, this decorator disables the auto-finish functionality.
"""
def _asynchronous(method):
def _wrapper(self, *args, **kwargs):
"""Disables automatic HTTP response completion on exit."""
self._auto_finish = False
if datastore:
self._client = DBClient.Instance()
if obj_store:
self._obj_store = ObjectStore.GetInstance(ObjectStore.PHOTO)
if log_store:
self._log_store = ObjectStore.GetInstance(ObjectStore.USER_LOG)
with util.ExceptionBarrier(self._stack_context_handle_exception):
return method(self, *args, **kwargs)
return functools.wraps(method)(_wrapper)
return _asynchronous | 2bef0ba95993a4114ecb28b99a2952e2d269b54a | 22 |
def get_translatable_models():
"""
Get the translatable models according to django-modeltranslation
!! only use to migrate from django-modeltranslation !!
"""
_raise_if_not_django_modeltranslation()
return translator.get_registered_models() | b22ca513d3d29dfc7c2d3502cabdcf95e2e4bce9 | 23 |
def schedule_dense_arm_cpu(attrs, inputs, out_type, target):
"""dense arm cpu strategy"""
strategy = _op.OpStrategy()
isa = arm_isa.IsaAnalyzer(target)
if isa.has_dsp_support:
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.arm_cpu.schedule_dense_dsp),
name="dense_dsp",
)
else:
strategy.add_implementation(
wrap_compute_dense(
topi.nn.dense, need_auto_scheduler_layout=is_auto_scheduler_enabled()
),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy | 45b800ceecc14dd62734159d05baa8273cc4c3ff | 24 |
def default_select(identifier, all_entry_points): # pylint: disable=inconsistent-return-statements
"""
Raise an exception when we have ambiguous entry points.
"""
if len(all_entry_points) == 0:
raise PluginMissingError(identifier)
elif len(all_entry_points) == 1:
return all_entry_points[0]
elif len(all_entry_points) > 1:
raise AmbiguousPluginError(all_entry_points) | 331ca0108f05e97fcbec95e40111ca6eb5aa835b | 25 |
import json
def read_prediction_dependencies(pred_file):
"""
Reads in the predictions from the parser's output file.
Returns: two string lists with the predicted heads and dependency names, respectively.
"""
heads = []
deps = []
with open(pred_file, encoding="utf-8") as f:
for line in f:
j = json.loads(line)
heads.extend(j["predicted_heads"])
deps.extend(j["predicted_dependencies"])
heads = list(map(str, heads))
return heads, deps | c8280c861d998d0574fb831cd9738b733fd53388 | 26 |
def add_new_ingredient(w, ingredient_data):
"""Adds the ingredient into the database """
combobox_recipes = generate_CBR_names(w)
combobox_bottles = generate_CBB_names(w)
given_name_ingredient_data = DB_COMMANDER.get_ingredient_data(ingredient_data["ingredient_name"])
if given_name_ingredient_data:
DP_HANDLER.standard_box("Dieser Name existiert schon in der Datenbank!")
return ""
DB_COMMANDER.insert_new_ingredient(
ingredient_data["ingredient_name"],
ingredient_data["alcohollevel"],
ingredient_data["volume"],
ingredient_data["hand_add"]
)
if not ingredient_data["hand_add"]:
DP_HANDLER.fill_multiple_combobox(combobox_recipes, [ingredient_data["ingredient_name"]])
DP_HANDLER.fill_multiple_combobox(combobox_bottles, [ingredient_data["ingredient_name"]])
return f"Zutat mit dem Namen: <{ingredient_data['ingredient_name']}> eingetragen" | ea0cbc371502d84223aeec5c18e2f19a020e229a | 27 |
def detect_entities(_inputs, corpus, threshold=None):
"""
Detects the selected named entities in the corpus given as argument.
:param _inputs: training parameters for the model
:param corpus: corpus to annotate
:param threshold: manual detection thresholds. If the probability of a category exceeds this threshold, that
category is predicted even if it does not correspond to the maximum probability.
:return: corpus with predictions on the nature of the entities
"""
# Initialise the pseudonymisation class and train the model.
ner = Ner(_inputs)
corpus_with_labels = ner.predict_with_model(corpus, threshold)
return corpus_with_labels | 1d7dc2ef42a9961daee6260c9fb6b9b2f099e96f | 28 |
def request_video_count(blink):
"""Request total video count."""
url = "{}/api/v2/videos/count".format(blink.urls.base_url)
return http_get(blink, url) | d847d840892908a66f99fae95b91e78b8ddc7dcb | 29 |
def version():
"""Return a ST version. Return 0 if not running in ST."""
if not running_in_st():
return 0
return int(sublime.version()) | d4f51b0a91301a8cdadff126931e7f0e72b8c850 | 30 |
import os, urllib
def getFile(path):
"""
Obtain a PDB file. First check the path given on the command
line - if that file is not available, obtain the file from the
PDB webserver at http://www.rcsb.org/pdb/ .
Parameters
path: Name of PDB file to obtain (string)
Returns
file: File object containing PDB file (file object)
"""
file = None
if not os.path.isfile(path):
URLpath = "http://www.rcsb.org/pdb/cgi/export.cgi/" + path + \
".pdb?format=PDB&pdbId=" + path + "&compression=None"
file = urllib.urlopen(URLpath)
else:
file = open(path)
return file | aef83d09ead21719211e3ddbbfe3ce10eae48f15 | 31 |
def get_intervention(action, time):
"""Return the intervention in the simulator required to take action."""
action_to_intervention_map = {
0: Intervention(time=time, epsilon_1=0.0, epsilon_2=0.0),
1: Intervention(time=time, epsilon_1=0.0, epsilon_2=0.3),
2: Intervention(time=time, epsilon_1=0.7, epsilon_2=0.0),
3: Intervention(time=time, epsilon_1=0.7, epsilon_2=0.3),
}
return action_to_intervention_map[action] | 11c145efc3eb9e7bafc05943294232c161b59952 | 32 |
import numpy as np
import cv2
def draw_labeled_bboxes(img, labels):
"""
Draw the boxes around detected object.
"""
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
return img | 7bf9a3a5a54a41c49845408d5e04dc5de67eea6c | 33 |
import os
def image_create(request, **kwargs):
"""Create image.
:param kwargs:
* copy_from: URL from which Glance server should immediately copy
the data and store it in its configured image store.
* data: Form data posted from client.
* location: URL where the data for this image already resides.
In the case of 'copy_from' and 'location', the Glance server
will give us a immediate response from create and handle the data
asynchronously.
In the case of 'data' the process of uploading the data may take
some time and is handed off to a separate thread.
"""
data = kwargs.pop('data', None)
location = kwargs.pop('location', None)
image = glanceclient(request).images.create(**kwargs)
if location is not None:
glanceclient(request).images.add_location(image.id, location, {})
if data:
if isinstance(data, str):
# The image data is meant to be uploaded externally, return a
# special wrapper to bypass the web server in a subsequent upload
return ExternallyUploadedImage(image, request)
if isinstance(data, TemporaryUploadedFile):
# Hack to fool Django, so we can keep file open in the new thread.
data.file._closer.close_called = True
elif isinstance(data, InMemoryUploadedFile):
# Clone a new file for InMemoryUploadedFile.
# Because the old one will be closed by Django.
data = SimpleUploadedFile(data.name,
data.read(),
data.content_type)
def upload():
try:
return glanceclient(request).images.upload(image.id, data)
finally:
try:
filename = str(data.file.name)
except AttributeError:
pass
else:
try:
os.remove(filename)
except OSError as e:
LOG.warning('Failed to remove temporary image file '
'%(file)s (%(e)s)',
{'file': filename, 'e': e})
thread.start_new_thread(upload, ())
return Image(image) | c77ae63db7b23777e77d8d6aa410ec5769091ff5 | 34 |
def calc_diff(nh_cube, sh_cube, agg_method):
"""Calculate the difference metric"""
metric = nh_cube.copy()
metric.data = nh_cube.data - sh_cube.data
metric = rename_cube(metric, 'minus sh ' + agg_method)
return metric | 3eb62be75af265bd2fa7323c6c23e3735e1c87be | 35 |
import numpy as np
from scipy.signal import medfilt
def median_boxcar_filter(data, window_length=None, endpoints='reflect'):
"""
Creates median boxcar filter and deals with endpoints
Parameters
----------
data : numpy array
Data array
window_length: int
A scalar giving the size of the median filter window
endpoints : str
How to deal with endpoints.
Only option right now is 'reflect', which extends the data array
on both ends by reflecting the data
Returns
-------
filter : numpy array
The filter array
"""
filter_array = data
# Create filter array
if(endpoints == 'reflect'):
last_index = len(data) - 1
filter_array = np.concatenate((np.flip(data[0:window_length], 0),
data,
data[last_index - window_length:last_index]))
# Make filter
# Check that window_length is odd
if(window_length % 2 == 0):
window_length += 1
filt = medfilt(filter_array, window_length)
filt = filt[window_length:window_length + last_index + 1]
return filt | 1782998bad2ab02c628d8afbc456c1bea4c2533c | 36 |
import ray
import threading
import numpy as np
def test_threaded_actor_api_thread_safe(shutdown_only):
"""Test if Ray APIs are thread safe
when they are used within threaded actor.
"""
ray.init(
num_cpus=8,
# from 1024 bytes, the return obj will go to the plasma store.
_system_config={"max_direct_call_object_size": 1024},
)
@ray.remote
def in_memory_return(i):
return i
@ray.remote
def plasma_return(i):
arr = np.zeros(8 * 1024 * i, dtype=np.uint8) # 8 * i KB
return arr
@ray.remote(num_cpus=1)
class ThreadedActor:
def __init__(self):
self.received = []
self.lock = threading.Lock()
def in_memory_return_test(self, i):
self._add(i)
return ray.get(in_memory_return.remote(i))
def plasma_return_test(self, i):
self._add(i)
return ray.get(plasma_return.remote(i))
def _add(self, seqno):
with self.lock:
self.received.append(seqno)
def get_all(self):
with self.lock:
return self.received
a = ThreadedActor.options(max_concurrency=10).remote()
max_seq = 50
# Test in-memory return obj
seqnos = ray.get(
[a.in_memory_return_test.remote(seqno) for seqno in range(max_seq)]
)
assert sorted(seqnos) == list(range(max_seq))
# Test plasma return obj
real = ray.get([a.plasma_return_test.remote(seqno) for seqno in range(max_seq)])
expected = [np.zeros(8 * 1024 * i, dtype=np.uint8) for i in range(max_seq)]
for r, e in zip(real, expected):
assert np.array_equal(r, e)
ray.kill(a)
ensure_cpu_returned(8) | f3fd0d4c2621e69c348b734040c7bd49f1f1578b | 37 |
from typing import Optional
def build_template_context(
title: str, raw_head: Optional[str], raw_body: str
) -> Context:
"""Build the page context to insert into the outer template."""
head = _render_template(raw_head) if raw_head else None
body = _render_template(raw_body)
return {
'page_title': title,
'head': head,
'body': body,
} | 38bf538c0c979b6e0aaba1367458140028332385 | 38 |
def inf_set_stack_ldbl(*args):
"""
inf_set_stack_ldbl(_v=True) -> bool
"""
return _ida_ida.inf_set_stack_ldbl(*args) | 2b343bb66229f6ba5f834b3d543fcd75ad08875c | 39 |
def _get_self_compatibility_dict(package_name: str) -> dict:
"""Returns a dict containing self compatibility status and details.
Args:
package_name: the name of the package to check (e.g.
"google-cloud-storage").
Returns:
A dict containing the self compatibility status and details for any
self incompatibilities. The dict will be formatted like the following:
{
'py2': { 'status': BadgeStatus.SUCCESS, 'details': {} },
'py3': { 'status': BadgeStatus.SUCCESS, 'details': {} },
}
"""
pkg = package.Package(package_name)
compatibility_results = badge_utils.store.get_self_compatibility(pkg)
missing_details = _get_missing_details(
[package_name], compatibility_results)
if missing_details:
result_dict = badge_utils._build_default_result(
status=BadgeStatus.MISSING_DATA, details=missing_details)
return result_dict
result_dict = badge_utils._build_default_result(
status=BadgeStatus.SUCCESS,
details='The package does not support this version of python.')
for res in compatibility_results:
pyver = badge_utils.PY_VER_MAPPING[res.python_major_version]
badge_status = PACKAGE_STATUS_TO_BADGE_STATUS.get(
res.status) or BadgeStatus.SELF_INCOMPATIBLE
result_dict[pyver]['status'] = badge_status
result_dict[pyver]['details'] = res.details
if res.details is None:
result_dict[pyver]['details'] = badge_utils.EMPTY_DETAILS
return result_dict | ca29593d3d5941f576a2d033f5754902828a1138 | 40 |
from hashlib import md5
def checksum_md5(filename):
"""Calculates the MD5 checksum of a file."""
amd5 = md5()
with open(filename, mode='rb') as f:
for chunk in iter(lambda: f.read(128 * amd5.block_size), b''):
amd5.update(chunk)
return amd5.hexdigest() | 80cd2bf43274ea060a4d5001d6a319fae59b1e94 | 41 |
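A small usage sketch for the checksum helper; the temporary file is created only for illustration:
import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix=".bin") as tmp:
    tmp.write(b"hello world\n")
    tmp_path = tmp.name

print(checksum_md5(tmp_path))  # 32-character hex digest of the file contents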
def CleanGrant(grant):
"""Returns a "cleaned" grant by rounding properly the internal data.
This ensures that 2 grants coming from 2 different sources are actually
identical, irrespective of the logging/storage precision used.
"""
return grant._replace(latitude=round(grant.latitude, 6),
longitude=round(grant.longitude, 6),
height_agl=round(grant.height_agl, 2),
max_eirp=round(grant.max_eirp, 3)) | 648bb0a76f9a7cfe355ee8ffced324eb6ceb601e | 42 |
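The grant object is assumed to be a namedtuple-like record (hence _replace); a hypothetical record with the four rounded fields illustrates the behaviour:
from collections import namedtuple

# Hypothetical grant record -- only the fields touched by CleanGrant are included.
Grant = namedtuple("Grant", ["latitude", "longitude", "height_agl", "max_eirp"])

g = Grant(latitude=37.1234567, longitude=-122.9876543, height_agl=3.14159, max_eirp=29.99999)
print(CleanGrant(g))
# -> Grant(latitude=37.123457, longitude=-122.987654, height_agl=3.14, max_eirp=30.0)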
def OpenRegistryKey(hiveKey, key):
""" Opens a keyHandle for hiveKey and key, creating subkeys as necessary """
keyHandle = None
try:
curKey = ""
keyItems = key.split('\\')
for subKey in keyItems:
if curKey:
curKey = curKey + "\\" + subKey
else:
curKey = subKey
keyHandle = win32api.RegCreateKey(hiveKey, curKey)
except Exception, e:
keyHandle = None
print "OpenRegistryKey failed:", hiveKey, key, e
return keyHandle | d7555a752a08ed0e7bfedbb77583aed9e5b26fe1 | 43 |
import multiprocessing
def eval_py(input_text: str):
"""Runs eval() on the input text on a seperate process and returns output or error.
How to timout on a function call ? https://stackoverflow.com/a/14924210/13523305
Return a value from multiprocess ? https://stackoverflow.com/a/10415215/13523305
"""
def evaluate(input_text, return_val):
"""wrapper for eval"""
try:
return_val[input_text] = str(eval(input_text))
except Exception as error:
return_val[
input_text
] = f"""😔 /e feeds your expression to python's eval function.
The following error occured: \n\n{error}"""
if contains_restricted(input_text):
return restricted_message
# using multiprocessing and getting value returned by target function
manager = multiprocessing.Manager()
return_val = manager.dict()  # enable target function to return a value
process = multiprocessing.Process(target=evaluate, args=(input_text, return_val))
process.start()
process.join(6) # allow the process to run for 6 seconds
if process.is_alive():
# kill the process if it is still alive
process.kill()
return timeout_message
output = return_val[input_text]
return output | 1058f2877e00370fa4600cf2bcfb334149347cba | 44 |
def trim(str):
"""Remove multiple spaces"""
return ' '.join(str.strip().split()) | ed98f521c1cea24552959aa334ffb0c314b9f112 | 45 |
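For example, runs of spaces, tabs and surrounding whitespace all collapse to single spaces:
assert trim("  multiple   spaces \t and \t tabs  ") == "multiple spaces and tabs"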
def build_model_svr(model_keyvalue, inputs, encoder = None, context = None):
"""Builds model from, seal_functions, model params.
model_keyvalue: key identifying model
inputs: properly formatted encrypted inputs for model
encoder: SEAL encoder object
context: SEAL context object
"""
modeldict = MODELS[model_keyvalue]
params_path = MODELPARMS.joinpath(modeldict["path"])
alias = modeldict["seal_function"]
try:
func = alias(params_path, context=context, encoder=encoder)
except Exception as e:
raise ValueError(f"There was a problem with your inputs: {e}")
return func.eval(inputs) | 0e36d94c5305aa55523d76d2f8afac17b9c7d9b0 | 46 |
from collections import defaultdict
def find_similar(collection):
""" Searches the collection for (probably) similar artist and returns
lists containing the "candidates". """
spellings = defaultdict(list)
for artist in collection:
spellings[normalize_artist(artist)].append(artist)
return [spellings[artist] for artist in spellings
if len(spellings[artist]) > 1] | c11f93d0da7ff27f89c51d1d255d75e31c6c539f | 47 |
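find_similar relies on a normalize_artist helper that is not shown; a minimal sketch under the assumption that it simply case-folds and strips non-alphanumeric characters:
def normalize_artist(name):
    # Hypothetical normaliser: lower-case and keep only alphanumeric characters.
    return "".join(ch for ch in name.lower() if ch.isalnum())

collection = ["The Beatles", "the beatles", "Radiohead"]
print(find_similar(collection))  # -> [['The Beatles', 'the beatles']]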
import numpy as np
def vim_print(mse_ref, mse_values, x_name, ind_list=0, with_output=True,
single=True, partner_k=None):
"""Print Variable importance measure and create sorted output.
Parameters
----------
mse_ref : Numpy Float. Reference value of non-randomized x.
mse_values : Numpy array. MSE's for randomly permuted x.
x_name : List of strings. Variable names.
ind_list : List of INT, optional. Variable positions. Default is 0.
with_output : Boolean, optional. Default is True.
single : Boolean, optional. The default is True.
partner_k : List of None and Int or None. Index of variables that were
jointly randomized. Default is None.
Returns
-------
vim: Tuple of Numpy array and list of lists. MSE sorted and sort index.
"""
if partner_k is not None:
for idx, val in enumerate(partner_k):
if val is not None:
if (idx > (val-1)) and (idx > 0):
mse_values[idx-1] = mse_values[val-1]
mse = mse_values / np.array(mse_ref) * 100
var_indices = np.argsort(mse)
var_indices = np.flip(var_indices)
vim_sorted = mse[var_indices]
if single:
x_names_sorted = np.array(x_name, copy=True)
x_names_sorted = x_names_sorted[var_indices]
ind_sorted = list(var_indices)
else:
var_indices = list(var_indices)
ind_sorted = []
x_names_sorted = []
for i in var_indices:
ind_i = ind_list[i]
ind_sorted.append(ind_i)
x_name_i = []
for j in ind_i:
x_name_i.append(x_name[j])
x_names_sorted.append(x_name_i)
if with_output:
print('\n')
print('-' * 80)
print('Out of bag value of MSE: {:8.3f}'.format(mse_ref))
print('- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -')
print('Variable importance statistics in %-lost of base value')
for idx, vim in enumerate(vim_sorted):
if single:
print('{:<50}: {:>7.2f}'.format(x_names_sorted[idx],
vim-100), '%')
else:
print(x_names_sorted[idx])
print('{:<50}: {:>7.2f}'.format(' ', vim-100), '%')
print('-' * 80)
print('Computed as share of OOB MSE of estimated forest relative to',
'OOB MSE of variable (or group of variables) with randomized',
'covariate values in %.')
ind_sorted.reverse()
vim_sorted = np.flip(vim_sorted)
vim = (vim_sorted, ind_sorted)
first_time = True
if partner_k is not None:
for idx, val in enumerate(partner_k):
if val is not None:
if first_time:
print('The following variables are jointly analysed:',
end=' ')
first_time = False
if idx < val:
print(x_name[idx-1], x_name[val-1], ' / ', end='')
print()
print('-' * 80, '\n')
return vim | 4c7eef9dc15d50b904dfe3df51586d77af70d776 | 48 |
def from_column_list(
col_names, col_types=None,
col_blobs=None, col_metadata=None
):
"""
Given a list of names, types, and optionally values, construct a Schema.
"""
if col_types is None:
col_types = [None] * len(col_names)
if col_metadata is None:
col_metadata = [None] * len(col_names)
if col_blobs is None:
col_blobs = [None] * len(col_names)
assert len(col_names) == len(col_types), (
'col_names and col_types must have the same length.'
)
assert len(col_names) == len(col_metadata), (
'col_names and col_metadata must have the same length.'
)
assert len(col_names) == len(col_blobs), (
'col_names and col_blobs must have the same length.'
)
root = _SchemaNode('root', 'Struct')
for col_name, col_type, col_blob, col_metadata in zip(
col_names, col_types, col_blobs, col_metadata
):
columns = col_name.split(FIELD_SEPARATOR)
current = root
for i in range(len(columns)):
name = columns[i]
type_str = ''
field = None
if i == len(columns) - 1:
type_str = col_type
field = Scalar(
dtype=col_type,
blob=col_blob,
metadata=col_metadata
)
next = current.add_child(name, type_str)
if field is not None:
next.field = field
next.col_blob = col_blob
current = next
return root.get_field() | 22fc57657bc144304ef0afbdd74acf9ed63faba0 | 49 |
import torch
def get_optimizer(lr):
"""
Specify an optimizer and its parameters.
Returns
-------
tuple(torch.optim.Optimizer, dict)
The optimizer class and the dictionary of kwargs that should
be passed in to the optimizer constructor.
"""
return (torch.optim.SGD,
{"lr": lr, "weight_decay": 1e-6, "momentum": 0.9}) | 213090258414059f7a01bd40ecd7ef04158d60e5 | 50 |
from typing import Any
def _from_list(data: Any) -> dict:
"""Convert lists to indexed dictionaries.
:arg data: An ordered map.
:returns: An ordered map.
"""
if isinstance(data, list):
return dict([(str(i), _from_list(v)) for i, v in enumerate(data)])
if isinstance(data, dict):
return dict([(key, _from_list(data[key])) for key in data])
return data | 06c757276edbc013c4094872f4063077cec2c589 | 51 |
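For example, nested lists become string-indexed dictionaries while other values pass through unchanged:
nested = {"users": ["alice", "bob"], "count": 2}
print(_from_list(nested))
# -> {'users': {'0': 'alice', '1': 'bob'}, 'count': 2}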
def parse_date(ses_date):
"""This parses a date string of the form YYYY-MM-DD and returns
the string, year, month, day and day of year."""
[yr,mn,dy] = ses_date.split('-')
year = int(yr)
month = int(mn)
day = int(dy[:2])  # strip off any trailing a or b
DOY = day_of_year(year,month,day)
return ses_date,year,month,day,DOY | a8105f9f39869402863f14a1d68ff37a7f25ed74 | 52 |
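parse_date depends on a day_of_year helper that is not shown; a minimal sketch of it (assuming a 1-based ordinal day within the year) plus a usage example:
import datetime

def day_of_year(year, month, day):
    # Hypothetical helper assumed above: 1-based day number within the year.
    return datetime.date(year, month, day).timetuple().tm_yday

print(parse_date("2020-03-01"))  # -> ('2020-03-01', 2020, 3, 1, 61)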
import requests
import json
from requests.auth import HTTPBasicAuth
def get_access_token(consumer_key, consumer_secret):
"""
:return: auth token for mpesa api calls
"""
oauth_url = "https://api.safaricom.co.ke/oauth/v1/generate?grant_type=client_credentials"
response = requests.get(oauth_url, auth=HTTPBasicAuth(consumer_key, consumer_secret))
access_token = json.loads(response.text).get('access_token', None)
return access_token | 15d09439d0b6e135c4f87958fe116e686c38cca2 | 53 |
def create_feed_forward_dot_product_network(observation_spec, global_layers,
arm_layers):
"""Creates a dot product network with feedforward towers.
Args:
observation_spec: A nested tensor spec containing the specs for global as
well as per-arm observations.
global_layers: Iterable of ints. Specifies the layers of the global tower.
arm_layers: Iterable of ints. Specifies the layers of the arm tower. The
last element of arm_layers has to be equal to that of global_layers.
Returns:
A dot product network that takes observations adhering observation_spec and
outputs reward estimates for every action.
Raises:
ValueError: If the last arm layer does not match the last global layer.
"""
if arm_layers[-1] != global_layers[-1]:
raise ValueError('Last layer size of global and arm layers should match.')
global_network = encoding_network.EncodingNetwork(
input_tensor_spec=observation_spec[bandit_spec_utils.GLOBAL_FEATURE_KEY],
fc_layer_params=global_layers)
one_dim_per_arm_obs = tensor_spec.TensorSpec(
shape=observation_spec[bandit_spec_utils.PER_ARM_FEATURE_KEY].shape[1:],
dtype=tf.float32)
arm_network = encoding_network.EncodingNetwork(
input_tensor_spec=one_dim_per_arm_obs,
fc_layer_params=arm_layers)
return GlobalAndArmDotProductNetwork(observation_spec, global_network,
arm_network) | ed4e95ce10859e976800fd88b6caceffd6ca09a2 | 54 |
import logging
def check_collisions(citekeys_df):
"""
Check for short_citekey hash collisions
"""
collision_df = citekeys_df[['standard_citekey', 'short_citekey']].drop_duplicates()
collision_df = collision_df[collision_df.short_citekey.duplicated(keep=False)]
if not collision_df.empty:
logging.error(f'OMF! Hash collision. Congratulations.\n{collision_df}')
return collision_df | b01b53323f7885a7375ba78b50222bcbe9980498 | 55 |
def get_user(module, system):
"""Find a user by the user_name specified in the module"""
user = None
user_name = module.params['user_name']
try:
user = system.users.get(name=user_name)
except ObjectNotFound:
pass
return user | f674352998e444a184ab2a2a6a2caedc35611e49 | 56 |
def appointments(request):
"""Page for users to view upcoming appointments."""
appointments = Appointment.objects.filter(patient=request.user.patient)
context = {
'appointments': appointments
}
return render(request, 'patients/appointments.html', context) | ad7bab85db19f907631a8c9e25b65048abab7e6b | 57 |
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint | de88a082cc90f6370674723173f4c75ee7025f27 | 58 |
def is_valid_msg_type(x):
"""
@return: True if the name is a syntactically legal message type name
@rtype: bool
"""
if not x or len(x) != len(x.strip()):
return False
base = base_msg_type(x)
if not roslib.names.is_legal_resource_name(base):
return False
# parse array indices
x = x[len(base):]
state = 0
for c in x:
if state == 0:
if c != '[':
return False
state = 1 # open
elif state == 1:
if c == ']':
state = 0 # closed
else:
try:
int(c)
except Exception:
return False
return state == 0 | ca6b6b2e62ffa26a795cbbccab01667a8ce9470e | 59 |
def get_ascii_matrix(img):
"""(Image) -> list of list of str\n
Takes an image and converts it into a list of list containing a string which maps to brightness
of each pixel of each row
"""
ascii_map = "`^\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
brightness_matrix = get_brightness_matrix(img)
ascii_matrix = []
for rows in range(len(brightness_matrix)):
row = []
for column in brightness_matrix[rows]:
map_index = column//4
row.append(ascii_map[map_index])
ascii_matrix.append(row)
return ascii_matrix | e8b6a160fc082a868267971937e56e2f6a1eb9e4 | 60 |
from typing import List
from typing import Dict
from typing import Any
def to_scene_agent_prediction_from_boxes_separate_color(
tracked_objects: TrackedObjects, color_vehicles: List[int], color_pedestrians: List[int], color_bikes: List[int]
) -> List[Dict[str, Any]]:
"""
Convert predicted observations into prediction dictionary.
:param tracked_objects: List of tracked_objects in global coordinates.
:param color_vehicles: color [R, G, B, A] for vehicles predictions.
:param color_pedestrians: color [R, G, B, A] for pedestrians predictions.
:param color_bikes: color [R, G, B, A] for bikes predictions.
:return scene.
"""
predictions = []
for tracked_object in tracked_objects:
if tracked_object.predictions is None:
continue
if tracked_object.tracked_object_type == TrackedObjectType.VEHICLE:
color = color_vehicles
elif tracked_object.tracked_object_type == TrackedObjectType.PEDESTRIAN:
color = color_pedestrians
elif tracked_object.tracked_object_type == TrackedObjectType.BICYCLE:
color = color_bikes
else:
color = [0, 0, 0, 255]
predictions.append(_to_scene_agent_prediction(tracked_object, color))
return predictions | 728d471fbc15957c57f4b3a6da68bfffdbf875ac | 61 |
import numpy as np
def stretch(snd_array, factor, window_size, h):
""" Stretches/shortens a sound, by some factor. """
phase = np.zeros(window_size)
hanning_window = np.hanning(window_size)
result = np.zeros(int(len(snd_array) / factor + window_size))
for i in np.arange(0, len(snd_array)-(window_size+h), h*factor):
# two potentially overlapping subarrays
a1 = snd_array[i: i + window_size]
a2 = snd_array[i + h: i + window_size + h]
# the spectra of these arrays
s1 = np.fft.fft(hanning_window * a1)
s2 = np.fft.fft(hanning_window * a2)
# rephase all frequencies
phase = (phase + np.angle(s2 / s1)) % (2 * np.pi)  # wrap phase to [0, 2*pi)
a2_rephased = np.fft.ifft(np.abs(s2)*np.exp(1j*phase))
i2 = int(i/factor)
result[i2 : i2 + window_size] += hanning_window*a2_rephased
result = ((2**(16-4)) * result/result.max()) # normalize (16bit)
return result.astype('int16') | aeb12b6da26de9630eec9ad84caf9f30bd6f1f71 | 62 |
def guess_encoding(text):
""" Given bytes, determine the character set encoding
@return: dict with encoding and confidence
"""
if not text: return {'confidence': 0, 'encoding': None}
enc = detect_charset(text)
cset = enc['encoding']
if cset.lower() == 'iso-8859-2':
# Anomaly -- chardet thinks Hungarian (iso-8859-2) is
# a close match for a latin-1 document. At least the quotes match
# Other Latin-xxx variants will likely match, but actually be Latin1
# or win-1252. see Chardet explanation for poor reliability of Latin-1 detection
#
enc['encoding'] = CHARDET_LATIN2_ENCODING
return enc | f58d652b7a77652ace1c27b315fc81ac82726a03 | 63 |
def is_edit_end_without_next(line, configs):
"""
Is the line indicates that 'edit' section ends without 'next' end marker
(special case)?
- config vdom
edit <name>
...
end
:param line: A str represents a line in configurations output
:param configs: A stack (list) holding config node objects
"""
if len(configs) > 1:
(parent, child) = (configs[-2], configs[-1]) # (config, edit)
if parent.end_re.match(line) and parent.name == "vdom" and \
parent.type == NT_CONFIG and child.type == NT_EDIT:
return True
return False | 0398fc86ee7911686bfdaa0cdf6431f53db1ccba | 64 |
def get_live_args(request, script=False, typed=False):
""" Get live args input by user | request --> [[str], [str]]"""
arg_string = list(request.form.values())[0]
if script:
return parse_command_line_args(arg_string)
if typed:
try:
all_args = parse_type_args(arg_string)
except Exception as e: #Doesn't matter what the exception is.
#raise e #Uncomment for testing
return ('Parsing Error', e)
else:
all_args = parse_args(arg_string)
args = all_args[0]
kwargs = all_args[1]
all_args = [args, kwargs]
print(all_args)
return all_args | 9e56043760e9ac263a737166c796640170a6174c | 65 |
import codecs
import csv
def open_csv(path):
"""open_csv."""
_lines = []
with codecs.open(path, encoding='utf8') as fs:
for line in csv.reader(fs):
if len(line) == 3:
_lines.append(line)
return _lines | 501ff4a2a1a242439c21d3131cecd407dcfa36af | 66 |
from typing import Union
from pathlib import Path
from typing import Dict
def parse_metadata(metadata_filepath: Union[str, Path]) -> Dict:
"""Parse the metadata file retreived from the BEACO2N site
Args:
metadata_filepath: Path of raw CSV metadata file
pipeline: Are we running as part of the pipeline? If True
return the parsed site information dictionary.
Returns:
dict: Dictionary of site metadata
"""
metadata_filepath = Path(metadata_filepath).resolve()
raw_metadata = pd.read_csv(metadata_filepath)
site_metadata = aDict()
try:
for index, row in raw_metadata.iterrows():
site_name = row["node_name_long"].lower().replace(" ", "")
site_data = site_metadata[site_name]
site_data["long_name"] = row["node_name_long"]
site_data["id"] = row["id"]
site_data["latitude"] = round(row["lat"], 5)
site_data["longitude"] = round(row["lng"], 5)
site_data["magl"] = check_nan(row["height_above_ground"])
site_data["masl"] = check_nan(row["height_above_sea"])
site_data["deployed"] = check_date(row["deployed"])
site_data["node_folder_id"] = row["node_folder_id"]
except Exception as e:
raise ValueError(f"Can't read metadata file, please ensure it has expected columns. Error: {e}")
# Convert to a normal dict
metadata: Dict = site_metadata.to_dict()
return metadata | 321e9abb82172ee7d06423d2703ec2499aefbee9 | 67 |
def unit_norm(model,axis=0):
"""
Constrains the weights incident to each hidden unit to have unit norm.
Args:
axis (int):axis along which to calculate weight norms.
model : the model containing the weights that the constraint is applied to.
"""
def apply_constraint(t: Tensor):
w_data = None
if isinstance(t, tf.Variable):
w_data = t.value().detach()
else:
w_data = t.copy().detach()
param_applied = w_data/ (epsilon() +sqrt(reduce_sum(square(w_data),axis=axis,keepdims=True)))
param_applied = param_applied.detach()
return param_applied
if is_tensor(model):
model = apply_constraint(model)
elif isinstance(model, Layer):
for name, param in model.named_parameters():
if 'bias' not in name and param is not None and param.trainable == True:
param.assign(apply_constraint(param)) | ffe517f2f883541d5d3736a2bb3263e6349ffe18 | 68 |
import numpy as np
from scipy.stats import wilcoxon
from statsmodels.stats.multitest import multipletests
def responsive_units(spike_times, spike_clusters, event_times,
pre_time=[0.5, 0], post_time=[0, 0.5], alpha=0.05):
"""
Determine responsive neurons by doing a Wilcoxon Signed-Rank test between a baseline period
before a certain task event (e.g. stimulus onset) and a period after the task event.
Parameters
----------
spike_times : 1D array
spike times (in seconds)
spike_clusters : 1D array
cluster ids corresponding to each event in `spikes`
event_times : 1D array
times (in seconds) of the events from the two groups
pre_time : two-element array
time (in seconds) preceding the event to get the baseline (e.g. [0.5, 0.2] would be a
window starting 0.5 seconds before the event and ending at 0.2 seconds before the event)
post_time : two-element array
time (in seconds) to follow the event times
alpha : float
alpha to use for statistical significance
Returns
-------
significant_units : ndarray
an array with the indices of clusters that are significantly modulated
stats : 1D array
the statistic of the test that was performed
p_values : ndarray
the p-values of all the clusters
cluster_ids : ndarray
cluster ids of the p-values
"""
# Get spike counts for baseline and event timewindow
baseline_times = np.column_stack(((event_times - pre_time[0]), (event_times - pre_time[1])))
baseline_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters,
baseline_times)
times = np.column_stack(((event_times + post_time[0]), (event_times + post_time[1])))
spike_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times)
# Do statistics
p_values = np.empty(spike_counts.shape[0])
stats = np.empty(spike_counts.shape[0])
for i in range(spike_counts.shape[0]):
if np.sum(baseline_counts[i, :] - spike_counts[i, :]) == 0:
p_values[i] = 1
stats[i] = 0
else:
stats[i], p_values[i] = wilcoxon(baseline_counts[i, :], spike_counts[i, :])
# Perform FDR correction for multiple testing
sig_units, p_values, _, _ = multipletests(p_values, alpha, method='fdr_bh')
significant_units = cluster_ids[sig_units]
return significant_units, stats, p_values, cluster_ids | 10493948d8fc710a95e1267b4543bc63cdebc661 | 69 |
def create_link(seconds, image_name, size):
"""
Function returns temporary link to the image
"""
token = signing.dumps([str(timezone.now() + timedelta(seconds=int(seconds))), image_name, size])
return settings.SERVER_PATH + reverse("image:dynamic-image", kwargs={"token": token}) | e0ede3b6a28e1bfa3a69d8019b0141cd85b77cce | 70 |
import numpy as np
import pandas as pd
def read_one_hot_labels(filename):
"""Read topic labels from file in one-hot form
:param filename: name of input file
:return: topic labels (one-hot DataFrame, M x N)
"""
return pd.read_csv(filename, dtype=np.bool) | df6f0be241c8f5016ff66d02a54c899298055bd7 | 71 |
from random import randint
def make_randint_list(start, stop, length=10):
""" Makes a list of randomly generated integers
Args:
start: lowest integer to be generated randomly.
stop: highest integer to be generated randomly.
length: length of generated list.
Returns:
list of random numbers between start and stop of length length
"""
return [randint(start, stop) for i in range(length)] | 6ac9a20b9c5c87d9eff2b13a461cf266381961e2 | 72 |
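A quick usage sketch; the exact values depend on the random seed:
from random import seed

seed(0)  # make the run repeatable
values = make_randint_list(1, 6, length=5)
print(values)  # five values, each between 1 and 6 inclusive
assert len(values) == 5 and all(1 <= v <= 6 for v in values)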
def merge(intervals: list[list[int]]) -> list[list[int]]:
"""Generate a new schedule with non-overlapping intervals by merging intervals which overlap
Complexity:
n = len(intervals)
Time: O(nlogn) for the initial sort
Space: O(n) for the worst case of no overlapping intervals
Examples:
>>> merge(intervals=[[1,3],[2,6],[8,10],[15,18]])
[[1, 6], [8, 10], [15, 18]]
>>> merge(intervals=[[1,4],[4,5]])
[[1, 5]]
>>> merge(intervals=[[1,4]])
[[1, 4]]
"""
## EDGE CASES ##
if len(intervals) <= 1:
return intervals
"""ALGORITHM"""
## INITIALIZE VARS ##
intervals.sort(key=lambda k: k[0]) # sort on start times
# DS's/res
merged_intervals = []
# MERGE INTERVALS
prev_interval, remaining_intervals = intervals[0], intervals[1:]
for curr_interval in remaining_intervals:
# if prev interval end >= curr interval start
if prev_interval[1] >= curr_interval[0]:
# adjust new prev interval
prev_interval[1] = max(prev_interval[1], curr_interval[1])
else:
merged_intervals.append(prev_interval)
prev_interval = curr_interval
merged_intervals.append(prev_interval)
return merged_intervals | 49a9d7d461ba67ec3b5f839331c2a13d9fc068d0 | 73 |
def df_drop_duplicates(df, ignore_key_pattern="time"):
"""
Drop duplicates from the dataframe, ignoring columns whose keys contain the given pattern.
:param df:
:param ignore_key_pattern: substring of column keys to ignore when checking for duplicates
:return:
"""
ks = df_drop_keys_contains(df, ignore_key_pattern)
df = df.drop_duplicates(ks)
return df | babd7be3ef66cef81a5a2192cf781afd2f96aca9 | 74 |
def get_mediawiki_flow_graph(limit, period):
"""
:type limit int
:type period int
:rtype: list[dict]
"""
# https://kibana5.wikia-inc.com/goto/e6ab16f694b625d5b87833ae794f5989
# goreplay is running in RES (check SJC logs only)
rows = ElasticsearchQuery(
es_host=ELASTICSEARCH_HOST,
period=period,
index_prefix='logstash-mediawiki'
).query_by_string(
query='"Wikia internal request" AND @fields.environment: "prod" '
'AND @fields.datacenter: "sjc" '
'AND @fields.http_url_path: *',
fields=[
'@context.source',
'@fields.http_url_path',
],
limit=limit
)
# extract required fields only
# (u'user-permissions', 'api:query::users')
# (u'1', 'nirvana:EmailControllerDiscussionReply::handle')
rows = [
(
row.get('@context', {})['source'],
normalize_mediawiki_url(row.get('@fields', {})['http_url_path'])
)
for row in rows
if row.get('@context', {}).get('source') is not None
]
# process the logs
def _map(item):
return '{}-{}'.format(item[0], item[1])
def _reduce(items):
first = items[0]
source = first[0]
target = first[1]
return {
'source': source if source != '1' else 'internal',
'edge': 'http',
'target': target,
# the following is optional
'metadata': '{:.3f} reqs per sec'.format(1. * len(items) / period)
}
return logs_map_and_reduce(rows, _map, _reduce) | c82976c24d80f7784f32e36666f791fed4ada769 | 75 |
def bsplslib_Unperiodize(*args):
"""
:param UDirection:
:type UDirection: bool
:param Degree:
:type Degree: int
:param Mults:
:type Mults: TColStd_Array1OfInteger &
:param Knots:
:type Knots: TColStd_Array1OfReal &
:param Poles:
:type Poles: TColgp_Array2OfPnt
:param Weights:
:type Weights: TColStd_Array2OfReal &
:param NewMults:
:type NewMults: TColStd_Array1OfInteger &
:param NewKnots:
:type NewKnots: TColStd_Array1OfReal &
:param NewPoles:
:type NewPoles: TColgp_Array2OfPnt
:param NewWeights:
:type NewWeights: TColStd_Array2OfReal &
:rtype: void
"""
return _BSplSLib.bsplslib_Unperiodize(*args) | 0fad8703881304c9d169feb4ce58f31b29d1703b | 76 |
def genomic_del3_abs_37(genomic_del3_37_loc):
"""Create test fixture absolute copy number variation"""
return {
"type": "AbsoluteCopyNumber",
"_id": "ga4gh:VAC.Pv9I4Dqk69w-tX0axaikVqid-pozxU74",
"subject": genomic_del3_37_loc,
"copies": {"type": "Number", "value": 2}
} | ed417f1b0eba79a5db717bd16ca79dd85c55c381 | 77 |
import os
def write_data_to_file(training_data_files, out_file, image_shape, truth_dtype=np.uint8, subject_ids=None,
normalize=True, crop=True):
"""
Takes in a set of training images and writes those images to an hdf5 file.
:param training_data_files: List of tuples containing the training data files. The modalities should be listed in
the same order in each tuple. The last item in each tuple must be the labeled image.
Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'),
('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]
:param out_file: Where the hdf5 file will be written to.
:param image_shape: Shape of the images that will be saved to the hdf5 file.
:param truth_dtype: Default is 8-bit unsigned integer.
:return: Location of the hdf5 file with the image data written to it.
"""
n_samples = len(training_data_files)
n_channels = len(training_data_files[0]) - 1 # n_channels is actually the number of modalities we have
try:
if not normalize:
hdf5_file, data_storage, truth_storage, affine_storage, normalization_storage = \
create_data_file(out_file,
n_channels=n_channels,
n_samples=n_samples,
image_shape=image_shape,
normalize=normalize,
storage_names=('data', 'truth', 'index', 'normalization'))
else:
hdf5_file, data_storage, truth_storage, affine_storage = create_data_file(out_file,
n_channels=n_channels,
n_samples=n_samples,
image_shape=image_shape,
normalize=normalize)
normalization_storage = None
except Exception as e:
# If something goes wrong, delete the incomplete data file
os.remove(out_file)
raise e
write_image_data_to_file(training_data_files, data_storage, truth_storage, image_shape,
truth_dtype=truth_dtype, n_channels=n_channels, affine_storage=affine_storage, crop=crop,
normalization_storage=normalization_storage)
if subject_ids:
hdf5_file.create_array(hdf5_file.root, 'subject_ids', obj=subject_ids)
if normalize:
normalize_data_storage(data_storage)
hdf5_file.close()
return out_file | b90261a855926afc99ef0af95ba026daf34d8d43 | 78 |
def get_configinfo(env):
"""Returns a list of dictionaries containing the `name` and `options`
of each configuration section. The value of `options` is a list of
dictionaries containing the `name`, `value` and `modified` state of
each configuration option. The `modified` value is True if the value
differs from its default.
:since: version 1.1.2
"""
all_options = {}
for (section, name), option in \
Option.get_registry(env.compmgr).iteritems():
all_options.setdefault(section, {})[name] = option
sections = []
for section in env.config.sections(env.compmgr):
options = []
for name, value in env.config.options(section, env.compmgr):
registered = all_options.get(section, {}).get(name)
if registered:
default = registered.default
normalized = registered.normalize(value)
else:
default = u''
normalized = unicode(value)
options.append({'name': name, 'value': value,
'modified': normalized != default})
options.sort(key=lambda o: o['name'])
sections.append({'name': section, 'options': options})
sections.sort(key=lambda s: s['name'])
return sections | c96b784f389af7c043977fbc1707840ef56a6486 | 79 |
def given_energy(n, ef_energy):
"""
    Calculate and return the given (input) energy from the effective energy and the efficiency,
    i.e. given energy = effective energy / efficiency.
    How to Use:
    Pass values for the ef_energy and n parameters; keyword arguments are recommended for clarity.
    Parameters:
    ef_energy (float): effective energy in joules
    n (float): efficiency as a fraction (0 < n <= 1)
    Returns:
    float: the given energy in joules
"""
gv_energy = ef_energy / n
return gv_energy | 98095581bfaf4b8a6dbf59dce86b02e3f1fa6002 | 80 |
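A small worked example of the relation above, given energy = effective energy / efficiency (illustrative numbers, assuming given_energy is in scope):

# 400 J delivered at 80 % efficiency implies 400 / 0.8 = 500 J supplied
supplied = given_energy(n=0.8, ef_energy=400)
assert abs(supplied - 500.0) < 1e-9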
def sequence_sigmoid_cross_entropy(labels,
logits,
sequence_length,
average_across_batch=True,
average_across_timesteps=False,
average_across_classes=True,
sum_over_batch=False,
sum_over_timesteps=True,
sum_over_classes=False,
time_major=False,
stop_gradient_to_label=False,
name=None):
"""Computes sigmoid cross entropy for each time step of sequence
predictions.
Args:
labels: Target class distributions.
- If :attr:`time_major` is `False` (default), this must be a\
Tensor of shape `[batch_size, max_time(, num_classes)]`.
- If `time_major` is `True`, this must be a Tensor of shape\
`[max_time, batch_size(, num_classes)]`.
Each row of `labels` should be a valid probability
distribution, otherwise, the computation of the gradient will be
incorrect.
logits: Unscaled log probabilities having the same shape as with
:attr:`labels`.
sequence_length: A Tensor of shape `[batch_size]`. Time steps beyond
the respective sequence lengths will have zero losses.
average_across_timesteps (bool): If set, average the loss across
the time dimension. Must not set `average_across_timesteps`
and `sum_over_timesteps` at the same time.
        average_across_batch (bool): If set, average the loss across the
            batch dimension. Must not set `average_across_batch`
            and `sum_over_batch` at the same time.
        average_across_classes (bool): If set, average the loss across the
            class dimension (if it exists). Must not set
            `average_across_classes` and `sum_over_classes` at
            the same time. Ignored if :attr:`logits` is a 2D Tensor.
sum_over_timesteps (bool): If set, sum the loss across the
time dimension. Must not set `average_across_timesteps`
and `sum_over_timesteps` at the same time.
sum_over_batch (bool): If set, sum the loss across the
batch dimension. Must not set `average_across_batch`
and `sum_over_batch` at the same time.
sum_over_classes (bool): If set, sum the loss across the
class dimension. Must not set `average_across_classes`
and `sum_over_classes` at the same time. Ignored if
:attr:`logits` is a 2D Tensor.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`labels` and :attr:`logits` must have shape
`[max_time, batch_size, ...]`. If `False`
(default), they must have shape `[batch_size, max_time, ...]`.
stop_gradient_to_label (bool): If set, gradient propagation to
:attr:`labels` will be disabled.
name (str, optional): A name for the operation.
Returns:
A Tensor containing the loss, of rank 0, 1, or 2 depending on the
arguments
:attr:`{average_across}/{sum_over}_{timesteps}/{batch}/{classes}`.
For example, if the class dimension does not exist, and
- If :attr:`sum_over_timesteps` and :attr:`average_across_batch` \
are `True` (default), the return Tensor is of rank 0.
- If :attr:`average_across_batch` is `True` and other arguments are \
`False`, the return Tensor is of shape `[max_time]`.
"""
with tf.name_scope(name, "sequence_sigmoid_cross_entropy"):
if stop_gradient_to_label:
labels = tf.stop_gradient(labels)
losses = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
rank = shapes.get_rank(logits) or shapes.get_rank(labels)
if rank is None:
raise ValueError(
'Cannot determine the rank of `logits` or `labels`.')
losses = mask_and_reduce(
losses,
sequence_length,
rank=rank,
average_across_batch=average_across_batch,
average_across_timesteps=average_across_timesteps,
average_across_remaining=average_across_classes,
sum_over_batch=sum_over_batch,
sum_over_timesteps=sum_over_timesteps,
sum_over_remaining=sum_over_classes,
time_major=time_major)
return losses | 7eaea7dc8c416f0a37f0d6668ec175725f1fea04 | 81 |
import math
import torch
def stats(func):
"""Stats printing and exception handling decorator"""
def inner(*args):
try:
code, decoded, res = func(*args)
except ValueError as err:
print(err)
else:
if FORMATTING:
code_length = 0
for el in code:
code_length += len(el)
compression_rate = 24 * img.shape[0] * img.shape[1] / code_length
print(f"Code length: {code_length}")
else:
compression_rate = 24 * img.shape[0] * img.shape[1] / len(code)
code_length = len(code)
print(f"Code length: {code_length}")
#Convert RGB to YCbCr
color_conv = RGBYCbCr()
img_ycbcr = color_conv.forward(img)
decoded_ycbcr = color_conv.forward(decoded)
#Calculate MSE and PSNR, Y:U:V = 6:1:1
MSE_y = ((img_ycbcr[:,:,0].astype(int)-decoded_ycbcr[:,:,0].astype(int))**2).mean()
MSE_u = ((img_ycbcr[:,:,1].astype(int)-decoded_ycbcr[:,:,1].astype(int))**2).mean()
MSE_v = ((img_ycbcr[:,:,2].astype(int)-decoded_ycbcr[:,:,2].astype(int))**2).mean()
PSNR_y = 10 * math.log10((255*255)/MSE_y)
PSNR_u = 10 * math.log10((255*255)/MSE_u)
PSNR_v = 10 * math.log10((255*255)/MSE_v)
PSNR = (PSNR_y * 6 + PSNR_u + PSNR_v)/8
#Call the functions of SSIM, MS-SSIM, VIF
D_1 = SSIM(channels=1)
D_2 = MS_SSIM(channels=1)
D_3 = VIFs(channels=3) # spatial domain VIF
#To get 4-dimension torch tensors, (N, 3, H, W), divide by 255 to let the range between (0,1)
torch_decoded = torch.FloatTensor(decoded.astype(int).swapaxes(0,2).swapaxes(1,2)).unsqueeze(0)/255
torch_img = torch.FloatTensor(img.astype(int).swapaxes(0,2).swapaxes(1,2)).unsqueeze(0)/255
torch_decoded_ycbcr = torch.FloatTensor(decoded_ycbcr.astype(int).swapaxes(0,2).swapaxes(1,2)).unsqueeze(0)/255
torch_img_ycbcr = torch.FloatTensor(img_ycbcr.astype(int).swapaxes(0,2).swapaxes(1,2)).unsqueeze(0)/255
#Calculate SSIM, MS-SSIM, VIF
#SSIM on luma channel
SSIM_value = D_1(torch_decoded_ycbcr[:, [0], :, :] , torch_img_ycbcr[:, [0], :, :], as_loss=False)
#MS-SSIM on luma channel
MS_SSIM_value = D_2(torch_decoded_ycbcr[:, [0], :, :], torch_img_ycbcr[:, [0], :, :], as_loss=False)
#VIF on spatial domain
VIF_value = D_3(torch_decoded, torch_img, as_loss=False)
#print(D_3(torch_img, torch_img, as_loss=False))
#Print out the results
#print(f"Mean squared error: {MSE}")
print(f"General PSNR: {PSNR}")
print(f"SSIM: {SSIM_value}")
print(f"MS_SSIM: {MS_SSIM_value}")
print(f"VIF: {VIF_value}")
print(f"Compression rate: {compression_rate} bits/nt")
# plt.imshow(decoded)
# plt.show()
# io.imsave(str(compression_rate) + ".png", decoded)
return code, decoded, res, compression_rate, PSNR, SSIM_value, MS_SSIM_value, VIF_value
return inner | 99e39e204238d09bd7275ac29089898ff3d22f6a | 82 |
import asyncio
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
config_data = hass.data[DOMAIN].pop(entry.entry_id)
await config_data[CONF_CLIENT].async_client_close()
return unload_ok | f44f9d9f3a566794547571563ae94ce433082d6d | 83 |
def get_ucp_worker_info():
"""Gets information on the current UCX worker, obtained from
`ucp_worker_print_info`.
"""
return _get_ctx().ucp_worker_info() | c40a0debcc769422b4cd8cd03018b57dd7efe224 | 84 |
from datetime import datetime
def check_can_collect_payment(id):
"""
    Check whether a participant can collect payment. This is true if:
    - they have been signed up for at least a year, and
    - they have never collected payment before, or their last collection was more than 5 months ago
"""
select = "SELECT time_sign_up FROM SESSION_INFO WHERE user_id = (%s)"
time_sign_up = db.execute(select, (id,), 1)
one_year_after_sign_up = time_sign_up[0][0] + timedelta(weeks=43)
select = "SELECT date_collected,next_collection from TASK_COMPLETED WHERE user_id = (%s)"
date_collected = db.execute(select, (id,), 1)
can_collect_payment = False
#if one_year_after_sign_up < datetime.now() and user_type and next_collection[0][0] and next_collection[0][0] < datetime.now():
if one_year_after_sign_up < datetime.now() and len(date_collected) >= 1 and (date_collected[0][0] == None or date_collected[0][0] < (datetime.now() - timedelta(weeks=22))):
can_collect_payment = True
date_collected = date_collected[0][0]
elif len(date_collected) > 1:
date_collected = date_collected[0][0]
return (can_collect_payment,date_collected,time_sign_up) | a057b6599bfd5417a17de1672b4ccf2023991e6e | 85 |
import numpy as np
def plus_tensor(wx, wy, wz=np.array([0, 0, 1])):
    """Calculate the plus polarization tensor for some basis; cf. eq. 2 of https://arxiv.org/pdf/1710.03794.pdf"""
e_plus = np.outer(wx, wx) - np.outer(wy, wy)
return e_plus | c5632dcfffa9990c8416b77ff0df728ae46c34bc | 86 |
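A minimal sanity check of the plus-polarization tensor for the standard Cartesian basis (assuming plus_tensor is in scope):

import numpy as np
wx = np.array([1.0, 0.0, 0.0])
wy = np.array([0.0, 1.0, 0.0])
# outer(wx, wx) - outer(wy, wy) reduces to diag(1, -1, 0) for this orthonormal pair
assert np.allclose(plus_tensor(wx, wy), np.diag([1.0, -1.0, 0.0]))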
import json
def duplicate_objects(dup_infos):
"""Duplicate an object with optional transformations.
Args:
dup_infos (list[dict]): A list of duplication infos.
Each info is a dictionary, containing the following data:
original (str): Name of the object to duplicate.
name (str): Desired name for the duplicate.
translation (f,f,f): Translation float tuple or None if not
to change.
rotation (f,f,f): Rotation float tuple or None if not to
change.
scale (f,f,f): 3d scale float tuple or None if not to change.
Returns:
list[tuple (str, str)]: The first element of each tuple
contains the return 'code' of the operation, which can be
- 'Ok' If no problem occured.
- 'NotFound' If the original could not be found.
- 'Renamed' If the name was changed by the editor.
- 'Failed' If something else problematic happened.
The second element is None, unless the editor 'Renamed' the
object, in which case it contains the editor-assigned name.
If the return value is 'Renamed', the calling function must assign
the returned name to the original object in the Program or find a
new fitting name and assign it to the duplicated object using the
:func:`renameObject` function with the returned string as name.
.. seealso:: :func:`renameObject` :func:`getFreeName`
"""
infos_str = json.dumps(dup_infos)
msg = "DuplicateObjects " + infos_str
result = connection.send_message(msg)
    results = json.loads(result)  # the json module parses strings with loads(), not parse()
return results | d8281eb6862cd7e022907bc479b03fc74cb3c78c | 87 |
from rest_framework import status
from rest_framework.response import Response
def _list_data_objects(request, model, serializer):
"""a factory method for querying and receiving database objects"""
obj = model.objects.all()
ser = serializer(obj, many=True)
return Response(ser.data, status=status.HTTP_200_OK) | 80f43efdf1d09e73fda7be8ee5f8e37a163892f7 | 88 |
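A sketch of how the factory above might be wired into a Django REST Framework view; Book and BookSerializer are hypothetical placeholders, not part of the original code:

from rest_framework.decorators import api_view

@api_view(['GET'])
def book_list(request):
    # Delegate the queryset fetch and serialization to the generic helper
    return _list_data_objects(request, Book, BookSerializer)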
import configparser
def load_conf(file='./config', section='SYNTH_DATA'):
"""load configuration
Args:
file (str, optional): path to conf file. Defaults to './config'.
section (str, optional): name of section. Defaults to 'SYNTH_DATA'.
Returns:
        dict: configuration parameters keyed by option name (values are strings)
"""
log_message('Load configuration.')
config = configparser.ConfigParser()
resource = config.read(file)
if 0 == resource:
log_message('Error: cannot read configuration file.')
exit(1)
params = {}
options = config.options(section)
for opt in options:
params[opt] = config.get(section, opt)
log_message(' - %s: %s' % (opt, params[opt]))
return params | a30240e98d9fd1cdc1bc7746566fdb07b842a8dc | 89 |
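A usage sketch for the loader above (assuming load_conf and its log_message helper are in scope, and that ./config exists); the section keys shown are illustrative:

# ./config (illustrative contents)
# [SYNTH_DATA]
# n_samples = 1000
# out_dir = ./output
params = load_conf(file='./config', section='SYNTH_DATA')
# configparser returns every value as a string
print(params.get('n_samples'))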
import math
def distance(a, b):
"""
    Computes the Euclidean distance between two 2D points.
    :param a: first point as an (x, y) pair
    :param b: second point as an (x, y) pair
    :return: straight-line distance between a and b
"""
x = a[0] - b[0]
y = a[1] - b[1]
return math.sqrt(x ** 2 + y ** 2) | 60b637771cd215a4cf83761a142fb6fdeb84d96e | 90 |
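A doctest-style check of the Euclidean distance helper above (assuming distance is in scope):

# Classic 3-4-5 right triangle, plus the degenerate zero-length case
assert distance((0, 0), (3, 4)) == 5.0
assert distance((1, 1), (1, 1)) == 0.0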
def approve_pipelines_for_publishing(pipeline_ids): # noqa: E501
"""approve_pipelines_for_publishing
:param pipeline_ids: Array of pipeline IDs to be approved for publishing.
:type pipeline_ids: List[str]
:rtype: None
"""
pipe_exts: [ApiPipelineExtension] = load_data(ApiPipelineExtension)
pipe_ext_ids = {p.id for p in pipe_exts}
missing_pipe_ext_ids = set(pipeline_ids) - pipe_ext_ids
for id in missing_pipe_ext_ids:
store_data(ApiPipelineExtension(id=id))
update_multiple(ApiPipelineExtension, [], "publish_approved", False)
if pipeline_ids:
update_multiple(ApiPipelineExtension, pipeline_ids, "publish_approved", True)
return None, 200 | e5ae4dfc0889fc95e3343a01111323eec43e1f93 | 91 |
def make_tokenizer_module(tokenizer):
"""tokenizer module"""
tokenizers = {}
cursors = {}
@ffi.callback("int(int, const char *const*, sqlite3_tokenizer **)")
def xcreate(argc, argv, ppTokenizer):
if hasattr(tokenizer, "__call__"):
args = [ffi.string(x).decode("utf-8") for x in argv[0:argc]]
tk = tokenizer(args)
else:
tk = tokenizer
th = ffi.new_handle(tk)
tkn = ffi.new("sqlite3_tokenizer *")
tkn.t = th
tokenizers[tkn] = th
ppTokenizer[0] = tkn
return SQLITE_OK
@ffi.callback("int(sqlite3_tokenizer *)")
def xdestroy(pTokenizer):
tkn = pTokenizer
del tokenizers[tkn]
return SQLITE_OK
@ffi.callback(
"int(sqlite3_tokenizer*, const char *, int, sqlite3_tokenizer_cursor **)"
)
def xopen(pTokenizer, pInput, nInput, ppCursor):
cur = ffi.new("sqlite3_tokenizer_cursor *")
tokenizer = ffi.from_handle(pTokenizer.t)
i = ffi.string(pInput).decode("utf-8")
tokens = [(n.encode("utf-8"), b, e) for n, b, e in tokenizer.tokenize(i) if n]
tknh = ffi.new_handle(iter(tokens))
cur.pTokenizer = pTokenizer
cur.tokens = tknh
cur.pos = 0
cur.offset = 0
cursors[cur] = tknh
ppCursor[0] = cur
return SQLITE_OK
@ffi.callback(
"int(sqlite3_tokenizer_cursor*, const char **, int *, int *, int *, int *)"
)
def xnext(pCursor, ppToken, pnBytes, piStartOffset, piEndOffset, piPosition):
try:
cur = pCursor[0]
tokens = ffi.from_handle(cur.tokens)
normalized, inputBegin, inputEnd = next(tokens)
ppToken[0] = ffi.from_buffer(normalized)
pnBytes[0] = len(normalized)
piStartOffset[0] = inputBegin
piEndOffset[0] = inputEnd
cur.offset = inputEnd
piPosition[0] = cur.pos
cur.pos += 1
except StopIteration:
return SQLITE_DONE
return SQLITE_OK
@ffi.callback("int(sqlite3_tokenizer_cursor *)")
def xclose(pCursor):
tk = ffi.from_handle(pCursor.pTokenizer.t)
on_close = getattr(tk, "on_close", None)
if on_close and hasattr(on_close, "__call__"):
on_close()
del cursors[pCursor]
return SQLITE_OK
tokenizer_module = ffi.new(
"sqlite3_tokenizer_module*", [0, xcreate, xdestroy, xopen, xclose, xnext]
)
tokenizer_modules[tokenizer] = (
tokenizer_module,
xcreate,
xdestroy,
xopen,
xclose,
xnext,
)
return tokenizer_module | 73f38b71f15b286a95a195296c5c265ba2da87f9 | 92 |
import gevent
from gevent.event import Event
def looping_call(interval, callable):
"""
Returns a greenlet running your callable in a loop and an Event you can set
to terminate the loop cleanly.
"""
ev = Event()
def loop(interval, callable):
while not ev.wait(timeout=interval):
callable()
return gevent.spawn(loop, interval, callable), ev | 0df2a822c2eb56b8479224b0463bf9a2ad34f1e7 | 93 |
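A usage sketch for the helper above under gevent (assuming looping_call is in scope; the interval and callable are illustrative):

import gevent
ticks = []
glet, stop = looping_call(0.1, lambda: ticks.append(1))
gevent.sleep(0.35)   # let the loop fire a few times
stop.set()           # request a clean exit
glet.join()
print(len(ticks))    # roughly 3 iterations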
def rsquared_adj(r, nobs, df_res, has_constant=True):
"""
Compute the adjusted R^2, coefficient of determination.
Args:
r (float): rsquared value
nobs (int): number of observations the model was fit on
df_res (int): degrees of freedom of the residuals (nobs - number of model params)
has_constant (bool): whether the fitted model included a constant (intercept)
Returns:
float: adjusted coefficient of determination
"""
if has_constant:
return 1.0 - (nobs - 1) / df_res * (1.0 - r)
else:
return 1.0 - nobs / df_res * (1.0 - r) | 8d466437db7ec9de9bc7ee1d9d50a3355479209d | 94 |
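A worked example of the adjustment above (illustrative numbers, assuming rsquared_adj is in scope): with r² = 0.85, 100 observations and 97 residual degrees of freedom, the adjusted value is 1 - (99/97) * 0.15 ≈ 0.8469.

adj = rsquared_adj(r=0.85, nobs=100, df_res=97, has_constant=True)
print(round(adj, 4))  # 0.8469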
import json as json_module
def metadata_factory(repo, json=False, **kwargs):
"""
This generates a layout you would expect for metadata storage with files.
:type json: bool
:param json: if True, will return string instead.
"""
output = {
"baseline_filename": None,
"crontab": "0 0 * * *",
"exclude_regex": None,
"plugins": {
"AWSKeyDetector": {},
"ArtifactoryDetector": {},
"Base64HighEntropyString": {
"base64_limit": 4.5,
},
"BasicAuthDetector": {},
"HexHighEntropyString": {
"hex_limit": 3,
},
"KeywordDetector": {
'keyword_exclude': None
},
"MailchimpDetector": {},
"PrivateKeyDetector": {},
"SlackDetector": {},
"StripeDetector": {},
},
"repo": repo,
"sha": 'sha256-hash',
}
output.update(kwargs)
if json:
return json_module.dumps(output, indent=2, sort_keys=True)
return output | 8dc0cd4cb5aa194c915146efbe0a743b5047561d | 95 |
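A usage sketch for the factory above (repo URL and override values are illustrative, assuming metadata_factory is in scope):

# Plain dict with one field overridden via kwargs
meta = metadata_factory('git@github.com:example/repo.git', crontab='0 6 * * 1')
assert meta['crontab'] == '0 6 * * 1'
# JSON string form, e.g. for writing a baseline file to disk
meta_json = metadata_factory('git@github.com:example/repo.git', json=True)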
from typing import Optional
from typing import Sequence
def inpand(clip: vs.VideoNode, sw: int, sh: Optional[int] = None, mode: XxpandMode = XxpandMode.RECTANGLE,
thr: Optional[int] = None, planes: int | Sequence[int] | None = None) -> vs.VideoNode:
"""
Calls std.Minimum in order to shrink each pixel with the smallest value in its 3x3 neighbourhood
from the desired width and height.
:param clip: Source clip.
:param sw: Shrinking shape width.
:param sh: Shrinking shape height. If not specified, default to sw.
    :param mode: Shape form. Ellipses are combinations of rectangles and lozenges
    and look more like octagons.
    Lozenges are truncated (not scaled) when sw and sh are not equal.
:param thr: Allows to limit how much pixels are changed.
Output pixels will not become less than ``input - threshold``.
The default is no limit.
:param planes: Specifies which planes will be processed. Any unprocessed planes will be simply copied.
:return: Transformed clip
"""
return morpho_transfo(clip, core.std.Minimum, sw, sh, mode, thr, planes) | 67d05b2ef31fdc3b544d063a571d39a1c1a3ecf8 | 96 |
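A hedged VapourSynth sketch for the wrapper above (assumes a VapourSynth environment and that inpand and XxpandMode are importable; the clip parameters are illustrative):

import vapoursynth as vs
core = vs.core
clip = core.std.BlankClip(width=640, height=360, format=vs.YUV420P8)
# Shrink bright areas with a 2x2 rectangle on the luma plane only
shrunk = inpand(clip, 2, 2, mode=XxpandMode.RECTANGLE, planes=0)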
def _extract_aggregate_functions(before_aggregate):
"""Converts `before_aggregate` to aggregation functions.
Args:
before_aggregate: The first result of splitting `after_broadcast` on
`intrinsic_defs.FEDERATED_AGGREGATE`.
Returns:
`zero`, `accumulate`, `merge` and `report` as specified by
`canonical_form.CanonicalForm`. All are instances of
`building_blocks.CompiledComputation`.
Raises:
    transformations.CanonicalFormCompilationError: If we extract an AST of the
wrong type.
"""
# See `get_iterative_process_for_canonical_form()` above for the meaning of
# variable names used in the code below.
zero_index_in_before_aggregate_result = 1
zero_tff = transformations.select_output_from_lambda(
before_aggregate, zero_index_in_before_aggregate_result).result
accumulate_index_in_before_aggregate_result = 2
accumulate_tff = transformations.select_output_from_lambda(
before_aggregate, accumulate_index_in_before_aggregate_result).result
merge_index_in_before_aggregate_result = 3
merge_tff = transformations.select_output_from_lambda(
before_aggregate, merge_index_in_before_aggregate_result).result
report_index_in_before_aggregate_result = 4
report_tff = transformations.select_output_from_lambda(
before_aggregate, report_index_in_before_aggregate_result).result
zero = transformations.consolidate_and_extract_local_processing(zero_tff)
accumulate = transformations.consolidate_and_extract_local_processing(
accumulate_tff)
merge = transformations.consolidate_and_extract_local_processing(merge_tff)
report = transformations.consolidate_and_extract_local_processing(report_tff)
return zero, accumulate, merge, report | 22aff3c077b94c5eae8841448c2a55bbfa311487 | 97 |
def _make_system(A, M, x0, b):
"""Make a linear system Ax = b
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
cupyx.scipy.sparse.LinearOperator): sparse or dense matrix.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
cupyx.scipy.sparse.LinearOperator): preconditioner.
x0 (cupy.ndarray): initial guess to iterative method.
b (cupy.ndarray): right hand side.
Returns:
tuple:
It returns (A, M, x, b).
            A (LinearOperator): matrix of linear system
M (LinearOperator): preconditioner
x (cupy.ndarray): initial guess
b (cupy.ndarray): right hand side.
"""
fast_matvec = _make_fast_matvec(A)
A = _interface.aslinearoperator(A)
if fast_matvec is not None:
A = _interface.LinearOperator(A.shape, matvec=fast_matvec,
rmatvec=A.rmatvec, dtype=A.dtype)
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
        raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensions')
b = b.astype(A.dtype).ravel()
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensions')
x = x0.astype(A.dtype).ravel()
if M is None:
M = _interface.IdentityOperator(shape=A.shape, dtype=A.dtype)
else:
fast_matvec = _make_fast_matvec(M)
M = _interface.aslinearoperator(M)
if fast_matvec is not None:
M = _interface.LinearOperator(M.shape, matvec=fast_matvec,
rmatvec=M.rmatvec, dtype=M.dtype)
if A.shape != M.shape:
raise ValueError('matrix and preconditioner have different shapes')
return A, M, x, b | 37d877dc8522a476c1ff0be34db01fe8d711dbb7 | 98 |
from typing import List
def merge_intersecting_segments(segments: List[Segment]) -> List[Segment]:
"""
Merges intersecting segments from the list.
"""
sorted_by_start = sorted(segments, key=lambda segment: segment.start)
merged = []
for segment in sorted_by_start:
if not merged:
merged.append(Segment(segment.start, segment.end))
continue
last_merged = merged[-1]
if segment.start <= last_merged.end:
last_merged.end = max(last_merged.end, segment.end)
else:
merged.append(Segment(segment.start, segment.end))
return merged | e18498d9a9695b2c5796fb6e006f375541f704c7 | 99 |
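A usage sketch assuming a minimal Segment class with mutable start/end attributes (the real Segment class is not shown in this snippet):

from dataclasses import dataclass

@dataclass
class Segment:
    start: int
    end: int

merged = merge_intersecting_segments([Segment(1, 3), Segment(2, 6), Segment(8, 10)])
print(merged)  # [Segment(start=1, end=6), Segment(start=8, end=10)]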