content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---|
import torch
def make_coordinate_grid(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size
x = torch.arange(w).type(type)
y = torch.arange(h).type(type)
x = (2 * (x / (w - 1)) - 1)
y = (2 * (y / (h - 1)) - 1)
yy = y.view(-1, 1).repeat(1, w)
xx = x.view(1, -1).repeat(h, 1)
meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
return meshed | 0bbbd2f0e0d588b58feebce19b3f2fd9c84934d8 | 708,797 |
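A minimal usage sketch (illustrative addition, not part of the dataset record; assumes torch is installed and make_coordinate_grid is in scope):

import torch
# a 3x4 grid of (x, y) pairs, each axis normalised to [-1, 1]
grid = make_coordinate_grid((3, 4), torch.float32)
print(grid.shape)    # torch.Size([3, 4, 2])
print(grid[0, 0])    # tensor([-1., -1.])  top-left corner
print(grid[-1, -1])  # tensor([1., 1.])    bottom-right corner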
def add_wrong_column(data_frame):
"""
    Adds a wrong column to the dataframe
    :param dataframe data_frame: input dataframe
    :return dataframe: copy of the input with a constant 'Ducks' column added
"""
new_df = data_frame.copy()
new_df['Ducks'] = 0
return new_df | 0f3ae838c0975e8021cfeee258576afac75072c5 | 708,798 |
def drop_duplicates(df):
"""Drop duplicate rows and reindex.
Args:
df (pd.DataFrame): Dataframe.
Returns:
        pd.DataFrame: Dataframe with duplicate rows dropped and the index reset.
Examples:
>>> df = pd.DataFrame({'letters':['b','b','c'], 'numbers':[2,2,3]})
>>> drop_duplicates(df)
letters numbers
0 b 2
1 c 3
"""
return df.drop_duplicates().reset_index(drop=True) | 517d9faf09267df72def3fa7b90b0f59d819d660 | 708,800 |
import unicodedata
def is_number(input_string):
"""
    if input_string represents a number, return the corresponding float;
    otherwise return input_string with surrounding double quotes stripped
"""
try:
return float(input_string)
except ValueError:
pass
try:
return unicodedata.numeric(input_string)
except (TypeError, ValueError):
pass
return input_string.strip('"') | 2b435b1f23c8764e0ff6bf741678db91bb4a5b23 | 708,801 |
import requests
import json
def get_lang_list(source_text, key=None, print_meta_data=False):
"""
Inputs:
source_text - source text as a string
        key - Google API key; required, or the function will raise an error
        print_meta_data - if True, print detection reliability and confidence
    returns the detected language identifier as a string
"""
    # set up url request to google translate api
    if not key:
        raise Exception("A Google API key is required")
    url_shell = 'https://www.googleapis.com/language/translate/v2/detect?key={0}&q={1}'
    url = url_shell.format(key, source_text)
    response = requests.get(url)
    lang_json = json.loads(response.text)
    source_lang = lang_json['data']['detections'][0][0]['language']
    if print_meta_data:
        detection = lang_json['data']['detections'][0][0]
        print('Is detection reliable: {0}'.format(detection['isReliable']))
        print('Confidence: {0}'.format(detection['confidence']))
    return source_lang | 720c3c9252535e82881411fa345734d984350537 | 708,802 |
import uuid
def token():
""" Return a unique 32-char write-token
"""
    return uuid.uuid4().hex | f7dc5725cc1d11ee0ab9471d141a89178fa3d07c | 708,803 |
import functools
def decorate_func_with_plugin_arg(f):
"""Decorate a function that takes a plugin as an argument.
A "plugin" is a pair of simulation and postprocess plugins.
The decorator expands this pair.
"""
@functools.wraps(f)
def wrapper(self, plugins_tuple):
return f(self, plugins_tuple[0], plugins_tuple[1])
return wrapper | e90c86bfd6c3cada33c867d26ed64da3cac6f9c4 | 708,804 |
def compute_acc_bin(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
"""
# Computes accuracy and average confidence for bin
Args:
conf_thresh_lower (float): Lower Threshold of confidence interval
conf_thresh_upper (float): Upper Threshold of confidence interval
conf (numpy.ndarray): list of confidences
pred (numpy.ndarray): list of predictions
true (numpy.ndarray): list of true labels
Returns:
(accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin and number of elements in bin.
"""
filtered_tuples = [x for x in zip(pred, true, conf) if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper]
if len(filtered_tuples) < 1:
return 0, 0, 0
else:
        correct = len([x for x in filtered_tuples if x[0] == x[1]])  # how many labels are correct
        len_bin = len(filtered_tuples)  # how many elements fall into the given bin
avg_conf = sum([x[2] for x in filtered_tuples]) / len_bin # Avg confidence of BIN
accuracy = float(correct) / len_bin # accuracy of BIN
return accuracy, avg_conf, len_bin | eb338800751de635e6b72213254287554cd34dc0 | 708,805 |
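A usage sketch (illustrative addition), in the style of calibration (ECE) analysis: statistics for the (0.5, 1.0] confidence bin:

import numpy as np
conf = np.array([0.9, 0.6, 0.3])
pred = np.array([1, 0, 1])
true = np.array([1, 1, 1])
# only the 0.9 and 0.6 predictions land in the bin; one of the two is correct
acc, avg_conf, n = compute_acc_bin(0.5, 1.0, conf, pred, true)
print(acc, avg_conf, n)  # 0.5 0.75 2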
def _is_valid_requirement(requirement: str) -> bool:
"""Returns True is the `requirement.txt` line is valid."""
is_invalid = (
not requirement or # Empty line
requirement.startswith('#') or # Comment
requirement.startswith('-r ') # Filter the `-r requirement.txt`
)
return not is_invalid | 73b8ad139329698ad334b230cb04976db4ec05ba | 708,806 |
from typing import Union
from typing import Sequence
def wrap_singleton_string(item: Union[Sequence, str]):
""" Wrap a single string as a list. """
if isinstance(item, str):
# Can't check if iterable, because a string is an iterable of
# characters, which is not what we want.
return [item]
return item | 6e0946fee8fddd23631ff66d405dce2ae8a15fa6 | 708,807 |
def print_settings(settings):
"""
    This function returns the equation of state (EoS) fitting settings.
Returns
-------
text: str
Pretty-printed settings for the current Quantas run.
"""
text = '\nCalculator: Equation of state (EoS) fitting\n'
text += '\nMeasurement units\n'
text += '-------------------------------------\n'
text += ' - {:12} {}\n'.format('pressure:', settings['pressure_unit'])
    text += ' - {:12} {}\n'.format('length:', settings['lenght_unit'])  # upstream key keeps the 'lenght' spelling
return text | 4e64353e0c519a26ac210de1df39ce09fbf54045 | 708,808 |
def remove_multi_whitespace(string_or_list):
""" Cleans redundant whitespace from extracted data """
    if isinstance(string_or_list, str):
return ' '.join(string_or_list.split())
return [' '.join(string.split()) for string in string_or_list] | a284eb1ea685fb55afeefe78d863a716475a9182 | 708,809 |
import json
def writeJSONFile(filename,JSONDocument):
""" Writes a JSON document to a named file
Parameters
----------
filename : str
name of the file
    JSONDocument : object
        JSON-serializable object to write to the file
Returns
-------
True
"""
filename='data/'+filename
with open(filename, 'w') as outfile:
json.dump(JSONDocument, outfile)
return True | 4f20b42a5f38554589a7bb03039ba348e3b0bb15 | 708,810 |
def get_monotask_from_macrotask(monotask_type, macrotask):
""" Returns a Monotask of the specified type from the provided Macrotask. """
return next((monotask for monotask in macrotask.monotasks if isinstance(monotask, monotask_type))) | 46d4516327c89755eaa3ba6f6fa3503aae0c5bd9 | 708,811 |
import os
def getREADMEforDescription(readmePath=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')):
"""Use the Markdown from the file for the package's long_description.
long_description_content_type should be 'text/markdown' in this case.
This is why we need the README to be in the MANIFEST.in file.
"""
try:
with open(readmePath) as readme:
return '\n' + readme.read()
except FileNotFoundError:
return 'Package for fuzzing.' | 2b0eff2cb2a7fe5d94a512c6f62b4ad8bf48b290 | 708,812 |
from typing import List
from typing import Tuple
def choose_page(btn_click_list: List[Tuple[int, str]]) -> str:
"""
Given a list of tuples of (num_clicks, next_page) choose the next_page that
corresponds to exactly 1 num_clicks.
This is to help with deciding which page to go to next when clicking on one
of many buttons on a page.
The expectation is that exactly one button will have been clicked, so we get
a deterministic next page.
:param btn_click_list: List of tuples of (num_clicks, next_page).
:return: The id of the next page.
"""
for tup in btn_click_list:
if tup[0] == 1:
return tup[1]
raise ValueError(
"No clicks were detected, or the click list is misconfigured: {}".format(
btn_click_list
)
) | e61bc1e52c6531cf71bc54faea0d03976eb137ad | 708,813 |
def reconstruct_entity(input_examples, entitys_iter):
""" the entitys_iter contains the prediction entity of the splited examples.
We need to reconstruct the complete entitys for each example in input_examples.
and return the results as dictionary.
input_examples: each should contains (start, end) indice.
entitys_iter: iterator of entitys
Overlaps follows first in first set order:
--------------------------------------
O O O B-PER I-PER
O O O O B-GPE I-GPE
O B-LOC I-LOC O O
--------------------------------------
O O O B-PER I-PER O B-GPE I-GPE O O
--------------------------------------
    return: the complete entities of each input example.
"""
predict_entitys = []
for i, example in enumerate(input_examples):
_entity = []
for span in example.sentence_spans:
_, _, start, end = span
# +1 to skip the first padding
_entity.extend(next(entitys_iter)[start : end])
predict_entitys.append(_entity)
assert len(predict_entitys) == len(input_examples)
return predict_entitys | 520acff8bfd0616a045ca1286c51d75ea9465f0e | 708,814 |
def realord(s, pos=0):
"""
Returns the unicode of a character in a unicode string, taking surrogate pairs into account
"""
if s is None:
return None
code = ord(s[pos])
if code >= 0xD800 and code < 0xDC00:
if len(s) <= pos + 1:
print("realord warning: missing surrogate character")
return 0
code2 = ord(s[pos + 1])
        if code2 >= 0xDC00 and code2 < 0xE000:  # check the *second* unit is a low surrogate
code = 0x10000 + ((code - 0xD800) << 10) + (code2 - 0xDC00)
return hex(code).replace("x", "") | 6683725d24a984ecf4feb2198e29a3b68c7f1d5b | 708,815 |
def specific_kinetic_energy(particles):
"""
Returns the specific kinetic energy of each particle in the set.
    >>> from amuse.datamodel import Particles
    >>> from amuse.units import units
>>> particles = Particles(2)
>>> particles.vx = [1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.specific_kinetic_energy()
quantity<[0.5, 0.5] m**2 * s**-2>
"""
return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2) | 89a126c23b291a526401a00f812b40a5283319f4 | 708,816 |
def parse_loot_percentage(text):
"""Use to parse loot percentage string, ie: Roubo: 50% becomes 0.5"""
percentage = float(text.split(':')[1].strip("%")) / 100
return percentage | 97dc4f20f02ef0e5d3e592d3084dce80549777ce | 708,817 |
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
    return major_mismatch or minor_mismatch | effa9f55c82a9edcacd79e07716527f314e41f39 | 708,818 |
def org_unit_type_filter(queryset, passed_in_org_types):
"""Get specific Organisational units based on a filter."""
for passed_in_org_type in passed_in_org_types:
queryset = queryset.filter(org_unit_type_id=passed_in_org_type)
return queryset | 0495cabe121f8d6fdb584538f13764bd81d978c5 | 708,819 |
def jsonify(records):
"""
Parse asyncpg record response into JSON format
"""
return [dict(r.items()) for r in records] | 618cb538331c4eb637aa03f0ba857da3f2fa4c1c | 708,822 |
def parse_vad_label(line, frame_size: float = 0.032, frame_shift: float = 0.008):
"""Parse VAD information in each line, and convert it to frame-wise VAD label.
Args:
line (str): e.g. "0.2,3.11 3.48,10.51 10.52,11.02"
        frame_size (float): frame size (in seconds) that is used when
            extracting spectral features
        frame_shift (float): frame shift / hop length (in seconds) that
            is used when extracting spectral features
Returns:
frames (List[int]): frame-wise VAD label
Examples:
>>> label = parse_vad_label("0.3,0.5 0.7,0.9")
[0, ..., 0, 1, ..., 1, 0, ..., 0, 1, ..., 1]
>>> print(len(label))
110
NOTE: The output label length may vary according to the last timestamp in `line`,
which may not correspond to the real duration of that sample.
For example, if an audio sample contains 1-sec silence at the end, the resulting
VAD label will be approximately 1-sec shorter than the sample duration.
Thus, you need to pad zeros manually to the end of each label to match the number
of frames in the feature. E.g.:
>>> feature = extract_feature(audio) # frames: 320
        >>> frames = feature.shape[1]  # here assumes the frame dimension is axis 1
>>> label = parse_vad_label(vad_line) # length: 210
>>> import numpy as np
>>> label_pad = np.pad(label, (0, np.maximum(frames - len(label), 0)))[:frames]
"""
frame2time = lambda n: n * frame_shift + frame_size / 2
frames = []
frame_n = 0
for time_pairs in line.split():
start, end = map(float, time_pairs.split(","))
assert end > start, (start, end)
while frame2time(frame_n) < start:
frames.append(0)
frame_n += 1
while frame2time(frame_n) <= end:
frames.append(1)
frame_n += 1
return frames | 658a2a00b8b0b2cfdb83b649d2f87fcf23cbb6b4 | 708,823 |
import ntpath
def path_leaf(path):
"""
Extracts file name from given path
:param str path: Path be extracted the file name from
:return str: File name
"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head) | 98ef27b218fdb5003ac988c42aff163d1067021f | 708,824 |
def next_permutation(a):
"""Generate the lexicographically next permutation inplace.
https://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
Return false if there is no next permutation.
"""
# Find the largest index i such that a[i] < a[i + 1]. If no such
# index exists, the permutation is the last permutation
for i in reversed(range(len(a) - 1)):
if a[i] < a[i + 1]:
break # found
else: # no break: not found
a.reverse()
return False # no next permutation
# Find the largest index j greater than i such that a[i] < a[j]
j = next(j for j in reversed(range(i + 1, len(a))) if a[i] < a[j])
# Swap the value of a[i] with that of a[j]
a[i], a[j] = a[j], a[i]
# Reverse sequence from a[i + 1] up to and including the final element a[n]
a[i + 1:] = reversed(a[i + 1:])
return True | b6246d53b5e0ac0e28aa5afda03d7756657a40bf | 708,825 |
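A usage sketch (illustrative addition): enumerate permutations of [1, 2, 3] in lexicographic order, in place, until the sequence wraps around:

a = [1, 2, 3]
while True:
    print(a)                    # [1, 2, 3], [1, 3, 2], ..., [3, 2, 1]
    if not next_permutation(a):
        break                   # a has been reset to the first permutation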
from numpy.linalg import norm
def normalize(v):
"""
Calculate normalized vector
:param v: input vector
:return: normalized vector
"""
return v/norm(v) | 0ade14b6136e5f55410f6d4cc3fb5b466fa60566 | 708,826 |
import re
def replace_hyphen_by_romaji(text):
"""
    Replace long-vowel marks such as "ー" with the corresponding kana.
"""
# error check
if len(text) < 2:
return ""
while "-" in list(text) or "~" in list(text):
text_ = text
if (text[0] == "-" or text[0] == "~") and len(text) >= 2:
text = text[2:]
continue
text = re.sub(r"(?P<vowel>[aeiou])[-~][-~]", r"\g<vowel>x\g<vowel>", text) # "-" を 2文字
text = re.sub(r"A[-~][-~]", r"Axa", text)
text = re.sub(r"E[-~][-~]", r"Exe", text)
text = re.sub(r"O[-~][-~]", r"Oxo", text)
text = re.sub(r"U[-~][-~]", r"Uxu", text)
if text_ == text:
            break  # stop once no further substitution changes the text
return text | 9e2d7216bbd751f49ed54519f5eaf8d516ae8025 | 708,827 |
def _function_set_name(f):
"""
return the name of a function (not the module)
@param f function
@return name
.. versionadded:: 1.1
"""
name = f.__name__
return name.split(".")[-1] | e1b73fbc520c7d9745872b0cd19766d42c027d15 | 708,828 |
def add(*args):
"""Adding list of values"""
return sum(args) | 9bc68771c10b537f0727e76cc07297e7d0311a5d | 708,829 |
import itertools
import shlex
def combine_arg_list_opts(opt_args):
"""Helper for processing arguments like impalad_args. The input is a list of strings,
each of which is the string passed into one instance of the argument, e.g. for
--impalad_args="-foo -bar" --impalad_args="-baz", the input to this function is
["-foo -bar", "-baz"]. This function combines the argument lists by tokenised each
string into separate arguments, if needed, e.g. to produce the output
["-foo", "-bar", "-baz"]"""
return list(itertools.chain(*[shlex.split(arg) for arg in opt_args])) | 77cfc6fa54201083c2cb058b8a9493b7d020273e | 708,830 |
def path_to_filename(username, path_to_file):
""" Converts a path formated as path/to/file.txt to a filename, ie. path_to_file.txt """
filename = '{}_{}'.format(username, path_to_file)
filename = filename.replace('/','_')
print(filename)
return filename | a29e98db8ac4cd7f39e0f0e7fc1f76e72f5fa398 | 708,831 |
from typing import List
def _convert_artist_format(artists: List[str]) -> str:
    """Returns the artists joined as a comma-separated string"""
    return ", ".join(artists) | 66f8afb0eb09e9a66eaa728c28576bb0e5a496d3 | 708,832 |
def parse_hostportstr(hostportstr):
""" Parse hostportstr like 'xxx.xxx.xxx.xxx:xxx'
"""
host = hostportstr.split(':')[0]
port = int(hostportstr.split(':')[1])
return host, port | 7d67b548728d8cc159a7baa3e5f419bf7cbbc4d3 | 708,833 |
def fastaDecodeHeader(fastaHeader):
"""Decodes the fasta header
"""
return fastaHeader.split("|") | 06f0af70765670dafa0b558867e2d9094c3d928b | 708,834 |
def tau_for_x(x, beta):
"""Rescales tau axis to x -1 ... 1"""
if x.min() < -1 or x.max() > 1:
raise ValueError("domain of x")
return .5 * beta * (x + 1) | 1d7b868dfadb65e6f98654276763fd4bff2c20ff | 708,835 |
import subprocess
def is_word_file(file):
"""
Check to see if the given file is a Word file.
@param file (str) The path of the file to check.
@return (bool) True if the file is a Word file, False if not.
"""
typ = subprocess.check_output(["file", file])
return ((b"Microsoft Office Word" in typ) or
(b"Word 2007+" in typ) or
(b"Microsoft OOXML" in typ)) | cb297e9cf8ed709e9802f1d3d48bc7d1271eac26 | 708,836 |
def minmax(data):
"""Solution to exercise R-1.3.
Takes a sequence of one or more numbers, and returns the smallest and
largest numbers, in the form of a tuple of length two. Do not use the
built-in functions min or max in implementing the solution.
"""
min_idx = 0
max_idx = 0
for idx, num in enumerate(data):
if num > data[max_idx]:
max_idx = idx
if num < data[min_idx]:
min_idx = idx
return (data[min_idx], data[max_idx]) | 9715bef69c120f6d1afb933bd9030240f556eb20 | 708,838 |
import os
def file_mtime_ns(file):
"""Get the ``os.stat(file).st_mtime_ns`` value."""
return os.stat(file).st_mtime_ns | 20b384549dae19e35d02b85b20dd62271352f08d | 708,839 |
def check_all_particles_present(partlist, gambit_pdg_codes):
"""
Checks all particles exist in the particle_database.yaml.
"""
absent = []
for i in range(len(partlist)):
if not partlist[i].pdg() in list(gambit_pdg_codes.values()):
absent.append(partlist[i])
absent_by_pdg = [x.pdg() for x in absent]
if len(absent) == 0:
print("All particles are in the GAMBIT database.")
else:
print(("\nThe following particles (by PDG code) are missing from the "
"particle database: {0}. GUM is now adding them to "
"../config/particle_database.yaml.\n").format(absent_by_pdg))
return absent | eab49388d472934a61900d8e972c0f2ef01ae1fb | 708,840 |
def translation_from_matrix(M):
"""Returns the 3 values of translation from the matrix M.
Parameters
----------
M : list[list[float]]
A 4-by-4 transformation matrix.
Returns
-------
[float, float, float]
The translation vector.
"""
return [M[0][3], M[1][3], M[2][3]] | 2b3bddd08772b2480a923a778d962f8e94f4b78a | 708,841 |
def saving_filename_boundary(save_location, close_up, beafort, wave_roughness):
""" Setting the filename of the figure """
if close_up is None:
return save_location + 'Boundary_comparison_Bft={}_roughness={}.png'.format(beafort, wave_roughness)
else:
ymax, ymin = close_up
return save_location + 'Boundary_comparison_Bft={}_max={}_min={}_roughness={}.png'.format(beafort, ymax, ymin,
wave_roughness) | c0357a211adc95c35873a0f3b0c900f6b5fe42d0 | 708,842 |
def childs_page_return_right_login(response_page, smarsy_login):
"""
Receive HTML page from login function and check we've got expected source
"""
if smarsy_login in response_page:
return True
else:
raise ValueError('Invalid Smarsy Login') | e7cb9b8d9df8bd5345f308e78cec28a20919370e | 708,843 |
import json
import sys
def _load_json(json_path):
"""Load JSON from a file with a given path."""
# Note: Binary so load can detect encoding (as in Section 3 of RFC 4627)
with open(json_path, 'rb') as json_file:
try:
return json.load(json_file)
except Exception as ex:
if sys.version_info[0] >= 3:
ex2 = Exception('Error loading ' + json_path)
exec('raise ex2 from ex') # nosec
else:
ex2 = Exception('Error loading ' + json_path + ': ' + str(ex))
ex2.__cause__ = ex
raise ex2 | 86a6ab7c509c24a50c248134e01a7d61d1499adb | 708,844 |
def sigma_bot(sigma_lc_bot, sigma_hc_bot, x_aver_bot_mass):
"""
Calculates the surface tension at the bottom of column.
Parameters
----------
sigma_lc_bot : float
        The surface tension of the low-boiling component at the bottom of the column, [N / m]
    sigma_hc_bot : float
        The surface tension of the high-boiling component at the bottom of the column, [N / m]
    x_aver_bot_mass : float
        The average mass concentration at the bottom of the column, [kg/kg]
Returns
-------
sigma_bot : float
The surface tension at the bottom of column, [N / m]
References
----------
&&&&&
"""
return (sigma_lc_bot * x_aver_bot_mass + (1 - x_aver_bot_mass) * sigma_hc_bot) | 5105e5592556cab14cb62ab61b4f242499b33e1d | 708,845 |
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files on the bottom would have been mapped to
"top.not_a_subpackage" instead of "top", proper -- had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
) | 6c497725e8a441f93f55084ef42489f97e35acf8 | 708,846 |
def vec_sum(a, b):
"""Compute the sum of two vector given in lists."""
return [va + vb for va, vb in zip(a, b)] | d85f55e22a60af66a85eb6c8cd180007351bf5d9 | 708,848 |
import numpy as np
def bolling(asset:list, samples:int=20, alpha:float=0, width:float=2):
"""
According to MATLAB:
BOLLING(ASSET,SAMPLES,ALPHA,WIDTH) plots Bollinger bands for given ASSET
data vector. SAMPLES specifies the number of samples to use in computing
the moving average. ALPHA is an optional input that specifies the exponent
used to compute the element weights of the moving average. The default
ALPHA is 0 (simple moving average). WIDTH is an optional input that
specifies the number of standard deviations to include in the envelope. It
is a multiplicative factor specifying how tight the bounds should be made
around the simple moving average. The default WIDTH is 2. This calling
syntax plots the data only and does not return the data.
Note: The standard deviations are normalized by (N-1) where N is the
sequence length.
"""
# build weight vector
    # main computation
r = len(asset)
i = np.arange(1,samples+1) ** alpha
w = i / sum(i)
# build moving average vectors with for loops
a = np.zeros((r-samples, 1))
b = a.copy()
for i in range(samples, r):
a[i-samples] = np.sum( asset[i-samples:i] * w )
b[i-samples] = width * np.sum(np.std( asset[i-samples:i] * w ))
return a,a+b,a-b | 90c06bb45f30713a05cde865e23c0f9e317b0887 | 708,849 |
def pairwise_comparison(column1,var1,column2,var2):
"""
    Arg: column1 --> column name 1 in df
         column2 --> column name 2 in df
         var1 ---> 3 cases:
             abbreviation for column 1 (seeking the better model)
             abbreviation for column 1 (seeking the lesser value in column1 compared to column2)
             empty string (seeking the greater value in column2 compared to column1)
         var2 ---> 3 cases:
             abbreviation for column 2 (seeking the better model)
             abbreviation for column 2 (seeking the greater value in column2 compared to column1)
             empty string (seeking the lesser value in column1 compared to column2)
    Return: the abbreviation of whichever column holds the smaller/greater value, depending on use
    Function: list comprehension; zips the two columns together,
        used to find which dataset has the smaller/greater value, row by row
"""
return [var1 if r < c else var2 for r,c in zip(column1,column2)] | a67ef991dcad4816e9b15c1f352079ce14d7d823 | 708,850 |
def dequote(s):
    """Will remove single or double quotes from the start and end of a string
    and return the result."""
    quotechars = "'\""
    while len(s) and s[0] in quotechars:
        s = s[1:]
    while len(s) and s[-1] in quotechars:
        s = s[0:-1]
    return s | e6377f9992ef8119726b788c02af9df32c722c28 | 708,851 |
import numpy
def uccsd_singlet_paramsize(n_qubits, n_electrons):
"""Determine number of independent amplitudes for singlet UCCSD
Args:
n_qubits(int): Number of qubits/spin-orbitals in the system
n_electrons(int): Number of electrons in the reference state
Returns:
Number of independent parameters for singlet UCCSD with a single
reference.
"""
n_occupied = int(numpy.ceil(n_electrons / 2.))
    n_virtual = n_qubits // 2 - n_occupied  # integer division so the count stays an int
n_single_amplitudes = n_occupied * n_virtual
n_double_amplitudes = n_single_amplitudes ** 2
return (n_single_amplitudes + n_double_amplitudes) | 408c9158c76fba5d118cc6603e08260db30cc3df | 708,852 |
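A worked example (illustrative addition): for n_qubits=4 and n_electrons=2, n_occupied = ceil(2/2) = 1 and n_virtual = 4//2 - 1 = 1, so there is 1 single amplitude plus 1**2 = 1 double amplitude:

print(uccsd_singlet_paramsize(4, 2))  # 2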
def setup_i2c_sensor(sensor_class, sensor_name, i2c_bus, errors):
""" Initialise one of the I2C connected sensors, returning None on error."""
if i2c_bus is None:
        # This sensor uses the multiplexer and there was an error initialising that.
return None
try:
sensor = sensor_class(i2c_bus)
except Exception as err:
# Error initialising this sensor, try to continue without it.
msg = "Error initialising {}:\n{}".format(sensor_name, err)
print(msg)
errors += (msg + "\n")
return None
else:
print("{} initialised".format(sensor_name))
return sensor | 62633c09f6e78b43fca625df8fbd0d20d866735b | 708,853 |
def argparse_textwrap_unwrap_first_paragraph(doc):
"""Join by single spaces all the leading lines up to the first empty line"""
index = (doc + "\n\n").index("\n\n")
lines = doc[:index].splitlines()
chars = " ".join(_.strip() for _ in lines)
alt_doc = chars + doc[index:]
return alt_doc | f7068c4b463c63d100980b743f8ed2d69b149a97 | 708,854 |
import argparse
def is_positive_integer(value: str) -> int:
"""
Helper function for argparse.
Raise an exception if value is not a positive integer.
"""
int_value = int(value)
if int_value <= 0:
raise argparse.ArgumentTypeError("{} is not a positive integer".format(value))
return int_value | 4f5e2fd4e95e92b69bb8073daafbf8989037657b | 708,855 |
from typing import List
from typing import Tuple
def merge_overlapped_spans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
"""
Merge overlapped spans
Parameters
----------
spans: input list of spans
Returns
-------
merged spans
"""
span_sets = list()
for span in spans:
span_set = set(range(span[0], span[1]))
if not span_sets:
span_sets.append(span_set)
elif span_sets[-1] & span_set:
if span_set - span_sets[-1]:
span_sets[-1] = span_sets[-1] | span_set
else:
span_sets.append(span_set)
merged_spans = list()
for span_set in span_sets:
merged_spans.append((min(span_set), max(span_set) + 1))
return merged_spans | 0ea7f2a730274f7a98f25b8df22754ec79e8fce7 | 708,856 |
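A usage sketch (illustrative addition): overlapping spans merge, disjoint spans stay separate:

print(merge_overlapped_spans([(0, 3), (2, 5), (7, 9)]))  # [(0, 5), (7, 9)]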
def _ww3_ounp_contents(run_date, run_type):
"""
    :param run_date: :py:class:`arrow.Arrow`
    :param str run_type:
:return: ww3_ounp.inp file contents
:rtype: str
"""
start_date = (
run_date.format("YYYYMMDD")
if run_type == "nowcast"
else run_date.shift(days=+1).format("YYYYMMDD")
)
run_hours = {"nowcast": 24, "forecast": 36, "forecast2": 30}
output_interval = 600 # seconds
output_count = int(run_hours[run_type] * 60 * 60 / output_interval)
contents = f"""$ WAVEWATCH III NETCDF Point output post-processing
$
$ First output time (YYYYMMDD HHmmss), output increment (s), number of output times
{start_date} 000000 {output_interval} {output_count}
$
$ All points defined in ww3_shel.inp
-1
$ File prefix
$ number of characters in date
$ netCDF4 output
$ one file, max number of points to process
$ tables of mean parameters
$ WW3 global attributes
$ time,station dimension order
$ WMO standard output
SoG_ww3_points_
8
4
T 100
2
0
T
6
"""
return contents | fda73d25c39c5bd46d791e6745fa72a0285edcdc | 708,857 |
def _get_value(key, entry):
"""
    Look up `key` in `entry`, normalising "true"/"false" strings to booleans.
    :param key: dictionary key to look up
    :param entry: dictionary to search in
    :return: True/False for boolean-like string values, the raw value otherwise, or None if the key is absent
if key in entry:
if entry[key] and str(entry[key]).lower() == "true":
return True
elif entry[key] and str(entry[key]).lower() == "false":
return False
return entry[key]
return None | 93820395e91323939c8fbee653b6eabb6fbfd8eb | 708,858 |
import subprocess
def run_command(cmd):
"""Run command, return output as string."""
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
return output.decode("ascii") | 0996d76ab1980c2fad262f8fd227ac50772849d2 | 708,859 |
import uuid
def get_mac():
    """This function returns the MAC address of the first NIC of the PC,
    as 12 hex digits without colons"""
    # joining on ':' and then stripping ':' was a no-op, so format directly
    return '%012x' % uuid.getnode() | 95ebb381c71741e26b6713638a7770e452d009f2 | 708,861 |
def get_feature_names_small(ionnumber):
"""
feature names for the fixed peptide length feature vectors
"""
names = []
names += ["pmz", "peplen"]
for c in ["bas", "heli", "hydro", "pI"]:
names.append("sum_" + c)
for c in ["mz", "bas", "heli", "hydro", "pI"]:
names.append("mean_" + c)
names.append("mz_ion")
names.append("mz_ion_other")
names.append("mean_mz_ion")
names.append("mean_mz_ion_other")
for c in ["bas", "heli", "hydro", "pI"]:
names.append("{}_ion".format(c))
names.append("{}_ion_other".format(c))
names.append("endK")
names.append("endR")
names.append("nextP")
names.append("nextK")
names.append("nextR")
for c in ["bas", "heli", "hydro", "pI", "mz"]:
for pos in ["i", "i-1", "i+1", "i+2"]:
names.append("loc_" + pos + "_" + c)
names.append("charge")
for i in range(ionnumber):
for c in ["bas", "heli", "hydro", "pI", "mz"]:
names.append("P_%i_%s"%(i, c))
names.append("P_%i_P"%i)
names.append("P_%i_K"%i)
names.append("P_%i_R"%i)
return names | fbffe98af0cffb05a6b11e06786c5a7076449146 | 708,862 |
def vectorproduct(a,b):
"""
Return vector cross product of input vectors a and b
"""
a1, a2, a3 = a
b1, b2, b3 = b
return [a2*b3 - a3*b2, a3*b1 - a1*b3, a1*b2 - a2*b1] | adb9e7c4b5150ab6231f2b852d6860cd0e5060a0 | 708,863 |
def int_to_ip(ip):
"""
Convert a 32-bit integer into IPv4 string format
:param ip: 32-bit integer
:return: IPv4 string equivalent to ip
"""
if type(ip) is str:
return ip
return '.'.join([str((ip >> i) & 0xff) for i in [24, 16, 8, 0]]) | 8ceb8b9912f10ba49b45510f4470b9cc34bf7a2f | 708,864 |
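A usage sketch (illustrative addition):

print(int_to_ip(3232235777))  # '192.168.1.1'
print(int_to_ip('10.0.0.1'))  # strings pass through unchanged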
import sys
def getExecutable():
"""
Returns the executable this session is running from.
:rtype: str
"""
return sys.executable | 87d842239f898554582900d879501b2a3457df8e | 708,865 |
import os
def get_file_with_suffix(d, suffix):
"""
    Return the name (without the suffix) of the first file in directory `d`
    that ends with `suffix`, or None if no such file exists.
"""
items = os.listdir(d)
for file in items:
if file.endswith(suffix):
return file.split(suffix)[0]
return None | 1191868a4fd9b925f6f8ce713aba16d9b66f1a9a | 708,866 |
def PolyMod(f, g):
"""
return f (mod g)
"""
return f % g | 53b47e993e35c09e59e209b68a8a7656edf6b4ce | 708,867 |
def both_block_num_missing(record):
"""
Returns true of both block numbers are missing
:param record: dict - The record being evaluated
:return: bool
"""
rpt_block_num = record.get("rpt_block_num", "") or ""
rpt_sec_block_num = record.get("rpt_sec_block_num", "") or ""
# True, if neither address has a block number.
if rpt_block_num == "" and rpt_sec_block_num == "":
return True
return False | 63e2fdaef78dbc3c6560a4b015ed022583f30d05 | 708,868 |
def encode_mode(mode):
    """
    JJ2 uses numbers instead of strings, but strings are easier for humans to work with
    CANNOT use spaces here, as list server scripts may not expect spaces in modes in port 10057 response
    :param mode: Mode number as sent by the client
    :return: Mode string
    """
    modes = {
        16: "headhunters",
        15: "domination",
        14: "tlrs",
        13: "flagrun",
        12: "deathctf",
        11: "jailbreak",
        10: "teambattle",
        9: "pestilence",
        8: "xlrs",
        7: "lrs",
        6: "roasttag",
        5: "coop",
        4: "race",
        3: "ctf",
        2: "treasure",
        1: "battle",
    }
    return modes.get(mode, "unknown") | db83c419acb299284b7b5338331efc95051115a5 | 708,870 |
def get_domains_by_name(kw, c, adgroup=False):
"""Searches for domains by a text fragment that matches the domain name (not the tld)"""
domains = []
existing = set()
if adgroup:
existing = set(c['adgroups'].find_one({'name': adgroup}, {'sites':1})['sites'])
for domain in c['domains'].find({}, {'domain': 1, 'alexa.rank.latest':1}):
try:
rank = domain['alexa']['rank']['latest']
domain_name = domain['domain'].replace('#', '.')
if kw in domain_name:
if domain_name not in existing:
domains.append({
"domain": domain_name,
"rank": rank
})
except KeyError:
pass
return domains[:50] | 6ecaf4ccf1ecac806fb621c02282bf46929459ce | 708,872 |
def live_ferc_db(request):
"""Use the live FERC DB or make a temporary one."""
return request.config.getoption("--live_ferc_db") | f0540c8e3383572c5f686ea89011d9e1ab0bf208 | 708,873 |
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
def extract_url_dataset(dataset,msg_flag=False):
"""
Given a dataset identifier this function extracts the URL for the page where the actual raw data resides.
"""
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
dataset_dict={}
baseurl='https://archive.ics.uci.edu/ml/datasets/'
url = baseurl+dataset
try:
uh= urllib.request.urlopen(url, context=ctx)
html =uh.read().decode()
soup=BeautifulSoup(html,'html5lib')
if soup.text.find("does not appear to exist")!=-1:
if msg_flag:
print(f"{dataset} not found")
return None
else:
for link in soup.find_all('a'):
if link.attrs['href'].find('machine-learning-databases')!=-1:
a=link.attrs['href']
a=a[2:]
dataurl="https://archive.ics.uci.edu/ml/"+str(a)
#print(dataurl)
return str(dataurl)
#dataurls.append(dataurl)
# After finishing the for-loop with a-tags, the first dataurl is added to the dictionary
#dataset_dict['dataurl']=dataurls[0]
    except Exception:
#print("Could not retrieve")
return None | 06ec2dd6bea4c264fe9590663a28c7c92eed6a49 | 708,874 |
import json
def format_parameters(parameters: str) -> str:
"""
    Receives a key:value string and returns a dictionary string ({"key":"value"}). In the process strips trailing and
leading spaces.
:param parameters: The key-value-list
:return:
"""
if not parameters:
return '{}'
pairs = []
for item in parameters.split(','):
try:
key, value = item.split(':')
except ValueError:
raise ValueError(f"Got unexpected parameters {item}.")
pairs.append((key.strip(), value.strip()))
return json.dumps(dict(pairs)) | 95f115b9000d495db776798700cfdf35209cfbd4 | 708,875 |
def Format_Phone(Phone):
"""Function to Format a Phone Number into (999)-999 9999)"""
Phone = str(Phone)
return f"({Phone[0:3]}) {Phone[3:6]}-{Phone[6:10]}" | 8e46c35bca9d302d86909457c84785ad5d366c15 | 708,876 |
def register_and_login_test_user(c):
"""
Helper function that makes an HTTP request to register a test user
Parameters
----------
c : object
Test client object
Returns
-------
str
Access JWT in order to use in subsequent tests
"""
c.post(
"/api/auth/register",
json={
"username": "test",
"password": "secret",
"first_name": "tim",
"last_name": "apple",
"email": "tim@test.com",
"birthday": "1990-01-01",
},
)
setup_resp = c.post(
"/api/auth/login", json={"username": "test", "password": "secret"}
)
setup_resp_json = setup_resp.get_json()
setup_access_token = setup_resp_json["access_token"]
return setup_access_token | b76f7f6afa9af453246ae304b1b0504bd68b8919 | 708,877 |
def basic_pyxll_function_22(x, y, z):
"""if z return x, else return y"""
if z:
# we're returning an integer, but the signature
# says we're returning a float.
# PyXLL will convert the integer to a float for us.
return x
return y | 851b5eef683b0456a0f5bce7f3850698693b067e | 708,878 |
def get_data_file_args(args, language):
"""
For a interface, return the language-specific set of data file arguments
Args:
args (dict): Dictionary of data file arguments for an interface
language (str): Language of the testbench
Returns:
dict: Language-specific data file arguments
"""
if language in args:
return args[language]
return args["generic"] | 11e30b92316bad9a46b87bd9188f97d5e8860377 | 708,879 |
import re
def check_string_capitalised(string):
""" Check to see if a string is in all CAPITAL letters. Boolean. """
return bool(re.match('^[A-Z_]+$', string)) | f496d79fafae4c89c3686856b42113c4818f7ed8 | 708,880 |
import textwrap
def ped_file_parent_missing(fake_fs):
"""Return fake file system with PED file"""
content = textwrap.dedent(
"""
# comment
FAM II-1\tI-1\t0\t1\t2
FAM I-1 0\t0\t1\t1
"""
).strip()
fake_fs.fs.create_file("/test.ped", create_missing_dirs=True, contents=content)
return fake_fs | 9df19ab925984236aa581c9b8843591f05d3b7b4 | 708,881 |
import random
import requests
def GettingAyah():
"""The code used to get an Ayah from the Quran every fixed time"""
while True:
ayah = random.randint(1, 6237)
url = f'http://api.alquran.cloud/v1/ayah/{ayah}'
res = requests.get(url)
if len(res.json()['data']['text']) <= 280:
return res.json()['data']['text'] | 5739cbd3554b97f01eefef7f59a4087e5497e3e7 | 708,882 |
import itertools
import six
def partition(predicate, iterable):
"""Use `predicate` to partition entries into falsy and truthy ones.
Recipe taken from the official documentation.
https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
t1, t2 = itertools.tee(iterable)
return (
six.moves.filterfalse(predicate, t1),
six.moves.filter(predicate, t2),
) | 5777203d9d34a9ffddc565129d8dda3ec91efc8e | 708,883 |
def check_struc(d1, d2,
                errors=None, level='wf'):
    """Recursively check struct of dictionary 2 to that of dict 1
    Arguments
    ---------
    d1 : dict
        Dictionary with desired structure
    d2 : dict
        Dictionary with structure to check
    errors : list of str, optional
        Missing values in d2. Defaults to a fresh empty list.
    level : str, optional
        Level of search. Initial value is 'wf' (wind farm) for top-level
        dictionary.
    Returns
    -------
    errors : list of str
        Missing values in d2.
    """
    if errors is None:  # avoid a mutable default, which would leak errors across calls
        errors = []
    for k1, v1 in d1.items():  # loop through keys and values in first dict
        if k1 not in d2.keys():  # if key doesn't exist in d2
            errors.append('{} not in dictionary'.format('.'.join([level, k1])))
        elif isinstance(v1, dict):  # otherwise, if item is a dict, recurse
            errors = check_struc(v1, d2[k1],
                                 errors=errors,  # pass in accumulated errors
                                 level='.'.join([level, k1]))  # change level
    return errors | aa835e7bbd6274e73d0b3d45d1ec4d617af0a167 | 708,884 |
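A usage sketch (illustrative addition, with hypothetical wind-farm keys): dotted paths present in the reference dict but missing from the dict under test are reported:

ref = {'turbine': {'hub_height': 90.0, 'rotor_diameter': 120.0}}
test = {'turbine': {'hub_height': 90.0}}
print(check_struc(ref, test))  # ['wf.turbine.rotor_diameter not in dictionary']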
import argparse
from datetime import datetime
from typing import OrderedDict
def get_args_string(args: argparse.Namespace) -> str:
"""
Creates a string summarising the argparse arguments.
:param args: parser.parse_args()
:return: String of the arguments of the argparse namespace.
"""
string = ''
if hasattr(args, 'experiment_name'):
string += f'{args.experiment_name} ({datetime.now()})\n'
max_length = max([len(k) for k, _ in vars(args).items()])
new_dict = OrderedDict((k, v) for k, v in sorted(
vars(args).items(), key=lambda x: x[0]
))
for key, value in new_dict.items():
string += ' ' * (max_length - len(key)) + key + ': ' + str(value) + '\n'
return string | f1f4de0821d04a21df046bc0dc526b2f9f1135f6 | 708,885 |
def inverseTranslateTaps(lowerTaps, pos):
"""Method to translate tap integer in range
[-lower_taps, raise_taps] to range [0, lowerTaps + raiseTaps]
"""
    # Hmmm... is it this simple?
posOut = pos + lowerTaps
return posOut | 827bdfc51b3581b7b893ff8ff02dd5846ff6cd0f | 708,886 |
def GMLstring2points(pointstring):
"""Convert list of points in string to a list of points. Works for 3D points."""
listPoints = []
#-- List of coordinates
coords = pointstring.split()
#-- Store the coordinate tuple
assert(len(coords) % 3 == 0)
for i in range(0, len(coords), 3):
listPoints.append([float(coords[i]), float(coords[i+1]), float(coords[i+2])])
return listPoints | e755d344d163bdcdb114d0c9d614a1bbd40be29f | 708,887 |
def ToOrdinal(value):
"""
Convert a numerical value into an ordinal number.
@param value: the number to be converted
"""
if value % 100//10 != 1:
if value % 10 == 1:
ordval = '{}st'.format(value)
elif value % 10 == 2:
ordval = '{}nd'.format(value)
elif value % 10 == 3:
ordval = '{}rd'.format(value)
else:
ordval = '{}th'.format(value)
else:
ordval = '{}th'.format(value)
return ordval | 774bac5fd22714ba3eb4c9dd2b16f4236e2f5e8c | 708,888 |
def recall_at(target, scores, k):
"""Calculation for recall at k."""
if target in scores[:k]:
return 1.0
else:
return 0.0 | 0c3f70be3fb4cfde16d5e39b256e565f180d1655 | 708,889 |
import math
def dcg(r, k=None):
"""The Burges et al. (2005) version of DCG.
This is what everyone uses (except trec_eval)
:param r: results
:param k: cut-off
:return: sum (2^ y_i - 1) / log (i +2)
"""
result = sum([(pow(2, rel) - 1) / math.log(rank + 2, 2) for rank, rel in enumerate(r[:k])])
return result | d93c500ba55411807570c8efebdeaa49ce7fe288 | 708,890 |
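A worked example (illustrative addition): graded relevances [3, 2, 0] cut at k=2 give DCG = (2**3 - 1)/log2(2) + (2**2 - 1)/log2(3) = 7 + 3/1.585:

print(round(dcg([3, 2, 0], k=2), 3))  # 8.893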
async def detect_objects(computervision_client, image_url):
"""Detect objects from a remote image"""
    detect_objects_results = \
        computervision_client.detect_objects(image_url)
    return detect_objects_results.objects | 9adb2a3b2c08f99187159ad6a22047bbf3d4c30a | 708,891 |
def fcard(card):
"""Create format string for card display"""
return f"{card[0]} {card[1]}" | ca3866011b418bf35e1b076afd7134926a9382f9 | 708,892 |
def resetChapterProgress(chapterProgressDict, chapter, initRepeatLevel):
"""This method resets chapter progress and sets initial level for repeat routine.
Args:
chapterProgressDict (dict): Chapter progress data.
chapter (int): Number of the chapter.
initRepeatLevel (int): Initial level for repeat routine.
    Returns:
        dict: The reset chapter progress dictionary with the initial repeat level set.
"""
chapterProgressDict[chapter]["status"] = "Not started"
chapterProgressDict[chapter]["progress"]["current"] = 0
chapterProgressDict[chapter]["correct"] = {"correct":0, "subtotal":0, "rate":''}
chapterProgressDict[chapter]["repeatLevel"] = initRepeatLevel
return chapterProgressDict | e02d6e97f556a2c080c2bc273255aacedf7bb086 | 708,893 |
def on_coordinator(f):
"""A decorator that, when applied to a function, makes a spawn of that function happen on the coordinator."""
f.on_coordinator = True
return f | d9c97c47255d165c67a4eb67a18cc85c3c9b9386 | 708,894 |
def format_gro_coord(resid, resname, aname, seqno, xyz):
""" Print a line in accordance with .gro file format, with six decimal points of precision
Nine decimal points of precision are necessary to get forces below 1e-3 kJ/mol/nm.
@param[in] resid The number of the residue that the atom belongs to
@param[in] resname The name of the residue that the atom belongs to
@param[in] aname The name of the atom
@param[in] seqno The sequential number of the atom
@param[in] xyz A 3-element array containing x, y, z coordinates of that atom
"""
return "%5i%-5s%5s%5i % 13.9f % 13.9f % 13.9f" % (resid,resname,aname,seqno,xyz[0],xyz[1],xyz[2]) | ceeeeeafe4f7484fa17ee4ebd79363209c8f7391 | 708,895 |
def fix_legacy_database_uri(uri):
""" Fixes legacy Database uris, like postgres:// which is provided by Heroku but no longer supported by SqlAlchemy """
if uri.startswith('postgres://'):
uri = uri.replace('postgres://', 'postgresql://', 1)
return uri | aa3aa20110b7575abf77534d08a35dccb04b731d | 708,896 |
def get_url_names():
""" Получение ссылок на контент
Returns:
Здесь - список файлов формата *.str
"""
files = ['srts/Iron Man02x26.srt', 'srts/Iron1and8.srt']
return files | 4ee8fdd5ab9efc04eda4bfe1205e073064030520 | 708,897 |
from datetime import datetime
def unix_utc_now() -> int:
"""
Return the number of seconds passed from January 1, 1970 UTC.
"""
delta = datetime.utcnow() - datetime(1970, 1, 1)
return int(delta.total_seconds()) | b9768b60cf6f49a7cccedd88482d7a2b21cf05a2 | 708,898 |
import torch
def tensor_from_var_2d_list(target, padding=0.0, max_len=None, requires_grad=True):
"""Convert a variable 2 level nested list to a tensor.
e.g. target = [[1, 2, 3], [4, 5, 6, 7, 8]]
"""
max_len_calc = max([len(batch) for batch in target])
    if max_len is None:
max_len = max_len_calc
if max_len_calc > max_len:
print("Maximum length exceeded: {}>{}".format(max_len_calc, max_len))
target = [batch[:max_len] for batch in target]
padded = [batch + (max_len - len(batch)) * [padding] for batch in target]
return torch.tensor(padded, requires_grad=requires_grad) | 2aa5fcc5b2be683c64026126da55330937cd8242 | 708,899 |
def invert_dict(d):
"""
Invert dictionary by switching keys and values.
Parameters
----------
d : dict
python dictionary
Returns
-------
dict
Inverted python dictionary
"""
return dict((v, k) for k, v in d.items()) | c70bfdb5ffa96cf07b1a4627aa484e3d5d0f4fea | 708,900 |
def segregate(str):
"""3.1 Basic code point segregation"""
base = bytearray()
extended = set()
for c in str:
if ord(c) < 128:
base.append(ord(c))
else:
extended.add(c)
extended = sorted(extended)
return bytes(base), extended | e274393735bf4f1d51a75c73351848cbfdd5f81f | 708,901 |
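A usage sketch (illustrative addition) of the Punycode (RFC 3492) segregation step: ASCII code points stay in the base string, the rest is sorted:

print(segregate("bücher"))  # (b'bcher', ['ü'])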
def format_last_online(last_online):
"""
Return the upper limit in seconds that a profile may have been
online. If last_online is an int, return that int. Otherwise if
last_online is a str, convert the string into an int.
Returns
----------
int
"""
if isinstance(last_online, str):
if last_online.lower() in ('day', 'today'):
last_online_int = 86400 # 3600 * 24
elif last_online.lower() == 'week':
last_online_int = 604800 # 3600 * 24 * 7
elif last_online.lower() == 'month':
last_online_int = 2678400 # 3600 * 24 * 31
elif last_online.lower() == 'year':
last_online_int = 31536000 # 3600 * 365
elif last_online.lower() == 'decade':
last_online_int = 315360000 # 3600 * 365 * 10
else: # Defaults any other strings to last hour
last_online_int = 3600
else:
last_online_int = last_online
return last_online_int | 335ed9a37062964b785c75246c9f23f678b4a90e | 708,902 |
def option_to_text(option):
"""Converts, for example, 'no_override' to 'no override'."""
return option.replace('_', ' ') | 4b7febe0c4500aa23c368f83bbb18902057dc378 | 708,903 |
def _cons8_89(m8, L88, L89, d_gap, k, Cp, h_gap):
"""dz constrant for edge gap sc touching edge, corner gap sc"""
term1 = 2 * h_gap * L88 / m8 / Cp # conv to inner/outer ducts
term2 = k * d_gap / m8 / Cp / L88 # cond to adj bypass edge
term3 = k * d_gap / m8 / Cp / L89 # cond to adj bypass corner
return 1 / (term1 + term2 + term3) | b6e8b6331be394e9a10659029143997b097fae86 | 708,904 |