content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64, 0–3.66M) |
---|---|---|
def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None):
"""
Creates a pymatgen PhononBandStructure from a band.yaml file.
The labels will be extracted from the dictionary, if present.
If the 'eigenvector' key is found the eigendisplacements will be
calculated according to the formula:
\\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v
and added to the object.
Args:
bands_path: path to the band.yaml file
has_nac: True if the data have been obtained with the
--nac option. Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
"""
return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict) | 40b135c09c829348d0693574b745ad5c114ec037 | 3,658,491 |
def LinterPath():
"""Ascertain the dxl.exe path from this .py files path because sublime.packages_path is unavailable at startup."""
ThisPath = abspath(dirname(__file__))
if isfile(ThisPath):
# We are in a .sublime-package file in the 'Installed Package' folder
return abspath(join(ThisPath, '..', '..', 'Packages', 'DXL', 'Lint', 'dxl.exe'))
else:
# We are in a subfolder of the 'Packages' folder
return abspath(join(ThisPath, '..', 'DXL', 'Lint', 'dxl.exe')) | 5e7e8e5761b69ba3383b10af92f4d9a442bab69e | 3,658,493 |
import base64
def encrypt_and_encode(data, key):
""" Encrypts and encodes `data` using `key' """
return base64.urlsafe_b64encode(aes_encrypt(data, key)) | b318e5e17c7a5b8f74036157ce547a3c0d68129c | 3,658,494 |
def _get_undelimited_identifier(identifier):
"""
Removes delimiters from the identifier if it is delimited.
"""
if pd.notna(identifier):
identifier = str(identifier)
if _is_delimited_identifier(identifier):
return identifier[1:-1]
return identifier | cd31b5cd2aea8f6c115fa117da30960f5f6dd8d8 | 3,658,495 |
def has_product_been_used(uuid):
"""Check if this product has been used previously."""
existing = existing_processed_products()
if not isinstance(existing, pd.DataFrame):
return False
has_uuid = not existing.query("uuid == @uuid").empty
return has_uuid | f361c5177c0152179300d6c1356139ba8f7face9 | 3,658,497 |
def _FilterMemberData(
mr, owner_ids, committer_ids, contributor_ids, indirect_member_ids,
project):
"""Return a filtered list of members that the user can view.
In most projects, everyone can view the entire member list. But,
some projects are configured to only allow project owners to see
all members. In those projects, committers and contributors do not
see any contributors. Regardless of how the project is configured
or the role that the user plays in the current project, we include
any indirect members through user groups that the user has access
to view.
Args:
mr: Commonly used info parsed from the HTTP request.
owner_ids: list of user IDs for project owners.
committer_ids: list of user IDs for project committers.
contributor_ids: list of user IDs for project contributors.
indirect_member_ids: list of user IDs for users who have
an indirect role in the project via a user group, and that the
logged in user is allowed to see.
project: the Project we're interested in.
Returns:
A list of owners, committers and visible indirect members if the user is not
signed in. If the project is set to display contributors to non-owners or
the signed in user has necessary permissions then additionally a list of
contributors.
"""
visible_members_ids = set()
# Everyone can view owners and committers
visible_members_ids.update(owner_ids)
visible_members_ids.update(committer_ids)
# The list of indirect members is already limited to ones that the user
# is allowed to see according to user group settings.
visible_members_ids.update(indirect_member_ids)
# If the user is allowed to view the list of contributors, add those too.
if permissions.CanViewContributorList(mr, project):
visible_members_ids.update(contributor_ids)
return sorted(visible_members_ids) | be258b2d0559423a70fb5722734144f6a946b70e | 3,658,498 |
def escape_name(name):
"""Escape sensor and request names to be valid Python identifiers."""
return name.replace('.', '_').replace('-', '_') | 856b8fe709e216e027f5ab085dcab91604c93c2e | 3,658,499 |
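A minimal usage sketch for the function above (nothing beyond the snippet itself is assumed): dots and dashes both become underscores, yielding a valid Python identifier.

assert escape_name('rfe0.device-status') == 'rfe0_device_status'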
def show_user_following(user_id):
"""Show list of people this user is following."""
user = User.query.get_or_404(user_id)
return render_template('users/following.html', user=user) | ef1d7d13e9c00c352f27cdde17d215d40ff47b76 | 3,658,500 |
def logout():
"""
This API revokes all the tokens including access and refresh tokens that belong to the user.
"""
current_user = get_jwt_identity()
logout_user(current_user.get('id'))
return jsonify(message="Token revoked."), 200 | d574135099dfaedcdb8d6bdef993d8f773898f63 | 3,658,501 |
def multiset_counter(mset):
"""
Return the sum of occurrences of elements present in a token ids multiset,
aka. the multiset cardinality.
"""
return sum(mset.values()) | 36885abd5bf666aa6c77a262a647c227e46d2e88 | 3,658,502 |
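A minimal usage sketch, assuming the multiset is any mapping from token id to occurrence count (here a collections.Counter):

from collections import Counter

mset = Counter([3, 3, 7, 9])        # token 3 appears twice
assert multiset_counter(mset) == 4  # total occurrences, i.e. multiset cardinality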
def get_v6_subnet(address):
"""derive subnet number for provided ipv6 address
Args:
address (str): ipv6 address in string with mask
Returns:
str: subnet zero == network address
"""
return IPv6(address).subnet_zero() | ed9158b2d2ff8a83dce1b079066ef372ffc623e5 | 3,658,503 |
import yaml
def load_scenario(file_name: str) -> Waypoint:
"""
Create an object Waypoint from a Scenario file
:param file_name:
:return:
"""
# read file
with open(f"{waypoint_directory_path}/{file_name}", "r") as scenario_file:
scenario_data = yaml.load(scenario_file, Loader=yaml.FullLoader)
waypoint = Waypoint()
waypoint.build_from_json(scenario_data)
return waypoint | db5e246141e014af4545468481739e9449d90a00 | 3,658,505 |
def parse_example(serialized_example):
"""Parse a serialized example proto."""
features = tf.io.parse_single_example(
serialized_example,
dict(
beam_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
image_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
question_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
context=tf.io.FixedLenFeature(shape=[], dtype=tf.string),
question=tf.io.FixedLenFeature(shape=[], dtype=tf.string)))
return features | 5c3a76bc121f02ce4484a3af87104f7739db1669 | 3,658,507 |
from typing import Optional
from typing import Tuple
from typing import Union
def _compute_bootstrap_quantiles_point_estimate_custom_bias_corrected_method(
metric_values: np.ndarray,
false_positive_rate: np.float64,
n_resamples: int,
random_seed: Optional[int] = None,
) -> Tuple[Number, Number]:
"""
An internal implementation of the "bootstrap" estimator method, returning a point estimate for a population
parameter of interest (lower and upper quantiles in this case). See
https://en.wikipedia.org/wiki/Bootstrapping_(statistics) for an introduction to "bootstrapping" in statistics.
The methods implemented here can be found in:
Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 124-130).
Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
This implementation is sub-par compared to the one available from the "SciPy" standard library
("https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html"), in that it does not handle
multi-dimensional statistics. "scipy.stats.bootstrap" is vectorized, thus having the ability to accept a
multi-dimensional statistic function and process all dimensions.
Unfortunately, as of March 4th, 2022, the SciPy implementation has two issues: 1) it only returns a confidence
interval and not a point estimate for the population parameter of interest, which is what we require for our use
cases. 2) It can not handle multi-dimensional statistics and correct for bias simultaneously. You must either use
one feature or the other.
This implementation could only be replaced by "scipy.stats.bootstrap" if Great Expectations drops support for
Python 3.6, thereby enabling us to use a more up-to-date version of the "scipy" Python package (the currently used
version does not have "bootstrap"). Also, as discussed above, two contributions would need to be made to the SciPy
package to enable 1) bias correction for multi-dimensional statistics and 2) a return value of a point estimate for
the population parameter of interest (lower and upper quantiles in this case).
Additional future direction could include developing enhancements to bootstrapped estimator based on theory
presented in "http://dido.econ.yale.edu/~dwka/pub/p1001.pdf":
@article{Andrews2000a,
added-at = {2008-04-25T10:38:44.000+0200},
author = {Andrews, Donald W. K. and Buchinsky, Moshe},
biburl = {https://www.bibsonomy.org/bibtex/28e2f0a58cdb95e39659921f989a17bdd/smicha},
day = 01,
interhash = {778746398daa9ba63bdd95391f1efd37},
intrahash = {8e2f0a58cdb95e39659921f989a17bdd},
journal = {Econometrica},
keywords = {imported},
month = Jan,
note = {doi: 10.1111/1468-0262.00092},
number = 1,
pages = {23--51},
timestamp = {2008-04-25T10:38:52.000+0200},
title = {A Three-step Method for Choosing the Number of Bootstrap Repetitions},
url = {http://www.blackwell-synergy.com/doi/abs/10.1111/1468-0262.00092},
volume = 68,
year = 2000
}
The article outlines a three-step minimax procedure that relies on the Central Limit Theorem (C.L.T.) along with the
bootstrap sampling technique (see https://en.wikipedia.org/wiki/Bootstrapping_(statistics) for background) for
computing the stopping criterion, expressed as the optimal number of bootstrap samples, needed to achieve a maximum
probability that the value of the statistic of interest will be minimally deviating from its actual (ideal) value.
"""
lower_quantile_pct: float = false_positive_rate / 2
upper_quantile_pct: float = 1.0 - false_positive_rate / 2
sample_lower_quantile: np.ndarray = np.quantile(metric_values, q=lower_quantile_pct)
sample_upper_quantile: np.ndarray = np.quantile(metric_values, q=upper_quantile_pct)
if random_seed:
random_state: np.random.Generator = np.random.Generator(
np.random.PCG64(random_seed)
)
bootstraps: np.ndarray = random_state.choice(
metric_values, size=(n_resamples, metric_values.size)
)
else:
bootstraps: np.ndarray = np.random.choice(
metric_values, size=(n_resamples, metric_values.size)
)
bootstrap_lower_quantiles: Union[np.ndarray, Number] = np.quantile(
bootstraps,
q=lower_quantile_pct,
axis=1,
)
bootstrap_lower_quantile_point_estimate: float = np.mean(bootstrap_lower_quantiles)
bootstrap_lower_quantile_standard_error: float = np.std(bootstrap_lower_quantiles)
bootstrap_lower_quantile_bias: float = (
bootstrap_lower_quantile_point_estimate - sample_lower_quantile
)
# Bias / Standard Error > 0.25 is a rule of thumb for when to apply bias correction.
# See:
# Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 128).
# Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
lower_quantile_bias_corrected_point_estimate: Number
if bootstrap_lower_quantile_bias / bootstrap_lower_quantile_standard_error <= 0.25:
lower_quantile_bias_corrected_point_estimate = (
bootstrap_lower_quantile_point_estimate
)
else:
lower_quantile_bias_corrected_point_estimate = (
bootstrap_lower_quantile_point_estimate - bootstrap_lower_quantile_bias
)
bootstrap_upper_quantiles: Union[np.ndarray, Number] = np.quantile(
bootstraps,
q=upper_quantile_pct,
axis=1,
)
bootstrap_upper_quantile_point_estimate: np.ndarray = np.mean(
bootstrap_upper_quantiles
)
bootstrap_upper_quantile_standard_error: np.ndarray = np.std(
bootstrap_upper_quantiles
)
bootstrap_upper_quantile_bias: float = (
bootstrap_upper_quantile_point_estimate - sample_upper_quantile
)
# Bias / Standard Error > 0.25 is a rule of thumb for when to apply bias correction.
# See:
# Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 128).
# Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
upper_quantile_bias_corrected_point_estimate: Number
if bootstrap_upper_quantile_bias / bootstrap_upper_quantile_standard_error <= 0.25:
upper_quantile_bias_corrected_point_estimate = (
bootstrap_upper_quantile_point_estimate
)
else:
upper_quantile_bias_corrected_point_estimate = (
bootstrap_upper_quantile_point_estimate - bootstrap_upper_quantile_bias
)
return (
lower_quantile_bias_corrected_point_estimate,
upper_quantile_bias_corrected_point_estimate,
) | 50494c15ded4b9cd7c54f4262f7d9b2137d2bd4f | 3,658,508 |
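A minimal usage sketch under stated assumptions: numpy is imported as np, and a Number alias (e.g. from the numbers module) is available for the annotations; neither import is shown in the record above.

import numpy as np

metric_values = np.random.default_rng(0).normal(loc=100.0, scale=5.0, size=200)
lower, upper = _compute_bootstrap_quantiles_point_estimate_custom_bias_corrected_method(
    metric_values=metric_values,
    false_positive_rate=np.float64(0.05),
    n_resamples=1000,
    random_seed=42,
)
# lower/upper are bias-corrected point estimates of the 2.5% and 97.5% quantiles.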
def bytes_to_b64(data: bytes, remove_padding=True) -> str:
"""
byte string to URL safe Base64 string, with option to remove B64 LSB padding
:param data: byte string
:param remove_padding: remove b64 padding (``=`` char). True by default
:return: base64 unicode string
"""
text = urlsafe_b64encode(data).decode()
if remove_padding:
return text.replace('=', '')
else:
return text | 8ca495948eb72ab6bb8bf95ae62b4d370a04cbe3 | 3,658,509 |
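A minimal usage sketch, assuming the urlsafe_b64encode import that the function above relies on (not shown in the record):

from base64 import urlsafe_b64encode

assert bytes_to_b64(b'hello') == 'aGVsbG8'                          # '=' padding stripped
assert bytes_to_b64(b'hello', remove_padding=False) == 'aGVsbG8='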
import re
def _case_sensitive_replace(string, old, new):
"""
Replace text, retaining exact case.
Args:
string (str): String in which to perform replacement.
old (str): Word or substring to replace.
new (str): What to replace `old` with.
Returns:
repl_string (str): Version of string where instances of
`old` has been replaced with `new`, retaining case.
"""
def repl(match):
current = match.group()
# treat multi-word sentences word-by-word
old_words = current.split(" ")
new_words = new.split(" ")
out = []
for old_word, new_word in zip(old_words, new_words):
result = []
all_upper = True
for ind, chr in enumerate(old_word):
if ind >= len(new_word):
break
if chr.isupper():
result.append(new_word[ind].upper())
else:
result.append(new_word[ind].lower())
all_upper = False
# special cases - keep remaining case
if new_word.lower() in CASE_WORD_EXCEPTIONS:
result.append(new_word[ind + 1 :])
# append any remaining characters from new
elif all_upper:
result.append(new_word[ind + 1 :].upper())
else:
result.append(new_word[ind + 1 :].lower())
out.append("".join(result))
# if we have more new words than old ones, just add them verbatim
out.extend([new_word for ind, new_word in enumerate(new_words) if ind >= len(old_words)])
return " ".join(out)
if string is None:
return None
regex = re.compile(re.escape(old), re.I)
return regex.sub(repl, string) | bf20636146b42f67ec3ad0b4a00a80a9d6cb9ce6 | 3,658,510 |
from typing import Dict
from typing import Any
def deserialize_transaction_from_etherscan(
data: Dict[str, Any],
internal: bool,
) -> EthereumTransaction:
"""Reads dict data of a transaction from etherscan and deserializes it
Can throw DeserializationError if something is wrong
"""
try:
# internal tx list contains no gasprice
gas_price = FVal(-1) if internal else FVal(data['gasPrice'])
tx_hash = read_hash(data, 'hash')
input_data = read_hash(data, 'input')
timestamp = deserialize_timestamp(data['timeStamp'])
block_number = read_integer(data, 'blockNumber')
nonce = -1 if internal else read_integer(data, 'nonce')
return EthereumTransaction(
timestamp=timestamp,
block_number=block_number,
tx_hash=tx_hash,
from_address=data['from'],
to_address=data['to'],
value=deserialize_fval(data['value']),
gas=deserialize_fval(data['gas']),
gas_price=gas_price,
gas_used=deserialize_fval(data['gasUsed']),
input_data=input_data,
nonce=nonce,
)
except KeyError as e:
raise DeserializationError(f'Etherscan ethereum transaction missing expected key {str(e)}') | c4184cea626b229a7c0de8848f95fb29ebdec6d3 | 3,658,511 |
def ar(p):
"""
Given a quaternion p, return the 4x4 matrix A_R(p)
which when multiplied with a column vector q gives
the quaternion product qp.
Parameters
----------
p : numpy.ndarray
4 elements, represents quaternion
Returns
-------
numpy.ndarray
4x4 matrix describing action of quaternion multiplication
"""
return np.array([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], p[3], -p[2]],
[p[2], -p[3], p[0], p[1]],
[p[3], p[2], -p[1], p[0]]]) | 0ee437eec9b62c902466de4e77b541fc3cb7a64a | 3,658,512 |
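A small sanity check, assuming numpy is imported as np: for the identity quaternion p = (1, 0, 0, 0), right-multiplication by p is a no-op, so A_R(p) must be the 4x4 identity matrix.

import numpy as np

assert np.allclose(ar(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(4))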
def preprocess_list(lst,tokenizer,max_len=None):
"""
function preprocesses a list of values returning tokenized sequences
Args:
lst: list of strings to be processed
tokenizer: a tokenizer object
max_len: if we need to ensure the same length of strings, we can provide an integer here
Returns:
a numpy array with tokenized sequences. Each sequence in a separate row
"""
return_seq = tokenizer.texts_to_sequences(lst)
seq = np.array(
pad_sequences(return_seq, maxlen=max_len,padding="post"),
dtype="float32"
)
return seq | c1ba91ae54b9869ac6dd80664b479a47c34388e2 | 3,658,513 |
def to_dataframe(ticks: list) -> pd.DataFrame:
"""Convert list to Series compatible with the library."""
df = pd.DataFrame(ticks)
df['time'] = pd.to_datetime(df['time'], unit='s')
df.set_index("time", inplace=True)
return df | 6f312e9e8f401d21cebc1404a24ba37738a2819d | 3,658,515 |
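A minimal usage sketch, assuming pandas is imported as pd and each tick is a dict with a Unix-seconds 'time' field:

import pandas as pd

ticks = [{'time': 1609459200, 'close': 100.5},
         {'time': 1609459260, 'close': 101.0}]
df = to_dataframe(ticks)
# df is indexed by a DatetimeIndex starting at 2021-01-01 00:00:00, with a 'close' column.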
def keysCode(code):
"""
Download user's keys from an email link
GET: If the code is valid, download user keys
Else abort with a 404
"""
#Check if code exists and for the correct purpose. Else abort
if (hl.checkCode(code,"Keys")):
user = hl.getUserFromCode(code)
else:
abort(404)
#Mark code as used
hl.flagCode(code)
#return
return getKeys(user["Name"]) | 533f17cd4a2fb999f6ffd135a1e647f48266a04c | 3,658,516 |
def lengthenFEN(fen):
"""Lengthen FEN to 71-character form (ex. '3p2Q' becomes '111p11Q')"""
return fen.replace('8','11111111').replace('7','1111111') \
.replace('6','111111').replace('5','11111') \
.replace('4','1111').replace('3','111').replace('2','11') | f49cdf8ad6919fbaaad1abc83e24b1a33a3ed3f8 | 3,658,517 |
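A minimal usage sketch (nothing beyond the snippet itself is assumed): each digit is expanded into a run of '1's, so every square of a rank is written out explicitly.

assert lengthenFEN('3p2Q') == '111p11Q'
assert lengthenFEN('8/8') == '11111111/11111111'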
def keyboard_mapping(display):
"""Generates a mapping from *keysyms* to *key codes* and required
modifier shift states.
:param Xlib.display.Display display: The display for which to retrieve the
keyboard mapping.
:return: the keyboard mapping
"""
mapping = {}
shift_mask = 1 << 0
group_mask = alt_gr_mask(display)
# Iterate over all keysym lists in the keyboard mapping
min_keycode = display.display.info.min_keycode
keycode_count = display.display.info.max_keycode - min_keycode + 1
for index, keysyms in enumerate(display.get_keyboard_mapping(
min_keycode, keycode_count)):
key_code = index + min_keycode
# Normalise the keysym list to yield a tuple containing the two groups
normalized = keysym_normalize(keysyms)
if not normalized:
continue
# Iterate over the groups to extract the shift and modifier state
for groups, group in zip(normalized, (False, True)):
for keysym, shift in zip(groups, (False, True)):
if not keysym:
continue
shift_state = 0 \
| (shift_mask if shift else 0) \
| (group_mask if group else 0)
# Prefer already known lesser shift states
if keysym in mapping and mapping[keysym][1] < shift_state:
continue
mapping[keysym] = (key_code, shift_state)
return mapping | c9d2e0caea532ab66b00744d17ff6274f42844e9 | 3,658,518 |
def convertPeaks(peaksfile, bedfile):
"""Convert a MACS output file `peaksfile' to a BED file. Also works if the input is already in BED format."""
regnum = 1
with open(bedfile, "w") as out:
with open(peaksfile, "r") as f:
tot = 0
chrom = ""
start = 0
end = 0
c = CSVreader(f)
for line in c:
if len(line) == 0 or line[0][0] == '#' or line[0] == 'chr':
continue
bchrom = line[0]
if "_" in bchrom: # get rid of weird chromosomes
continue
# New chromosome?
if bchrom != chrom:
if end > 0:
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
regnum += 1
chrom = bchrom
start = 0
end = 0
# Unwanted chromosome?
if bchrom == 'chrM' or "random" in bchrom:
start = 0
end = 0
continue
# Good line
bstart = int(line[1])
bend = int(line[2])
if start <= bstart <= end:
# Extend current region
end = bend
else:
# Start new region
tot += (end - start)
if end > 0:
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
regnum += 1
start = bstart
end = bend
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
tot += (end - start)
return (tot, regnum) | 6c9af82254efb98d35c9182ebe53c4f3802cdb7f | 3,658,519 |
def create_freud_box(box: np.ndarray, is_2D=True) -> Box:
"""Convert an array of box values to a box for use with freud functions
The freud package has a special type for the description of the simulation cell, the
Box class. This is a function to take an array of lengths and tilts to simplify the
creation of the Box class for use with freud.
"""
# pylint: disable=invalid-name
Lx, Ly, Lz = box[:3]
xy = xz = yz = 0
if len(box) == 6:
xy, xz, yz = box[3:6]
if is_2D:
return Box(Lx=Lx, Ly=Ly, xy=xy, is2D=is_2D)
return Box(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz)
# pylint: enable=invalid-name | 94ea3769d8138907bf29a30fc8afcf6b990264f1 | 3,658,520 |
def hrrr_snotel_pixel(file, x_pixel_index, y_pixel_index):
"""
Read GRIB file surface values, remove unsed dimensions, and
set the time dimension.
Required to be able to concatenate all GRIB file to a time series
"""
hrrr_file = xr.open_dataset(
file.as_posix(),
engine='cfgrib',
backend_kwargs={
'errors': 'ignore',
'indexpath': '',
'filter_by_keys': {
'level': 0,
'typeOfLevel': 'surface',
}
},
).isel(x=[x_pixel_index], y=[y_pixel_index])
del hrrr_file.coords['valid_time']
del hrrr_file.coords['surface']
del hrrr_file.coords['step']
return hrrr_file.expand_dims(time=[hrrr_file.time.values]) | 22a66317d672874b9ababfd0a7daa364d06ea87e | 3,658,521 |
def convert_to_diact_uttseg_interactive_tag(previous, tag):
"""Returns the dialogue act but with the fact it is keeping or
taking the turn.
"""
if not previous:
previous = ""
trp_tag = uttseg_pattern(tag)
return trp_tag.format(convert_to_diact_interactive_tag(previous, tag)) | 06950132147d374002495d92e456fe52a6d9546f | 3,658,522 |
from mne.chpi import _get_hpi_initial_fit
def compute_good_coils(raw, t_step=0.01, t_window=0.2, dist_limit=0.005,
prefix='', gof_limit=0.98, verbose=None):
"""Compute time-varying coil distances."""
try:
# import here so older MNE versions fall back to the legacy code path below
from mne.chpi import compute_chpi_amplitudes, compute_chpi_locs
except ImportError:
chpi_locs = _old_chpi_locs(raw, t_step, t_window, prefix)
else:
chpi_amps = compute_chpi_amplitudes(
raw, t_step_min=t_step, t_window=t_window)
chpi_locs = compute_chpi_locs(raw.info, chpi_amps)
hpi_dig_head_rrs = _get_hpi_initial_fit(raw.info, verbose=False)
hpi_coil_dists = cdist(hpi_dig_head_rrs, hpi_dig_head_rrs)
counts = np.empty(len(chpi_locs['times']), int)
for ii, (t, coil_dev_rrs, gof) in enumerate(zip(
chpi_locs['times'], chpi_locs['rrs'], chpi_locs['gofs'])):
these_dists = cdist(coil_dev_rrs, coil_dev_rrs)
these_dists = np.abs(hpi_coil_dists - these_dists)
# there is probably a better algorithm for finding the bad ones...
use_mask = gof >= gof_limit
good = False
while not good:
d = these_dists[use_mask][:, use_mask]
d_bad = d > dist_limit
good = not d_bad.any()
if not good:
if use_mask.sum() == 2:
use_mask[:] = False
break # failure
# exclude next worst point
badness = (d * d_bad).sum(axis=0)
exclude_coils = np.where(use_mask)[0][np.argmax(badness)]
use_mask[exclude_coils] = False
counts[ii] = use_mask.sum()
t = chpi_locs['times'] - raw.first_samp / raw.info['sfreq']
return t, counts, len(hpi_dig_head_rrs), chpi_locs | 060658dfae82768a5dff31a365f1c200d6f5d223 | 3,658,523 |
def prep_request(items, local_id="id"):
"""
Process the incoming items into an AMR request.
<map name="cite_1">
<val name="{id_type}">{value}</val>
</map>
"""
map_items = ET.Element("map")
for idx, pub in enumerate(items):
if pub is None:
continue
local_id_value = pub.get(local_id) or pub.get(local_id.upper())
if local_id_value is None:
local_id_value = str(idx)
this_item = ET.Element("map", name=local_id_value)
for k, v in pub.items():
if v is None:
continue
de = ET.Element("val", name=k.lower())
de.text = v.strip()
this_item.append(de)
map_items.append(this_item)
request_items = ET.tostring(map_items)
xml = id_request_template.format(user=client.USER, password=client.PASSWORD, items=request_items)
return xml | 46f1f7a94ffccc4eec2192fe100664c3d9e2d829 | 3,658,524 |
from averages_module import VariableType
from lrc_module import potential_lrc, pressure_lrc
def calc_variables ( ):
"""Calculates all variables of interest.
They are collected and returned as a list, for use in the main program.
"""
# In this example we simulate using the cut (but not shifted) potential
# but we only report results which have had the long-range corrections applied
# The value of the cut-and-shifted potential is not used, in this example
# Preliminary calculations (n,r,total are taken from the calling program)
vol = box**3 # Volume
rho = n / vol # Density
kin = 1.5 * n * p * temperature # Average kinetic energy for NP-atom system
kin_q = kin - total_spr # Quantum estimator for kinetic energy
rad_g = rad_gyr ( r )
# Variables of interest, of class VariableType, containing three attributes:
# .val: the instantaneous value
# .nam: used for headings
# .method: indicating averaging method
# If not set below, .method adopts its default value of avg
# The .nam and some other attributes need only be defined once, at the start of the program,
# but for clarity and readability we assign all the values together below
# Acceptance ratio of atomic moves
r_r = VariableType ( nam = 'Atomic move ratio', val = r_ratio, instant = False )
# Acceptance ratio of centre-of-mass moves
c_r = VariableType ( nam = 'COM move ratio', val = c_ratio, instant = False )
# Internal energy per atom for full potential with LRC
# LRC plus cut (but not shifted) PE already divided by factor P
# plus KE estimator: total classical KE for NP-atom system MINUS total spring potential
# all divided by N
e_f = VariableType ( nam = 'E/N full', val = potential_lrc(rho,r_cut) + (kin_q+total.pot)/n )
# Kinetic energy per atom, just for interest's sake
k_q = VariableType ( nam = 'KE/N', val = kin_q/n )
# Pressure for full potential with LRC
# LRC plus ideal gas contribution plus total virial divided by V
kin_q = kin_q / 1.5 # Convert KE estimator to kinetic energy part of pressure
p_f = VariableType ( nam = 'P full', val = pressure_lrc(rho,r_cut) + (kin_q+total.vir)/vol )
# Quantum spring energy per atom, just for interest's sake
e_q = VariableType ( nam = 'Espring/N', val = total_spr/n )
# Quantum polymer radius of gyration, just for interest's sake
r_g = VariableType ( nam = 'Radius of gyration', val = rad_g )
# Collect together into a list for averaging
return [ r_r, c_r, e_f, p_f, e_q, k_q, r_g ] | 4d0c066ccf4da82955a60d22c0ec27efc975df6d | 3,658,525 |
def findDocument_MergeFields(document):
"""this function creates a new docx document based on
a template with Merge fields and a JSON content"""
the_document = MailMerge(document)
all_fields = the_document.get_merge_fields()
res = {element:'' for element in all_fields}
return res | 9822f40e5f57bbc72f9292da9bd2a1c134776c2f | 3,658,527 |
def load_mushroom(data_home=None, return_dataset=False):
"""
Loads the mushroom multivariate dataset that is well suited to binary
classification tasks. The dataset contains 8123 instances with 3
categorical attributes and a discrete target.
The Yellowbrick datasets are hosted online and when requested, the dataset
is downloaded to your local computer for use. Note that if the dataset
hasn't been downloaded before, an Internet connection is required. However,
if the data is cached locally, no data will be downloaded. Yellowbrick
checks the known signature of the dataset with the data downloaded to
ensure the download completes successfully.
Datasets are stored alongside the code, but the location can be specified
with the ``data_home`` parameter or the $YELLOWBRICK_DATA envvar.
Parameters
----------
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
return_dataset : bool, default=False
Return the raw dataset object instead of X and y numpy arrays to
get access to alternative targets, extra features, content and meta.
Returns
-------
X : array-like with shape (n_instances, n_features) if return_dataset=False
A pandas DataFrame or numpy array describing the instance features.
y : array-like with shape (n_instances,) if return_dataset=False
A pandas Series or numpy array describing the target vector.
dataset : Dataset instance if return_dataset=True
The Yellowbrick Dataset object provides an interface to accessing the
data in a variety of formats as well as associated metadata and content.
"""
return _load_dataset('mushroom', data_home, return_dataset) | e300a1cade8532d18ebea1f5175d9c3001112855 | 3,658,528 |
def get_current_project(user_id):
"""Return from database user current project"""
try:
current = CurrentProject.objects.get(user_id=user_id)
except CurrentProject.DoesNotExist:
return None
keystone = KeystoneNoRequest()
return keystone.project_get(current.project) | dc8b1cf44ccd4c51bf58615657520007f2eca5db | 3,658,529 |
def get_random_successful_answer(intent: str) -> str:
"""
Get a random successful answer for this intent
* `intent`: name-parameter of the yml-section with which the successful answers were imported
**Returns:** None if no successful answers are known for this intent,
otherwise a random element of the successful answers for this intent
"""
return random_or_none(get_successful_answer_list(intent)) | e8106adff5f5a45c5b5e0ff12130d828fa2f4a55 | 3,658,530 |
from typing import Any
def formatter(
source: str,
language: str,
css_class: str,
options: dict[str, Any],
md: Markdown,
classes: list[str] | None = None,
id_value: str = "",
attrs: dict[str, Any] | None = None,
**kwargs: Any,
) -> str:
"""Execute code and return HTML.
Parameters:
source: The code to execute.
language: The code language, like python or bash.
css_class: The CSS class to add to the HTML element.
options: The container for options.
md: The Markdown instance.
classes: Additional CSS classes.
id_value: An optional HTML id.
attrs: Additional HTML attributes.
**kwargs: Additional arguments passed to SuperFences default formatters.
Returns:
HTML contents.
"""
fmt = _formatters.get(language, lambda source, *args, **kwargs: source)
return fmt(source, md, **options) | f141732ff6bd5d3bd7cc1a83895b0e2c020bf8cf | 3,658,531 |
import requests
def get_balance_sheet(ticker, limit, key, period):
"""Get the Balance sheet."""
URL = 'https://financialmodelingprep.com/api/v3/balance-sheet-statement/'
try:
r = requests.get(
'{}{}?period={}&?limit={}&apikey={}'.format(URL,
ticker,
period,
limit,
key))
balanceSheet = pd.DataFrame.from_dict(r.json()).transpose()
balanceSheet.columns = balanceSheet.iloc[0]
return balanceSheet[1:]
except requests.exceptions.HTTPError as e:
# We want a 200 value
print('Requesting Balance sheet statement ERROR: ', str(e)) | ae31a9d97715e1bc8818f64df48c18c3a7c806a3 | 3,658,534 |
def softmax_loss(scores, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- scores: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dscores: Gradient of the loss with respect to x
"""
N, C = scores.shape
scores = scores - np.max(scores, 1, keepdims=True)
loss = np.sum(-1 * scores[np.arange(N), y]) + np.sum(np.log(np.sum(np.exp(scores), 1)))
loss /= N
scores_e = np.exp(scores)
dscores = scores_e / np.sum(scores_e, 1).reshape(N, 1)
dscores[np.arange(N), y] = dscores[np.arange(N), y] - 1
dscores /= N
return loss, dscores | 7cc0e4fc070ab0a8cdc32c75aec342dac34179ab | 3,658,535 |
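A minimal usage sketch, assuming numpy is imported as np:

import numpy as np

scores = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.3]])
y = np.array([0, 1])
loss, dscores = softmax_loss(scores, y)
# loss is the mean cross-entropy; dscores has the same shape as scores, and each
# row sums to zero (softmax probabilities minus the one-hot label, divided by N).
assert np.allclose(dscores.sum(axis=1), 0.0)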
def text_to_lines(path):
"""
Parse a text file into lines.
Parameters
----------
path : str
Fully specified path to text file
Returns
-------
list
Non-empty lines in the text file
"""
delimiter = None
with open(path, encoding='utf-8-sig', mode='r') as f:
text = f.read()
if delimiter is not None and delimiter not in text:
e = DelimiterError(
'The delimiter specified does not create multiple words. Please specify another delimiter.')
raise (e)
lines = [x.strip().split(delimiter) for x in text.splitlines() if x.strip() != '']
return lines | df723ee40a490c084301584bd9374445ef73a5ae | 3,658,537 |
def measure_hemijunctions_timelapse(ims_labels, ims_labels_hjs):
"""
Measure the hemijunction traits from a timelapse of a live-imaged epithelium.
Parameters
----------
ims_labels : 3D ndarray (t,y,x)
Each timepoint is a 2D array with labeled regions.
ims_labels_hjs : 3D ndarray (t,y,x)
Each timepoint is a 2D array with hemijunctions labeled such that each one
has the same label as its "sending cell". Each "interface" spans a cell-cell
junction and is composed of two hemijunctions.
Returns
-------
df_hjs : pandas DataFrame
Each row is a single hemijunction from a single time step.
"""
# Total number of frames
total_t = np.shape(ims_labels)[0]
dfs = []
for t in range(total_t):
print(f"Measuring hemijunctions for timepoint {t} out of {total_t - 1}")
df_tmp = measure_hemijunctions(ims_labels[t], ims_labels_hjs[t])
# Add a column for t_step
df_tmp["t_step"] = [t] * len(df_tmp.index)
dfs.append(df_tmp)
df_hjs = pd.concat(dfs, ignore_index=True)
return df_hjs | c26779cd310a849843b20c8fc02539f972965c1a | 3,658,538 |
def get_compare_tables_checks_tasks():
"""Get list of tasks that will compare tables checks between databases.
Args:
Returns:
list: list of tasks to be executed in a process pool. Each item is a dict instance with following strucutre:
{
'function' (function): the function to be executed.
'kwds': keyworded args to be passed to the function.
}
"""
return [{
'function': compare_tables_checks,
'kwds': {}
}] | 9c210b1ebf43bffa6e2e9db0c53ebab5ba76c6bf | 3,658,539 |
from typing import Union
from typing import Set
def label_pr_failures(pull: Union[PullRequest, ShortPullRequest]) -> Set[str]:
"""
Labels the given pull request to indicate which checks are failing.
:param pull:
:return: The new labels set for the pull request.
"""
pr_checks = get_checks_for_pr(pull)
failure_labels: Set[str] = set()
success_labels: Set[str] = set()
def determine_labels(from_, to):
for check in from_:
if _python_dev_re.match(check):
continue
if check in {"Flake8", "docs"}:
to.add(f"failure: {check.lower()}")
elif check.startswith("mypy"):
to.add("failure: mypy")
elif check.startswith("ubuntu"):
to.add("failure: Linux")
elif check.startswith("windows"):
to.add("failure: Windows")
determine_labels(pr_checks.failing, failure_labels)
determine_labels(pr_checks.successful, success_labels)
issue: Issue = pull.issue()
current_labels = {label.name for label in issue.labels()}
for label in success_labels:
if label in current_labels and label not in failure_labels:
issue.remove_label(label)
new_labels = current_labels - success_labels
new_labels.update(failure_labels)
if new_labels != current_labels:
issue.add_labels(*new_labels)
return new_labels | ad36f23aa9e3d695e0ddab5a165e5665fdccf91c | 3,658,540 |
def arrange_images(total_width, total_height, *images_positions):
"""Return a composited image based on the (image, pos) arguments."""
result = mel.lib.common.new_image(total_height, total_width)
for image, pos in images_positions:
mel.lib.common.copy_image_into_image(image, result, pos[1], pos[0])
return result | 49e167b9b6eb1a8e76c8e2d65bc3fa419d91a8a1 | 3,658,542 |
from typing import Tuple
import importlib
def import_core_utilities() -> Tuple[ModuleType, ModuleType, ModuleType]:
"""Dynamically imports and return Tracing, Logging, and Metrics modules"""
return (
importlib.import_module(TRACING_PACKAGE),
importlib.import_module(LOGGING_PACKAGE),
importlib.import_module(METRICS_PACKAGE),
) | d627c1405b08975aeb02839f2da9d363f385d8b5 | 3,658,543 |
def pancakeSort(self, A):
# ! This method actually finds the largest remaining number on each pass and flips it into its correct position
"""
:type A: List[int]
:rtype: List[int]
"""
bucket = sorted(A)
ans = []
for k in range(len(A),0,-1):
i = A.index(bucket.pop())+1
ans += [i, k]
A = A[i:k][::-1] + A[:i] + A[k:]
print(A)
return ans | 35d358c6631f5cc708232f67a3e55d685116dff8 | 3,658,544 |
def getOrc(orcName):
"""Get an orchestra stored in the user namespace.
One can store an orchestra in the user name space with the %%orc magic.
"""
ip = get_ipython()
return ip.user_ns["__orc"][orcName] | 7fed637d4ab653579b4ad78e1b047e236ca46377 | 3,658,545 |
def get_prompt_data_from_batse(grb: str, **kwargs: None) -> pd.DataFrame:
"""Get prompt emission data from BATSE. Creates a directory structure and saves the data.
Returns the data, though no further action needs to be taken by the user.
:param grb: Telephone number of GRB, e.g., 'GRB140903A' or '140903A' are valid inputs.
:type grb: str
:param kwargs: Placeholder to prevent TypeErrors.
:type kwargs: None
:return: The processed data.
:rtype: pandas.DataFrame
"""
getter = BATSEDataGetter(grb=grb)
return getter.get_data() | 1bd7848f455401be89466c88efd9e4d44b3b72e9 | 3,658,546 |
def angular_error(a, b):
"""Calculate angular error (via cosine similarity)."""
a = pitchyaw_to_vector(a) if a.shape[1] == 2 else a
b = pitchyaw_to_vector(b) if b.shape[1] == 2 else b
ab = np.sum(np.multiply(a, b), axis=1)
a_norm = np.linalg.norm(a, axis=1)
b_norm = np.linalg.norm(b, axis=1)
# Avoid zero-values (to avoid NaNs)
a_norm = np.clip(a_norm, a_min=1e-8, a_max=None)
b_norm = np.clip(b_norm, a_min=1e-8, a_max=None)
similarity = np.divide(ab, np.multiply(a_norm, b_norm))
similarity = np.clip(similarity, a_min=-1.+1e-8, a_max=1.-1e-8)
return np.degrees(np.arccos(similarity)) | 89f7a51fc95a55349fc79e58b8f644a1ee6bd8a0 | 3,658,547 |
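A minimal usage sketch, assuming numpy is imported as np; passing 3-D gaze vectors (shape (N, 3)) skips the pitch/yaw conversion, and orthogonal vectors give 90 degrees.

import numpy as np

a = np.array([[1.0, 0.0, 0.0]])
b = np.array([[0.0, 1.0, 0.0]])
assert np.allclose(angular_error(a, b), 90.0)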
def includeme(config):
"""
Get build Git repository directory and make it accessible
to all requests generated via Cornice
"""
# Make DB connection accessible as a request property
def _get_repos(request):
_settings = request.registry.settings
repo_dir = _settings['repo_basedir']
return repo_dir
config.add_request_method(_get_repos, 'repo_dir', reify=True) | f2d73eb01b616f79059f4001c7b3faad67f48cd2 | 3,658,548 |
from typing import Union
from pathlib import Path
def add_dot_csv(filename: Union[Path, str]) -> str:
"""Adds a .csv extension to filename."""
return add_extension(filename, '.csv') | b0e89ca231675048ddb65b11856179db140a15fb | 3,658,549 |
from typing import Dict
from typing import Any
def load_settings_from_file(filename: str) -> Dict[str, Any]:
"""Load amset configuration settings from a yaml file.
If the settings file does not contain a required parameter, the default
value will be added to the configuration.
An example file is given in *amset/examples/example_settings.yaml*.
Args:
filename: Path to settings file.
Returns:
The settings, with any missing values set according to the amset
defaults.
"""
logger.info("Loading settings from: {}".format(filename))
settings = loadfn(filename)
return validate_settings(settings) | 8f857ede65c455b51f030edc58577a87cc6159a6 | 3,658,550 |
def execute_query(query, *arguments):
"""Execute a query on the DB with given arguments."""
_db = labpals.model.get_db()
cursor = _db.execute(query, arguments)
rows = cursor.fetchall()
return rows | d1b7aff948ee37b223386af29bbe4a6d0939cde1 | 3,658,551 |
from typing import Dict
from typing import Any
import copy
def format_search_events_results(response: Dict[str, Any], limit: int) -> tuple:
"""
Format the output of the search events results command.
Args:
response (Dict[str,Any]): API response from FortiSIEM.
limit (int):Maximum number of results to retrieve.
Returns:
str: Formatted command output.
"""
outputs = []
events = dict_safe_get(response, ['queryResult', 'events', 'event'])
if isinstance(events, dict):
events = [events]
total_count = arg_to_number(dict_safe_get(response, ['queryResult', '@totalCount']))
total_pages = total_count // limit + (total_count % limit != 0) if total_count else 0
if events:
for event in events:
formatted_event = copy.deepcopy(event)
formatted_attributes = {}
attributes = dict_safe_get(event, ['attributes', 'attribute'])
formatted_event['receiveTime'] = FormatIso8601(arg_to_datetime(event['receiveTime']))
for attribute in attributes:
formatted_attributes[attribute['@name']] = attribute['#text']
formatted_event['attributes'] = formatted_attributes
outputs.append(formatted_event)
return outputs, total_pages | de6b12f2009c3a7dab8093bd5842455e2bd2c84a | 3,658,552 |
from datetime import datetime
def radec_obs_vec_mpc(inds, mpc_object_data):
"""Compute vector of observed ra,dec values for MPC tracking data.
Args:
inds (int array): line numbers of data in file
mpc_object_data (ndarray): MPC observation data for object
Returns:
rov (1xlen(inds) array): vector of ra/dec observed values
"""
rov = np.zeros((2*len(inds)))
for i in range(0,len(inds)):
indm1 = inds[i]-1
# extract observations data
timeobs = Time( datetime(mpc_object_data['yr'][indm1],
mpc_object_data['month'][indm1],
mpc_object_data['day'][indm1]) + timedelta(days=mpc_object_data['utc'][indm1]) )
obs_t_ra_dec = SkyCoord(mpc_object_data['radec'][indm1], unit=(uts.hourangle, uts.deg), obstime=timeobs)
rov[2*i-2], rov[2*i-1] = obs_t_ra_dec.ra.rad, obs_t_ra_dec.dec.rad
return rov | daa0a7bfc5a1532c4a63f4543f4ea5e3db099973 | 3,658,553 |
def mod(x, y) -> ProcessBuilder:
"""
Modulo
:param x: A number to be used as the dividend.
:param y: A number to be used as the divisor.
:return: The remainder after division.
"""
return _process('mod', x=x, y=y) | fb94d3a3e1dcd918d8405232ad11f00943895785 | 3,658,554 |
def get_list_of_encodings() -> list:
"""
Get a list of all implemented encodings.
! Adapt if new encoding is added !
:return: List of all possible encodings
"""
return ['raw', '012', 'onehot', '101'] | 6e0749eb45f85afe4e5c7414e4d23e67335ba2b5 | 3,658,556 |
def region_to_bin(chr_start_bin, bin_size, chr, start):
"""Translate genomic region to Cooler bin idx.
Parameters:
----------
chr_start_bin : dict
Dictionary translating chromosome id to bin start index
bin_size : int
Size of the bin
chr : str
Chromosome
start : int
Start of the genomic region
"""
return chr_start_bin[chr] + start // bin_size | f17b132048b0ceb4bbf2a87b77327d0d63b3fd64 | 3,658,557 |
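A minimal usage sketch (nothing beyond the snippet itself is assumed): with 100 kb bins and chr1 occupying the first 2,490 bins, a position 250,000 bp into chr2 lands two bins past the chr2 offset.

chr_start_bin = {'chr1': 0, 'chr2': 2490}
assert region_to_bin(chr_start_bin, 100_000, 'chr2', 250_000) == 2492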
def cvCalcProbDensity(*args):
"""
cvCalcProbDensity(CvHistogram hist1, CvHistogram hist2, CvHistogram dst_hist,
double scale=255)
"""
return _cv.cvCalcProbDensity(*args) | dc0ce1eb33a07466d29defe0b4112e46cabe1308 | 3,658,559 |
def get_filter_para(node_element):
"""Return paragraph containing the used filter description"""
para = nodes.paragraph()
filter_text = "Used filter:"
filter_text += " status(%s)" % " OR ".join(node_element["status"]) if len(
node_element["status"]) > 0 else ""
if len(node_element["status"]) > 0 and len(node_element["tags"]) > 0:
filter_text += " AND "
filter_text += " tags(%s)" % " OR ".join(node_element["tags"]) if len(
node_element["tags"]) > 0 else ""
if (len(node_element["status"]) > 0 or len(node_element["tags"]) > 0) and len(
node_element["types"]) > 0:
filter_text += " AND "
filter_text += " types(%s)" % " OR ".join(node_element["types"]) if len(
node_element["types"]) > 0 else ""
filter_node = nodes.emphasis(filter_text, filter_text)
para += filter_node
return para | 7b3ad6b0a9752a53bd16d9cee2a250f54f43def3 | 3,658,560 |
def mk_multi_line_figax(nrows, ncols, xlabel='time (s)', ylabel='signal (a.u.)'):
"""
Create the figure and axes for a
multipanel 2d-line plot
"""
# ncols and nrows get
# restricted via the plotting frontend
x_size = ncols * pltConfig['mXSize']
y_size = nrows * pltConfig['mYSize']
fig, axs = ppl.subplots(nrows, ncols, figsize=(x_size, y_size),
sharex=True, sharey=True, squeeze=False)
# Hide the right and top spines
# and remove all tick labels
for ax in axs.flatten():
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize=0)
# determine axis layout
y_left = axs[:, 0]
x_bottom = axs[-1, :]
# write tick and axis labels only on outer axes to save space
for ax in y_left:
ax.tick_params(labelsize=pltConfig['mTickSize'])
ax.set_ylabel(ylabel, fontsize=pltConfig['mLabelSize'])
for ax in x_bottom:
ax.tick_params(labelsize=pltConfig['mTickSize'])
ax.set_xlabel(xlabel, fontsize=pltConfig['mLabelSize'])
return fig, axs | c759b4111a8cb3015aa9896f5afd2f8831ad8665 | 3,658,561 |
def load_sizes(infile_path: str, header: bool=None):
"""
Load and parse a chromosome sizes file (tab-separated chromosome name and length).
Arguments:
(REQUIRED) infile_path: path to sizes file
(OPTIONAL) header: headers in size file (DEFAULT: None)
chr1 247249719
chr2 242951149
...
"""
return pd.read_csv(infile_path, sep="\t", header=None, index_col=0) | 0b1737bb905b57f719c8f2369d771794dd49666b | 3,658,562 |
import string
import pickle
def load_model(file_path: string):
"""
Deserializes and loads a previously saved model, so it can be reused later on again.
-----------------------------------------------------------------------------------
Parameters:
-----------------------------------------------------------------------------------
file_path: str
Path to a stored model from prior running save_model().
Returns:
-----------------------------------------------------------------------------------
fcm_model: List (ndarray, float)
The de-serialized model.
"""
fcm_model = pickle.load(open(file_path, 'rb'))
return fcm_model | 26278c46092dff6199a82b1425203af1883ba49d | 3,658,564 |
import numpy as np
def gfs_mos_forecast(stid, forecast_date):
"""
Do the data retrieval.
"""
# Generate a Forecast object
forecast = Forecast(stid, default_model_name, forecast_date)
forecast.daily.high = np.round(np.random.rand() * 100.)
forecast.daily.low = np.round(np.random.rand() * 100.)
forecast.daily.wind = np.round(np.random.rand() * 40.)
forecast.daily.rain = np.round(np.random.rand() * 3., 2)
# Create a dummy pd dataframe to test
forecast.timeseries.data['DateTime'] = [forecast_date, forecast_date +
timedelta(hours=3)]
forecast.timeseries.data['temperature'] = [56., 55.]
forecast.timeseries.data['dewpoint'] = [51., 51.]
return forecast | 8ba16fe350e5eef77f9eb960de4b447bcb420b5f | 3,658,565 |
def evaluate_accuracy_score(preprocessing, prediction_binary):
"""
Evaluates the accuracy score
:param preprocessing: prepared DataPreprocess instance
:param prediction_binary: boolean expression for the predicted classes
"""
accuracy = []
for j in range(len(DETECTION_CLASSES)):
acc = accuracy_score(preprocessing.target_classes[:, j], prediction_binary[:, j])
accuracy.append(acc)
return np.mean(accuracy) | 9ee9110f924a930d442d00d4c06a929ba7589e42 | 3,658,566 |
def test_domain_visualize(case, visu_case):
"""
test the domain visualization
"""
dom = pylbm.Domain(case)
views = dom.visualize(**visu_case)
return views.fig | a395aad44955eb0599e257ccfeb326cb08638fcd | 3,658,567 |
import torch
def create_supervised_evaluator(model, metrics,
device=None):
"""
Factory function for creating an evaluator for supervised models
Args:
model (`torch.nn.Module`): the model to train
metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics
device (str, optional): device type specification (default: None).
Applies to both model and batches.
Returns:
Engine: an evaluator engine with supervised inference function
"""
if device:
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
data, language, pids, camids = batch
batchsize = language.size(0)
wordclass_feed = np.zeros((batchsize, max_tokens), dtype='int64')
wordclass_feed[:,0] = wordlist_final.index('<S>')
outcaps = np.empty((batchsize, 0)).tolist()
data = data.to(device) if torch.cuda.device_count() >= 1 else data
# language = language.to(device) if torch.cuda.device_count() >= 1 else language
for j in range(max_tokens-1):
wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda()
features, wordact, _= model(data, wordclass)
wordact = wordact[:,:,:-1]
wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize*(max_tokens-1), -1)
wordprobs = F.softmax(wordact_t).cpu().data.numpy()
wordids = np.argmax(wordprobs, axis=1)
for k in range(batchsize):
word = wordlist_final[wordids[j+k*(max_tokens-1)]]
outcaps[k].append(word)
if(j < max_tokens-1):
wordclass_feed[k, j+1] = wordids[j+k*(max_tokens-1)]
for j in range(batchsize):
num_words = len(outcaps[j])
if 'EOS' in outcaps[j]:
num_words = outcaps[j].index('EOS')
outcap = ' '.join(outcaps[j][:num_words])
feat, _, _ = model(data, wordclass)
print (outcap)
return feat, pids, camids
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine | da5c39b8a8d841181fc63ae48db0c68f9bbfe278 | 3,658,568 |
def get_available_operations():
""" Return a dict of available operations """
return True, runtime.get_available_operations() | 9d0b744061c97cf10fb69ccfdbc403b8f337db3d | 3,658,569 |
def word_distance(word1, word2):
"""Computes the number of differences between two words.
word1, word2: strings
Returns: integer
"""
assert len(word1) == len(word2)
count = 0
for c1, c2 in zip(word1, word2):
if c1 != c2:
count += 1
return count | b3279744c628f3adc05a28d9ab7cc520744b540c | 3,658,570 |
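A minimal usage sketch (nothing beyond the snippet itself is assumed); the two words must have equal length, so this is a Hamming distance over characters.

assert word_distance('flour', 'floor') == 1
assert word_distance('abc', 'abc') == 0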
from typing import Union
from typing import Tuple
from typing import Any
def get_parent_child(root: dict,
path: str) -> Union[Tuple[Tuple[None, None],
Tuple[None, None]],
Tuple[Tuple[dict, None],
Tuple[Any, str]],
Tuple[Tuple[Any, str],
Tuple[Any, str]]]:
""" Get first and second level node
:param root: The root node.
:param path: The path to identify the leaf node.
:return: (
(
parent node: The first level node in the hierarchy of the path
parent path: The path based on the root node
)
(
child node: The second level node in the hierarchy of the path
child path: The path based on the parent node
)
)
"""
res = Ddict.search(root, path)
if res is None:
if '.' not in path:
return (None, None), (None, None)
else:
child = Ddict.get(root, path)
return (root, None), (child, path)
parent_name, parent_value, child_name = res
if child_name:
child_value = Ddict.get(parent_value, child_name)
return (parent_value, parent_name), (child_value, child_name)
else:
return (root, None), (parent_value, parent_name) | 3e33e32af6b3f67cf41397b6da399ec9ede5491e | 3,658,571 |
def get_data_loaders(personachat, tokenizer, args_num_candidates=1, args_personality_permutations=1, args_max_history=2):
""" Prepare the dataset for training and evaluation """
print("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if args_num_candidates > 0 and dataset_name == 'train':
num_candidates = min(args_num_candidates, num_candidates)
for dialog in dataset:
persona = dialog["personality"].copy()
for _ in range(args_personality_permutations):
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*args_max_history+1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(j == num_candidates-1)
instance, _ = build_input_from_segments(persona, history, candidate, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
persona = [persona[-1]] + persona[:-1] # permuted personalities
print("Pad inputs and convert to Tensor")
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids('<pad>'))
for input_name in MODEL_INPUTS:
tensor = dataset[input_name]
dataset[input_name] = np.array(tensor)
return datasets | 212e7bdcdd880b47c56b76fe2e33ce12c665c650 | 3,658,572 |
def unescape_strict(s):
"""
Re-implements html.unescape to use our own definition of `_charref`
"""
if '&' not in s:
return s
return _charref.sub(_replace_charref, s) | d2b9aace645af58dce1e5a5f5e5cf3be919b759b | 3,658,573 |
def CheckVPythonSpec(input_api, output_api, file_filter=None):
"""Validates any changed .vpython files with vpython verification tool.
Args:
input_api: Bag of input related interfaces.
output_api: Bag of output related interfaces.
file_filter: Custom function that takes a path (relative to client root) and
returns boolean, which is used to filter files for which to apply the
verification to. Defaults to any path ending with .vpython, which captures
both global .vpython and <script>.vpython files.
Returns:
A list of input_api.Command objects containing verification commands.
"""
file_filter = file_filter or (lambda f: f.LocalPath().endswith('.vpython'))
affected_files = input_api.AffectedTestableFiles(file_filter=file_filter)
affected_files = map(lambda f: f.AbsoluteLocalPath(), affected_files)
commands = []
for f in affected_files:
commands.append(input_api.Command(
'Verify %s' % f,
['vpython', '-vpython-spec', f, '-vpython-tool', 'verify'],
{'stderr': input_api.subprocess.STDOUT},
output_api.PresubmitError))
return commands | d6e888b5ce6fec4bbdb35452b3c0572702430c06 | 3,658,574 |
import types
from typing import Tuple
def test_infer_errs() -> None:
"""Test inference applied to functions."""
with f.Fun(MockServer()):
a = f.put(b"bla bla")
b = f.put(3)
with pytest.raises(TypeError):
f.py(lambda x, y, z: (x, y), a, a, b)
# should NOT raise
f.py(
lambda x, y, z: (x, y),
a,
a,
b,
out=[types.Encoding.blob, types.Encoding.blob],
)
def i1o2(x: bytes) -> Tuple[bytes, bytes]:
return x, x
def i2o1(x: bytes, y: bytes) -> bytes:
return x
with pytest.raises(TypeError):
out = f.morph(i1o2, a) # type:ignore # noqa:F841
with pytest.raises(TypeError):
out = f.reduce(i1o2, a) # type:ignore # noqa:F841
with pytest.raises(TypeError):
out = f.reduce(lambda x, y: x, a, b) # type:ignore # noqa:F841
# If we pass out= then the inference is skipped
out = f.morph(i1o2, a, out=types.Encoding.blob) # type:ignore # noqa:F841
out = f.reduce(i1o2, a, out=types.Encoding.blob) | 434e5b19f6ad15d6644224475ddd656184593c19 | 3,658,576 |
def decode_captions(captions, idx_to_word):
""" Decode text captions from index in vocabulary to words.
"""
if captions.ndim == 1:
T = captions.shape[0]
N = 1
else:
N, T = captions.shape
decoded = []
for i in range(N):
words = []
for t in range(T):
if captions.ndim == 1:
word = idx_to_word[captions[t]]
else:
word = idx_to_word[captions[i, t]]
if word == '<END>':
words.append('.')
break
if word != '<NULL>':
words.append(word)
decoded.append(' '.join(words))
return decoded | a56abe824b522418480c80611505dabd0a8af6cc | 3,658,577 |
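A minimal usage sketch, assuming numpy is imported as np and a small illustrative vocabulary; decoding stops at '<END>' and skips '<NULL>' padding.

import numpy as np

idx_to_word = {0: '<NULL>', 1: '<END>', 2: 'a', 3: 'cat'}
captions = np.array([[2, 3, 1, 0]])
assert decode_captions(captions, idx_to_word) == ['a cat .']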
def make_loc(caller):
"""
turn caller location into a string
"""
# return caller["file"] + ":" + caller["func"] + ":" + caller["line"]
return caller["file"] + ":" + str(caller["line"]) | e0db31ffd5c76636938bfe66184f9a2a6fbca496 | 3,658,579 |
def run_part2(file_content):
"""Implmentation for Part 2."""
numbers = (int(number) for number in file_content.split())
root = _build_tree(numbers)
return _node_value(root) | 47171de36eacabd438f1243bddd866af6187c763 | 3,658,581 |
def get_cap_selected_frame(cap, show_frame):
"""
Gets a specific frame from an OpenCV video capture object
"""
cap_set_frame(cap, show_frame)
ret, frame = cap.read()
if not ret:
return None
else:
return frame | 4a5a939368e09faea3094335f60e782a249616ce | 3,658,582 |
def rotate_coords_x(pos, angle):
""" Rotate a set of coordinates about the x-axis
:param pos: (n, 3) xyz coordinates to be rotated
:param angle: angle to rotate them by w.r.t origin
:type pos: numpy.ndarray
:type angle: float
:return: array of rotated coordinates
:rtype: numpy.ndarray
"""
xyz = np.copy(pos)
angle *= (np.pi / 180) # convert to radians
R = rotate_x(angle)
for i in range(np.shape(xyz)[0]):
xyz[i, :] = np.dot(R, xyz[i, :])
return xyz | af0a95302c44be54e78b88b8f9851bab29556900 | 3,658,583 |
import itertools
def q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):
"""
Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy
while following an epsilon-greedy policy
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
discount_factor: Lambda time discount factor.
alpha: TD learning rate.
epsilon: Chance to sample a random action. Float between 0 and 1.
Returns:
A tuple (Q, episode_lengths).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in range(num_episodes):
current_state = env.reset()
# keep track number of time-step per episode only for plotting
for t in itertools.count():
# choose the action based on epsilon greedy policy
action_probs = policy(current_state)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step(action)
# use the greedy action to evaluate Q, not the one we actually follow
greedy_next_action = Q[next_state].argmax()
# evaluate Q using estimated action value of (next_state, greedy_next_action)
td_target = reward + discount_factor * Q[next_state][greedy_next_action]
td_error = td_target - Q[current_state][action]
Q[current_state][action] += alpha * td_error
            # refresh the epsilon-greedy policy using the newly evaluated Q
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
# update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
if done:
break
else:
current_state = next_state
return Q, stats | 380c46f9a1c35424028cbf54d905b7b3df1181ec | 3,658,584 |
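# Hedged usage sketch for q_learning above: assumes a discrete Gym environment with the
# classic (pre-0.26) reset/step API, plus the companion `plotting` module and
# make_epsilon_greedy_policy helper that the function relies on.
import gym

env = gym.make("CliffWalking-v0")
Q, stats = q_learning(env, num_episodes=500)
print(stats.episode_rewards[-10:].mean())  # average return over the last 10 episodes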
import random
def find_rand_source_reg():
"""Find random source register based on readAfterWrite probability"""
prob=random.uniform(0,1)
while len(previousIntegerSources)>numberOfPreviousRegistersToConsider:
previousIntegerSources.popleft()
if prob<readAfterWrite and previousIntegerDestinations:
num=random.choice(previousIntegerDestinations)
else:
num=random.randint(1,31)
previousIntegerSources.append(num)
return num | 678223dc137a624b670834bc2fc84d6f5481d130 | 3,658,585 |
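# The module-level state read by find_rand_source_reg is not shown in this entry;
# the values below are assumptions chosen only to make the call runnable.
from collections import deque

previousIntegerSources = deque()
previousIntegerDestinations = []
numberOfPreviousRegistersToConsider = 4
readAfterWrite = 0.3  # probability of reusing a recently written destination register
print(find_rand_source_reg())  # an integer register index in [1, 31]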
def _get_qnode_class(device, interface, diff_method):
"""Returns the class for the specified QNode.
Args:
device (~.Device): a PennyLane-compatible device
interface (str): the interface that will be used for classical backpropagation
diff_method (str, None): the method of differentiation to use in the created QNode
Raises:
ValueError: if an unrecognized ``diff_method`` is provided
Returns:
~.BaseQNode: the QNode class object that is compatible with the provided device and
differentiation method
"""
# pylint: disable=too-many-return-statements,too-many-branches
model = device.capabilities().get("model", "qubit")
passthru_interface = device.capabilities().get("passthru_interface", None)
device_provides_jacobian = device.capabilities().get("provides_jacobian", False)
allows_passthru = passthru_interface is not None
if diff_method is None:
# QNode is not differentiable
return BaseQNode
if diff_method == "best":
if allows_passthru and interface == passthru_interface:
# hand off differentiation to the device without type conversion
return PassthruQNode
if device_provides_jacobian:
# hand off differentiation to the device
return DeviceJacobianQNode
if model in PARAMETER_SHIFT_QNODES:
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
if diff_method == "backprop":
if allows_passthru:
if interface != passthru_interface:
raise ValueError(
"Device {} only supports diff_method='backprop' when using the "
"{} interface.".format(device.short_name, passthru_interface)
)
return PassthruQNode
raise ValueError(
"The {} device does not support native computations with "
"autodifferentiation frameworks.".format(device.short_name)
)
if diff_method == "device":
if device_provides_jacobian:
return DeviceJacobianQNode
raise ValueError(
"The {} device does not provide a native method "
"for computing the jacobian.".format(device.short_name)
)
if diff_method == "parameter-shift":
if model in PARAMETER_SHIFT_QNODES:
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
raise ValueError(
"The parameter shift rule is not available for devices with model {}.".format(model)
)
if diff_method == "reversible":
# pylint: disable=protected-access
if not device.capabilities().get("reversible_diff", False):
raise ValueError(
"Reversible differentiation method not supported on {}".format(device.short_name)
)
return ReversibleQNode
if diff_method in ALLOWED_DIFF_METHODS:
# finite differences
return JacobianQNode
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
) | cb87fd664e37074fbad065e7c707554c1632a0d9 | 3,658,586 |
def evaluate_and_log_bleu(model, bleu_source, bleu_ref, vocab_file):
"""Calculate and record the BLEU score."""
subtokenizer = tokenizer.Subtokenizer(vocab_file)
uncased_score, cased_score = translate_and_compute_bleu(
model, subtokenizer, bleu_source, bleu_ref)
tf.compat.v1.logging.info("Bleu score (uncased): %s", uncased_score)
tf.compat.v1.logging.info("Bleu score (cased): %s", cased_score)
return uncased_score, cased_score | 5b7665851c69e0edfe526763a76582f10eb88bf0 | 3,658,587 |
def transform_call(red_node):
"""
Converts Python style function calls to VHDL style:
self.d(a) -> d(self, a)
If function owner is not exactly 'self' then 'type' is prepended.
self.next.moving_average.main(x) -> type.main(self.next.moving_average, x)
self.d(a) -> d(self, a)
self.next.d(a) -> d(self.next, a)
local.d() -> type.d(local)
self.local.d() -> type.d(self.local)
If return then:
b = self.a(arg) ->
variable pyha_ret_0: type;
a(self, arg, pyha_ret_0);
b := pyha_ret_0;
Handling call inside call is limited to depth 1.
"""
def find_line_node(red_obj):
line_node = red_obj
while True:
if type(line_node.next) == EndlNode:
break
if hasattr(line_node.parent, 'value') and type(line_node.parent.value) == LineProxyList:
if not (hasattr(line_node.parent, 'test') and (
line_node.parent.test == atom # if WE are the if condition, skip
or line_node.parent.test == atom.parent)): # if WE are the if condition (part of condition)
break
line_node = line_node.parent
return line_node
is_hack = False
# make sure each created variable is unique by appending this number and incrementing
tmp_var_count = 0
# loop over all atomtrailers, call is always a member of this
atomtrailers = red_node.find_all('atomtrailers')
for i, atom in enumerate(atomtrailers):
if is_hack: # when parsed out of order call
atom = atomtrailers[i - 1]
call = atom.call
is_hack = False
else:
call = atom.call # this actually points to the stuff between ()
if call is None: # this atomtrailer has no function call
continue
wat = call.call
if wat is not None: # one of the arguments is a call -> process it first (i expect it is next in the list)
call_index = wat.previous.index_on_parent
if call_index == 0: # input is something like x() -> len(), Sfix() ....
pass
else:
try:
atom = atomtrailers[i + 1]
call = atom.call
is_hack = True
except:
continue # no idea what is going on here...
if call is None: # this atomtrailer has no function call
continue
call_index = call.previous.index_on_parent
if call_index == 0: # input is something like x() -> len(), Sfix() ....
continue
# get the TARGET function object from datamodel
target_func_name = atom.copy()
del target_func_name[call_index + 1:]
try:
target_func_obj = super_getattr(convert_obj, str(target_func_name))
        except: # happened for: (self.conjugate(complex_in) * complex_in).real
continue
if not target_func_obj.calls:
# function is not simulated...
line_node = find_line_node(atom)
line_node.replace(f'# comment out because not called in simulation: {line_node.dumps()}')
continue
prefix = atom.copy()
del prefix[call_index:]
del atom[:call_index]
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
# this branch happens because of 'for transform'
tmp[0][0] = 'self_const'
call.insert(0, tmp)
else:
tmp[0] = 'self_const'
call.insert(0, tmp)
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
tmp[0][0] = 'self_next'
call.insert(0, tmp)
else:
tmp[0] = 'self_next'
call.insert(0, tmp)
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
tmp[0][0] = 'self'
call.insert(0, tmp)
else:
tmp[0] = 'self'
call.insert(0, tmp)
# get the SOURCE (where call is going on) function object from datamodel
def_parent = atom
while not isinstance(def_parent, DefNode):
def_parent = def_parent.parent
# def_parent = atom.parent_find('def')
source_func_name = f'self.{def_parent.name}'
source_func_obj = super_getattr(convert_obj, str(source_func_name))
# if call is not to local class function
# self.moving_average.main(x) -> MODULE_NAME.main(self.moving_average, x)
if str(prefix) != 'self':
var = super_getattr(convert_obj, str(prefix))
var = init_vhdl_type('-', var, var)
atom.insert(0, var._pyha_module_name())
if target_func_obj.get_output_types() is None:
continue # function is not returning stuff -> this is simple
else:
# add return variables to function locals, so that they will be converted to VHDL variables
ret_vars = []
for x in get_iterable(target_func_obj.get_output_types()):
name = f'pyha_ret_{tmp_var_count}'
ret_vars.append(name)
source_func_obj.add_local_type(name, x)
tmp_var_count += 1
# add return variable to arguments
call.append(name)
# call.value[-1].target = f'ret_{j}'
# need to add new source line before the CURRENT line..search for the node with linenodes
line_node = find_line_node(atom)
# add function call BEFORE the CURRENT line
if line_node != atom: # equality means that value is not assigned to anything
line_node.parent.insert(line_node.index_on_parent, atom.copy())
atom.replace(','.join(ret_vars)) | 21091d369d75f5f51065e2a2df95956816d8b968 | 3,658,588 |
import random
from math import log1p
def delta_next_time_to_send(G, u, v):
"""How long to wait before U should send a message to V under diffusion
spreading. Per the Bitcoin protocol, this depends on if we have an outgoing
connection or an incoming connection."""
is_outgoing = G[u][v][ORIGINATOR] == u
average_interval_seconds = 2 if is_outgoing else 5
delta = int(log1p(-random.random()) * average_interval_seconds * -1000000 + 0.5)
return delta if delta > 0 else 0 | 193e847c8dfe1bf4e23bb3ed0a749c36f83c9f61 | 3,658,589 |
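# Hedged sketch for delta_next_time_to_send above. ORIGINATOR is an edge-attribute key
# defined elsewhere in the module; here it is mimicked with a plain string.
import networkx as nx

ORIGINATOR = "originator"
G = nx.Graph()
G.add_edge("u", "v", originator="u")  # u opened the connection, so u -> v is outgoing
print(delta_next_time_to_send(G, "u", "v"))  # microseconds until u relays to v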
def processData(list_pc, imo):
"""
    This function processes the data returned by getData to build a single string,
    ready to be copied into the CSV, that contains all the rows for one vessel.
"""
str_pc = ''
for i in range(len(list_pc)):
if list_pc[i] == 'Arrival (UTC)':
tab = list_pc[i-1].split(',') # [Port, Country] (good) or [Port, Region, Country] (bad)
if len(tab) == 3:
tab = ['"' + tab[0] + ',' + tab[1].strip() + '"', tab[2]] # [Port+(Region), Country]
str_pc = str_pc + imo + ',' + tab[0] + ',' + tab[1] + ',"' + list_pc[i+1] + '","' + list_pc[i+3] + '","' + list_pc[i+5] + '"\n'
return str_pc | abb9d0a8d9f3f1ed35e4f991a3ac14e51621f104 | 3,658,590 |
def wrn(num_classes):
"""Constructs a wideres-28-10 model without dropout.
"""
return Wide_ResNet(28, 10, 0, num_classes) | bcf33fdaf7081389b2c4b2e8f172684531205315 | 3,658,591 |
from typing import Dict
from typing import Any
from typing import Optional
def run(
config: Dict[str, Any],
log_dir: str = "",
kernel_seed: int = 0,
kernel_random_state: Optional[np.random.RandomState] = None,
) -> Dict[str, Any]:
"""
Wrapper function that enables to run one simulation.
It does the following steps:
- instantiation of the kernel
- running of the simulation
- return the end_state object
Arguments:
config: configuration file for the specific simulation
log_dir: directory where log files are stored
kernel_seed: simulation seed
kernel_random_state: simulation random state
"""
coloredlogs.install(
level=config["stdout_log_level"],
fmt="[%(process)d] %(levelname)s %(name)s %(message)s",
)
kernel = Kernel(
random_state=kernel_random_state or np.random.RandomState(seed=kernel_seed),
log_dir=log_dir,
**subdict(
config,
[
"start_time",
"stop_time",
"agents",
"agent_latency_model",
"default_computation_delay",
"custom_properties",
],
),
)
sim_start_time = dt.datetime.now()
logger.info(f"Simulation Start Time: {sim_start_time}")
end_state = kernel.run()
sim_end_time = dt.datetime.now()
logger.info(f"Simulation End Time: {sim_end_time}")
logger.info(f"Time taken to run simulation: {sim_end_time - sim_start_time}")
return end_state | c8bb7931c9b74064d3488bfa92fb1376b9f9f474 | 3,658,592 |
def python_to_pydict(script_contents, namespace=None):
"""Load a Python script with dictionaries into a dictionary."""
if namespace is None:
namespace = {}
    exec(script_contents, {}, namespace)
return to_lower(namespace) | 7f1dcf2099b2a5b132b6f7d7355b903d4328a84d | 3,658,593 |
def convertInt(s):
"""Tells if a string can be converted to int and converts it
Args:
s : str
Returns:
s : str
Standardized token 'INT' if s can be turned to an int, s otherwise
"""
try:
int(s)
return "INT"
    except (ValueError, TypeError):
return s | a0eae31b69d4efcf8f8595e745316ea8622e24b3 | 3,658,594 |
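# Quick check of convertInt above on a small token stream.
tokens = [convertInt(t) for t in "pay 42 dollars".split()]
print(tokens)  # ['pay', 'INT', 'dollars']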
import torch
def pairwise_distance(A, B):
"""
Compute distance between points in A and points in B
:param A: (m,n) -m points, each of n dimension. Every row vector is a point, denoted as A(i).
:param B: (k,n) -k points, each of n dimension. Every row vector is a point, denoted as B(j).
    :return: Matrix of shape (m, k), where the element at (i, j) is the squared Euclidean distance between A(i) and B(j)
"""
A_square = torch.sum(A * A, dim=1, keepdim=True)
B_square = torch.sum(B * B, dim=1, keepdim=True)
distance = A_square + B_square.t() - 2 * torch.matmul(A, B.t())
return distance | 2142b94f91f9e762d1a8b134fdda4789c564455d | 3,658,595 |
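# Small usage check for pairwise_distance above; note that the returned values are
# squared Euclidean distances (no square root is taken).
import torch

A = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
B = torch.tensor([[0.0, 1.0]])
print(pairwise_distance(A, B))  # tensor([[1.], [1.]])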
from typing import Tuple
def _split_full_name(full_name: str) -> Tuple[str, str, str]:
"""Extracts the `(ds name, config, version)` from the full_name."""
if not tfds.core.registered.is_full_name(full_name):
raise ValueError(
f'Parsing builder name string {full_name} failed.'
'The builder name string must be of the following format:'
'`dataset_name[/config_name]/version`')
ds_name, *optional_config, version = full_name.split('/')
assert len(optional_config) <= 1
config = next(iter(optional_config)) if optional_config else ''
return ds_name, config, version | 2b2ace6e0df3302c8899834be749e0ef23c8df6d | 3,658,596 |
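# Illustration of the parsing done by _split_full_name above, assuming tfds is importable
# and that is_full_name only validates the `name[/config]/version` string pattern.
print(_split_full_name("mnist/3.0.1"))        # ('mnist', '', '3.0.1')
print(_split_full_name("wmt19/cs-en/1.0.0"))  # ('wmt19', 'cs-en', '1.0.0')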
def query_paginate(resources, arguments):
"""Return the resources paginated
Args:
resources(list): List to paginate
arguments(FormsDict): query arguments
Returns:
list: Paginated resource (asc or desc)
"""
if '_page' not in arguments:
return resources
page = int(arguments['_page'])
limit = 10 if '_limit' not in arguments else int(arguments['_limit'])
chunk_data = list(chunk_list(resources, limit))
results = chunk_data[page-1]
link_header = build_link_header(request, page, len(chunk_data))
response.set_header("Link", link_header)
return results | caeefb937501945be2f35792dbdec9e7eefcadef | 3,658,597 |
def convert_grad(graph):
"""Remove all instances of SymbolicKeyType in the graphs.
They will be replaced by globally-unique integers.
"""
mng = graph.manager
counter = 0
key_map = {}
for node in mng.all_nodes:
if node.is_constant(SymbolicKeyInstance):
if node.value not in key_map:
key_map[node.value] = counter
counter += 1
node.value = key_map[node.value]
node.abstract = to_abstract(node.value)
return graph | 7dfec6d6319630024bfb84872fd99b55168f0028 | 3,658,598 |
def site_data(db, settings):
"""Simple fake site data
"""
if organizations_support_sites():
settings.FEATURES['FIGURES_IS_MULTISITE'] = True
site_data = make_site_data()
ce = site_data['enrollments'][0]
lcgm = [
LearnerCourseGradeMetricsFactory(site=site_data['site'],
user=ce.user,
course_id=str(ce.course_id),
date_for='2020-10-01'),
]
site_data['lcgm'] = lcgm
return site_data | 395751133325b4fb6dc0ea463726c56b95c7d2a7 | 3,658,599 |
def render_curve(name,
data,
x_range=None,
y_range=None,
x_label=None,
y_label=None,
legends=None,
legend_kwargs={},
img_height=None,
img_width=None,
dpi=300,
figsize=(2, 2),
**kwargs):
"""Plot 1D curves.
Args:
        name (str): rendering identifier
data (Tensor|np.ndarray): a rank-1 or rank-2 tensor/np.array. If rank-2,
then each row represents an individual curve.
x_range (tuple[float]): min/max for x values. If None, ``x`` is
the index sequence of curve points. If provided, ``x`` is
evenly spaced by ``(x_range[1] - x_range[0]) / (N - 1)``.
y_range (tuple[float]): a tuple of ``(min_y, max_y)`` for showing on
the figure. If None, then it will be decided according to the
``y`` values. Note that this range won't change ``y`` data; it's
only used by matplotlib for drawing ``y`` limits.
x_label (str): shown besides x-axis
y_label (str): shown besides y-axis
legends (list[str]): label for each curve. No legends are shown if
None.
legend_kwargs (dict): optional legend kwargs
img_height (int): height of the output image
img_width (int): width of the output image
dpi (int): resolution of each rendered image
figsize (tuple[int]): figure size. For the relationship between ``dpi``
and ``figsize``, please refer to `this post <https://stackoverflow.com/questions/47633546/relationship-between-dpi-and-figure-size>`_.
**kwargs: all other arguments to ``ax.plot()``.
Returns:
Image: an output image rendered for the tensor
"""
assert len(data.shape) in (1, 2), "Must be rank-1 or rank-2!"
if not isinstance(data, np.ndarray):
array = data.cpu().numpy()
else:
array = data
if len(array.shape) == 1:
array = np.expand_dims(array, 0)
fig, ax = plt.subplots(figsize=figsize)
M, N = array.shape
    x = np.arange(N)  # ndarray, so it can be rescaled below when x_range is given
if x_range is not None:
delta = (x_range[1] - x_range[0]) / float(N - 1)
x = delta * x + x_range[0]
for i in range(M):
ax.plot(x, array[i], **kwargs)
if legends is not None:
ax.legend(legends, loc="best", **legend_kwargs)
if y_range:
ax.set_ylim(y_range)
if x_label:
ax.set_xlabel(x_label)
if y_label:
ax.set_ylabel(y_label)
return _convert_to_image(name, fig, dpi, img_height, img_width) | f0f60bf64c195f82ec91513f2c79a7c72a25599d | 3,658,600 |
def CreateBooleanUnion1(breps, tolerance, manifoldOnly, multiple=False):
"""
Compute the Boolean Union of a set of Breps.
Args:
breps (IEnumerable<Brep>): Breps to union.
tolerance (double): Tolerance to use for union operation.
manifoldOnly (bool): If true, non-manifold input breps are ignored.
Returns:
Brep[]: An array of Brep results or None on failure.
"""
url = "rhino/geometry/brep/createbooleanunion-breparray_double_bool"
if multiple: url += "?multiple=true"
args = [breps, tolerance, manifoldOnly]
if multiple: args = list(zip(breps, tolerance, manifoldOnly))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | ae397d73b9acbcdd52e9e83592322274047d9915 | 3,658,601 |
def make_singleton_class(class_reference, *args, **kwargs):
"""
Make the given class a singleton class.
*class_reference* is a reference to a class type, not an instance of a class.
*args* and *kwargs* are parameters used to instantiate a singleton instance.
To use this, suppose we have a class called ``DummyClass`` and later instantiate
    a variable ``dummy_instance`` as an instance of class ``DummyClass``. ``class_reference``
will be ``DummyClass``, not ``dummy_instance``.
Note that this method is not for direct use. Always use ``@singleton`` or ``@singleton_with``.
"""
# Name of the attribute that store the singleton instance
singleton_attr_name = '_singleton_instance'
    # The static method that returns the singleton instance of the reference class
@staticmethod
def instance():
"""
Get a singleton instance.
        .. note:: This class is capable of acting as a singleton by invoking this method.
"""
return class_reference._singleton_instance
# Intercept if the class has already been a singleton class.
if singleton_attr_name in dir(class_reference):
raise SingletonInitializationException(
'The attribute _singleton_instance is already assigned as instance of %s.'\
% type(class_reference._singleton_instance)
)
# Instantiate an instance for a singleton class.
class_reference._singleton_instance = class_reference(*args, **kwargs)
class_reference.instance = instance
return class_reference | c33b09f2eee16e23dd1a10a914a8735120efbbfe | 3,658,602 |
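# The real @singleton decorator mentioned in the docstring lives elsewhere; the
# simplified stand-in below just forwards to make_singleton_class for illustration.
def singleton(class_reference):
    return make_singleton_class(class_reference)

@singleton
class DummyClass:
    def __init__(self):
        self.value = 42

assert DummyClass.instance() is DummyClass.instance()  # one shared instance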
def get_coaches(soup):
"""
scrape head coaches
:param soup: html
:return: dict of coaches for game
"""
coaches = soup.find_all('tr', {'id': "HeadCoaches"})
# If it picks up nothing just return the empty list
if not coaches:
return coaches
coaches = coaches[0].find_all('td')
return {
'Away': coaches[1].get_text(),
'Home': coaches[3].get_text()
} | 784b355adb885b0eb4f26e72168475e1abbe4d1f | 3,658,603 |
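# Minimal illustration of get_coaches above; the HTML shape is an assumption modeled
# on the lookups performed in the function (a single 'HeadCoaches' row with four cells).
from bs4 import BeautifulSoup

html = ('<table><tr id="HeadCoaches">'
        '<td>Coach</td><td>Joel Quenneville</td>'
        '<td>Coach</td><td>Mike Babcock</td></tr></table>')
print(get_coaches(BeautifulSoup(html, "html.parser")))
# {'Away': 'Joel Quenneville', 'Home': 'Mike Babcock'}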
import logging
def create_app(config_name):
"""
Factory to create Flask application context using config option found in
app.config
:param config_name: (string) name of the chosen config option
:return app: (Flask application context)
"""
logging.basicConfig(
filename="app.log",
filemode="w",
format="%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
app = Flask(__name__)
app.config.from_object(config[config_name])
logging.info("App initialized.")
register_extensions(app)
register_blueprints(app)
configure_database(app)
return app | 8dea98c2393b575c7c353debe4b84eea67ff9353 | 3,658,604 |
import math
def _rectify_countdown_or_bool(count_or_bool):
"""
    Used by recursive functions to specify at which level to turn a bool on:
    counting down yields True, True, ..., False
    counting up yields False, False, False, ..., True
Args:
count_or_bool (bool or int): if positive will count down, if negative
will count up, if bool will remain same
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> result = [a1, a2, a3, a4, a5, a6, a7]
>>> print(result)
        [1, 0, 0, 0, -1, True, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
sign_ = math.copysign(1, count_or_bool)
count_or_bool_ = int(count_or_bool - sign_)
#if count_or_bool_ == 0:
# return sign_ == 1
else:
count_or_bool_ = False
return count_or_bool_ | 63d02cfbd99652bc04cfbac57a7d9306465bbf2b | 3,658,605 |
def POpen (inUV, access, err):
""" Open an image persistent (disk) form
inUV = Python UV object
access = access 1=READONLY, 2=WRITEONLY, 3=READWRITE
err = Python Obit Error/message stack
"""
################################################################
if ('myClass' in inUV.__dict__) and (inUV.myClass=='AIPSUVData'):
raise TypeError("Function unavailable for "+inUV.myClass)
return inUV.Open(access, err)
# end POpen | f365a9d5a4fc8a028203e8ea4a51b64d6d19f9bc | 3,658,606 |