content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
import numpy as np
import torch
def get_prob_and_custom_prob_per_crops(
logits_for_patches,
img_size_work_px_space,
n_pixels_in_crop,
descendent_specifier,
target_list,
rf,
DEVICE,
):
"""Determine the probability and the custom probability (i.e. the non-Deep-Learning "logit", cf. Appendix C.2) for crops according to the descendent_specifier, i.e. either each crop or only the four corner crops.
    Note that for the grouping of patches into one crop, each directly neighboring patch is considered (stride 1: logits_for_patches_reshaped[start_row:stop_row:stride_1, start_col:stop_col:stride_1]). This enables us to either select the data for all crops or only the data for the four corner crops. This is in contrast to the stride that was used to train and evaluate BagNet-33 (stride = 8).
Args:
        logits_for_patches: logit predictions for each patch
            torch tensor, dtype = torch.float32,
            of shape n_patches x 1000
img_size_work_px_space: number of image pixels in latest parent
n_pixels_in_crop: size of child crop
descendent_specifier: choice between selecting all crops ("stride1") or only four corner crops ("Ullman4")
target_list: list of targets
rf: number of pixels in image crop for BagNet-33
rf stands for receptive field size
Returns:
prob_per_crop: list of length n_crops^2 containing the probabilities per relevant crop
custom_prob_per_crop: list of length n_crops^2 containing the custom probabilities per relevant crop
"""
# When the crop is larger than 33x33 (or in fact 37x37 because that's the
# next larger pixel size appearing in the decreasing order of pixels when
# decreasing by 80% for each crop from 224 pixels), group patches into
    # crops to calculate the probabilities and the custom probabilities
if img_size_work_px_space > 37:
# calculate how many crops there are
n_crops = img_size_work_px_space - n_pixels_in_crop + 1
        # calculate how many patches contribute to one crop in one dimension
# (i.e. width or height)
n_patches_contribute_to_crop = n_pixels_in_crop - rf + 1
# make matrix square instead of one-dimensional along the patch-axis
patch_square_length = int(np.sqrt(logits_for_patches.size()[0]))
logits_for_patches_reshaped = torch.reshape(
logits_for_patches,
(patch_square_length,
patch_square_length,
logits_for_patches.shape[1]),
)
# loop through each crop
prob_per_crop = []
custom_prob_per_crop = []
for start_row in range(n_crops):
stop_row = start_row + n_patches_contribute_to_crop
for start_col in range(n_crops):
stop_col = start_col + n_patches_contribute_to_crop
# average logits over patches
logit_avg_of_cur_patch = torch.mean(
torch.mean(
logits_for_patches_reshaped[
start_row:stop_row, start_col:stop_col
],
dim=0,
),
dim=0,
)
# calculate probabilities
prob_for_targets_summed = get_prob_for_logits(
logit_avg_of_cur_patch[None, :], target_list
)
prob_per_crop.append(prob_for_targets_summed)
# calculate custom probabilities
cur_custom_prob_per_crop = get_custom_prob(
logit_avg_of_cur_patch[None, :], target_list, DEVICE
)
custom_prob_per_crop.append(cur_custom_prob_per_crop[0])
# patches correspond to crops
else:
custom_prob_per_crop = get_custom_prob(
logits_for_patches, target_list, DEVICE)
prob_per_crop = list(
get_prob_for_logits(
logits_for_patches,
target_list))
# if only the four corner crops are of interest ("Ullman4"), get that data
# only
if descendent_specifier == "Ullman4":
prob_per_crop, custom_prob_per_crop = extract_corner_data_for_Ullman4(
prob_per_crop, custom_prob_per_crop
)
return prob_per_crop, custom_prob_per_crop | 18a4c166334c6e5c3fa3300ddd255992677d412b | 300 |
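The core of the function above is grouping BagNet patch logits into stride-1 crop logits by averaging over a sliding window. Below is a minimal, self-contained sketch of just that averaging step with toy shapes; the probability helpers (`get_prob_for_logits`, `get_custom_prob`) are external and therefore omitted, and all sizes are assumptions for illustration.
```
# Minimal sketch (not the original pipeline): average patch logits over a
# stride-1 sliding window to obtain one logit vector per crop.
import torch

n_patches_per_side, n_classes = 6, 10          # toy values, not the real 1000-class setup
logits_for_patches = torch.randn(n_patches_per_side ** 2, n_classes)
logits_square = logits_for_patches.reshape(
    n_patches_per_side, n_patches_per_side, n_classes)

n_patches_contribute_to_crop = 3               # assumed window size
n_crops = n_patches_per_side - n_patches_contribute_to_crop + 1
crop_logits = torch.empty(n_crops, n_crops, n_classes)
for r in range(n_crops):
    for c in range(n_crops):
        window = logits_square[r:r + n_patches_contribute_to_crop,
                               c:c + n_patches_contribute_to_crop]
        crop_logits[r, c] = window.mean(dim=(0, 1))   # average logits over the window
print(crop_logits.shape)  # torch.Size([4, 4, 10])
```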
import numpy as np
def xywh_to_xyxy(boxes):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
if boxes is None or len(boxes) == 0:
return boxes
boxes = np.array(boxes)
return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)) | 391c55ddd2e84cf60073cbd02d0e5d595f3ea3b1 | 301 |
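A hypothetical usage of `xywh_to_xyxy`, assuming the function above is in scope: a 10x20 box anchored at (5, 5) becomes corner format with inclusive pixel coordinates (hence the -1).
```
boxes_xywh = [[5, 5, 10, 20]]
print(xywh_to_xyxy(boxes_xywh))   # [[ 5  5 14 24]]
```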
import numpy as np
import matplotlib.pyplot as plt
def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :
    """ Make histograms of VSCATTER for different bins of Teff and H, given min NVISITS and min [M/H]
    """
if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))
else : fig,ax=fig
tbins=[3000,3500,4000,4500,5500,8000,30000]
hbins=[8,11,12,13,15]
try: snr = a['SNREV']
except: snr=a['SNR']
j=np.where(snr > 300) [0]
snr[j] = 300
for i in range(len(tbins)-1) :
ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)
for j in range(len(hbins)-1) :
ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))
gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &
(a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &
(a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]
print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))
try :
#plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')
ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)
ax[i,j].set_xlabel('VSCATTER (km/s)')
ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())
#ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])
#ax[i,1].set_xlabel('VSCATTER')
except : pass
if out is not None :
fig.savefig(out+'.png')
plt.close()
fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))
return fig,ax | b302883263ef79682e697d4c82b0fc352eb597ec | 302 |
def parse_unique_count_for_column(column_df, column):
"""
returns column specific distribution details.
sample output,
```
"<column_df>": {
"<>": 30
}
```
"""
return {column: get_unique_counts_of_column(column_df)} | 275375c012d8ffc2bd8f209bf57e1c1aa1d183f6 | 303 |
def transaction():
"""
Get database transaction object
:return: _TransactionContext object
usage:
with transaction():
            # transaction operations
pass
>>> def update_profile(t_id, name, rollback):
... u = dict(id=t_id, name=name, email='%s@test.org' % name, password=name, last_modified=time.time())
... insert('testuser', **u)
... update('update testuser set password=%s where id=%s', name.upper(), t_id)
... if rollback:
... raise StandardError('will cause rollback...')
>>> with transaction():
... update_profile(900301, 'Python', False)
>>> select_one('select * from testuser where id=%s', 900301).name
u'Python'
>>> with transaction():
... update_profile(900302, 'Ruby', True)
Traceback (most recent call last):
...
StandardError: will cause rollback...
>>> select('select * from testuser where id=%s', 900302)
[]
"""
return _TransactionContext() | d0b941dd9c2ce3e07079280edc74324a28d60509 | 304 |
from typing import Union
from typing import Dict
from typing import Any
import typing
import ipywidgets
def _BoundedIntRange(
description: str = "",
description_tooltip: str = None,
layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
max: int = 100,
min: int = 0,
style: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_description.DescriptionStyle]] = {},
value: tuple = (0, 1),
on_description: typing.Callable[[str], Any] = None,
on_description_tooltip: typing.Callable[[str], Any] = None,
on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
on_max: typing.Callable[[int], Any] = None,
on_min: typing.Callable[[int], Any] = None,
on_style: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_description.DescriptionStyle]]], Any] = None,
on_value: typing.Callable[[tuple], Any] = None,
) -> Element[ipywidgets.widgets.widget_int._BoundedIntRange]:
"""
:param description: Description of the control.
:param description_tooltip: Tooltip for the description (defaults to description).
:param max: Max value
:param min: Min value
:param style: Styling customizations
:param value: Tuple of (lower, upper) bounds
"""
kwargs: Dict[Any, Any] = without_default(_BoundedIntRange, locals())
if isinstance(kwargs.get("layout"), dict):
kwargs["layout"] = Layout(**kwargs["layout"])
if isinstance(kwargs.get("style"), dict):
kwargs["style"] = DescriptionStyle(**kwargs["style"])
widget_cls = ipywidgets.widgets.widget_int._BoundedIntRange
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs) | 0f54f750da5000df2952298b0f6bec987a7b02b4 | 305 |
import os
import numpy as np
import torch
import torch.utils.data as data_utils
def load_static_mnist(args, **kwargs):
"""
Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784
"""
args.dynamic_binarization = False
args.input_type = 'binary'
args.input_size = [1, 28, 28]
# start processing
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
# shuffle train data
np.random.shuffle(x_train)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args | bdd9fd491048f7e0af5bcc5007bb61fd6ee65e2f | 306 |
def get_benchmark_snapshot(benchmark_df,
threshold=_MIN_FRACTION_OF_ALIVE_TRIALS_AT_SNAPSHOT):
"""Finds the latest time where |threshold| fraction of the trials were still
running. In most cases, this is the end of the experiment. However, if less
than |threshold| fraction of the trials reached the end of the experiment,
then we will use an earlier "snapshot" time for comparing results.
Returns a data frame that only contains the measurements of the picked
snapshot time.
"""
# Allow overriding threshold with environment variable as well.
threshold = environment.get('BENCHMARK_SAMPLE_NUM_THRESHOLD', threshold)
num_trials = benchmark_df.trial_id.nunique()
trials_running_at_time = benchmark_df.time.value_counts()
criteria = trials_running_at_time >= threshold * num_trials
ok_times = trials_running_at_time[criteria]
latest_ok_time = ok_times.index.max()
benchmark_snapshot_df = benchmark_df[benchmark_df.time == latest_ok_time]
return benchmark_snapshot_df | 6e7f887f3f720612013dfe06b5decbf2e092a2e2 | 307 |
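A minimal sketch of the snapshot-selection logic above, using pandas only and ignoring the environment-variable override; the toy DataFrame and the threshold value are assumptions for illustration.
```
import pandas as pd

benchmark_df = pd.DataFrame({
    'trial_id': [1, 1, 1, 2, 2, 2, 3, 3],
    'time':     [1, 2, 3, 1, 2, 3, 1, 2],
    'edges':    [10, 12, 14, 9, 11, 13, 8, 10],
})
threshold = 0.8
num_trials = benchmark_df.trial_id.nunique()                 # 3
trials_running_at_time = benchmark_df.time.value_counts()    # {1: 3, 2: 3, 3: 2}
ok_times = trials_running_at_time[trials_running_at_time >= threshold * num_trials]
latest_ok_time = ok_times.index.max()                        # 2: only 2/3 trials reached time 3
print(benchmark_df[benchmark_df.time == latest_ok_time])     # all rows measured at time 2
```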
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium import webdriver
from os import getcwd
def configChromeDriver(webVisible : bool = True , filePathDownload : str = None, filePathWebDriver: str = None) -> WebDriver:
"""
    Configure your Chrome Driver:
    - webVisible ==> Defaults to True, to hide the webDriver.
    - filePathDownload ==> By default a Downloads folder is created at the project root if no download folder is given ("nameFolder\\folderDownload").
    - filePathWebDriver ==> Provide the full path, including the executable ("nameFolder\\91\\chromedriver.exe").
      By default the root folder (webDriver) is used; if it does not exist, create it and place the driver folder, named after the version number, inside it.
"""
filePathDownload = filePathDownload or pathDownload()
filePathWebDriver = filePathWebDriver or fr"{getcwd()}\webDriver\{lastWebDriver()}\chromedriver.exe"
options = Options()
options.headless = webVisible
prefs = {"download.default_directory": filePathDownload}
options.add_experimental_option("prefs", prefs)
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--lang=pt")
return webdriver.Chrome(executable_path=filePathWebDriver, options=options) | 755e02ffcb4f9ab102023e65f97f4f30a030debf | 308 |
import traceback
import sys
def create_trigger_function_with_trigger(server, db_name, schema_name,
func_name):
"""This function add the trigger function to schema"""
try:
connection = utils.get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
query = "CREATE FUNCTION " + schema_name + "." + func_name + \
"()" \
" RETURNS trigger LANGUAGE 'plpgsql' STABLE LEAKPROOF" \
" SECURITY DEFINER SET enable_sort=true AS $BODY$ BEGIN" \
" NULL; END; $BODY$"
pg_cursor.execute(query)
connection.commit()
# Get 'oid' from newly created function
pg_cursor.execute("SELECT pro.oid, pro.proname FROM"
" pg_proc pro WHERE pro.proname='%s'" %
func_name)
functions = pg_cursor.fetchone()
connection.close()
return functions
except Exception:
traceback.print_exc(file=sys.stderr) | 0a142f77793a608a9da8342f2ce566327a258332 | 309 |
import numpy as np
import pandas as pd
import tqdm
import timeit
import warnings
def get_results_seq_len(given_true_eig,
hidden_dim,
input_dim,
min_seq_len,
max_seq_len,
num_sampled_seq_len,
num_repeat,
input_mean,
input_stddev,
output_noise_stddev,
init_state_mean=0.0,
init_state_stddev=0.0,
generate_diagonalizable_only=False,
random_seed=0):
"""Get results for varying sequence lengths.
Args:
given_true_eig: Ground truth of eigenvalues. If None, generate random
eigenvalues from uniform [-1,1] in each repeat of experiment.
hidden_dim: Assumed hidden dim. If 0, use true hidden dim.
input_dim: The input dim.
min_seq_len: Min seq len in experiments.
max_seq_len: Max seq len in experiments.
num_sampled_seq_len: Number of sampled seq len values in between min and max
seq len.
num_repeat: Number of repeated experiments for each seq_len.
input_mean: Scalar or 1D array of length hidden state dim.
    input_stddev: Scalar or 1D array of length hidden state dim.
output_noise_stddev: Scalar.
init_state_mean: Scalar or 1D array of length hidden state dim.
    init_state_stddev: Scalar or 1D array of length hidden state dim.
generate_diagonalizable_only: Whether to only use diagonalizable LDSs in
simulations.
random_seed: Random seed, integer.
Returns:
A pandas DataFrame with columns `method`, `seq_len`, `t_secs`,
`failed_ratio`, and `l2_r_error`.
The same method and seq_len will appear in num_repeat many rows.
"""
np.random.seed(random_seed)
progress_bar = tqdm.tqdm(total=num_repeat * num_sampled_seq_len)
gen = lds.SequenceGenerator(
input_mean=input_mean,
input_stddev=input_stddev,
output_noise_stddev=output_noise_stddev,
init_state_mean=init_state_mean,
init_state_stddev=init_state_stddev)
# seq_len_vals = np.linspace(min_seq_len, max_seq_len, num_sampled_seq_len)
# seq_len_vals = [int(round(x)) for x in seq_len_vals]
min_inv_sqrt_seq_len = 1. / np.sqrt(max_seq_len)
max_inv_sqrt_seq_len = 1. / np.sqrt(min_seq_len)
inv_sqrt_seq_len_vals = np.linspace(min_inv_sqrt_seq_len,
max_inv_sqrt_seq_len, num_sampled_seq_len)
seq_len_vals = [int(round(1. / (x * x))) for x in inv_sqrt_seq_len_vals]
learning_fns = create_learning_fns(hidden_dim)
metric_dict = {
k: [] for k in [
'method', 'seq_len', 't_secs', 'l2_a_error', 'l2_r_error',
'failed_convg'
]
}
  for _ in range(num_repeat):
if given_true_eig is not None:
ground_truth = lds.generate_linear_dynamical_system(
hidden_dim, input_dim, eigvalues=given_true_eig)
else:
ground_truth = lds.generate_linear_dynamical_system(
hidden_dim, input_dim, diagonalizable=generate_diagonalizable_only)
true_eig = ground_truth.get_spectrum()
for seq_len in seq_len_vals:
seq = gen.generate_seq(ground_truth, seq_len=seq_len)
      for k, fn in learning_fns.items():
start_t = timeit.default_timer()
with warnings.catch_warnings(record=True) as caught:
warnings.filterwarnings(
'always', category=sm_exceptions.ConvergenceWarning)
if FLAGS.hide_inputs:
eig_pred = fn(seq.outputs, None)
else:
eig_pred = fn(seq.outputs, seq.inputs)
t_elapsed = timeit.default_timer() - start_t
metric_dict['seq_len'].append(seq_len)
metric_dict['method'].append(k)
metric_dict['t_secs'].append(t_elapsed)
metric_dict['l2_a_error'].append(np.linalg.norm(true_eig - eig_pred))
metric_dict['l2_r_error'].append(
np.linalg.norm(true_eig - eig_pred) / np.linalg.norm(true_eig))
metric_dict['failed_convg'].append(False)
for w in caught:
if w.category in [
RuntimeWarning, sm_exceptions.ConvergenceWarning,
sm_exceptions.HessianInversionWarning
]:
metric_dict['failed_convg'][-1] = True
else:
warnings.warn(w.message, w.category)
progress_bar.update(1)
progress_bar.close()
return pd.DataFrame(data=metric_dict) | 6d7439b4b9c5bca6010eaffc4c924ef8f09fbb4f | 310 |
import functools
import jax
import jax.numpy as jnp
import rlds
import tensorflow as tf
def episode_to_timestep_batch(
episode: rlds.BatchedStep,
return_horizon: int = 0,
drop_return_horizon: bool = False,
flatten_observations: bool = False,
calculate_episode_return: bool = False) -> tf.data.Dataset:
"""Converts an episode into multi-timestep batches.
Args:
episode: Batched steps as provided directly by RLDS.
return_horizon: int describing the horizon to which we should accumulate the
return.
drop_return_horizon: bool whether we should drop the last `return_horizon`
steps to avoid mis-calculated returns near the end of the episode.
flatten_observations: bool whether we should flatten dict-based observations
into a single 1-d vector.
calculate_episode_return: Whether to calculate episode return. Can be an
expensive operation on datasets with many episodes.
Returns:
rl_dataset.DatasetType of 3-batched transitions, with scalar rewards
expanded to 1D rewards
This means that for every step, the corresponding elements will be a batch of
size 3, with the first batched element corresponding to *_t-1, the second to
*_t and the third to *_t+1, e.g. you can access the previous observation as:
```
o_tm1 = el[types.OBSERVATION][0]
```
Two additional keys can be added: 'R_t' which corresponds to the undiscounted
return for horizon `return_horizon` from time t (always present), and
'R_total' which corresponds to the total return of the associated episode (if
`calculate_episode_return` is True). Rewards are converted to be (at least)
one-dimensional, prior to batching (to avoid ()-shaped elements).
In this example, 0-valued observations correspond to o_{t-1}, 1-valued
observations correspond to o_t, and 2-valued observations correspond to
  o_{t+1}. This same structure is true for all keys, except 'R_t' and 'R_total'
which are both scalars.
```
ipdb> el[types.OBSERVATION]
<tf.Tensor: shape=(3, 11), dtype=float32, numpy=
array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.]], dtype=float32)>
```
"""
steps = episode[rlds.STEPS]
if drop_return_horizon:
episode_length = steps.cardinality()
steps = steps.take(episode_length - return_horizon)
# Calculate n-step return:
rewards = steps.map(lambda step: step[rlds.REWARD])
batched_rewards = rlds.transformations.batch(
rewards, size=return_horizon, shift=1, stride=1, drop_remainder=True)
returns = batched_rewards.map(tf.math.reduce_sum)
output = tf.data.Dataset.zip((steps, returns)).map(_append_n_step_return)
# Calculate total episode return for potential filtering, use total # of steps
# to calculate return.
if calculate_episode_return:
dtype = jnp.float64 if jax.config.jax_enable_x64 else jnp.float32
# Need to redefine this here to avoid a tf.data crash.
rewards = steps.map(lambda step: step[rlds.REWARD])
episode_return = rewards.reduce(dtype(0), lambda x, y: x + y)
output = output.map(
functools.partial(
_append_episode_return, episode_return=episode_return))
output = output.map(_expand_scalars)
if flatten_observations:
output = output.map(_flatten_observations)
output = rlds.transformations.batch(
output, size=3, shift=1, drop_remainder=True)
return output | 105c528772257b990897d35e8f8b82663e26e57c | 311 |
def parse(authz_file, modules):
"""Parse a Subversion authorization file.
Return a dict of modules, each containing a dict of paths, each containing
a dict mapping users to permissions. Only modules contained in `modules`
are retained.
"""
parser = UnicodeConfigParser(ignorecase_option=False)
parser.read(authz_file)
groups = {}
aliases = {}
sections = {}
for section in parser.sections():
if section == 'groups':
for name, value in parser.items(section):
groups.setdefault(name, set()).update(to_list(value))
elif section == 'aliases':
for name, value in parser.items(section):
aliases[name] = value.strip()
else:
for name, value in parser.items(section):
parts = section.split(':', 1)
module, path = parts[0] if len(parts) > 1 else '', parts[-1]
if module in modules:
sections.setdefault((module, path), []) \
.append((name, value))
def resolve(subject, done):
if subject.startswith('@'):
done.add(subject)
for members in groups[subject[1:]] - done:
for each in resolve(members, done):
yield each
elif subject.startswith('&'):
yield aliases[subject[1:]]
else:
yield subject
authz = {}
    for (module, path), items in sections.items():
section = authz.setdefault(module, {}).setdefault(path, {})
for subject, perms in items:
readable = 'r' in perms
# Ordering isn't significant; any entry could grant permission
section.update((user, readable)
for user in resolve(subject, set())
if not section.get(user))
return authz | 9102d77ed5db05c582a0ecbc3eb26a63fd579ce6 | 312 |
def send_expiry_note(invite, request, user_name):
"""
Send a notification email to the issuer of an invitation when a user
attempts to accept an expired invitation.
:param invite: ProjectInvite object
:param request: HTTP request
:param user_name: User name of invited user
    :return: Number of sent emails (int)
"""
subject = (
SUBJECT_PREFIX
+ ' '
+ SUBJECT_EXPIRY.format(
user_name=user_name, project=invite.project.title
)
)
message = get_email_header(
MESSAGE_HEADER.format(
recipient=invite.issuer.get_full_name(), site_title=SITE_TITLE
)
)
message += MESSAGE_EXPIRY_BODY.format(
role=invite.role.name,
project=invite.project.title,
user_name=user_name,
user_email=invite.email,
date_expire=localtime(invite.date_expire).strftime('%Y-%m-%d %H:%M'),
site_title=SITE_TITLE,
project_label=get_display_name(invite.project.type),
)
if not settings.PROJECTROLES_EMAIL_SENDER_REPLY:
message += NO_REPLY_NOTE
message += get_email_footer()
return send_mail(subject, message, [invite.issuer.email], request) | 62602f670724630ff5aeb99ab28b7dfb18fc5233 | 313 |
def level_is_between(level, min_level_value, max_level_value):
"""Returns True if level is between the specified min or max, inclusive."""
level_value = get_level_value(level)
if level_value is None:
# unknown level value
return False
return level_value >= min_level_value and level_value <= max_level_value | b0bc1c4ea749d51af147bc1237aac5a5d1e5ba1b | 314 |
import numpy as np
from sklearn import neighbors
def voronoiMatrix(sz=512,percent=0.1,num_classes=27):
"""
Create voronoi polygons.
Parameters
----------
sz : int
row and column size of the space in which the circle is placed
percent : float
Percent of the space to place down centers of the voronoi polygons.
Smaller percent makes the polygons larger
num_classes : int
Number of classes to assign to each of the voronoi polygons
Returns
-------
X : 2D array
Array containing all voronoi polygons
"""
X = np.zeros((sz,sz))
#fill in percentage of the space
locs = np.random.rand(sz,sz)<=percent
vals = np.random.randint(0,num_classes,size=(sz,sz))
X[locs]=vals[locs]
#get all the indices of the matrix
cc,rr = np.meshgrid(np.arange(0,sz),np.arange(0,sz))
f = np.zeros((sz**2,2))
f[:,0]=rr.ravel() #feature1
f[:,1]=cc.ravel() #feature2
t = X.ravel() #target
train_ind = locs.ravel()
f_train = f[train_ind]
t_train = t[train_ind]
clf = neighbors.KNeighborsClassifier(n_neighbors=1)
clf.fit(f_train, t_train)
preds = clf.predict(f)
locs = f.astype(int)
X[locs[:,0],locs[:,1]] = preds
return X | c4dfb74ae5b9e26494cb8642eb96dca565d6497d | 315 |
from typing import Counter
from cobra import Reaction
def genVBOF2(virus_record, model, model_name=None):
"""New version of the genVBOF function by Hadrien.
Builds a Virus Biomass Objective Function (basically a virus biomass
    production reaction, from amino acids and nucleotides) from a genbank
file.
Params:
- virus_record: genbank record of a virus (output from Bio.SeqIO.parse)
- model: a cobra metabolic model (cobra.core.model.Model)
Returns:
- virus biomass objective function (cobra.core.reaction.Reaction)
"""
met_dict = load_metabolite_id_dict(model, model_name=model_name)
# VIRUS IDENTIFICATION
taxonomy = " ".join([taxon.lower() for taxon in virus_record.annotations["taxonomy"]])
if "betacoronavirus" not in taxonomy:
raise NotImplementedError('Virus family is not supported: Unable to create VBOF. Consult _README')
short_name, full_name = get_virus_names(virus_record)
# AMINOACID COUNT
all_cds = {feature for feature in virus_record.features if feature.type == "CDS"}
# Check that our own virus_composition dict contain exactly the
# proteins defined in the genbank file, no more, no less.
protein_names_in_gb_file = {cds.qualifiers["product"][0] for cds in all_cds}
protein_names_in_our_data = {protein_name for protein_name in virus_composition[short_name]["proteins"]}
assert protein_names_in_gb_file == protein_names_in_our_data
virus_aa_composition = Counter()
# protein name -> number of atp involved in its peptide bonds formations
# (accounting for the number of copies of protein)
peptide_bond_formation = dict()
for cds in all_cds:
protein_name = cds.qualifiers["product"][0]
aa_sequence = cds.qualifiers["translation"][0]
aa_count = Counter(aa_sequence)
copies_per_virus = virus_composition[short_name]["proteins"][protein_name]
virus_aa_composition += multiply_counter(aa_count, copies_per_virus)
peptide_bond_formation[protein_name] = (len(aa_sequence) * k_atp_protein - k_atp_protein) * copies_per_virus
# [3] Precursor frequency
# Genome [Nucleotides]
Cg = virus_composition[short_name]["Cg"] # number of genome copies per virus
virus_nucl_count = Counter(str(virus_record.seq))
countA = virus_nucl_count["A"]
countC = virus_nucl_count["C"]
countG = virus_nucl_count["G"]
countU = virus_nucl_count["T"] # Base 'T' is pseudo for base 'U'
antiA = countU
antiC = countG
antiG = countC
antiU = countA
# Count summation
totNTPS = (Cg * (countA + countC + countG + countU + antiA + antiC + antiG + antiU))
totAA = sum(count for count in virus_aa_composition.values())
# [4] VBOF Calculations
# Nucleotides
# mol.ntps/mol.virus
V_a = (Cg*(countA + antiA))
V_c = (Cg*(countC + antiC))
V_g = (Cg*(countG + antiG))
V_u = (Cg*(countU + antiU))
# g.ntps/mol.virus
G_a = V_a * ntpsDict["atp"]
G_c = V_c * ntpsDict["ctp"]
G_g = V_g * ntpsDict["gtp"]
G_u = V_u * ntpsDict["ttp"]
# Amino Acids
# g.a/mol.virus
G_aa = {aa: count * aaDict[aa] for aa, count in virus_aa_composition.items()}
# Total genomic and proteomic molar mass
M_v = (G_a + G_c + G_g + G_u) + sum(G_aa.values())
# Stoichiometric coefficients
# Nucleotides [mmol.ntps/g.virus] (for the genome)
S_atp = 1000 * (V_a/M_v)
S_ctp = 1000 * (V_c/M_v)
S_gtp = 1000 * (V_g/M_v)
S_utp = 1000 * (V_u/M_v)
# Amino acids [mmol.aa/g.virus]
S_aa = {aa: 1000 * V_aa / M_v for aa, V_aa in virus_aa_composition.items()}
# Energy requirements
# Genome: Phosphodiester bond formation products [Pyrophosphate]
# SARS Cov 2 is a single stranded RNA virus: it has to first do an
# intermediary reverse copy of itself and then replicate itself from
# that intermediary strand.
genTemp = (((countA + countC + countG + countU) * k_ppi) - k_ppi)
genRep = (((antiA + antiC + antiG + antiU) * k_ppi) - k_ppi)
genTot = genTemp + genRep
V_ppi = genTot
S_ppi = 1000 * (V_ppi / M_v)
# Proteome: Peptide bond formation [ATP + H2O]
    # Note: ATP used in this process is denoted as ATPe/Ae [e = energy version]
V_Ae = sum(peptide_bond_formation.values())
S_Ae = 1000 * (V_Ae / M_v)
# [5] VBOF Reaction formatting and output
# Left-hand terms: Nucleotides
# Note: ATP term is a summation of genome and energy requirements
S_ATP = (S_atp + S_Ae) * -1
S_CTP = S_ctp * -1
S_GTP = S_gtp * -1
S_UTP = S_utp * -1
# Left-hand terms: Amino Acids
S_AAf = {aa: -coef for aa, coef in S_aa.items()}
# Left-hand terms: Energy Requirements
S_H2O = S_Ae * -1
# Right-hand terms: Energy Requirements
S_ADP = S_Ae
S_Pi = S_Ae
S_H = S_Ae
S_PPi = S_ppi
reaction_name = short_name + '_prodrxn_VN'
virus_reaction = Reaction(reaction_name)
virus_reaction.name = full_name + ' production reaction'
virus_reaction.subsystem = 'Virus Production'
virus_reaction.lower_bound = 0
virus_reaction.upper_bound = 1000
virus_reaction.add_metabolites(({
met_dict['atp']: S_ATP,
met_dict['ctp']: S_CTP,
met_dict['gtp']: S_GTP,
met_dict['utp']: S_UTP,
met_dict['A']: S_AAf['A'],
met_dict['R']: S_AAf['R'],
met_dict['N']: S_AAf['N'],
met_dict['D']: S_AAf['D'],
met_dict['C']: S_AAf['C'],
met_dict['Q']: S_AAf['Q'],
met_dict['E']: S_AAf['E'],
met_dict['G']: S_AAf['G'],
met_dict['H']: S_AAf['H'],
met_dict['I']: S_AAf['I'],
met_dict['L']: S_AAf['L'],
met_dict['K']: S_AAf['K'],
met_dict['M']: S_AAf['M'],
met_dict['F']: S_AAf['F'],
met_dict['P']: S_AAf['P'],
met_dict['S']: S_AAf['S'],
met_dict['T']: S_AAf['T'],
met_dict['W']: S_AAf['W'],
met_dict['Y']: S_AAf['Y'],
met_dict['V']: S_AAf['V'],
met_dict['h2o']: S_H2O,
met_dict['adp']: S_ADP,
met_dict['Pi']: S_Pi,
met_dict['h']: S_H,
met_dict['PPi']: S_PPi}))
return virus_reaction | 98f0aafae9efa65d18b65e23eb9d7c0457641bb0 | 316 |
import logging
from typing import Dict, List, Sequence
LOGGER = logging.getLogger(__name__)
def parse_args(arguments: Sequence, options: List[str] = None) -> Dict:
"""
Parse input arguments.
Simple assessment that module AWS Glue is not available in pyshell jobs.
Parameters
----------
arguments
Sequence of options and values to be parsed. (sys.argv)
options
Options which value is resolved.
Returns
-------
Parsed options and values.
"""
LOGGER.debug("Parsing arguments: %s options: %s", arguments, options)
    try:
        import awsglue.utils as au  # awsglue is only available inside AWS Glue jobs
    except ImportError:
return parse_args_fallback(arguments, options)
try:
resolved = au.getResolvedOptions(args=arguments, options=options)
LOGGER.debug("awsglue.utils args resolved: %s", resolved)
return resolved
except au.GlueArgumentError:
return parse_args_fallback(arguments, options) | aa32d521e5fee4f10097355593a9adf0b8108357 | 317 |
import time
def get_current_ms_time() -> int:
"""
:return: the current time in milliseconds
"""
return int(time.time() * 1000) | 3c037bffb486ebae3ffcfba5fe431bd9b69b3bda | 318 |
def get_service_info(): # noqa: E501
"""Get information about Workflow Execution Service.
May include information related (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general service availability. # noqa: E501
:rtype: ServiceInfo
"""
return adapter.get_service_info() | 693d7c47a235dc96f9c44d993fba25607994f2e3 | 319 |
def str_of_tuple(d, str_format):
"""Convert tuple to str.
It's just str_format.format(*d). Why even write such a function?
(1) To have a consistent interface for key conversions
(2) We want a KeyValidationError to occur here
Args:
        d: tuple of params to str_format
str_format: Auto fields format string. If you have manual fields, consider auto_field_format_str to convert.
Returns:
parametrized string
>>> str_of_tuple(('hello', 'world'), "Well, {} dear {}!")
'Well, hello dear world!'
"""
try:
return str_format.format(*d)
except Exception as e:
raise KeyValidationError(e) | b5612efb3b189754cb278f40c7f471284dfc1daa | 320 |
import numpy as np
def _intersect(bboxes1, bboxes2):
"""
bboxes: t x n x 4
"""
assert bboxes1.shape[0] == bboxes2.shape[0]
t = bboxes1.shape[0]
inters = np.zeros((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
_min = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
_max = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
w = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
h = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
for i in range(t):
np.maximum.outer(bboxes1[i, :, 0], bboxes2[i, :, 0], out=_min)
np.minimum.outer(bboxes1[i, :, 2], bboxes2[i, :, 2], out=_max)
np.subtract(_max + 1, _min, out=w)
w.clip(min=0, out=w)
np.maximum.outer(bboxes1[i, :, 1], bboxes2[i, :, 1], out=_min)
np.minimum.outer(bboxes1[i, :, 3], bboxes2[i, :, 3], out=_max)
np.subtract(_max + 1, _min, out=h)
h.clip(min=0, out=h)
np.multiply(w, h, out=w)
inters += w
return inters | 91056250d3adf829d1815a016a75423f93adb6c1 | 321 |
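A hypothetical usage of `_intersect`, assuming the function above is in scope, with `t = 1` and inclusive pixel coordinates: the query box overlaps the first candidate on a 5x5 region and misses the second entirely.
```
bboxes1 = np.array([[[0, 0, 9, 9]]], dtype=np.float32)                      # t x 1 x 4
bboxes2 = np.array([[[5, 5, 14, 14], [20, 20, 29, 29]]], dtype=np.float32)  # t x 2 x 4
print(_intersect(bboxes1, bboxes2))   # [[25.  0.]]
```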
import numpy as np
def convert_x_to_bbox(x,score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
w = np.sqrt(np.abs(x[2] * x[3]))
if(w<=0):
w=1
h = x[2] / w
  if(score is None):
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.],np.float32)
else:
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5)) | f49a6b7c306087d92e99acb5bd679c59308f81b3 | 322 |
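A hypothetical usage of `convert_x_to_bbox`, assuming the function above is in scope: centre (10, 20), scale (area) 100 and aspect ratio 1 give a 10x10 box around the centre.
```
x = np.array([10., 20., 100., 1.])
print(convert_x_to_bbox(x))   # [ 5. 15. 15. 25.]
```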
from typing import Tuple
def decimal_to_boolean_list(num: int, padding: int = 0) -> Tuple[bool, ...]:
"""
Convert a decimal number into a tuple of booleans, representing its binary value.
"""
# Convert the decimal into binary
binary = bin(num).replace('0b', '').zfill(padding)
# Return a tuple of booleans, one for each element of the binary number (it's either '0' or '1' so we can convert
# directly to boolean)
return tuple(char == '1' for char in binary) | c13831214faece847960089f781cc1c6442205ec | 323 |
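A small usage example of `decimal_to_boolean_list`, assuming the function above is in scope: 5 is 0b101, and padding to four digits gives 0101.
```
print(decimal_to_boolean_list(5, padding=4))   # (False, True, False, True)
```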
def get_credentials(fn, url, username, allowed):
"""Call fn and return the credentials object"""
url_str = maybe_string(url)
username_str = maybe_string(username)
creds = fn(url_str, username_str, allowed)
credential_type = getattr(creds, 'credential_type', None)
credential_tuple = getattr(creds, 'credential_tuple', None)
if not credential_type or not credential_tuple:
raise TypeError("credential does not implement interface")
cred_type = credential_type
if not (allowed & cred_type):
raise TypeError("invalid credential type")
ccred = ffi.new('git_cred **')
if cred_type == C.GIT_CREDTYPE_USERPASS_PLAINTEXT:
name, passwd = credential_tuple
err = C.git_cred_userpass_plaintext_new(ccred, to_bytes(name),
to_bytes(passwd))
elif cred_type == C.GIT_CREDTYPE_SSH_KEY:
name, pubkey, privkey, passphrase = credential_tuple
if pubkey is None and privkey is None:
err = C.git_cred_ssh_key_from_agent(ccred, to_bytes(name))
else:
err = C.git_cred_ssh_key_new(ccred, to_bytes(name),
to_bytes(pubkey), to_bytes(privkey),
to_bytes(passphrase))
else:
raise TypeError("unsupported credential type")
check_error(err)
return ccred | 931d01d2c8ea44e1f8522f5dedb14a66367f3d4f | 324 |
def tpack(text, width=100):
"""Pack a list of words into lines, so long as each line (including
intervening spaces) is no longer than _width_"""
lines = [text[0]]
for word in text[1:]:
if len(lines[-1]) + 1 + len(word) <= width:
lines[-1] += (' ' + word)
else:
lines += [word]
return lines | e1b1b54a528c8dc2142a750156d3db1f754b4268 | 325 |
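A usage example of `tpack`, assuming the function above is in scope: it takes a list of words and greedily packs them into lines no longer than `width` characters, spaces included.
```
words = "pack a list of words into lines of bounded width".split()
for line in tpack(words, width=20):
    print(line)
# pack a list of words
# into lines of
# bounded width
```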
import numpy as np
import torch
def _log_evidence_func(arr):
    """Returns an estimate of the log evidence from a set of log importance weights
in arr. arr has shape TxN where T is the number of trials and N is the number
of samples for estimation.
Args:
arr (torch.FloatTensor of shape TxN): log importance weights
Returns:
        A tensor of shape (T,) representing the estimates for each set of samples.
"""
T, N = arr.shape
log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
return log_evidence | 6fd0f7a3e6ad677300a1c2d342082417d6c1a2c8 | 326 |
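A hypothetical sanity check of `_log_evidence_func`, assuming the function above is in scope: with uniform log-weights of zero, each estimate is logsumexp(0, ..., 0) - log(N) = log(N) - log(N), i.e. approximately zero.
```
import torch

arr = torch.zeros(3, 8)           # T=3 trials, N=8 samples each
print(_log_evidence_func(arr))    # approximately zero for each trial
```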
import logging
import tensorflow as tf
def _accumulated_moments_for_inference(mean, variance, is_training):
"""Use accumulated statistics for moments during inference.
After training the user is responsible for filling the accumulators with the
actual values. See _UpdateBnAccumulators() in eval_gan_lib.py for an example.
Args:
mean: Tensor of shape [num_channels] with the mean of the current batch.
variance: Tensor of shape [num_channels] with the variance of the current
batch.
    is_training: Boolean, whether to construct ops for training or inference
graph.
Returns:
    Tuple of (mean, variance) to use. This can be the same as the inputs.
"""
variable_collections = [
tf.GraphKeys.MODEL_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES,
]
with tf.variable_scope("accu", values=[mean, variance]):
# Create variables for accumulating batch statistic and use them during
# inference. The ops for filling the accumulators must be created and run
# before eval. See docstring above.
accu_mean = tf.get_variable(
"accu_mean",
shape=mean.shape,
initializer=tf.zeros_initializer(),
trainable=False,
collections=variable_collections)
accu_variance = tf.get_variable(
"accu_variance",
shape=variance.shape,
initializer=tf.zeros_initializer(),
trainable=False,
collections=variable_collections)
accu_counter = tf.get_variable(
"accu_counter",
shape=[],
initializer=tf.initializers.constant(1e-12),
trainable=False,
collections=variable_collections)
update_accus = tf.get_variable(
"update_accus",
shape=[],
dtype=tf.int32,
initializer=tf.zeros_initializer(),
trainable=False,
collections=variable_collections)
mean = tf.identity(mean, "mean")
variance = tf.identity(variance, "variance")
if is_training:
return mean, variance
logging.debug("Using accumulated moments.")
# Return the accumulated batch statistics and add current batch statistics
# to accumulators if update_accus variables equals 1.
def update_accus_fn():
return tf.group([
tf.assign_add(accu_mean, mean),
tf.assign_add(accu_variance, variance),
tf.assign_add(accu_counter, 1),
])
dep = tf.cond(
tf.equal(update_accus, 1),
update_accus_fn,
tf.no_op)
with tf.control_dependencies([dep]):
return accu_mean / accu_counter, accu_variance / accu_counter | 5c4e65c7a84e5e3658c10c62f44027749f039ad2 | 327 |
import struct
from socket import inet_ntoa
def parse_bgp_attr(atype, aval_buf):
"""Given a type and value buffer, parses a BGP attribute and returns the value
parsed"""
if atype == BGP_ATYPE_ORIGIN:
attr = 'ORIGIN'
if len(aval_buf) != 1:
return None, None, -1
aval = struct.unpack('B', aval_buf)[0]
aval = BGP_ORIGIN_TYPES[aval]
return attr, aval, 1
elif atype == BGP_ATYPE_ASPATH:
attr = 'ASPATH'
segtype, seglen = struct.unpack('BB', aval_buf[:2])
ases = []
segproc = 2
for i in range(seglen):
as_, = struct.unpack('>I', aval_buf[segproc:segproc+4])
segproc += 4
ases.append(as_)
return attr, ases, len(aval_buf)
elif atype == BGP_ATYPE_NEXTHOP:
attr = 'NEXTHOP'
aval = inet_ntoa(aval_buf)
return attr, aval, 4
else:
return None, None, len(aval_buf) | 337ee8d0178759afead4ef1c55653639ed901fac | 328 |
import os
def _get_embedding_filename(base_dir, split_name, step):
"""Create the filename for embeddings."""
return os.path.join(base_dir, str(step), f'{split_name}-embeddings.tfrecord') | d6e4ca535b462ddf120ee77924c1bf2f3c662f24 | 329 |
def getUsage():
""" Get usage information about running APBS via Python
Returns (usage)
usage: Text about running APBS via Python
"""
usage = "\n\n\
----------------------------------------------------------------------\n\
This driver program calculates electrostatic potentials, energies,\n\
    and forces using multigrid methods.\n\
It is invoked as:\n\n\
python main.py apbs.in\n\
----------------------------------------------------------------------\n\n"
return usage | c21950b52106400cb20dd9d30a5cf742e98f9da9 | 330 |
import numpy as np
def run_length_div_decode(x, n, divisor):
    """Decodes a run length encoded array and scales/converts integer values to float
    Parameters
    ----------
    x : encoded array of integers (value, repeat pairs)
    n : number of elements in decoded array
    divisor : scale factor each decoded value is divided by
    """
y = np.empty(n, dtype=np.float32)
start = 0
for i in range(0, x.shape[0] - 1, 2):
end = x[i + 1] + start
y[start:end] = x[i] / divisor
start = end
return y | 434edfb44d1225277526233989ece2c91be14b0c | 331 |
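A hypothetical usage of `run_length_div_decode`, assuming the function above is in scope: the (value, repeat) pairs [3, 2, 7, 1] with divisor 10 decode to three floats.
```
x = np.array([3, 2, 7, 1])
print(run_length_div_decode(x, n=3, divisor=10))   # [0.3 0.3 0.7]
```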
import argparse
def parse_args():
"""Build file label list"""
parser = argparse.ArgumentParser(description='Build file label list')
parser.add_argument('data_path', type=str,
help='root directory for the dataset')
parser.add_argument('dataset', type=str, choices=[
'ucf101', 'hmdb51',
'kinetics400', 'kinetics600', 'kinetics700',
'sthv1', 'sthv2'],
help='name of the dataset')
parser.add_argument('--ann_root', type=str, default='annotation')
parser.add_argument('--out_root', type=str, default='../datalist')
parser.add_argument('--phase', type=str, default='train',
choices=['train', 'val'])
parser.add_argument('--level', type=int, default=2, choices=[1, 2])
parser.add_argument('--source', type=str, default='rgb',
choices=['rgb', 'flow', 'video'])
parser.add_argument('--split', type=int, default=1, choices=[1, 2, 3])
args = parser.parse_args()
return args | 267d6fbe34e48525dfa50987fb3ce674ec28d381 | 332 |
import tensorflow as tf
def modelFnBuilder(config):
"""Returns 'model_fn' closure for Estimator."""
def model_fn(features, labels, mode, params):
print('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = {}, shape = {}'.format(name, features[name].shape))
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
# get the data
input_texts = features['input_texts']
input_texts_length = features['input_texts_length']
input_chars = features['input_chars']
input_chars_length = features['input_chars_length']
output_tags = labels['output_tags'] if is_training else None
# build the model
model = MultiTaskIntentModel(config,
cg.BATCH_SIZE,
is_training,
input_texts=input_texts,
input_texts_length=input_texts_length,
input_chars=input_chars,
input_chars_length=input_chars_length,
output_tags=output_tags)
# predict
if mode == tf.estimator.ModeKeys.PREDICT:
intent_logits = model.getResults('intent_logits')
intent_probs = tf.nn.softmax(intent_logits, axis=-1)
intent_labels = tf.math.argmax(intent_probs, axis=-1)
tag_logits = model.getResults('tag_logits')
viterbi_sequence, viterbi_score = model.decode(logit=tag_logits, sequence_lengths=input_texts_length)
predictions = {'intent_labels': intent_labels,
'viterbi_sequence': viterbi_sequence,
'viterbi_score': viterbi_score}
output_spec = tf.estimator.EstimatorSpec(mode, predictions)
elif mode == tf.estimator.ModeKeys.TRAIN:
gold_intent_labels = labels['output_indents']
intent_logits = model.getResults('intent_logits')
# max_time = tf.shape(gold_intent_labels)[1]
# target_weights = tf.sequence_mask(input_texts_length, max_time, dtype=intent_logits.dtype)
batch_size = tf.cast(cg.BATCH_SIZE, dtype=tf.float32)
intent_loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gold_intent_labels, logits=intent_logits)) / batch_size
tag_log_likelihood = model.getResults('log_likelihood')
tag_loss = tf.reduce_mean(-tag_log_likelihood)
loss = intent_loss + tag_loss
tvars = tf.trainable_variables()
l2_loss = 1e-2 * (tf.reduce_mean([tf.nn.l2_loss(v) for v in tvars]))
loss += l2_loss
lr = tf.train.polynomial_decay(
cg.LEARNING_RATE,
tf.train.get_or_create_global_step(),
cg.TRAIN_STEPS)
lr = tf.maximum(tf.constant(cg.LEARNING_RATE_LIMIT), lr)
# create optimizer and update
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())
logging_hook = tf.train.LoggingTensorHook({'step': tf.train.get_global_step(),
'loss': loss,
'l2_loss': l2_loss,
'lr': lr,
'intent_loss': intent_loss,
'tag_loss': tag_loss}, every_n_iter=1)
output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])
else:
raise NotImplementedError
return output_spec
return model_fn | ea39f0ed099ec9455667ec79df0f91fdda425783 | 333 |
from notifications.utils import notify_people, unotify_people
def accreds_validate(request, pk):
"""Validate an accred"""
accreds = [get_object_or_404(Accreditation, pk=pk_, end_date=None) for pk_ in filter(lambda x: x, pk.split(','))]
multi_obj = len(accreds) > 1
for accred in accreds:
if not accred.rights_can('VALIDATE', request.user):
raise Http404
if request.method == 'POST':
for accred in accreds:
accred.need_validation = False
accred.save()
accred.user.clear_rights_cache()
AccreditationLog(accreditation=accred, who=request.user, type='validated').save()
dest_users = accred.unit.users_with_access('ACCREDITATION', no_parent=True)
notify_people(request, 'Accreds.Validated', 'accreds_validated', accred, dest_users)
unotify_people('Accreds.ToValidate', accred)
if multi_obj:
messages.success(request, _(u'Accréditations validées !'))
else:
messages.success(request, _(u'Accréditation validée !'))
return redirect('units-views-accreds_list')
return render(request, 'units/accreds/validate.html', {'accreds': accreds, 'multi_obj': multi_obj}) | 9f539b4dacfc8bc4e824af9bff2acc4aad552c6c | 334 |
def need_verified_email(request, *args, **kwargs): # pylint: disable=unused-argument
"""
Returns error page for unverified email on edX
"""
return standard_error_page(request, 401, "verify_email.html") | 258f0cd7cc9d48724a4192397742ad476baf0aaf | 335 |
import numpy as np
def random_masking(token_ids_all):
    """Randomly mask the inputs to improve generalization.
    """
result = []
for token_ids in token_ids_all:
rands = np.random.random(len(token_ids))
result.append([
t if r > 0.15 else np.random.choice(token_ids)
for r, t in zip(rands, token_ids)
])
return result | 5798fd271b6f8a1749ef04139c44a53ef2801473 | 336 |
import os
def is_directory(dir_path):
"""Validates that the argument passed into 'argparse' is a directory."""
if not os.path.isdir(dir_path):
raise ValueError('Path is not a directory: %s' % dir_path)
return dir_path | 57f8407eb02ae0c035f14d139a41a424d36df378 | 337 |
def used_caches_and_sources(layers, caches, sources):
"""
Find used cache and source names in layers and caches configuration.
"""
used_layer_sources = find_layer_sources(layers)
used_cache_sources = find_cache_sources(caches)
all_used_sources = used_layer_sources.union(used_cache_sources)
avail_caches = set(caches.keys())
avail_sources = set(sources.keys())
used_caches = avail_caches.intersection(all_used_sources)
used_sources = avail_sources.intersection(all_used_sources).difference(used_caches)
return used_caches, used_sources | 21df59bea5cf4d336f9f103de841bfbbedadf3d5 | 338 |
from typing import Union
import numpy as np
import pandas as pd
from pandas import Categorical
def encode_labels(
labels: Union[list, np.ndarray, pd.Series],
multi_label: bool = False,
sep: str = '|'
):
"""Encode labels
Return coded labels, encoder, and decoder.
Examples:
>>> # multi-class problem
>>> labels = ['OK', 'OK', 'NG1', 'NG2', 'OK']
>>> encode_labels(labels)
(
[0, 0, 1, 2, 0],
{'OK': 0, 'NG1': 1, 'NG2': 2},
{0: 'OK', 1: 'NG1', 2: 'NG2}
)
>>> # multi-label problem, a.k.a. one hot encoding
>>> labels = ['dog', 'cat', 'dog|cat']
>>> encode_labels(labels, multi_label=True)
(
[[0, 1], [1, 0], [1, 1]],
{'dog': 0, 'cat': 1},
{0: 'dog', 1: 'cat'}
)
Args:
labels (list, np.ndarray): List of labels with string elements.
multi_label (bool, optional): Is multi label classification.
sep (str, optional): For multi-label only. Default is '|'.
Returns:
list or np.array: Coded labels. List in list out, array in array out.
dict: encoder
dict: decoder
"""
# get classes
if not multi_label:
classes = mlsorted(filter(None, set(labels)))
else:
classes = mlsorted(
{labs for item in filter(None, labels) for labs in item.split(sep)}
)
classes = [_ for _ in classes if _ not in ['']]
n_classes = len(classes)
# generate encoder and decoder
encoder = {_class: code for code, _class in enumerate(classes)}
decoder = {v: k for k, v in encoder.items()}
# create coded labels
if not multi_label:
coded_labels = [encoder[x] if x is not None else x for x in labels]
else:
coded_labels = list()
for x in labels:
labs = [0] * n_classes
if x is not None:
for lab in x.split(sep):
labs[encoder[lab]] = 1
coded_labels.append(labs)
# to numpy or to dataframe
if isinstance(labels, (pd.Series, pd.DataFrame)):
if multi_label:
coded_labels = pd.DataFrame(
coded_labels, columns=encoder.keys()
)
else:
coded_labels = pd.DataFrame(
{'y': coded_labels}, dtype=np.int32
)
elif isinstance(labels, (np.ndarray, Categorical)):
coded_labels = np.array(coded_labels, dtype=np.int32)
return coded_labels, encoder, decoder | 2cd3ec563edfe0d0f42df3018bfd2cf007738c9d | 339 |
import jax
import jax.numpy as jnp
def sigmoid_xent(*, logits, labels, reduction=True):
"""Computes a sigmoid cross-entropy (Bernoulli NLL) loss over examples."""
log_p = jax.nn.log_sigmoid(logits)
log_not_p = jax.nn.log_sigmoid(-logits)
nll = -jnp.sum(labels * log_p + (1. - labels) * log_not_p, axis=-1)
return jnp.mean(nll) if reduction else nll | a427532ddf0feba69879bc5b5d5a9a34d71d9ca6 | 340 |
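A hypothetical usage of `sigmoid_xent`, assuming the function above is in scope: for zero logits every sigmoid is 0.5, so each example's Bernoulli NLL is -2*log(0.5) = 2*log(2) ≈ 1.386.
```
logits = jnp.zeros((4, 2))
labels = jnp.array([[1., 0.]] * 4)
print(sigmoid_xent(logits=logits, labels=labels))   # ~1.3863
```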
def is_palindrome(s: str) -> bool:
"""Return whether a string is a palindrome
This is as efficient as you can get when computing whether a string is a
palindrome. It runs in O(n) time and O(1) space.
"""
if len(s) <= 1:
return True
i = 0
j = len(s) - 1
while i < j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True | 6d3001486fe3603a17e72861e3bdea495cd675c1 | 341 |
import numpy as np
def accuracy(pred_cls, true_cls, nclass=3):
"""
compute per-node classification accuracy
"""
accu = []
for i in range(nclass):
intersect = ((pred_cls == i) + (true_cls == i)).eq(2).sum().item()
thiscls = (true_cls == i).sum().item()
accu.append(intersect / thiscls)
return np.array(accu) | 208c2e31b5df37179b2d67a2b8423c3236c64264 | 342 |
def my_hostogram(gray, bins):
""" pixel values has to be within bins range, otherwise index out of range, for example
if pixel 400th has value 70, but bins are -> [0...40], then histogram[70] yields IOR
"""
histogram = [0 for i in bins]
for i in range(gray.shape[0]):
for j in range(gray.shape[1]):
histogram[gray[i][j]] = histogram[gray[i][j]] + 1
return histogram | a2e774fb7b2249325191b20e6fa08847e38211c2 | 343 |
def reverse(password, position_x, position_y):
"""Reverse from position_x to position_y in password."""
password_slice = password[position_x:position_y + 1]
password[position_x:position_y + 1] = password_slice[::-1]
return password | 46fec2c6b9c02d8efa71d53451974e46cbe68102 | 344 |
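A usage example of `reverse`, assuming the function above is in scope: reversing positions 1 through 3 of the password in place.
```
password = list("abcdef")
print(reverse(password, 1, 3))   # ['a', 'd', 'c', 'b', 'e', 'f']
```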
from typing import Union
from typing import List
import random
def gen_sentence(
start_seq: str = None,
N: int = 4,
prob: float = 0.001,
output_str: bool = True
) -> Union[List[str], str]:
"""
Text generator using Thai2fit
:param str start_seq: word for begin word.
:param int N: number of word.
:param bool output_str: output is str
:param bool duplicate: duplicate word in sent
:return: list words or str words
:rtype: List[str], str
:Example:
::
from pythainlp.generate.thai2fit import gen_sentence
gen_sentence()
# output: 'แคทรียา อิงลิช (นักแสดง'
gen_sentence("แมว")
# output: 'แมว คุณหลวง '
"""
if start_seq is None:
start_seq = random.choice(list(thwiki_itos))
list_word = learn.predict(
start_seq,
N,
temperature=0.8,
min_p=prob,
sep='-*-'
).split('-*-')
if output_str:
return ''.join(list_word)
return list_word | 800b498498396a4cda84885481b09df689f541aa | 345 |
def GetBoolValueFromString(s):
"""Returns True for true/1 strings, and False for false/0, None otherwise."""
if s and s.lower() == 'true' or s == '1':
return True
elif s and s.lower() == 'false' or s == '0':
return False
else:
return None | d6ef53e837fc825a32e073e3a86185093dd1d037 | 346 |
def genomic_del6_abs_37(genomic_del6_37_loc):
"""Create test fixture absolute copy number variation"""
return {
"type": "AbsoluteCopyNumber",
"_id": "ga4gh:VAC.60XjT6dzYKX8rn6ocG4AVAxCoUFfdjI6",
"subject": genomic_del6_37_loc,
"copies": {"type": "Number", "value": 1}
} | 64d8bb95768587adef71c8a98111e0454dfdbb93 | 347 |
def get_typical_qualifications(cfg):
"""
create qualification list to filter just workers with:
- + 98% approval rate
- + 500 or more accepted HIT
- Location USA
:param cfg:
:return:
"""
if not cfg['hit_type'].getboolean('apply_qualification'):
return []
qualification_requirements=[
{
# Worker_NumberHITsApproved
'QualificationTypeId': '00000000000000000040',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues': [
500,
],
'RequiredToPreview': False,
'ActionsGuarded': 'Accept'
}, {
# Worker_PercentAssignmentsApproved
'QualificationTypeId': '000000000000000000L0',
'Comparator': 'GreaterThanOrEqualTo',
'IntegerValues': [
98,
],
'RequiredToPreview': False,
'ActionsGuarded': 'Accept'
}, {
# Worker_Locale
'QualificationTypeId': '00000000000000000071',
'Comparator': 'EqualTo',
'LocaleValues': [
{
'Country':"US"
}
],
'RequiredToPreview': False,
'ActionsGuarded': 'Accept'
},
]
return qualification_requirements | 4cfad92d7c2587e2fce1caeac032a69f87c70c01 | 348 |
from typing import Callable
from typing import Tuple
from typing import Type
def _gen_codegen_dev_to_reg(
nybble: int,
) -> Callable[[Context, Op], Tuple[Context, Op]]:
"""'Code generator generator' for device-to-register instructions."""
def codegen_dev_to_reg(context: Context, op: Op) -> Tuple[Context, Op]:
op = op._replace(args=parse_args_if_able(
_PARSE_OPTIONS, context, op, Type.REGISTER, Type.ADDRESS))
if all_args_parsed(op.args):
_regcheck(op.args[0])
_devcheck(op.args[1])
digits = (nybble, op.args[0].integer, op.args[1].integer)
op = op._replace(todo=None, hex='{:X}{:X}{:X}F'.format(*digits))
# We can still update pos whether we've parsed all args or not.
return context.advance_by_bytes(2), op
return codegen_dev_to_reg | 96a1a7dd3d1e9fb69b735ba7e2034ea0c612f6bb | 349 |
def _shell_wrap_inner(command, shell=True, sudo_prefix=None):
"""
Conditionally wrap given command in env.shell (while honoring sudo.)
(Modified from fabric.operations._shell_wrap to avoid double escaping,
as the wrapping host command would also get shell escaped.)
"""
# Honor env.shell, while allowing the 'shell' kwarg to override it (at
# least in terms of turning it off.)
if shell and not env.use_shell:
shell = False
# Sudo plus space, or empty string
if sudo_prefix is None:
sudo_prefix = ""
else:
sudo_prefix += " "
# If we're shell wrapping, prefix shell and space, escape the command and
# then quote it. Otherwise, empty string.
if shell:
shell = env.shell + " "
command = '"%s"' % command # !! removed _shell_escape() here
else:
shell = ""
# Resulting string should now have correct formatting
return sudo_prefix + shell + command | 6a1a185262e312aac193b70babf9c4b8c1fc2c73 | 350 |
from typing import List
import time
def events_until(events: List[ScheduleEvent],
until: time, *, after: time = None) \
-> List[ScheduleEvent]:
"""
Return events up to and including the given time.
Keyword arguments:
after -- if specified, only events after this time will be included.
"""
if after is not None:
events = events_after(events, after)
return [event for event in events if event[0] <= until] | 8d7390c684fb5590ad1fbdaa0680b3aff7474c56 | 351 |
import socket
def get_ip():
"""
Get local ip from socket connection
:return: IP Addr string
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('bing.com', 80))
return s.getsockname()[0] | 9afb9cfc5721ea7a89764579bd878a9b51361af2 | 352 |
import unicodedata
def shave_marks(txt):
"""去掉全部变音符号"""
norm_txt = unicodedata.normalize('NFD', txt) # 把所有的字符分解为基字符和组合记号
shaved = ''.join(c for c in norm_txt
if not unicodedata.combining(c)) # 过滤掉所有的组合记号
return unicodedata.normalize('NFC', shaved) | 0b8e15c72854a5bca7b12f6452292d7472bbf1bc | 353 |
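A usage example of `shave_marks`, assuming the function above is in scope: diacritics are stripped while the base characters are kept.
```
print(shave_marks('café naïve'))   # cafe naive
```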
import numpy as np
import scipy.special as sps
def _kld_gamma(p_data, q_data):
"""
Computes the Kullback-Leibler divergence between two gamma PDFs
Parameters
----------
p_data: np.array
Data of the first process
q_data: np.array
Data of the first process
Returns
-------
r_kld_gamma: numeric
Kullback-Leibler Divergence Quantity
References
----------
[1] Bauckhage, Christian. (2014). Computing the Kullback-Leibler Divergence between two Generalized Gamma Distributions. arXiv. 1401.6853.
"""
# -------------------------------------------------------------------------- Distribution Parameters -- #
def _gamma_params(data, method='MoM'):
"""
Computes the parameters of a gamma probability density function (pdf), according to the selected
method.
Parameters
----------
data: np.array
The data with which will be adjusted the pdf
method: str
Method to calculate the value of the parameters for the pdf
'MoM': Method of Moments (Default)
Returns
-------
r_params: dict
{'alpha': gamma distribution paramerter, 'beta': gamma distribution parameter}
"""
# -- Methods of Moments -- #
if method == 'MoM':
# first two moments
mean = np.mean(data)
variance = np.var(data)
# sometimes refered in literature as k
alpha = mean**2/variance
# sometimes refered in literature as 1/theta
beta = mean/variance
# return the gamma distribution empirically adjusted parameters
return alpha, beta
# -- For errors or other unsupported methods
else:
raise ValueError("Currently, the supported methods are: 'MoM'")
# alpha_1: Distribution 1: shape parameter, alpha_1 > 0
# beta_1: Distribution 1: rate or inverse scale distribution parameter, beta_1 > 0
alpha_1, beta_1 = _gamma_params(data=p_data)
# alpha_2: Distribution 2: shape parameter, alpha_2 > 0
# beta_2: Distribution 2: rate or inverse scale parameter, beta_2 > 0
alpha_2, beta_2 = _gamma_params(data=q_data)
# Expression with beta instead of theta
theta_1 = 1/beta_1
theta_2 = 1/beta_2
p1, p2 = 1, 1 # Generalized Gamma Distribution with p=1 is a gamma distribution [1]
# Calculations, see [1] for mathematical details.
a = p1*(theta_2**alpha_2)*sps.gamma(alpha_2/p2)
b = p2*(theta_1**alpha_1)*sps.gamma(alpha_1/p1)
c = (((sps.digamma(alpha_1/p1))/p1) + np.log(theta_1))*(alpha_1 - alpha_2)
# Bi-gamma functions
d = sps.gamma((alpha_1+p2)/p1)
e = sps.gamma((alpha_1/p1))
# Calculations
f = (theta_1/theta_2)**(p2)
g = alpha_1/p1
# General calculation and output
r_kld = np.log(a/b) + c + (d/e)*f - g
# Final Kullback-Leibler Divergence for Empirically Adjusted Gamma PDFs
return r_kld | c20fd6764299300dc555bca356d9942e98d38214 | 354 |
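# Rough usage sketch for _kld_gamma, assuming numpy and scipy are available as
# imported above. The sample sizes and gamma parameters are illustrative only:
# the divergence should be near zero for identically distributed samples and
# grow as the two distributions move apart.
rng = np.random.default_rng(0)
p = rng.gamma(shape=2.0, scale=1.0, size=10_000)
q_same = rng.gamma(shape=2.0, scale=1.0, size=10_000)
q_far = rng.gamma(shape=6.0, scale=0.5, size=10_000)
print(_kld_gamma(p, q_same))  # close to 0
print(_kld_gamma(p, q_far))   # noticeably larger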
import numpy as np
def interpolate_rat(nodes, values, use_mp=False):
"""Compute a rational function which interpolates the given nodes/values.
Args:
nodes (array): the interpolation nodes; must have odd length and
be passed in strictly increasing or decreasing order
values (array): the values at the interpolation nodes
use_mp (bool): whether to use ``mpmath`` for extended precision. Is
automatically enabled if `nodes` or `values` use ``mpmath``.
Returns:
BarycentricRational: the rational interpolant. If there are `2n + 1` nodes,
both the numerator and denominator have degree at most `n`.
References:
https://doi.org/10.1109/LSP.2007.913583
"""
# ref: (Knockaert 2008), doi:10.1109/LSP.2007.913583
# see also: (Ionita 2013), PhD thesis, Rice U
values = np.asanyarray(values)
nodes = np.asanyarray(nodes)
n = len(values) // 2 + 1
m = n - 1
if not len(values) == n + m or not len(nodes) == n + m:
raise ValueError('number of nodes should be odd')
xa, xb = nodes[0::2], nodes[1::2]
va, vb = values[0::2], values[1::2]
# compute the Loewner matrix
B = (vb[:, None] - va[None, :]) / (xb[:, None] - xa[None, :])
# choose a weight vector in the nullspace of B
weights = _nullspace_vector(B, use_mp=use_mp)
return BarycentricRational(xa, va, weights) | cdc98a0a04a6d35fb409fb4235dab759c1f96c1c | 355 |
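# Self-contained sketch of the Loewner-matrix step above, using a plain SVD in
# place of the library's _nullspace_vector helper and evaluating the barycentric
# form directly instead of BarycentricRational (both are assumptions, not the
# actual implementation). For f(x) = 1/(1 + x**2) sampled at 5 nodes, a weight
# vector in the nullspace of the Loewner matrix defines the rational interpolant.
import numpy as np
nodes = np.linspace(-1.0, 1.0, 5)
values = 1.0 / (1.0 + nodes**2)
xa, xb = nodes[0::2], nodes[1::2]
va, vb = values[0::2], values[1::2]
L = (vb[:, None] - va[None, :]) / (xb[:, None] - xa[None, :])
weights = np.linalg.svd(L)[2][-1]   # right singular vector for the smallest singular value
x = 0.3                             # evaluate the barycentric interpolant at a test point
num = np.sum(weights * va / (x - xa))
den = np.sum(weights / (x - xa))
print(num / den, 1.0 / (1.0 + x**2))  # the two values should agree closely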
import re
def binder_update_page_range(payload):
"""Parser for `binder_update_page_range`"""
try:
match = re.match(binder_update_page_range_pattern, payload)
if match:
            return BinderUpdatePageRange(int(match.group(1)), int(match.group(2)),
                                         int(match.group(3)), int(match.group(4)))
    except Exception as e:
        raise ParserError(str(e)) | 82f3bc931bebc5816b38feb75f918779fa271840 | 356
import re
def generate_breadcrumb(url: str, separator: str) -> str:
"""
    Takes a url string and a separator string and returns a string
    containing breadcrumb navigation.
    Wikipedia page about breadcrumb navigation:
    https://en.wikipedia.org/wiki/Breadcrumb_navigation
    Examples:
>>> generate_breadcrumb("youtube.com", " > ")
'<span class="active">HOME</span>'
>>> generate_breadcrumb("https://github.com/harmonify/index.html", " > ")
'<a href="/">HOME</a> > <span class="active">HARMONIFY</span>'
>>> generate_breadcrumb("facebook.com/sebuah-slug-yang-panjang-sekali", " / ")
'<a href="/">HOME</a> / <span class="active">SSYPS</span>'
"""
    # initialize the variable that will hold the result
    result = []
    # take the path from the url
    path = parse_path(url)
    # strip a trailing index.* from the path
    path = re.sub(r"index\.?.*$", "", path)
    # split the path into a list
pathList = path.split("/")
if pathList[-1] == "":
pathList.pop()
    # generate anchor tags from the first element up to the
    # second-to-last element of pathList
for i in range(len(pathList[:-1])):
url = "/".join(pathList[: i + 1])
desc = generate_description(pathList[i])
anchor = generate_anchor_tag(url, desc)
result.append(anchor)
    # generate a span tag from the last element of pathList
span = generate_span_tag(generate_description(pathList[-1]))
result.append(span)
    # join the anchor and span tags with the separator and return the result
return separator.join(result) | 8fc4e84ba68a5ff0d359ded9c70aef9ebec89b32 | 357 |
import time
import os
def get_work_path():
"""
    Get the working directory, creating it if it does not exist.
:return: work_path str
"""
work_dir = config.WORK_DIR
work_path = f'{work_dir}/{time.strftime("%Y%m%d%H%M%S")}'
print(f'work path: {work_path}')
if not os.path.exists(work_path):
os.makedirs(work_path)
return work_path | a5c32034b5d2d65de5a15d1add2004af3692d747 | 358 |
from pathlib import Path
import tensorflow_hub as hub
def extract_all_sentences(dataset_path, features_outfile=None):
""" Extract features from sentences using pretrained universal sentence embeddings and save them in a pickle file
:param dataset_path: the path of the dataset to use
:param features_outfile: file used to store the extracted features
:return: extracted embeddings
"""
model_path = Path(__file__).parent.parent.parent / "data" / "models" / "use"
use = hub.load(str(model_path.absolute()))
feature_extractor = TextFeatureExtractor(use)
return feature_extractor.extract_all_features(dataset_path, features_outfile) | 259ef284310c8bc2a52aa201ec17522e4b00b6d1 | 359 |
import os
import yaml
def _load_yaml(blueoil_config_filename):
"""load blueoil config yaml
Args:
blueoil_config_filename(str): File path of blueoil config yaml file.
Returns:
blueoil_config(dict): dict of blueoil config.
"""
    if not os.path.exists(blueoil_config_filename):
        raise FileNotFoundError("File not found: {}".format(blueoil_config_filename))
    with open(blueoil_config_filename, "r") as f:
        blueoil_config = yaml.safe_load(f)
model_name, _ = os.path.splitext(os.path.basename(blueoil_config_filename))
blueoil_config["model_name"] = model_name
return blueoil_config | 83d259059b734a280884496f22975d67ef3c3e06 | 360 |
import numpy as np
import pandas as pd
from django.shortcuts import render
from bokeh.embed import components
from bokeh.models import ColumnDataSource, HoverTool, Range1d
from bokeh.plotting import figure
from bokeh.resources import INLINE
def bat_activity(request):
""" """
# TODO:
wave_file_name = 'WURB-2_20160908T220024+0200_N57.6627E12.6393_TE-384.wav'
# Pandas data frame
peak_df = None
try:
# Prod:
peak_df = pd.read_csv('/srv/django/cloudedbats/src/test_data/peak_file.txt',
sep="\t")
    except Exception:
# Dev:
# peak_df = pd.read_csv('cloudedbats_django/cloudedbats_django/test_data/peak_file.txt',
# sep="\t")
peak_df = pd.read_csv('test_data/peak_file.txt',
sep="\t")
peak_df['time_s'] = peak_df.time/1000
peak_df['amplitude_log'] = np.log(peak_df.amplitude + 2) * 3 #* 10
# Bokeh data source.
ds = ColumnDataSource(peak_df)
#
### TOOLS="pan, box_zoom, wheel_zoom, undo, redo, reset, hover, resize, save"
TOOLS="pan, box_zoom, wheel_zoom, undo, redo, reset, hover, save"
# MORE_TOOLS="crosshair, tap,box_select, poly_select, lasso_select, tap"
p = figure(tools=TOOLS, toolbar_location="above")
# p = figure(tools=TOOLS, toolbar_location="above", active_drag="box_zoom")
# p.title.text="WURB-2_20160908T220024+0200_N57.6627E12.6393_TE-384"
p.plot_width = 700 # 1800
p.plot_height = 300
#
s = p.scatter(source = ds, x='time_s', y='frequency',
marker='circle',
size='amplitude_log',
line_color="navy", fill_color="red", alpha=0.5,
)
p.xaxis.axis_label="Time (sec)"
p.yaxis.axis_label="Peak frequency (kHz)"
p.x_range = Range1d(0, 300, bounds=(0, 300))
p.y_range = Range1d(0, 100, bounds=(0, 150))
#
hover = p.select_one(HoverTool)
hover.point_policy = "follow_mouse"
hover.tooltips = [
("Frequency (kHz)", "@frequency"),
("Amplitude", "@amplitude"),
("Time (sec.)", "@time_s")]
#
script, div = components(p)
#
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
#
return render(request, "cloudedbats_bat_activity.html",
{
'wave_file_name': wave_file_name,
'js_resources': js_resources,
'css_resources': css_resources,
'plot_script': script,
'plot_div': div}
) | dd64970f03b6ad90be07d9887e25b66703081126 | 361 |
def search_sorted(array, value):
"""
Searches the given sorted array for the given value using a
BinarySearch which should execute in O(log N).
array a 1D sorted numerical array
value the numerical value to search for
returns index of array closest to value
returns None if value is outside variable bounds
"""
    def index_to_check(rmin, rmax):
        return (rmin + rmax) // 2
range_min = 0
range_max_0 = len(array)
range_max = range_max_0
numloops = 0
while numloops < 100:
numloops += 1
if (range_max - range_min) == 1:
if (range_max == range_max_0) or (range_min == 0):
raise LookupError("For some reason, range_max-" +\
"range_min reached 1 before " +\
"the element was found. The " +\
"element being searched for " +\
("was %s. (min,max)" % (value,) +\
("=%s" % ((range_min, range_max),))))
else:
high_index = range_max
else:
high_index = index_to_check(range_min, range_max)
high_val = array[high_index]
low_val = array[high_index - 1]
if value < low_val:
range_max = high_index
elif value > high_val:
range_min = high_index
else: # low_val <= value <= high_val
if (2 * (high_val - value)) < (high_val - low_val):
return high_index
else:
return high_index - 1
raise NotImplementedError("Something went wrong! I " +\
"caught a pseudo-infinite loop!") | 6eec5fb24cd2da1989b4b80260ce185191d782f1 | 362 |
from typing import Dict
import attr
import shapely.geometry
def to_doc(d: DatasetDoc) -> Dict:
"""
Serialise a DatasetDoc to a dict
If you plan to write this out as a yaml file on disk, you're
better off with `to_formatted_doc()`.
"""
doc = attr.asdict(
d,
recurse=True,
dict_factory=dict,
# Exclude fields that are the default.
filter=lambda attr, value: "doc_exclude" not in attr.metadata
and value != attr.default
# Exclude any fields set to None. The distinction should never matter in our docs.
and value is not None,
retain_collection_types=False,
)
doc["$schema"] = ODC_DATASET_SCHEMA_URL
if d.geometry is not None:
doc["geometry"] = shapely.geometry.mapping(d.geometry)
doc["id"] = str(d.id)
doc["properties"] = dict(d.properties)
return doc | 83a3ca0838074e000238765c34067e8086e4a2ab | 363 |
def annealing_exp(start, end, pct):
"""Exponentially anneal from start to end as pct goes from 0.0 to 1.0."""
return start * (end / start) ** pct | 4517b07ad7d065a1ba8d4f963c688677846640e3 | 364 |
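# Worked example of annealing_exp: with start=0.1 and end=0.001 the value decays
# along a geometric curve, so pct=0.5 lands at the geometric mean 0.01.
print(annealing_exp(0.1, 0.001, 0.0))  # 0.1
print(annealing_exp(0.1, 0.001, 0.5))  # ~0.01  (geometric midpoint)
print(annealing_exp(0.1, 0.001, 1.0))  # ~0.001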
def _compile_theano_function(param, vars, givens=None):
"""Compile theano function for a given parameter and input variables.
This function is memoized to avoid repeating costly theano compilations
when repeatedly drawing values, which is done when generating posterior
predictive samples.
Parameters
----------
param : Model variable from which to draw value
vars : Children variables of `param`
givens : Variables to be replaced in the Theano graph
Returns
-------
A compiled theano function that takes the values of `vars` as input
positional args
"""
return function(vars, param, givens=givens,
rebuild_strict=True,
on_unused_input='ignore',
allow_input_downcast=True) | bed6879a63beebe3af8eaabf654e5617e550e971 | 365 |
def redirect(url):
"""Create a response object representing redirection.
:param url: a URL
:return: a Response
"""
headers = {
"Location": url,
}
return Response(headers=headers, code=HTTPStatus.FOUND) | 13a61d5854fd5ef50ce51e38a0dc38af282a5693 | 366 |
import json
def remove_ordereddict(data, dangerous=True):
"""turns a nested OrderedDict dict into a regular dictionary.
dangerous=True will replace unserializable values with the string '[unserializable]' """
# so nasty.
return json.loads(json_dumps(data, dangerous)) | f5ca4db424c721a5e9015e77cd727f71b3912699 | 367 |
from numbers import Number
from typing import List
def evaluate_v1(tokens: List[str]) -> Number:
"""Evaluates a tokenized expression and returns the result"""
stack: List = []
for token in tokens:
stack = consume_token(token, stack)
return get_result_from_stack(stack) | 1507baf55f427096b12690d76854d0189ec1571e | 368 |
import numpy as np
import pyprind
def load_gromacs_reaction_coord_files(us_path, n_wins, step=10, verbose=False):
"""
Parameters
----------
us_path: string
Path to the xvg files with sampled reaction coordinate values
n_wins: integer
Number of umbrella runs
step: integer
Time interval for analysis
verbose: Boolean
Verbosity
Outputs
-------
us_pull_l: list
list of reaction coordinates values sampled in the umbrella runs
"""
us_pull_l = []
bar = pyprind.ProgBar(n_wins, update_interval=15)
for win_i in (range(1, n_wins+1)):
if verbose:
print(win_i)
us_pull_l.append(
np.loadtxt(us_path.format(win_i), skiprows=17)[::step])
bar.update(force_flush=False)
return us_pull_l | 47a304592306b142b96638f40410685ce31e0482 | 369 |
from typing import Dict
def h_customer_role_playing(
process_configuration: Dict[str, str], h_customer: Hub, staging_table: StagingTable
) -> RolePlayingHub:
"""Define h_customer_role_playing test hub.
Args:
process_configuration: Process configuration fixture value.
h_customer: Hub customer fixture value.
staging_table: Staging table fixture value.
Returns:
Deserialized role playing hub h_customer_role_playing.
"""
h_customer_role_playing_fields = [
Field(
parent_table_name="h_customer_role_playing",
name="h_customer_role_playing_hashkey",
data_type=FieldDataType.TEXT,
position=1,
is_mandatory=True,
length=32,
),
Field(
parent_table_name="h_customer_role_playing",
name="r_timestamp",
data_type=FieldDataType.TIMESTAMP_NTZ,
position=2,
is_mandatory=True,
),
Field(
parent_table_name="h_customer_role_playing",
name="r_source",
data_type=FieldDataType.TEXT,
position=3,
is_mandatory=True,
),
Field(
parent_table_name="h_customer_role_playing",
name="customer_role_playing_id",
data_type=FieldDataType.TEXT,
position=4,
is_mandatory=True,
),
]
h_customer_role_playing = RolePlayingHub(
schema=process_configuration["target_schema"],
name="h_customer_role_playing",
fields=h_customer_role_playing_fields,
)
h_customer_role_playing.parent_table = h_customer
h_customer_role_playing.staging_table = staging_table
return h_customer_role_playing | f8f6fadc9dad8c637fbf173a2d10378f087954f6 | 370 |
import os
import subprocess
def call_port(command, arguments):
"""
This function calls the port executable with the specified parameters,
printing the output to stdout.
"""
command = ["port", command] + arguments
    if os.getuid() != 0:
print("Using sudo to execute port.")
return subprocess.call(["sudo"] + command)
else:
return subprocess.call(command) | b5209388a03093758b680220600dd99749be5c81 | 371 |
def _classify(text:str, name:str=None, service:str=None, language:Language=None):
"""Takes the input text (and optional filename) and makes a best effort to extract/label the code content needed for classification.
E.g. a markdown file has codeblocks extracted and labeled with language, and a code file is extracted in its entirety and labeled accordingly."""
targeted_content = []
# First let's extract what metadata we can, as well as target our classification to important bits (code)
if is_markdown(text, name):
# Try to extract code blocks.
targeted_content += _extract_and_label_codefences(text)
# TODO: May want to refine this (e.g. don't run code-specific models on non-code)
# If none, or if code blocks don't do anything, fall back to treating whole thing as text.
# if not targeted_content:
# targeted_content.append((text, language, service))
# Treat as code as long as it's one of the languages we expect to deal with
elif is_code(text, name):
targeted_content.append((text, language or is_code(text, name), service))
# We also want to handle yaml, but we don't do anything special with that.
elif is_yaml(text, name):
targeted_content.append((text, language, service)) #TODO: Might want to do something custom for yaml in the future.
# otherwise short circuit out. ( e.g. json, etc)
else:
# Maybe should treat it as raw text, parse whole thing?
# TODO: figure this out.
targeted_content.append((text, language, service))
# TODO: If sdk/language aren't specified, try to determine them.
# If we know what they are with high confidence, use the targeted model, otherwise use a generic model. (Maybe run both anyhow and make sure they agree or mosaic)
return targeted_content | 7f146b8b0c9b041e681c4cb2d5ef5a3b175c7eda | 372 |
import argparse
import os
def parse_command_line_arguments():
"""
Parse the command-line arguments being passed to RMG Py. This uses the
:mod:`argparse` module, which ensures that the command-line arguments are
sensible, parses them, and returns them.
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', metavar='FILE',
help='a predictor training input file')
parser.add_argument('-w', '--weights', metavar='H5',
help='Saved model weights to continue training on (typically for transfer learning)')
parser.add_argument('-d', '--data', metavar='FILE',
help='A file specifying which datasets to train on. Alternatively, a space-separated .csv file'
' with SMILES/InChI and output(s) in the first and subsequent columns, respectively.')
parser.add_argument('-o', '--out_dir', metavar='DIR', default=os.getcwd(),
help='Output directory')
parser.add_argument('-n', '--normalize', action='store_true',
help='Normalize output based on training set mean and standard deviation')
parser.add_argument('--save_tensors_dir', metavar='DIR',
help='Location to save tensors on disk (frees up memory)')
parser.add_argument('--keep_tensors', action='store_true',
help='Do not delete directory containing tensors at end of job')
parser.add_argument('-f', '--folds', type=int, default=5,
help='number of folds for training')
parser.add_argument('-tr', '--train_ratio', type=float, default=0.9,
help='Fraction of training data to use for actual training, rest is early-stopping validation')
parser.add_argument('-te', '--test_ratio', type=float, default=0.0,
help='Fraction of data to use for testing. If loading data from database,'
' test ratios are specified in datasets file')
parser.add_argument('-t', '--train_mode', default='full_train',
help='train mode: currently support in_house and keras for k-fold cross-validation,'
' and full_train for full training')
parser.add_argument('-bs', '--batch_size', type=int, default=1,
help='batch training size')
parser.add_argument('-lr', '--learning_rate', default='0.0007_30.0',
help='two parameters for learning rate')
parser.add_argument('-ep', '--nb_epoch', type=int, default=150,
help='number of epochs for training')
parser.add_argument('-pc', '--patience', type=int, default=10,
help='Number of consecutive epochs allowed for loss increase before stopping early.'
' Note: A value of -1 indicates that the best model will NOT be saved!')
parser.add_argument('-s', '--seed', type=int, default=0,
help='Numpy random seed')
return parser.parse_args() | 49b0beb15f6f8b2bba903e15dcb0bf43d79ac11e | 373 |
def _js_requires(offline: bool = False) -> str:
"""Format JS requires for Plotly dependency.
Args:
offline: if True, inject entire Plotly library for offline use.
Returns:
str: <script> block with Plotly dependency.
"""
helper_fxns = _load_js_resource(_AxPlotJSResources.HELPER_FXNS)
if offline:
script = Template(_load_js_resource(_AxPlotJSResources.PLOTLY_OFFLINE)).render(
library=plotly_offline.offline.get_plotlyjs()
)
else:
script = _load_js_resource(_AxPlotJSResources.PLOTLY_ONLINE)
return script + helper_fxns | cc5bf8b5c6a840b9905008c0986f5b7ef65d6942 | 374 |
def resnet_retinanet(num_classes, backbone='resnet50', inputs=None, modifier=None, **kwargs):
""" Constructs a retinanet model using a resnet backbone.
Args
num_classes: Number of classes to predict.
backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to
freeze backbone layers for example).
Returns
RetinaNet model with a ResNet backbone.
"""
# choose default input
if inputs is None:
inputs = keras.layers.Input(shape=(None, None, 3))
# create the resnet backbone
if backbone == 'resnet50':
resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
elif backbone == 'resnet101':
resnet = keras_resnet.models.ResNet101(inputs, include_top=False, freeze_bn=True)
elif backbone == 'resnet152':
resnet = keras_resnet.models.ResNet152(inputs, include_top=False, freeze_bn=True)
else:
raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))
# invoke modifier if given
if modifier:
resnet = modifier(resnet)
# create the full model
return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=resnet.outputs[1:], **kwargs) | 9e44d811cc0da8e7810731379259f8921483e907 | 375 |
def trans_full_matrix_projection(input, size=0, param_attr=None):
"""
Different from full_matrix_projection, this projection performs matrix
multiplication, using transpose of weight.
.. math::
out.row[i] += in.row[i] * w^\mathrm{T}
:math:`w^\mathrm{T}` means transpose of weight.
The simply usage is:
.. code-block:: python
proj = trans_full_matrix_projection(input=layer,
size=100,
param_attr=ParamAttr(
name='_proj',
initial_mean=0.0,
initial_std=0.01))
:param input: input layer
:type input: LayerOutput
:param size: The parameter size. Means the width of parameter.
:type size: int
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
:return: A TransposedFullMatrixProjection Object.
:rtype: TransposedFullMatrixProjection
"""
proj = TransposedFullMatrixProjection(
input_layer_name=input.name, size=size, **param_attr.attr)
proj.origin = input
return proj | c8c69a01bf311d449ec6b9af68d61fe917073c75 | 376 |
from urllib.request import Request, urlopen
def http_head_deck_etag(gist_url):
"""Perform a HEAD against gist_url and return the etag."""
class HeadRequest(Request):
def get_method(self):
return 'HEAD'
head_request = HeadRequest(gist_url + '/raw')
response = urlopen(head_request)
headers = response.headers
etag = headers['etag']
return etag | b5f4d4ebb80ec95059562c500edc8fc3a4040064 | 377 |
import pandas as pd
def _get_fluxes(sol, reactions):
"""Get the primal values for a set of variables."""
fluxes = {
r.id: sol.fluxes.loc[r.community_id, r.global_id] for r in reactions
}
return pd.Series(fluxes) | da5ff0af1a3072baca70ac338ee29a7ea91606ac | 378 |
def compatible_elfs(elf1, elf2):
"""See if two ELFs are compatible
This compares the aspects of the ELF to see if they're compatible:
bit size, endianness, machine type, and operating system.
Parameters
----------
elf1 : ELFFile
elf2 : ELFFile
Returns
-------
True if compatible, False otherwise
"""
osabis = frozenset([e.header['e_ident']['EI_OSABI'] for e in (elf1, elf2)])
compat_sets = (frozenset('ELFOSABI_%s' % x
for x in ('NONE',
'SYSV',
'GNU',
'LINUX', )), )
return ((len(osabis) == 1 or
any(osabis.issubset(x)
for x in compat_sets)) and elf1.elfclass == elf2.elfclass and
elf1.little_endian == elf2.little_endian and
elf1.header['e_machine'] == elf2.header['e_machine']) | 808c52de45e96d177429ebe1339f4a97c2c219d0 | 379 |
import tempfile
import os
import atexit
import importlib
import sys
def load_source(source, delete_on_exit):
"""Loads the given source code as a Python module."""
with tempfile.NamedTemporaryFile(
mode='w',
suffix='.py',
prefix='__autograph_generated_file',
delete=False,
encoding='utf-8') as f:
module_name = os.path.basename(f.name[:-3])
file_name = f.name
f.write(source)
if delete_on_exit:
atexit.register(lambda: _remove_file(file_name))
spec = importlib.util.spec_from_file_location(module_name, file_name)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# TODO(mdan): Use our own garbage-collected cache instead of sys.modules.
sys.modules[module_name] = module
return module, file_name | 4881ed3722d8962fd45c80c0fed9e9d75ed7746a | 380 |
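# Usage example for load_source: compile a small module from a source string and
# call into it. delete_on_exit=False is used here because the cleanup path relies
# on the module's private _remove_file helper, which is not shown in this snippet.
_mod, _path = load_source("def double(x):\n    return 2 * x\n", delete_on_exit=False)
print(_mod.double(21))  # -> 42
print(_path)            # path of the generated temporary module file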
def _tear_down_response(data):
"""Helper function to extract header, payload and end from received response
data."""
response_header = data[2:17]
# Below is actually not used
response_payload_size = data[18]
response_payload = data[19:-2]
response_end = data[-2:]
return response_header, response_payload, response_end | 0c9684c2c054beaff018f85a6775d46202d0095a | 381 |
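# Illustration of the fixed-offset slicing in _tear_down_response on a dummy byte
# string. The layout below is made up purely to show the offsets: bytes 2-16 form
# the header, byte 18 the payload size, bytes 19..-3 the payload, last 2 bytes the end.
_dummy = bytes(range(2)) + b"H" * 15 + b"\x00\x05" + b"PAYLOAD" + b"\r\n"
_hdr, _payload, _end = _tear_down_response(_dummy)
print(_hdr, _payload, _end)  # b'HHHHHHHHHHHHHHH' b'PAYLOAD' b'\r\n'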
import pandas as pd
import pymysql
def read_data_from_bd(query,
host,
user,
port,
database,
password):
""" get data from abc database
arg:
query: sql
username: database username
password: database password
return:
df: dataframe
"""
connection = pymysql.connect(host=host,
user=user,
port=port,
db=database,
password=password)
df = pd.read_sql(query, connection)
return df | a7ff96f9bda9b71bced1baaceeeb7c7797839bb4 | 382 |
def stack_atomic_call_middleware(q_dict, q_queryset, logger, middleware):
""" Calls the middleware function atomically.
* Returns cached queue on error or None """
cached_q_dict = q_dict[:]
cached_q_query = q_queryset.all()
try:
middleware(q_dict, q_queryset, logger)
    except Exception:
logger.error('MM_STACK: Middleware exception occurred in %s' % middleware.__name__)
return [cached_q_dict, cached_q_query]
return None | 9d01c51e19702ba4bc0ae155f0b9b386a4d947b6 | 383 |
def collate_with_neg_fn(generator):
"""Collate a list of datapoints into a batch, with negative samples in last half of batch."""
users, items, item_attr, num_attr = collate_fn(generator)
users[len(users) // 2:] = users[:len(users) // 2]
return users, items, item_attr, num_attr | 189104ba993e522a1a5f7b40dfcaa06b25e69966 | 384 |
def build_scenario_3(FW, verbosity=None):
"""
Tests if override is cleared when all switch behaviours go out of scope.
And tests switch command with opaque value.
Returns a list of 2-lists: [time, 0ary function] that describes exactly
what needs to be executed when. The 0ary functions return a falsey value
when it succeeded, and a string describing what went wrong else.
"""
def setup_scenario_3():
sendBehaviour(0, buildTwilight(9, 14, 80))
sendBehaviour(1, buildSwitchBehaviour(9, 12, 70))
scenario = TestScenario(FW, "scenario 3")
add_common_setup(scenario)
scenario.addEvent(setup_scenario_3)
if verbosity is not None:
scenario.setVerbosity(verbosity)
# behaviours both become active
scenario.setTime(9, 0)
scenario.addExpect("SwitchAggregator", "overrideState", "-1", "overridestate should've been set to translucent")
scenario.addExpect("SwitchAggregator", "aggregatedState", "70", "aggregatedState should be equal to minimum of active behaviour and twilight")
# switch command occurs
scenario.setTime(10, 0)
scenario.addEvent(bind(sendSwitchCommand, 50))
scenario.addExpect("SwitchAggregator", "overrideState", "50", "overridestate should've been set to translucent")
scenario.setTime(10, 0)
scenario.addExpect("SwitchAggregator", "aggregatedState", "50", "aggregatedState should be equal to override state when it is opaque")
# all behaviours become inactive
scenario.setTime(12, 0)
scenario.addExpect("SwitchAggregator", "overrideState", "-1", "overridestate should've been cleared when it is non-zero and all switch behaviours become inactive")
scenario.addExpect("SwitchAggregator", "aggregatedState", "0", "aggregatedState should be equal to 0 when no override state or switch behaviours are active")
return scenario | a676d7997affcb92ea19dc007a8c38eebc919af3 | 385 |
def read(input):
"""Read an entire zonefile, returning an AST for it which contains formatting information."""
return _parse(input, actions=Actions()) | 46cbef9f1b5f85705166ec0527f60e8346157955 | 386 |
def generate_conditionally(text='welcome', random_seed=1, **kwargs):
"""
Input:
text - str
random_seed - integer
Output:
stroke - numpy 2D-array (T x 3)
"""
model = ConditionalStrokeModel.load(
str(MODEL_DIR / 'conditional-stroke-model'),
batch_size=1, rnn_steps=1, is_train=False, char_seq_len=len(text) + 1)
return conditional_decode(model, seed=random_seed, text=text, **kwargs) | e58a06fc620a71e6ff0704bfe5cae3693ef5f758 | 387 |
import torch
from torch import nn
def cross_entropy(pred, soft_targets):
""" pred: unscaled logits
soft_targets: target-distributions (i.e., sum to 1)
"""
logsoftmax = nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1)) | 1a81e36a9839600bd621ec0e3bb0da1d5fca0c0a | 388 |
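# Usage sketch for cross_entropy with soft targets: each row of soft_targets sums
# to 1, and the loss is the batch mean of -sum(targets * log_softmax(logits)).
# The numbers below are illustrative only.
logits = torch.tensor([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
soft_targets = torch.tensor([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
print(cross_entropy(logits, soft_targets))  # scalar tensor, roughly 0.75 for these values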
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
def test_separations():
"""Test if sky separations are the same in all spherical coordinate systems.
This is a simple consistency check.
Sky separations computed between consecutive event positions should
be the same in any spherical coordinate system.
"""
table = Table.read('hess_event_list_2.fits')
def separation(table, lon_colname, lat_colname):
lon = np.array(table[lon_colname], dtype=np.float64)
lat = np.array(table[lat_colname], dtype=np.float64)
pos1 = SkyCoord(lon[:1], lat[:1], unit='deg')
pos2 = SkyCoord(lon[1:], lat[1:], unit='deg')
sep = pos1.separation(pos2).arcsec
res = np.empty(len(table), dtype=np.float64)
res[:-1] = sep
res[-1] = np.nan
return res
table['SEP_RADEC'] = separation(table, 'RA', 'DEC')
table['SEP_RADEC_FOV'] = separation(table, 'FOV_RADEC_LON', 'FOV_RADEC_LAT')
table['SEP_RADEC_FOV_MINUS_SEP_RADEC'] = table['SEP_RADEC_FOV'] - table['SEP_RADEC']
print('Max separation difference RADEC_FOV to RADEC: {} arcsec'.format(np.nanmax(table['SEP_RADEC_FOV_MINUS_SEP_RADEC'])))
# TODO: this currently gives 14.9 arcsec, i.e. there's an issue!
table['SEP_RADEC_FOV_ASTROPY'] = separation(table, 'FOV_RADEC_LON_ASTROPY', 'FOV_RADEC_LAT_ASTROPY')
table['SEP_RADEC_FOV_ASTROPY_MINUS_SEP_RADEC'] = table['SEP_RADEC_FOV_ASTROPY'] - table['SEP_RADEC']
print('Max separation difference RADEC_FOV_ASTROPY to RADEC: {} arcsec'.format(np.nanmax(table['SEP_RADEC_FOV_ASTROPY_MINUS_SEP_RADEC'])))
# 0.02 arcsec => OK
# Note: for ALTAZ this is not expected to match RADEC, because the earth is rotating between events.
# table['SEP_ALTAZ'] = separation(table, 'AZ', 'ALT')
# table['SEP_RADEC_MINUS_SEP_ALTAZ'] = table['SEP_RADEC'] - table['SEP_ALTAZ']
# print('Max separation difference RADEC to ALTAZ: {}'.format(np.nanmax(table['SEP_RADEC_MINUS_SEP_ALTAZ'])))
# table.info('stats')
# table.write('temp.fits', overwrite=True) | f22efca62d5cd7beaecfbb46c6bef3ae12158aec | 389 |
import json
def config(path) -> bool:
"""Read the default config"""
logger.debug("Reading config from %s", path)
try:
with open(path, encoding="utf-8") as config_file_object:
# Read into dict
config_json = json.load(config_file_object)
logger.info("Loaded config into dict")
except FileNotFoundError:
logger.critical("Unable to find file %s", path)
stager.utils.dialog.error(
"Config file not found",
f"The config file {path} was not found",
)
return False
except json.JSONDecodeError as exception_:
logger.critical("Unable to parse %s: %s", path, exception_.msg)
stager.utils.dialog.error("Unable to parse config", exception_.msg)
return False
# Set defaults for config if not present
# Validate the config
config_json_keys = config_json.keys()
for req in REQUIRED:
if req not in config_json_keys:
logger.critical("Missing required key %s in config", req)
stager.utils.dialog.error(
"Config invalid", f"Missing required key {req} in config"
)
return False
# Fill in optional fields with a default
for opt in OPTIONAL_DEFAULTS: # TODO move to items
if opt not in config_json_keys:
# Add to the config json
config_json[opt] = OPTIONAL_DEFAULTS[opt]["default_value"]
# Reload prefs namespace
config_ns = convert_to_namespace(json.dumps(config_json))
stager.utils.CONFIG = config_ns
return True | 34a4accedea3bf09e60cd4116ce7bff858f63b03 | 390 |
def get_credentials(_globals: dict):
"""
Gets Credentials from Globals
Structure may be found in modules/ducktests/tests/checks/utils/check_get_credentials.py
This function return default username and password, defaults may be overriden throw globals
"""
if USERNAME_KEY in _globals[AUTHENTICATION_KEY] and PASSWORD_KEY in _globals[AUTHENTICATION_KEY]:
return _globals[AUTHENTICATION_KEY][USERNAME_KEY], _globals[AUTHENTICATION_KEY][PASSWORD_KEY]
return DEFAULT_AUTH_USERNAME, DEFAULT_AUTH_PASSWORD | 2ca95af842f1e68eb31b452374adec4d0b830383 | 391 |
def hideablerevs(repo):
"""Revision candidates to be hidden
This is a standalone function to allow extensions to wrap it.
Because we use the set of immutable changesets as a fallback subset in
branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
changesets as "hideable". Doing so would break multiple code assertions and
lead to crashes."""
return obsolete.getrevs(repo, 'obsolete') | caf59496abeb0f6d42063509f3357c7520a90d82 | 392 |
import numpy as np
import torch
def squeeze_features(protein):
"""Remove singleton and repeated dimensions in protein features."""
protein["aatype"] = torch.argmax(protein["aatype"], dim=-1)
for k in [
"domain_name",
"msa",
"num_alignments",
"seq_length",
"sequence",
"superfamily",
"deletion_matrix",
"resolution",
"between_segment_residues",
"residue_index",
"template_all_atom_mask",
]:
if k in protein:
final_dim = protein[k].shape[-1]
if isinstance(final_dim, int) and final_dim == 1:
if torch.is_tensor(protein[k]):
protein[k] = torch.squeeze(protein[k], dim=-1)
else:
protein[k] = np.squeeze(protein[k], axis=-1)
for k in ["seq_length", "num_alignments"]:
if k in protein:
protein[k] = protein[k][0]
return protein | 05c1a174935f7ebe845a0a3b308ca933baccfde6 | 393 |
import pathlib
def get_cache_dir(app_name: str, suffix: str = None, create: bool = True):
"""Get a local cache directory for a given application name.
Args:
app_name: The name of the application.
suffix: A subdirectory appended to the cache dir.
create: Whether to create the directory and its parents if it does not
already exist.
"""
appdirs = _import_appdirs()
if appdirs is None:
raise ImportError(
"To use `dm.utils.fs.get_cache_dir()`, you must have `appdirs` "
"installed: `conda install appdirs`."
)
cache_dir = pathlib.Path(appdirs.user_cache_dir(appname=app_name))
if suffix is not None:
cache_dir /= suffix
if create:
cache_dir.mkdir(exist_ok=True, parents=True)
return cache_dir | 40db55ce5d891a5cd496d23760ac66fe23206ed7 | 394 |
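# Usage example for get_cache_dir, assuming the `appdirs` package is installed so
# that the module's _import_appdirs helper resolves it. The app name and suffix
# below are illustrative; the resulting location is platform dependent (on Linux
# it typically resolves to ~/.cache/my_app/embeddings).
cache = get_cache_dir("my_app", suffix="embeddings", create=False)
print(cache)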
import random
import pickle
from collections import defaultdict
def _dump_test_data(filename, num_per_type=10):
"""Get corpus of statements for testing that has a range of stmt types."""
sp = signor.process_from_web()
# Group statements by type
stmts_by_type = defaultdict(list)
for stmt in sp.statements:
stmts_by_type[stmt.__class__].append(stmt)
# Sample statements of each type (without replacement)
stmt_sample = []
for stmt_type, stmt_list in stmts_by_type.items():
if len(stmt_list) <= num_per_type:
stmt_sample.extend(stmt_list)
else:
stmt_sample.extend(random.sample(stmt_list, num_per_type))
# Make a random binary class vector for the stmt list
y_arr = [random.choice((0, 1)) for s in stmt_sample]
with open(test_stmt_path, 'wb') as f:
pickle.dump((stmt_sample, y_arr), f)
return stmt_sample | 4eb2fbcfc6524d3f10c92e13f01475834f26f7f2 | 395 |
def gin_dict_parser(coll):
"""
Use for parsing collections that may contain a 'gin' key.
The 'gin' key is assumed to map to either a dict or str value that contains gin bindings.
e.g.
{'gin': {'Classifier.n_layers': 2, 'Classifier.width': 3}}
or
{'gin': 'Classifier.n_layers = 2\nClassifier.width = 3'}
"""
if 'gin' in coll:
if is_mapping(coll['gin']):
gin.parse_config("".join(map(lambda t: f'{t[0]} = {t[1]}\n', iteritems(coll['gin']))))
elif isinstance(coll['gin'], str):
gin.parse_config(coll['gin'])
return coll | d47fa1785948d70e5bf4575ed879fe37827db6ba | 396 |
def ones(shape, dtype):
"""
Declare a new worker-local tensor with all elements initialized to one.
:param shape: the tensor shape
:param dtype: the tensor data type
:return: the tensor expression
"""
np_dtype = DType(dtype).as_numpy()
init = _ConstTensor(np.ones(shape, dtype=np_dtype))
return LocalTensor(init) | 7345dad51739c1ada5dfd89ae9c0d0b21df54ce8 | 397 |
from urllib.parse import urlparse
def _valid_url(url):
"""Checks that the given URL is Discord embed friendly. Or at least, it tries."""
def _valid_string(segment, main=True):
if not len(segment):
return False
for c in [ord(it.lower()) for it in segment]:
if not (97 <= c <= 122 or (main and (48 <= c <= 57 or c == 45))):
return False
return True
test = urlparse(url)
if not (test.scheme and test.netloc and '.' in test.netloc):
return False
# Discord only accepts http or https
if test.scheme not in ('http', 'https'):
return False
# Test for valid netloc
netloc_split = test.netloc.split('.')
if (len(netloc_split) < 2):
return False # http://foo
tld = test.netloc.split('.')[-1]
if not (len(tld) >= 2 and _valid_string(tld, main=False)):
return False # http://foo.123
for segment in netloc_split[:-1]:
if not _valid_string(segment):
return False # http://foo..bar or http://fo*o.bar
for c in url:
if not 33 <= ord(c) <= 126:
return False # non-ASCII only URLs
return True | 74d359bc2c8430fc5990cead6695626ea825db64 | 398 |
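# A few illustrative checks for _valid_url: the scheme must be http or https, the
# netloc needs a dot-separated host, and only printable ASCII is accepted.
print(_valid_url("https://example.com/page"))   # True
print(_valid_url("ftp://example.com/file"))     # False (scheme not allowed)
print(_valid_url("http://localhost"))           # False (no dot in netloc)
print(_valid_url("https://exämple.com"))        # False (non-ASCII hostname)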
def isText(node):
"""
Returns True if the supplied node is free text.
"""
return node.nodeType == node.TEXT_NODE | 150efc016028d0fab4630ad5e754ebaeed0c82c0 | 399 |