content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def reversebits5(max_bits, num):
""" Like reversebits4, plus optimizations regarding leading zeros in
original value. """
rev_num = 0
shifts = 0
while num != 0 and shifts < max_bits:
rev_num |= num & 1
num >>= 1
rev_num <<= 1
shifts += 1
rev_num >>= 1
rev_num <<= (max_bits - shifts)
return rev_num | ada43721780d512cda73c30d0279216b709501fc | 1,143 |
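# Editor's sketch (not part of the original dataset row): a minimal sanity check,
# assuming reversebits5 above is defined in the same module. The reference helper
# below is hypothetical and reverses the lowest max_bits bits via string slicing
# purely for comparison.
def _reverse_reference(max_bits, num):
    return int(format(num, "0{}b".format(max_bits))[::-1], 2)

for max_bits, num in [(8, 0b00000001), (4, 0b0110), (16, 0xBEEF)]:
    assert reversebits5(max_bits, num) == _reverse_reference(max_bits, num)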
def rescale(img, thresholds):
"""
Linear stretch of image between two threshold values.
"""
return img.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0]) | 76d5f56384f408e57161848ded85142e68296258 | 1,144 |
def X_n120() -> np.ndarray:
"""
Fixture that generates a Numpy array with 120 observations. Each
observation contains two float values.
:return: a Numpy array.
"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
return X | 6c746c759c1113611ee19acfae5326de615821a8 | 1,145 |
def int_to_bytes(n: uint64, length: uint64) -> bytes:
"""
Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
"""
return n.to_bytes(length, ENDIANNESS) | 135260207a65cff99770b8edc1cae3848f888434 | 1,147 |
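# Editor's sketch (not part of the original row): illustrative behaviour, assuming the
# module constant ENDIANNESS is "little":
assert (16).to_bytes(8, "little") == b"\x10\x00\x00\x00\x00\x00\x00\x00"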
import string
def upper_to_title(text, force_title=False):
"""Inconsistently, NiH has fields as all upper case.
Convert to titlecase"""
if text == text.upper() or force_title:
text = string.capwords(text.lower())
return text | 939515204b841c5443c5767da20712dff684d286 | 1,148 |
def pairwise_negative(true, pred):
"""Return p_num, p_den, r_num, r_den over noncoreferent item pairs
    As used in calculating BLANC (see Luo, Pradhan, Recasens and Hovy (2014)).
>>> pairwise_negative({1: {'a', 'b', 'c'}, 2: {'d'}},
... {1: {'b', 'c'}, 2: {'d', 'e'}})
(2, 4, 2, 3)
"""
true_pairs = _positive_pairs(values(true))
pred_pairs = _positive_pairs(values(pred))
n_pos_agreements = len(true_pairs & pred_pairs)
true_mapping = sets_to_mapping(true)
pred_mapping = sets_to_mapping(pred)
extra_mentions = keys(true_mapping) ^ keys(pred_mapping)
disagreements = {p for p in true_pairs ^ pred_pairs
if p[0] not in extra_mentions
and p[1] not in extra_mentions}
n_common_mentions = len(keys(true_mapping) & keys(pred_mapping))
n_neg_agreements = (_triangle(n_common_mentions) - n_pos_agreements -
len(disagreements))
# Total number of negatives in each of pred and true:
p_den = _triangle(len(pred_mapping)) - len(pred_pairs)
r_den = _triangle(len(true_mapping)) - len(true_pairs)
return n_neg_agreements, p_den, n_neg_agreements, r_den | 64ff19dc861abe9fbc68412a61906b54439aa864 | 1,149 |
def reptile_resurgence_links(tar_url, max_layer, max_container="", a_elem="a", res_links=[], next_url="", callback=None):
"""
爬虫层次挖掘,对目标 URL 进行多层挖链接
参数:目标 URL | 最大层数 | 爬取范围 | 爬取的a标签选择器 | 内部使用,返回列表 | 内部使用 下一个目标
"""
if next_url != "" and next_url[:4] in 'http':
res_links.append(next_url)
if max_layer <= 0:
return res_links
rep = init_reptile(tar_url)
document = rep['document']
    # Focus the crawl on a specific region of the page; this approach is recommended for crawling only one layer
container_tags = document.find(max_container).items()
for tag1 in container_tags:
children_tags = tag1.children(a_elem).items()
for tag2 in children_tags:
            # A callback can be added here to effectively reduce the number of requests
if callback != None:
callback(comp_http_url(tar_url, tag2.attr('href')))
reptile_resurgence_links(
tar_url, max_layer - 1,
max_container=max_container,
res_links=res_links,
next_url=comp_http_url(tar_url, tag2.attr('href'))
)
    # After crawling, every link will have been collected
return res_links | a0fe56f4d1b0cd67b1918bf1db54cda6400fc2ca | 1,150 |
from typing import Tuple
def random_uniform(seed_tensor: Tensor,
shape: Tuple[int, ...],
low: float = 0.0,
high: float = 1.0,
dtype: dtypes.dtype = dtypes.float32):
"""
Randomly sample from a uniform distribution with minimum value `low` and maximum value `high`.
Note: not compatible with `IPUModel`.
Args:
seed_tensor (Tensor):
Used to seed the probability distribution. Must have data type uint32 and shape (2,).
shape (Tuple[int, ...]):
Shape of output tensor
low (float, optional):
Minimum value. Defaults to 0.0.
high (float, optional):
Maximum value. Defaults to 1.0.
dtype (dtypes.dtype, optional):
Data type of output tensor. Defaults to dtypes.float32.
Returns:
Tensor: tensor with elements sampled from a uniform distribution.
"""
ctx = get_current_context()
g = ctx.graph
pb_g = g._pb_graph
check_in_graph(g, seed_tensor)
settings = ctx._get_op_settings('random_uniform')
opid = _ir.OperatorIdentifier("ai.onnx", "RandomUniform", 1,
_ir.NumInputs(1, 1), 1)
op = pb_g.createConnectedOp_RandomUniformOp(
{0: seed_tensor.id},
{0: g._create_tensor_id("random_uniform_out")},
shape_=shape,
low_=low,
high_=high,
dataType_=convert_optional_dtype(dtype),
opid=opid,
settings=settings,
)
return Tensor._from_pb_tensor(op.outTensor(0)) | 6ec691faa5e35788df8f1b0be839727163271ee8 | 1,151 |
from shapely import wkt
import geopandas as gpd
def pipelines_as_gdf():
"""
Return pipelines as geodataframes
"""
def wkt_loads(x):
try:
return wkt.loads(x)
except Exception:
return None
df_fossil_pipelines = load_fossil_pipelines().query("route==route")
# Manual transform to line string:
# Input 43.5995, 16.3946: 43.6098, 16.5395:
# Output: LINESTRING (30 10, 10 30, 40 40)
df_fossil_pipelines['route'] = 'LINESTRING (' + df_fossil_pipelines['route'].str.replace(',', '').str.replace(':', ',') + ')'
df_fossil_pipelines['route'] = df_fossil_pipelines['route'].apply(wkt_loads)
return gpd.GeoDataFrame(df_fossil_pipelines, geometry=df_fossil_pipelines['route']) | b8f8817111e061db4160f524f13b005f6e8d8a3f | 1,152 |
def historico(
historia="",sintomas="",medicamentos=""
):
"""Histótia: Adicionar os relatos de doenças anteriores do paciente,\n incluindo sintomas antigos e histórico de doenças familiares
\n Sintomas: Descrever os atuais sintomas do paciente
\n Medicamentos: Remédios e tratamentos usados durante o tratamento geral do paciente."""
historia = str(
input(
"Digite o histórico de vida do paciente: "
)
)
sintomas = str(
input( "Digite os sintomas do paciente: " )
)
medicamentos = str(
input("Digite o medicamento a ser usado e a dosagem: " )
)
return historia, sintomas, medicamentos | a5bdb6cc6d13c73845650ec8fcd1d18fc1e4feb2 | 1,153 |
def plot_beam_ts(obs, title=None, pix_flag_list=[], reg_interest=None,
plot_show=False, plot_save=False, write_header=None,
orientation=ORIENTATION):
"""
plot time series for the pipeline reduction
:param obs: Obs or ObsArray or list or tuple or dict, can be the object
containing the data to plot, or list/tuple of objects, or dict in the
form of {key: obs} or {key: (obs, kwargs)} or {key: (obs, obs_yerr)} or
{key: (obs, obs_yerr, kwargs)} or {key: [obs, kwargs]}, in which case
the dict key will be the label in legend, obs and obs_yerr is Obs or
ObsArray objects, and kwargs is passed to FigArray.scatter() if the dict
        item is tuple or FigArray.plot() if it's list, the items in the
tuple/list determined based on type, and if obs_yerr is present,
FigArray.errorbar() will also be called with kwargs
:type obs: Union[Obs, ObsArray, list, tuple, dict]
:param str title: str, title of the figure, will use the first available
obs_id if left None
:param list pix_flag_list: list, [[spat, spec], ...] or [[row, col], ...] of
the flagged pixels, shown in grey shade
:param dict reg_interest: dict, indicating the region of array to plot,
passed to ArrayMap.take_where(); will plot all the input pixels if
left None
:param bool plot_show: bool, flag whether to show the figure with plt.show()
:param bool plot_save: bool, flag whether to save the figure
:param str write_header: str, path to the file header to write the figure to,
the figure will be saved as {write_header}.png, only matters if
plot_save=True; will use the first available obs_id if left None
:param str orientation: str, the orientation of the figure, passed to
FigArray.init_with_array_map
:return: FigArray, object of the figure
:rtype: FigArray
"""
if isinstance(obs, (Obs, ObsArray, np.ndarray)):
obs0 = obs
elif isinstance(obs, dict):
obs0 = list(obs.values())[0]
if isinstance(obs0, (list, tuple)):
obs0 = obs0[0]
else:
obs0 = obs[0]
array_map = ObsArray(obs0).array_map_
if title is None:
title = obs0.obs_id_
if write_header is None:
write_header = obs0.obs_id_
if isinstance(obs0, (Obs, ObsArray)) and (not obs0.ts_.empty_flag_):
obs_t_len = obs0.t_end_time_ - obs0.t_start_time_
x_size = max((obs_t_len / units.hour).to(1).value / 2,
FigArray.x_size_)
else:
x_size = FigArray.x_size_
fig = FigArray.init_by_array_map(array_map if reg_interest is None else
array_map.take_where(**reg_interest),
orientation=orientation, x_size=x_size)
if isinstance(obs, (Obs, ObsArray, np.ndarray)):
fig.scatter(obs)
elif isinstance(obs, dict):
for key in obs:
if isinstance(obs[key], (list, tuple)):
plot_func = fig.scatter if isinstance(obs[key], tuple) else \
fig.plot
if len(obs[key]) > 1:
if isinstance(obs[key][1], (Obs, ObsArray)):
kwargs = obs[key][2] if len(obs[key]) > 2 else {}
plot_func(obs[key][0], **kwargs)
fig.errorbar(obs[key][0], yerr=obs[key][1], label=key,
**kwargs)
else:
plot_func(obs[key][0], label=key, **obs[key][1])
else:
plot_func(obs[key][0], label=key)
else:
fig.scatter(obs[key], label=key)
fig.legend(loc="upper left")
if fig.twin_axs_list_ is not None:
fig.legend(twin_axes=True, loc="lower right")
else:
for obs_i in obs:
fig.scatter(obs_i)
fig.imshow_flag(pix_flag_list=pix_flag_list)
fig.set_labels(obs0, orientation=orientation)
fig.set_title(title)
if plot_save:
fig.savefig("%s.png" % write_header)
if plot_show:
plt.show()
return fig | eb9cd76d3e6b2a722c2d26dd7371cd2ed1716264 | 1,154 |
def get_qos():
"""Gets Qos policy stats, CLI view"""
return render_template('qos.html', interfaces=QueryDbFor.query_interfaces(device),
interface_qos=QueryDbFor.query_qos(device)) | 5e2557738aca2d67561961e10cd7229345cfd96c | 1,156 |
from typing import List
from typing import Callable
from typing import Any
def create_multiaction(action_name: str, subactions: List[str], description: str = '') -> Callable[[Context, Any], Any]:
"""Creates and registers an action that only executes the subactions in order.
    Dependencies and allowance rules are inferred from the subactions.
    Subactions must be defined first, because the function uses registered definitions!
    Arguments
--------
action_name
Name of the new action that acts as a key
subactions
The subactions in the execution order.
The subactions must be registered before the multiaction.
description
Human readable action description.
Returns
-------
function
The combination of subaction functions.
"""
registerations = [registered_actions[sa] for sa in subactions]
affects_database = any([r.affects_database for r in registerations])
baseactions = {
baseaction for r in registerations for baseaction in r.baseactions}
dependencies = {
dep for r in registerations for dep in r.dependencies} - baseactions
def func(*args, **kwargs):
returns = [r.function(*args, **kwargs) for r in registerations]
return returns
func.__doc__ = description
ActionRegisteration(func, action_name, affects_database,
dependencies, baseactions)
return func | 920f6696608e120e1218618dd96daa691e95e383 | 1,157 |
import numpy
def phase_amp_seq_to_complex():
"""
This constructs the function to convert from phase/magnitude format data,
assuming that data type is simple with two bands, to complex64 data.
Returns
-------
callable
"""
def converter(data):
if not isinstance(data, numpy.ndarray):
raise TypeError(
_requires_array_text.format(type(data)))
        if data.ndim != 3 or data.shape[2] != 2:  # must be a 3-D array with exactly two bands
            raise ValueError(_requires_3darray_text.format(data.shape))
if data.dtype.name not in ['uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError(
'Requires a numpy.ndarray of unsigned integer type.')
bit_depth = data.dtype.itemsize*8
out = numpy.zeros(data.shape[:2] + (1, ), dtype=numpy.complex64)
mag = data[:, :, 0]
theta = data[:, :, 1]*(2*numpy.pi/(1 << bit_depth))
out[:, :, 0].real = mag*numpy.cos(theta)
out[:, :, 0].imag = mag*numpy.sin(theta)
return out
return converter | ee6a88df5c226115a05a4e57501df73837f98ef5 | 1,158 |
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
def Regress_model(x_train,y_train,x_test=None,y_test=None,degree=2,test_size=0.1):
"""[summary]
DESCRIPTION :-
Regressin Model selection.
This Model will compare all the different Regression models, and will return model with highest Rsq value.
It also shows performance graph comaring the models.
PARAMETERS :-
x_train,x_test,y_train,y_test = are the data after tain test split
test_size = 10 % of original data is used for testing
degree = degree of polinomial regresoin (default = 2)
Returns:
Model with heighest Rsq.
Along with model compaing plot.
"""
print('Regression Model Selection...')
if x_test is None or y_test is None:
x_train,x_test,y_train,y_test = train_test_split(x_train,y_train,random_state=0,test_size=test_size)
print('\nLinear Regression ...')
lr=LinearRegression()
lr.fit(x_train,y_train)
y_pred_lir = lr.predict(x_test)
lr_pred=r2_score(y_test, y_pred_lir)
print('Rsq :',lr_pred )
    print('\nPolynomial Regression ...')
polr=PolynomialFeatures(degree)
x_polr=polr.fit_transform(x_train)
polr.fit(x_polr,y_train)
lr.fit(x_polr,y_train)
y_pred_poly=lr.predict(polr.fit_transform(x_test))
    poly_pred=r2_score(y_test, y_pred_poly)
print('Rsq :',poly_pred )
print('\nSVM Model ...')
regressor = SVR(kernel = 'rbf')
regressor.fit(x_train, y_train)
y_pred=regressor.predict(x_test)
svr_pred=r2_score(y_test,y_pred)
print('Rsq :',svr_pred)
    print('\nDecision Tree ...')
d_tree=DecisionTreeRegressor(random_state=1)
d_tree.fit(x_train,y_train)
y_pred=d_tree.predict(x_test)
d_tree_acc=r2_score(y_test,y_pred)
print('Rsq : ',d_tree_acc)
print('\nRandom Forest ...')
rand = RandomForestRegressor(n_estimators = 100, random_state = 1)
rand.fit(x_train,y_train)
y_pred=rand.predict(x_test)
ran_for_acc=r2_score(y_test,y_pred)
print('Rsq :',ran_for_acc)
l=[lr_pred,poly_pred,svr_pred,d_tree_acc,ran_for_acc]
x_label=['Lin_Reg','Poly_Reg','Svm','Des_Tr','Rand_For']
ma=l.index(max(l))
if ma==0:
model=lr
elif(ma==1):
model=polr
elif(ma==2):
model=regressor
elif(ma==3):
model=d_tree
else:
model=rand
xx=np.arange(0,5)
plt.plot(xx,l)
plt.ylabel('Rsq')
plt.title('Regression Models')
    plt.xticks(xx, x_label)
plt.show()
return model | 9987edf48821d3c1a7164b0d5675da96b6319aa6 | 1,159 |
async def get_group_list_all():
"""
    Get all groups, regardless of authorization; returns the raw type (a list)
"""
bot = nonebot.get_bot()
self_ids = bot._wsr_api_clients.keys()
for sid in self_ids:
group_list = await bot.get_group_list(self_id=sid)
return group_list | 2eae159381c48451fb776c8c46c15212aa431689 | 1,160 |
def _agefromarr(arr, agelist):
"""Measures the mean age map of a timeslice array.
:param arr: A timeslice instance's data array.
:param agelist: List of age sampling points of array.
:return:
    :agemap: Light- or mass-weighted (depending on weight_type in the timecube()) mean age of the slice_obj at each spaxel, in years.
"""
arr = np.sum(arr, axis=3) # Remove metallicities
arrshape = np.shape(arr)
arw = np.expand_dims(np.log10(agelist), 0)
arw = np.expand_dims(arw, 0)
    arw = np.pad(arw, ((0,arrshape[0]-1),(0,arrshape[1]-1),(0,0)), 'maximum')
return 10**(np.sum(arw*arr, axis=2)/np.sum(arr, axis=2)) | ba1d31e94661022b8bed05cc36085ed0dbc38c94 | 1,162 |
def _build_timecode(time, fps, drop_frame=False, additional_metadata=None):
"""
Makes a timecode xml element tree.
.. warning:: The drop_frame parameter is currently ignored and
auto-determined by rate. This is because the underlying otio timecode
conversion assumes DFTC based on rate.
:param time: The :class: `opentime.RationalTime` for the timecode.
:param fps: The framerate for the timecode.
:param drop_frame: If True, generates drop-frame timecode.
:param additional_metadata: A dictionary with other metadata items like
``field``, ``reel``, ``source``, and ``format``. It is assumed this
dictionary is of the form generated by :func:`_xml_tree_to_dict` when
the file was read originally.
:return: The ``timecode`` element.
"""
if additional_metadata:
# Only allow legal child items for the timecode element
filtered = {
k: v for k, v in additional_metadata.items()
if k in ("field", "reel", "source", "format")
}
tc_element = _dict_to_xml_tree(filtered, "timecode")
else:
tc_element = cElementTree.Element("timecode")
tc_element.append(_build_rate(fps))
rate_is_not_ntsc = (tc_element.find('./rate/ntsc').text == "FALSE")
if drop_frame and rate_is_not_ntsc:
tc_fps = fps * (1000 / 1001.0)
else:
tc_fps = fps
# Get the time values
tc_time = opentime.RationalTime(time.value_rescaled_to(fps), tc_fps)
tc_string = opentime.to_timecode(tc_time, tc_fps, drop_frame)
_append_new_sub_element(tc_element, "string", text=tc_string)
frame_number = int(round(time.value))
_append_new_sub_element(
tc_element, "frame", text="{:.0f}".format(frame_number)
)
drop_frame = (";" in tc_string)
display_format = "DF" if drop_frame else "NDF"
_append_new_sub_element(tc_element, "displayformat", text=display_format)
return tc_element | 9d85287795cf17c1d665b3620d59679183943bd7 | 1,163 |
def transform(nodes, fxn, *args, **kwargs):
"""
Apply an arbitrary function to an array of node coordinates.
Parameters
----------
nodes : numpy.ndarray
An N x M array of individual node coordinates (i.e., the
x-coords or the y-coords only)
fxn : callable
The transformation to be applied to the whole ``nodes`` array
args, kwargs
Additional positional and keyword arguments that are passed to
``fxn``. The final call will be ``fxn(nodes, *args, **kwargs)``.
Returns
-------
transformed : numpy.ndarray
The transformed array.
"""
return fxn(nodes, *args, **kwargs) | edc487b7f1b83f750f868ee446ecf2676365a214 | 1,164 |
import glob
def create_input(
basedir, pertdir, latout=False, longwave=False, slc=slice(0, None, None)
):
"""Extract variables from a given directory and places into dictionaries.
It assumes that base and pert are different directories and only one
experiment output is present in each directory.
Slicing into time chunks is allowed and providing the filenames
follow CMIP6 convention they should be concatenated in the correct
order.
Variables required are rsdt, rsus, rsds, clt, rsdscs, rsuscs, rsut, rsutcs
An error will be raised if variables are not detected.
Parameters
----------
basedir : str
Directory containing control climate simulation variables
pertdir : str
Directory containing perturbed climate simulation variables
latout : bool, default=False
if True, include array of latitude points in the output.
longwave : bool, default=False
if True, do the longwave calculation using cloud radiative effect, in
addition to the shortwave calculation using APRP.
slc: `slice`, optional
Slice of indices to use from each dataset if not all of them.
Returns
-------
    base : dict of array_like, variables needed for APRP from the control simulation
    pert : dict of array_like, variables needed for APRP from the perturbed simulation
    lat : latitude points relating to axis 1 of the arrays (only returned if latout=True)
"""
base = {}
pert = {}
if longwave:
varlist = [
"rsdt",
"rsus",
"rsds",
"clt",
"rsdscs",
"rsuscs",
"rsut",
"rsutcs",
"rlut",
"rlutcs",
]
else:
varlist = ["rsdt", "rsus", "rsds", "clt", "rsdscs", "rsuscs", "rsut", "rsutcs"]
def _extract_files(filenames, var, directory):
if len(filenames) == 0:
raise RuntimeError(
f"No variables of name {var} found in directory {directory}"
)
for i, filename in enumerate(filenames):
ncfile = Dataset(filename)
invar = ncfile.variables[var][slc, ...]
lat = ncfile.variables["lat"][:]
ncfile.close()
if i == 0:
outvar = invar
else:
# This works for me with CMIP6 netcdfs, but we don't have a small
# example to test with
outvar = np.append(outvar, invar, axis=0) # pragma: nocover
return outvar, lat
for var in varlist:
filenames = sorted(glob.glob(f"{basedir}/{var}_*.nc"))
base[var], lat = _extract_files(filenames, var, basedir)
filenames = sorted(glob.glob(f"{pertdir}/{var}_*.nc"))
pert[var], lat = _extract_files(filenames, var, pertdir)
if latout:
return base, pert, lat
return base, pert | 1bfda243bf6e11eee38cfb4311767c78f79589c2 | 1,165 |
import logging
def get_tax_proteins(tax_id, tax_prot_dict, prot_id_dict, gbk_dict, cache_dir, args):
"""Get the proteins linked to a tax id in NCBI, and link the tax id with the local db protein ids
:param tax_id: str, NCBI tax db id
:param tax_prot_dict: {ncbi tax id: {local db protein ids}}
:param prot_id_dict: dict {protein ncbi id: prot acc}
:param gbk_dict: dict, {prot acc: local db id}
:param cache_dir: Path, path to cache dir
:param args: cmd-line args parser
Return dict {tax_id: {local db protein ids}} and bool (True=success, False=failed)
"""
logger = logging.getLogger(__name__)
try:
with entrez_retry(
args.retries,
Entrez.elink,
id=tax_id,
db="Protein",
dbfrom="Taxonomy",
linkname="taxonomy_protein",
) as handle:
tax_links = Entrez.read(handle, validate=False)
except (AttributeError, TypeError, RuntimeError) as err:
logger.warning(f"Failed to link NCBI tax id to NCBI Protein db for tax id {tax_id}\n{err}")
return tax_prot_dict, False
try:
tax_prot_dict[tax_id]
except KeyError:
tax_prot_dict[tax_id] = set()
for result in tax_links:
for item in result['LinkSetDb']:
links = item['Link']
for link in links:
linked_prot_id = link['Id']
# check if from the local database
try:
prot_ver_acc = prot_id_dict[linked_prot_id]
except KeyError:
continue
try:
prot_local_db_id = gbk_dict[prot_ver_acc]
except KeyError:
                    logger.error(
                        "Did not previously retrieve data from the local "
                        f"db for {prot_ver_acc}\n"
                        "Caching and skipping protein"
                    )
                    with open((cache_dir/"failed_local_db_retrieval.out"), "a") as fh:
                        fh.write(f"{prot_ver_acc}\n")
continue
tax_prot_dict[tax_id].add(prot_local_db_id)
return tax_prot_dict, True | d3aaa32adbc1ad66e0a6bb2c615ffc0f33df9f00 | 1,166 |
def define_features_vectorizer(columns, training_data, testing_data = None, ngramrange=(1,1)):
"""
Define the features for classification using CountVectorizer.
Parameters
----------
    columns: String or list of strings if using multiple columns
        Names of columns of df that are used for training the classifier
training_data: Pandas dataframe
The dataframe containing the training data for the classifier
testing_data: Pandas dataframe
The dataframe containing the testing data for the classifier
ngramrange: tuple (min_n, max_n), with min_n, max_n integer values
range for ngrams used for vectorization
Returns
-------
vectorizer: sklearn CountVectorizer
CountVectorizer fit and transformed for training data
training_features: sparse matrix
Document-term matrix for training data
testing_features: sparse matrix
Document-term matrix for testing data
"""
    # initialise CountVectorizer and fit transform to data
vectorizer = CountVectorizer(ngram_range = ngramrange)
vectorizer.fit_transform(training_data[columns].values)
    # build matrices for training_features and testing_features
training_features=vectorizer.transform(training_data[columns].values)
if testing_data is not None:
testing_features=vectorizer.transform(testing_data[columns].values)
else:
testing_features = None
return vectorizer, training_features, testing_features | 9c29847620ba392004efdeac80f607bc86db2780 | 1,167 |
import html
def show_graph_unique_not_callback(n_clicks, input_box):
""" Function which is called by a wrapped function in another module. It takes
user input in a text box, returns a graph if the query produces a hit in Solr.
Returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (unique occurrences) of all terms which have results
from Solr """
# Store the layout with the appropriate title and y axis labels for the graph
layout_unique = go.Layout(
title = 'Percentage of papers containing chosen entity mention(s) per Month',
xaxis = {'title': 'Publication date', 'tickformat': '%b %y', 'tick0': '2007-04-30',
'dtick': 'M2', 'range': ['2007-03-25', '2018-01-25'], 'titlefont': {'size': 20}, 'tickfont': {'size': 15}},
yaxis = {'title': 'Percentage of papers with entity mention', 'ticksuffix': '%', 'titlefont': {'size': 19}, 'tickfont': {'size': 18}},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text'],
'size': 15
},
showlegend=True,
legend = {'font': {'size': 18}, 'x': 0, 'y': -0.5, 'orientation': 'h'}
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences unique.
input_list = input_box.lower().split(',')
data_list_unique = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
input_val = input_val.strip()
# If the search phrase doesn't start with the wikipedia url, it is a
# noun phrase which has to be converted to a URL
if not input_val.startswith('http://en.wikipedia.org/wiki'):
input_val = convert_phrase_to_url(input_val)
freq_df_total, freq_df_unique = get_aggregated_data(input_val)
if freq_df_unique is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences (unique) goes on the y-axis.
data_list_unique.append(go.Bar(
x = freq_df_unique.index,
y = freq_df_unique.percentage_occurrences,
text = input_val.strip(), # hover text
opacity = 0.7,
name = input_val.strip() # legend text
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_unique == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
# return html.Br()
return not_found_message(notfound_list)
# One or more of the Solr queries returned a result
else:
graph_unique_terms = {'data': data_list_unique, 'layout': layout_unique}
if notfound_list != []:
terms_not_found = not_found_message(notfound_list)
#return terms_not_found, html.Br(),
return terms_not_found, dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
return html.Br(), dcc.Graph(id='uniquefreq', figure= graph_unique_terms) | a53112cf76acd2a02ccd1a251b0a439ea8b06c77 | 1,170 |
def _add_string_datatype(graph, length):
    """Add a custom string datatype (with a maximum length) to the graph.
Args:
graph (Graph): The graph to add the datatype to
        length (int): The maximum length of the string
Returns:
URIRef: The iri of the new datatype
"""
iri = rdflib_cuba[f"_datatypes/STRING-{length}"]
triple = (iri, RDF.type, RDFS.Datatype)
if graph is None or triple in graph:
return iri
graph.add(triple)
# length_triple = (iri, rdflib_cuba._length, Literal(int(length)))
# graph.add(length_triple)
return iri | 65534a58257157c9cf8943b8f4ba3c3a39d8a5b2 | 1,171 |
def get_selected_shipping_country(request):
"""Returns the selected shipping country for the passed request.
This could either be an explicitely selected country of the current
user or the default country of the shop.
"""
customer = customer_utils.get_customer(request)
if customer:
if customer.selected_shipping_address:
return customer.selected_shipping_address.country
elif customer.selected_country:
return customer.selected_country
return lfs.core.utils.get_default_shop(request).get_default_country() | 330ccf01ed261a3b669589ab3e550147f6086b0b | 1,172 |
def func_item_iterator_next(*args):
"""
func_item_iterator_next(fii, testf, ud) -> bool
"""
return _ida_funcs.func_item_iterator_next(*args) | ce4bb7516354c36fbb3548882ff45c64c8090381 | 1,173 |
def find_score_maxclip(tp_support, tn_support, clip_factor=ut.PHI + 1):
"""
returns score to clip true positives past.
    Args:
        tp_support (ndarray):
        tn_support (ndarray):
        clip_factor (float): multiplier applied to the max true-negative score (default: ut.PHI + 1)
Returns:
float: clip_score
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.score_normalization import * # NOQA
>>> tp_support = np.array([100, 200, 50000])
>>> tn_support = np.array([10, 30, 110])
>>> clip_score = find_score_maxclip(tp_support, tn_support)
>>> result = str(clip_score)
>>> print(result)
287.983738762
"""
max_true_positive_score = tp_support.max()
max_true_negative_score = tn_support.max()
if clip_factor is None:
clip_score = max_true_positive_score
else:
overshoot_factor = max_true_positive_score / max_true_negative_score
if overshoot_factor > clip_factor:
clip_score = max_true_negative_score * clip_factor
else:
clip_score = max_true_positive_score
return clip_score | 5b51bc8a09f8e7c93e9196bab8e0566c32d31ad9 | 1,174 |
import json
from json import JSONDecodeError
def create_sponsor():
"""
Creates a new sponsor.
---
tags:
- sponsor
summary: Create sponsor
operationId: create_sponsor
requestBody:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Sponsor'
- type: object
multipart/form-data:
schema:
type: object
properties:
sponsor:
deprecated: true
allOf:
- $ref: '#/components/schemas/Sponsor'
- type: object
description: >
Deprecated,
do not use `multipart/form-data`,
use `application/json`.
properties:
encoding:
sponsor:
contentType: application/json
description: Created sponsor Object
required: true
responses:
201:
description: OK
400:
description: Bad request.
409:
description: Sorry, that sponsor already exists.
5XX:
description: Unexpected error.
"""
if "multipart/form-data" in request.content_type:
try:
data = json.loads(request.form.get("sponsor"))
except JSONDecodeError:
raise BadRequest("Invalid JSON sent in sponsor form part.")
elif request.content_type == "application/json":
data = request.get_json()
else:
raise UnsupportedMediaType()
if not data:
raise BadRequest()
try:
sponsor = Sponsor.createOne(**data)
sponsor.save()
except NotUniqueError:
raise Conflict("Sorry, that sponsor already exists.")
except ValidationError:
raise BadRequest()
res = {
"status": "success",
"message": "sponsor was created!"
}
res = make_response(res)
if "multipart/form-data" in request.content_type:
res.headers["Deprecation"] = (
"The use of multipart/form-data is deprecated. ")
if "socials" in data:
res.headers["Deprecation"] = (
"The socials field is deprecated use sponsor_website instead")
return res, 201 | 70d78a05046b2a9e845176838ae85b7c6aef01f5 | 1,175 |
import shutil
def download_or_copy(uri, target_dir, fs=None) -> str:
"""Downloads or copies a file to a directory.
Downloads or copies URI into target_dir.
Args:
uri: URI of file
target_dir: local directory to download or copy file to
fs: if supplied, use fs instead of automatically chosen FileSystem for
uri
Returns:
the local path of file
"""
local_path = download_if_needed(uri, target_dir, fs=fs)
shutil.copy(local_path, target_dir)
return local_path | ac7871adc2784a77246bbe9d1d5ae9c3d8b8443e | 1,176 |
from typing import Dict
from typing import Any
import yaml
def as_yaml(config: Dict[str, Any], **yaml_args: Any) -> str:
"""Use PyYAML library to write YAML file"""
return yaml.dump(config, **yaml_args) | 28c792504d7a6ccd7dbf040d516343e44e072b16 | 1,178 |
def retrieve(filen,start,end):
"""Retrieve a block of text from a file.
Given the name of a file 'filen' and a pair of start and
end line numbers, extract and return the text from the
file.
This uses the linecache module - beware of problems with
consuming too much memory if the cache isn't cleared."""
text = ""
# Check for consistency and validity of lines
if start < 0 and end < 0 or end < start:
return ""
# Fetch from a file if possible
if os.path.isfile(filen):
try:
for i in range(start,end+1):
text = text+str(linecache.getline(filen,i))
return text
except Exception:
print "Exception raised in retrieve method:"
print "\tSource file = \""+str(filen)+"\""
print "\tStart line = "+str(start)
print "\tEnd line = "+str(end)
print "\tCurrent line = "+str(i)
raise
# Otherwise return nothing
return "" | 1ead50be72c542551b2843e8a7fd59b98106f2ce | 1,179 |
def L1_Charbonnier_loss(predict, real):
"""
    Loss function (L1 Charbonnier).
    Args:
        predict: predicted result
        real: ground-truth result
    Returns:
        loss value
"""
eps = 1e-6
diff = tf.add(predict, -real)
error = tf.sqrt(diff * diff + eps)
loss = tf.reduce_mean(error)
return loss | 61b0183bf78914dc405290fc89e2ec875b9adfd7 | 1,180 |
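# Editor's sketch (not part of the original row): a NumPy restatement of the same
# Charbonnier penalty for intuition; the row above uses TensorFlow.
import numpy as np

def l1_charbonnier_numpy(predict, real, eps=1e-6):
    diff = predict - real
    return np.mean(np.sqrt(diff * diff + eps))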
def correction_byte_table_h() -> dict[int, int]:
"""Table of the number of correction bytes per block for the correction
level H.
Returns:
dict[int, int]: Dictionary of the form {version: number of correction
bytes}
"""
table = {
1: 17, 2: 28, 3: 22, 4: 16, 5: 22, 6: 28, 7: 26, 8: 26, 9: 24, 10: 28,
11: 24, 12: 28, 13: 22, 14: 24, 15: 24, 16: 30, 17: 28, 18: 28, 19: 26,
20: 28, 21: 30, 22: 24, 23: 30, 24: 30, 25: 30, 26: 30, 27: 30, 28: 30,
29: 30, 30: 30, 31: 30, 32: 30, 33: 30, 34: 30, 35: 30, 36: 30, 37: 30,
38: 30, 39: 30, 40: 30
}
return table | 982f775172ed0fa148f0d618e4c521fc42e3883e | 1,181 |
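# Editor's note: example lookup against the table above — a version-7 code at level H
# uses 26 error-correction bytes per block:
# correction_byte_table_h()[7]  # -> 26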
def stash_rename(node_id, new_name):
"""Renames a node."""
return stash_invoke('rename', node_id, new_name) | 1fd1dd27bcab8db64e2fb39bf4301a3eb3d48035 | 1,182 |
from datetime import datetime
def get_fake_value(attr): # attr = (name, type, [dim, [dtype]])
""" returns default value for a given attribute based on description.py """
if attr[1] == pq.Quantity or attr[1] == np.ndarray:
size = []
for i in range(int(attr[2])):
size.append(np.random.randint(100) + 1)
to_set = np.random.random(size) * pq.millisecond # let it be ms
if attr[0] == 't_start': to_set = 0.0 * pq.millisecond
if attr[0] == 't_stop': to_set = 1.0 * pq.millisecond
if attr[0] == 'sampling_rate': to_set = 10000.0 * pq.Hz
if attr[1] == np.ndarray:
to_set = np.array(to_set, dtype=attr[3])
if attr[1] == str:
to_set = str(np.random.randint(100000))
if attr[1] == int:
to_set = np.random.randint(100)
if attr[1] == datetime:
to_set = datetime.now()
return to_set | 6a732c90946b58cc7be834193692c36c56bd83fc | 1,183 |
def find_x(old_time,omega,new_time):
"""
Compute x at the beginning of new time array.
"""
interp_omega=spline(old_time,omega)
x=interp_omega(new_time[0])**(2./3)
return x | 450af49dca9c8a66dc0b9a37abddb23afd9a9749 | 1,184 |
import struct
def _platformio_library_impl(ctx):
"""Collects all transitive dependencies and emits the zip output.
Outputs a zip file containing the library in the directory structure expected
by PlatformIO.
Args:
ctx: The Skylark context.
"""
name = ctx.label.name
# Copy the header file to the desired destination.
header_file = ctx.actions.declare_file(
_HEADER_FILENAME.format(dirname=name, filename=name))
inputs = [ctx.file.hdr]
outputs = [header_file]
commands = [_COPY_COMMAND.format(
source=ctx.file.hdr.path, destination=header_file.path)]
# Copy all the additional header and source files.
for additional_files in [ctx.attr.add_hdrs, ctx.attr.add_srcs]:
for target in additional_files:
if len(target.files.to_list()) != 1:
fail("each target listed under add_hdrs or add_srcs must expand to " +
"exactly one file, this expands to %d: %s" %
(len(target.files), target.files))
# The name of the label is the relative path to the file, this enables us
# to prepend "lib/" to the path. For PlatformIO, all the library files
# must be under lib/...
additional_file_name = target.label.name
additional_file_source = [f for f in target.files.to_list()][0]
additional_file_destination = ctx.actions.declare_file(
_ADDITIONAL_FILENAME.format(dirname=name, filename=additional_file_name))
inputs.append(additional_file_source)
outputs.append(additional_file_destination)
commands.append(_COPY_COMMAND.format(
source=additional_file_source.path,
destination=additional_file_destination.path))
# The src argument is optional, some C++ libraries might only have the header.
if ctx.attr.src != None:
source_file = ctx.actions.declare_file(
_SOURCE_FILENAME.format(dirname=name, filename=name))
inputs.append(ctx.file.src)
outputs.append(source_file)
commands.append(_COPY_COMMAND.format(
source=ctx.file.src.path, destination=source_file.path))
# Zip the entire content of the library folder.
outputs.append(ctx.outputs.zip)
commands.append(_ZIP_COMMAND.format(
output_dir=ctx.outputs.zip.dirname, zip_filename=ctx.outputs.zip.basename))
ctx.actions.run_shell(
inputs=inputs,
outputs=outputs,
command="\n".join(commands),
)
# Collect the zip files produced by all transitive dependancies.
transitive_zip_files=depset([ctx.outputs.zip])
for dep in ctx.attr.deps:
transitive_zip_files = depset(transitive=[
transitive_zip_files, dep.transitive_zip_files
])
return struct(
transitive_zip_files=transitive_zip_files,
) | e048afd34e1228490f879e10bf42105197053bd8 | 1,185 |
def repeat_interleave(x, arg):
"""Use numpy to implement repeat operations"""
return paddle.to_tensor(x.numpy().repeat(arg)) | 9677d48626460241751dde2dfe1ca70d31bab6e2 | 1,186 |
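# Editor's note on the numpy fallback used above: ndarray.repeat with no axis flattens
# the input and repeats each element, e.g.
# np.array([[1, 2], [3, 4]]).repeat(2) -> array([1, 1, 2, 2, 3, 3, 4, 4])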
def quantize_arr(arr, min_val=None, max_val=None, dtype=np.uint8):
"""Quantization based on real_value = scale * (quantized_value - zero_point).
"""
if (min_val is None) | (max_val is None):
min_val, max_val = np.min(arr), np.max(arr)
scale, zero_point = choose_quant_params(min_val, max_val, dtype=dtype)
transformed_arr = zero_point + arr / scale
# print(transformed_arr)
if dtype == np.uint8:
clamped_arr = np.clip(transformed_arr, 0, 255)
quantized = clamped_arr.astype(np.uint8)
elif dtype == np.uint32:
clamped_arr = np.clip(transformed_arr, 0, 2 ** 31)
quantized = clamped_arr.astype(np.uint32)
else:
raise ValueError('dtype={} is not supported'.format(dtype))
# print(clamped_arr)
min_val = min_val.astype(np.float32)
max_val = max_val.astype(np.float32)
return quantized, min_val, max_val | 55f36d84708b32accd7077dc59fa9321f074cd5a | 1,187 |
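# Editor's note, a worked example of the affine mapping stated above: if scale == 0.5
# and zero_point == 10, then a real value of 2.0 quantizes to 10 + 2.0/0.5 == 14, and
# dequantizing gives 0.5 * (14 - 10) == 2.0.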
def EST_NOISE(images):
"""Implementation of EST_NOISE in Chapter 2 of Trucco and Verri."""
num = images.shape[0]
m_e_bar = sum(images)/num
m_sigma = np.sqrt(sum((images - m_e_bar)**2) / (num - 1))
return m_sigma | 8f8d68b25a88cc800b1a6685407072c29c47db7d | 1,188 |
def continue_cad_funcionario(request):
""" Continuação do Cadastro do Funcionário.
"""
usuario = request.user
try:
funcionario = Funcionario.objects.get(usuario=usuario)
except Exception:
raise Http404()
if funcionario and request.method == "POST":
form = FuncionarioForm(request.POST)
if form.is_valid():
form.save()
return redirect("funcionario")
else:
form = FuncionarioForm()
return render(request, "continue_cad_funcionario.html", {"form": form})
# if request.method == "POST":
# form = FuncionarioForm(request.POST)
# if form.is_valid():
# #'nome', 'rua', 'cpf', 'rg', 'fone', 'bloqueado', 'usuario_fun'
# nome = form.cleaned_data['nome']
# rua = form.cleaned_data['rua']
# cpf = form.cleaned_data['cpf']
# rg = form.cleaned_data['rg']
# fone = form.cleaned_data['fone']
# bloqueado = form.cleaned_data['bloqueado']
# usuario_fun = form.cleaned_data['usuario_fun']
# novo = Funcionario(
# nome=nome, rua=rua, cpf=cpf,
# rg=rg, fone=fone, bloqueado=bloqueado,
# suario_fun=usuario_fun
# )
# novo.save()
# return redirect("funcionario")
# else:
# form = FuncionarioForm()
# return render(request, "continue_cad_funcionario.html", {"form": form}) | 626ffeefdeb98f6921b5b76832fd2b622f6d3a26 | 1,189 |
import re
def remove_words(i_list, string):
"""
remove the input list of word from string
i_list: list of words to be removed
string: string on the operation to be performed
"""
regexStr = re.compile(r'\b%s\b' %
r'\b|\b'.join(map(re.escape, i_list)))
o_string = regexStr.sub("", string)
return o_string | 59ac5c2660459f7d2def0d7958a002977a6ca643 | 1,190 |
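# Editor's sketch, assuming remove_words above is in scope: only whole-word matches
# are removed, so
# remove_words(["foo", "bar"], "foo and bar and baz") returns " and  and baz"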
from datetime import datetime, timedelta
def save_user_time():
"""
Creates a DateTime object with correct save time
Checks if that save time is now
"""
save_time = datetime.utcnow().replace(hour=18, minute=0, second=0, microsecond=0)
return (save_time == (datetime.utcnow() - timedelta(hours=4))) | 3a16e5de6d912487ca0c46c48d039cf7d44a4991 | 1,191 |
def manage_rating_mails(request, orders_sent=[], template_name="manage/marketing/rating_mails.html"):
"""Displays the manage view for rating mails
"""
return render(request, template_name, {}) | 4afb288233bd4e1fe1b288e6872b522b993ae434 | 1,192 |
from typing import Optional
import time
from datetime import datetime
def cancel(request_url: str,
wait: Optional[bool] = False,
poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
verbose: Optional[bool] = False) -> int:
"""
Cancel the request at the given URL.
This method returns immediately by default since the API processes
this request asynchronously. If you would prefer to wait for it
to be completed, set the 'wait' parameter to True. You can adjust
the polling time using the 'poll_interval' parameter.
Args:
request_url: the URL string of the request to be canceled
wait: set to True to block until the cancellation request
has been completed (may wait for several minutes)
poll_interval: seconds to wait between polling
calls, defaults to STANDARD_POLLING_SLEEP_TIME.
verbose: if True then output poll times and other
progress, defaults to False
Returns:
1 on success
Raises:
pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
"""
# do request
req = AuroraXRequest(method="delete",
url=request_url,
null_response=True)
req.execute()
# return immediately if we don't want to wait
if (wait is False):
return 1
# get status
status = get_status(request_url)
# wait for request to be cancelled
while (status["search_result"]["data_uri"] is None and status["search_result"]["error_condition"] is False):
time.sleep(poll_interval)
if (verbose is True):
print("[%s] Checking for cancellation status ..." % (datetime.datetime.now()))
status = get_status(request_url)
# return
if (verbose is True):
print("[%s] The request has been cancelled" % (datetime.datetime.now()))
return 1 | 9f9534c3114c42cfbc08330566244d8981659cdb | 1,193 |
def selected_cases(self):
"""Get a list of all grid cases selected in the project tree
Returns:
A list of :class:`rips.generated.generated_classes.Case`
"""
case_infos = self._project_stub.GetSelectedCases(Empty())
cases = []
for case_info in case_infos.data:
cases.append(self.case(case_info.id))
return cases | 27485e7d2244167c0e9766972856f2cb221f8813 | 1,194 |
import requests
import json
from urllib.parse import urljoin
def create_whatsapp_group(org, subject):
"""
Creates a Whatsapp group using the subject
"""
result = requests.post(
urljoin(org.engage_url, "v1/groups"),
headers=build_turn_headers(org.engage_token),
data=json.dumps({"subject": subject}),
)
result.raise_for_status()
return json.loads(result.content)["groups"][0]["id"] | b867046be5623a7e3857ae6ef0069d909db323c1 | 1,195 |
def compute_MVBS_index_binning(ds_Sv, range_sample_num=100, ping_num=100):
"""Compute Mean Volume Backscattering Strength (MVBS)
based on intervals of ``range_sample`` and ping number (``ping_num``) specified in index number.
Output of this function differs from that of ``compute_MVBS``, which computes
bin-averaged Sv according to intervals of range (``echo_range``) and ``ping_time`` specified
in physical units.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing ``Sv`` and ``echo_range`` [m]
range_sample_num : int
number of samples to average along the ``range_sample`` dimension, default to 100
ping_num : int
number of pings to average, default to 100
Returns
-------
A dataset containing bin-averaged Sv
"""
da_sv = 10 ** (ds_Sv["Sv"] / 10) # average should be done in linear domain
da = 10 * np.log10(
da_sv.coarsen(ping_time=ping_num, range_sample=range_sample_num, boundary="pad").mean(
skipna=True
)
)
# Attach attributes and coarsened echo_range
da.name = "Sv"
ds_MVBS = da.to_dataset()
ds_MVBS.coords["range_sample"] = (
"range_sample",
np.arange(ds_MVBS["range_sample"].size),
{"long_name": "Along-range sample number, base 0"},
) # reset range_sample to start from 0
ds_MVBS["echo_range"] = (
ds_Sv["echo_range"]
.coarsen( # binned echo_range (use first value in each average bin)
ping_time=ping_num, range_sample=range_sample_num, boundary="pad"
)
.min(skipna=True)
)
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_num} pings "
"comment: ping_time is the interval start) "
f"range_sample: mean (interval: {range_sample_num} samples along range "
"comment: range_sample is the interval start)"
),
"comment": "MVBS binned on the basis of range_sample and ping number specified as index numbers", # noqa
"binning_mode": "sample number",
"range_sample_interval": f"{range_sample_num} samples along range",
"ping_interval": f"{ping_num} pings",
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "preprocess.compute_MVBS_index_binning"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
return ds_MVBS | c5b163ec0f9b2807580c586a3e40ca81d0bcd6cb | 1,196 |
def set_image_exposure_time(exp_time):
"""
Send the command to set the exposure time per frame to SAMI.
Parameters
----------
exp_time (float) : the exposure time in seconds.
Returns
-------
message (string) : DONE if successful.
"""
message = send_command("dhe set obs.exptime {:f}".format(exp_time))
return message | c01a12607b6554f29c63229eefe87c1bf81bc7e0 | 1,197 |
def stack_exists(client, stack_name):
""" Checks that stack was specified is existing """
cfn_stacks = client.list_stacks()
for cfn_stack in cfn_stacks["StackSummaries"]:
if cfn_stack['StackName'] == stack_name and "COMPLETE" in cfn_stack['StackStatus'] and "DELETE" not in cfn_stack['StackStatus']:
return True
return False | 8e9476b57300cb030ba5292f83060bb5ae652d19 | 1,199 |
def endorsement_services():
"""Return endorsement service list
Loads all defined service modules unless settings specifies otherwise
"""
global ENDORSEMENT_SERVICES
if ENDORSEMENT_SERVICES is None:
ENDORSEMENT_SERVICES = _load_endorsement_services()
return ENDORSEMENT_SERVICES | 543b6c86587a0da58a3e9b8d1756a6d763e60d6a | 1,200 |
def select(arrays, index):
"""
Index each array in a tuple of arrays.
If the arrays tuple contains a ``None``, the entire tuple will be returned
as is.
Parameters
----------
arrays : tuple of arrays
index : array
An array of indices to select from arrays.
Returns
-------
indexed_arrays : tuple of arrays
Examples
--------
>>> import numpy as np
>>> select((np.arange(5), np.arange(-3, 2, 1)), [1, 3])
(array([1, 3]), array([-2, 0]))
>>> select((None, None, None, None), [1, 2])
(None, None, None, None)
"""
if arrays is None or any(i is None for i in arrays):
return arrays
return tuple(i.ravel()[index] for i in arrays) | 70109fbda58055d9712295dff261a95d99caac03 | 1,201 |
def waypoint(waypoint_id):
"""view a book page"""
wp = Waypoint.query.filter_by(id=waypoint_id).first()
options = Option.query.filter_by(sourceWaypoint_id=waypoint_id)
if wp is None:
abort(404)
return render_template('books/waypoint.html', book=wp.book_of, waypoint=wp, options=options) | 520883bdcb29f3e273f7836c4db128c859d9347f | 1,202 |
def encode_big_endian_16(i):
"""Take an int and return big-endian bytes"""
return encode_big_endian_32(i)[-2:] | c26557a6ac30f54746a8a5a1676cec22e3b3b197 | 1,203 |
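# Editor's sketch: a plausible companion helper; the real encode_big_endian_32 lives
# elsewhere in the original source and may differ.
import struct

def encode_big_endian_32(i):
    return struct.pack(">I", i)

# Under that assumption, encode_big_endian_16(0x1234) == b"\x12\x34"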
from typing import List
import requests
from bs4 import BeautifulSoup
import re
def get_comments_from_fawm_page(
url: str,
username: str,
password: str,
) -> List[Response]:
"""Extract comments from a given FAWM page."""
response = requests.get(url, auth=(username, password))
response.encoding = "UTF-8"
html = response.text
soup = BeautifulSoup(html, "html.parser")
responses = []
# there are non-comments with the class "comment-item", so we need to narrow down
for el in soup.find_all("li", {"class": "comment-item", "id": re.compile(r"c\d+")}):
responses.append(get_response_from_li(url, el))
return responses | 68ec542bf909fc543c97836209c2803cd6e0f119 | 1,204 |
def send_message(service, user_id, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
try:
message = (service.users().messages().send(userId=user_id, body=message)
.execute())
print ('Message Id: %s' % message['id'])
return message
    except errors.HttpError as error:
print ('An error occurred: %s' % error) | 6bbb3935e596d7d19669f5a0094f58542dd764d3 | 1,205 |
def get_supported_solvers():
"""
Returns a list of solvers supported on this machine.
:return: a list of SolverInterface sub-classes :list[SolverInterface]:
"""
return [sv for sv in builtin_solvers if sv.supported()] | b8fb9e9d780158ab0f45565c05e42fe47ae0d9f2 | 1,206 |
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except (AttributeError, TypeError):
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
if hint is NotImplemented or \
not isinstance(hint, (int, long)) or \
hint < 0:
return None
return hint | 226ede41ec49fef4b43df92f425eb7f5135041ea | 1,207 |
def chimeric_data():
"""Example containing spanning + junction reads from single fusion."""
return _build_chimeric_data(
[('1', 300, 1, 'T2onc', 420, 1, 2, '100M2208p38M62S', '62M38S', 'R1'),
('1', 300, 1, 'T2onc', 420, 1, 1, '100M2208p52M48S', '48M52S', 'R2'),
('1', 301, 1, 'T2onc', 420, 1, 1, '100M2208p52M48S', '48M52S', 'R3'),
('1', 300, 1, 'T2onc', 421, 1, 1, '100M2208p52M48S', '48M52S', 'R4'),
('1', 280, 1, 'T2onc', 435, 1, -1, '100M', '97M3S', 'S1'),
('1', 270, 1, 'T2onc', 445, 1, -1, '100M', '98M2S', 'S2'),
('1', 275, 1, 'T2onc', 435, 1, -1, '100M', '98M2S', 'S3')]) | 79277d820c0d3e28708d9ead49a55cbe4f51c4e3 | 1,208 |
def _get_merge_for_alias_key(database, key):
"""Return the Alias record of the merged player.
    Allow for value.merge on the record with the given key being any value.
    Return the record if value.merge is None, True, or False.
    Otherwise assume value.merge is an integer and use it to retrieve and
    return a record.
    Return None if get_alias() returns None.
"""
r = resultsrecord.get_alias(database, key)
if r is None:
return
elif r.value.merge is None:
return r
elif r.value.merge is True:
return r
elif r.value.merge is False:
return r
r = resultsrecord.get_alias(database, r.value.merge)
if r is None:
return
return r | 2384e9db49af07512c86f58b5c9eb964f9e9b1b2 | 1,209 |
def get_bucket(self):
"""
Documentation:
---
Description:
Use bucket name to return a single S3 bucket object.
---
Returns:
bucket : S3 bucket
S3 bucket object
"""
        # dictionary of bucket name / S3 bucket object pairs
        buckets = self.get_buckets()
        # check that there is a bucket with that name
        assert self.bucket_name in self.get_bucket_names(), "\nNo S3 bucket with that name.\n"
        # filter buckets by bucket_name
bucket = buckets[self.bucket_name]
return bucket | 0d8ed3c8557e57fb8094524bc4cb4dcae09fe384 | 1,211 |
def euclidean_distance(x, y, weight=None):
"""Computes the Euclidean distance between two time series.
If the time series do not have the same length, an interpolation is performed.
Parameters
----------
x : nd-array
Time series x.
y : nd-array
Time series y.
weight: nd-array (Default: None)
query weight values.
Returns
-------
float
Euclidean distance value.
"""
p = 2
if len(x) != len(y):
x, y = interpolation(x, y)
if weight is None:
ed = np.linalg.norm(x - y, p)
else:
if len(np.shape(x)) > 1:
distance = _lnorm_multidimensional(x, y, weight, p=p)
else:
distance = _lnorm_unidimensional(x, y, weight, p=p)
ed = np.sum(distance)
return ed | 03a1cb557d7d295a6ebd89b0bd1dab937206c8e0 | 1,212 |
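# Editor's sketch, assuming numpy is available as np and the function above is in scope
# (unweighted, equal-length branch):
# euclidean_distance(np.array([0.0, 0.0]), np.array([3.0, 4.0]))  # -> 5.0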
def path(artifactory_server, artifactory_auth):
"""ArtifactoryPath with defined server URL and authentication"""
def f(uri):
return artifactory.ArtifactoryPath(
artifactory_server + uri, auth=artifactory_auth
)
return f | 0eabd46b50812ce219affae5ce0d70ee66c7adc5 | 1,213 |
def get_outmost_polygon_boundary(img):
"""
    Given a mask image where the mask describes the overlapping region of
    two images, get the outermost contour of this region.
"""
mask = get_mask(img)
mask = cv2.dilate(mask, np.ones((2, 2), np.uint8), iterations=2)
cnts, hierarchy = cv2.findContours(
mask,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # get the contour with the largest area
C = sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True)[0]
# polygon approximation
polygon = cv2.approxPolyDP(C, 0.009 * cv2.arcLength(C, True), True)
return polygon | 73896a69809259f3bf395895097d1fb81e05706e | 1,214 |
from apex.parallel import DistributedDataParallel as apex_DDP
def check_ddp_wrapped(model: nn.Module) -> bool:
"""
Checks whether model is wrapped with DataParallel/DistributedDataParallel.
"""
parallel_wrappers = nn.DataParallel, nn.parallel.DistributedDataParallel
# Check whether Apex is installed and if it is,
# add Apex's DistributedDataParallel to list of checked types
try:
parallel_wrappers = parallel_wrappers + (apex_DDP,)
except ImportError:
pass
return isinstance(model, parallel_wrappers) | e14b0b1c09c088b310574e4a772c7dc3bec83ddf | 1,215 |
def adminRecords(request):
"""
    Manage rental records.
:param request:
:return: html page
"""
token = request.COOKIES.get('admintoken')
if token is None:
return redirect('/adminLogin/')
result = MysqlConnector.get_one('YachtClub', 'select adminname from admincookies where token=%s', token)
if result is None:
return redirect('/adminLogin/')
return render(request, 'adminRecords.html') | 75a4d1da4e7556de46a455c1304cc80a5660c9ce | 1,216 |
def _make_fold(draw):
"""
Helper strategy for `test_line_fold` case.
The shape of the content will be the same every time:
a
b
c
But the chars and size of indent, plus trailing whitespace on each line
and number of line breaks will all be fuzzed.
"""
return (
draw(make_interspace(symbol_a, 0)),
draw(make_interspace(symbol_b, 1)),
draw(make_interspace(symbol_c, 1)),
) | 1488cafd51b7000ac0fe111b445fcc706876da00 | 1,217 |
import requests
def get_user_jwt() -> str:
"""
Returns:
str: The JWT token of the user
"""
login_data = check_login()
if not login_data:
token = requests.get(
'https://formee-auth.hackersreboot.tech/visitor').json()['token']
return token
if login_data:
token = requests.get('https://formee-auth.hackersreboot.tech/', json={
'username': login_data['username'], 'password': login_data['password']}).json()['token']
return token | 1e52921ba88dfefcf320895f98420a73af3f86ee | 1,218 |
def add_gradient_penalty(critic, C_input_gp, C_input_fake):
"""Helper Function: Add gradient penalty to enforce Lipschitz continuity
Interpolates = Real - alpha * ( Fake - Real )
Parameters
----------
critic : tf.Sequential
Critic neural network
C_input_gp : np.matrix
Critic input for gradient penalty. Mean values of all similar samples
provided by the Sampler.
C_input_fake : tf.Tensor
Critic input Generator(X)
Returns
-------
tf.tensor(dtype=tf.Float64)
Gradient Penalty
"""
alpha = tf.random.uniform(
shape=[1, int(C_input_fake.shape[1])], minval=0.0, maxval=1.0, dtype=tf.float64
)
interpolates = C_input_gp + alpha * (C_input_fake - C_input_gp)
disc_interpolates = critic(interpolates)
gradients = tf.gradients(disc_interpolates, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients)))
return tf.reduce_mean((slopes - 1) ** 2) | e015cc7d5e168293c2dd741c95b38fc8aac4fbc8 | 1,220 |
from datetime import datetime
import pytz
def parse_airomon_datetime(airomon_dt: str) -> datetime:
"""Parse string used by airomon and also make timezone aware."""
aileen_tz = pytz.timezone(settings.TIME_ZONE)
try:
dt: datetime = datetime.strptime(airomon_dt, "%Y-%m-%d %H:%M:%S")
dt = dt.astimezone(aileen_tz)
except ValueError:
print(
"%s Warning: could not parse datetime %s, using 1-1-1970 for this one!"
% (settings.TERM_LBL, airomon_dt)
)
dt = datetime(1970, 1, 1, 1, 1, 1, tzinfo=aileen_tz)
return dt | fd91e09ebef4f8af55686d38ef3e763ec546a844 | 1,221 |
from numpy import array, sqrt
def i2nm(i):
"""
Return the n and m orders of the i'th zernike polynomial
========= == == == == == == == == == == == == == == == ===
i 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ...
n-order 0 1 1 2 2 2 3 3 3 3 4 4 4 4 4 ...
m-order 0 -1 1 -2 0 2 -3 -1 1 3 -4 -2 0 2 4 ...
========= == == == == == == == == == == == == == == == ===
"""
ia = array(i)
n = (1 + (sqrt(8 * (ia) + 1) - 3) / 2).astype(int)
ni = n * (n + 1) / 2
m = -n + 2 * (i - ni)
return n, m | 6da858f4cb58a10c480641ce736398d66baf0a7a | 1,222 |
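# Editor's sketch: spot check against the table in the docstring, assuming the
# numpy-based implementation above (which returns numpy scalars):
# n, m = i2nm(4)   # -> n == 2, m == 0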
from typing import Dict
from typing import Any
def update_ftov_msgs(
ftov_msgs: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update ftov_msgs.
Args:
ftov_msgs: A flat jnp array containing ftov_msgs.
updates: A dictionary containing updates for ftov_msgs
fg_state: Factor graph state
Returns:
A flat jnp array containing updated ftov_msgs.
Raises: ValueError if:
(1) provided ftov_msgs shape does not match the expected ftov_msgs shape.
(2) provided name is not valid for ftov_msgs updates.
"""
for names in updates:
data = updates[names]
if names in fg_state.variable_group.names:
variable = fg_state.variable_group[names]
if data.shape != (variable.num_states,):
raise ValueError(
f"Given belief shape {data.shape} does not match expected "
f"shape {(variable.num_states,)} for variable {names}."
)
var_states_for_edges = np.concatenate(
[
wiring_by_type.var_states_for_edges
for wiring_by_type in fg_state.wiring.values()
]
)
starts = np.nonzero(
var_states_for_edges == fg_state.vars_to_starts[variable]
)[0]
for start in starts:
ftov_msgs = ftov_msgs.at[start : start + variable.num_states].set(
data / starts.shape[0]
)
else:
raise ValueError(
"Invalid names for setting messages. "
"Supported names include a tuple of length 2 with factor "
"and variable names for directly setting factor to variable "
"messages, or a valid variable name for spreading expected "
"beliefs at a variable"
)
return ftov_msgs | f54150e5f310905e37820e225e8d29ab6d9e9717 | 1,223 |
from typing import Optional
import numpy as np
def normalize_features(
current: np.ndarray,
previous: Optional[np.ndarray],
normalize_samples: int,
method: str = NORM_METHODS.MEAN.value,
clip: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
"""Normalize features with respect to the past number of normalize_samples.
Parameters
----------
current : numpy array
current features to normalize.
previous : numpy array or None
previous features, not normalized. Used for normalization of current features.
normalize_samples : int
number of past samples considered for normalization
method : str | default is 'mean'
data is normalized via subtraction of the 'mean' or 'median' and
subsequent division by the 'mean' or 'median'. For z-scoring enter
'zscore'.
clip : int | float, optional
value at which to clip on the lower and upper end after normalization.
Useful for artifact rejection and handling of outliers.
Returns
-------
current : numpy array
normalized current features
previous : numpy array
previous features, not normalized.
Raises
------
ValueError
returned if method is not 'mean', 'median' or 'zscore'
"""
if previous is None:
return np.zeros_like(current), current
previous = np.vstack((previous, current))
previous = _transform_previous(
previous=previous, normalize_samples=normalize_samples
)
current, previous = _normalize_and_clip(
current=current,
previous=previous,
method=method,
clip=clip,
description="feature",
)
return current, previous | 8b18f12272eb92ae60631b0c4dcdb138a5596d44 | 1,224 |
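The helpers _transform_previous and _normalize_and_clip are private to the module, so a direct call is not reproduced here; the following self-contained sketch only illustrates the underlying idea of normalizing the newest sample against the last normalize_samples rows (the array sizes are arbitrary):
import numpy as np

normalize_samples = 5
history = np.random.rand(20, 3)   # past, un-normalized feature rows
current = np.random.rand(3)       # newest feature row

window = history[-normalize_samples:]   # only the most recent samples count
mean = window.mean(axis=0)
normalized = (current - mean) / mean    # the 'mean' method described above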
import pyglet
def anim(filename, rows: int, cols: int,
         frame_duration: float = 0.1, loop=True) -> pyglet.image.Animation:
"""Create Animation object from image of regularly arranged subimages.
+filename+ Name of file in resource directory of image of subimages
regularly arranged over +rows+ rows and +cols+ columns.
+frame_duration+ Seconds each frame of animation should be displayed.
"""
img = pyglet.resource.image(filename)
image_grid = pyglet.image.ImageGrid(img, rows, cols)
    animation = image_grid.get_animation(frame_duration, loop)
centre_animation(animation)
return animation | 2ced01a961d05e6968c14023a935623bc2011069 | 1,225 |
def filter_factory(global_conf, **local_conf):
"""Standard filter factory to use the middleware with paste.deploy"""
register_swift_info('vertigo')
conf = global_conf.copy()
conf.update(local_conf)
vertigo_conf = dict()
vertigo_conf['devices'] = conf.get('devices', '/srv/node')
vertigo_conf['execution_server'] = conf.get('execution_server')
vertigo_conf['mc_timeout'] = conf.get('mc_timeout', 5)
vertigo_conf['mc_pipe'] = conf.get('mc_pipe', 'vertigo_pipe')
# vertigo_conf['api_pipe'] = conf.get('mc_pipe', 'api_pipe')
vertigo_conf['metadata_visibility'] = conf.get('metadata_visibility', True)
vertigo_conf['mc_dir'] = conf.get('mc_dir', '/home/docker_device/vertigo/scopes')
vertigo_conf['cache_dir'] = conf.get('cache_dir', '/home/docker_device/cache/scopes')
vertigo_conf['mc_container'] = conf.get('mc_container', 'microcontroller')
vertigo_conf['mc_dependency'] = conf.get('mc_dependency', 'dependency')
''' Load storlet parameters '''
configParser = RawConfigParser()
configParser.read(conf.get('__file__'))
storlet_parameters = configParser.items('filter:storlet_handler')
for key, val in storlet_parameters:
vertigo_conf[key] = val
""" Load Storlets Gateway configuration """
configParser = RawConfigParser()
configParser.read(vertigo_conf['storlet_gateway_conf'])
additional_items = configParser.items("DEFAULT")
for key, val in additional_items:
vertigo_conf[key] = val
""" Load Storlets Gateway class """
module_name = vertigo_conf.get('storlet_gateway_module', 'stub')
gateway_class = load_gateway(module_name)
vertigo_conf['storlets_gateway_module'] = gateway_class
"""
Register Lua script to retrieve policies in a single redis call
"""
vertigo_conf['redis_host'] = conf.get('redis_host', 'controller')
vertigo_conf['redis_port'] = int(conf.get('redis_port', 6379))
vertigo_conf['redis_db'] = int(conf.get('redis_db', 0))
if vertigo_conf['execution_server'] == 'proxy':
r = redis.StrictRedis(vertigo_conf['redis_host'],
vertigo_conf['redis_port'],
vertigo_conf['redis_db'])
lua = """
local t = {}
if redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])
elseif redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])
end
return t"""
lua_sha = r.script_load(lua)
vertigo_conf['LUA_get_mc_sha'] = lua_sha
def swift_vertigo(app):
return VertigoHandlerMiddleware(app, global_conf, vertigo_conf)
return swift_vertigo | ca9c72f2237cfb2054ffc2b38038c75c96679ade | 1,226 |
import statistics
def get_review_score_fields(call, proposals):
"""Return a dictionary of the score banner fields in the reviews.
Compute the score means and stdevs. If there are more than two score
fields, then also compute the mean of the means and the stdev of the means.
This is done over all finalized reviews for each proposal.
Store the values in the proposal document.
"""
fields = dict([(f['identifier'], f)
for f in call['review']
if f.get('banner') and f['type'] == constants.SCORE])
for proposal in proposals:
reviews = utils.get_docs_view('reviews', 'proposal',
proposal['identifier'])
# Only include finalized reviews in the calculation.
reviews = [r for r in reviews if r.get('finalized')]
scores = dict([(id, list()) for id in fields])
for review in reviews:
for id in fields:
value = review['values'].get(id)
if value is not None: scores[id].append(float(value))
proposal['scores'] = dict()
for id in fields:
proposal['scores'][id] = d = dict()
d['n'] = len(scores[id])
try:
d['mean'] = round(statistics.mean(scores[id]), 1)
except statistics.StatisticsError:
d['mean'] = None
try:
d['stdev'] = round(statistics.stdev(scores[id]), 1)
except statistics.StatisticsError:
d['stdev'] = None
if len(fields) >= 2:
mean_scores = [d['mean'] for d in proposal['scores'].values()
if d['mean'] is not None]
try:
mean_means = round(statistics.mean(mean_scores), 1)
except statistics.StatisticsError:
mean_means = None
proposal['scores']['__mean__'] = mean_means
try:
stdev_means = round(statistics.stdev(mean_scores), 1)
except statistics.StatisticsError:
stdev_means = None
proposal['scores']['__stdev__'] = stdev_means
return fields | bda5899a105942a456872d0bacadaf124832cd65 | 1,227 |
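A minimal illustration of the fallback used above: statistics.stdev needs at least two data points, so a proposal with a single finalized review ends up with a stdev of None.
import statistics

scores = [3.0]
try:
    spread = round(statistics.stdev(scores), 1)
except statistics.StatisticsError:
    spread = None
print(spread)  # None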
import nltk
def tokenize(text):
"""
Tokenize and normalize
"""
tokens = nltk.word_tokenize(text)
lemmatizer = nltk.WordNetLemmatizer()
clean_tokens = [lemmatizer.lemmatize(w).lower().strip() for w in tokens]
return clean_tokens | 2485181433208ee871e881312400806539d5bc73 | 1,228 |
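Usage sketch, assuming the required NLTK data packages (punkt/wordnet) are available; the exact lemmas can vary slightly between NLTK data versions.
import nltk

nltk.download("punkt", quiet=True)
nltk.download("wordnet", quiet=True)

print(tokenize("The children were running quickly!"))
# e.g. ['the', 'child', 'were', 'running', 'quickly', '!']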
import numpy as np
import pandas as pd
def _makeSSDF(row, minEvents):
"""
Function to change form of TRDF for subpace creation
"""
index = range(len(row.Clust))
columns = [x for x in row.index if x != 'Clust']
DF = pd.DataFrame(index=index, columns=columns)
DF['Name'] = ['SS%d' % x for x in range(len(DF))] # name subspaces
# Initialize columns for future use
DF['Events'] = object
DF['AlignedTD'] = object
DF['SVD'] = object
DF['UsedSVDKeys'] = object
DF['FracEnergy'] = object
DF['SVDdefined'] = False
DF['SampleTrims'] = [{} for x in range(len(DF))]
    DF['Threshold'] = float
DF['SigDimRep'] = object
DF['FAS'] = object
DF['NumBasis'] = int
DF['Offsets'] = object
DF['Stats'] = object
DF['MPtd'] = object
DF['MPfd'] = object
DF['Channels'] = object
DF['Station'] = row.Station
DF = DF.astype(object)
for ind, row2 in DF.iterrows():
evelist = row.Clust[ind]
evelist.sort()
DF['Events'][ind] = evelist
DF['numEvents'][ind] = len(evelist)
DF['MPtd'][ind] = _trimDict(row, 'MPtd', evelist)
DF['MPfd'][ind] = _trimDict(row, 'MPfd', evelist)
DF['Stats'][ind] = _trimDict(row, 'Stats', evelist)
DF['Channels'][ind] = _trimDict(row, 'Channels', evelist)
# only keep subspaces that meet min req, dont renumber
DF = DF[[len(x) >= minEvents for x in DF.Events]]
# DF.reset_index(drop=True, inplace=True)
return DF | 77fb59b0e385d51d06fac1ff64ba12331d514d1d | 1,229 |
def concatenate_constraints(original_set, additional_set):
"""
Method for concatenating sets of linear constraints.
original_set and additional_set are both tuples of
for (C, b, n_eq). Output is a concatenated tuple of
same form.
All equality constraints are always kept on top.
"""
C_org, b_org, n_org = original_set
C_add, b_add, n_add = additional_set
if n_add > 0:
C_out = np.insert(C_org, n_org, C_add[:n_add, :], axis=0)
C_out = np.concatenate((C_out, C_add[n_add:, :]))
b_out = np.insert(b_org, n_org, b_add[:n_add])
b_out = np.concatenate((b_out, b_add[n_add:]))
else:
C_out = np.concatenate((C_org, C_add))
b_out = np.concatenate((b_org, b_add))
n_out = n_org + n_add
return C_out, b_out, n_out | 6b0cc1c75d00ae7b3737638c45a21bc8c609ddb1 | 1,230 |
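Usage sketch with two small constraint sets of the form (C, b, n_eq); after concatenation the equality rows from both sets sit on top and n_eq is their combined count.
import numpy as np

original = (np.array([[1., 0.], [0., 1.]]), np.array([1., 2.]), 1)    # first row is an equality
additional = (np.array([[1., 1.], [2., 0.]]), np.array([3., 4.]), 1)  # first row is an equality

C, b, n_eq = concatenate_constraints(original, additional)
print(C)     # rows [1, 0] and [1, 1] (the equalities) come first
print(b)     # [1. 3. 2. 4.]
print(n_eq)  # 2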
import signal
def _isDefaultHandler():
"""
Determine whether the I{SIGCHLD} handler is the default or not.
"""
return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL | 10b814bba12c04cbc6fec08c7783581876f56b6b | 1,231 |
import scipy.signal
def downsampling(conversion_rate, data, fs):
    """
    Perform downsampling.
    Takes the conversion rate, the data and the sampling frequency as input,
    and returns the downsampled data together with the new sampling frequency.
    """
    # number of samples to skip between the samples that are kept
    decimationSampleNum = conversion_rate-1
    # prepare the FIR low-pass filter
    nyqF = (fs/conversion_rate)/2.0  # Nyquist frequency after conversion
    cF = (fs/conversion_rate/2.0-500.)/nyqF  # cut-off set slightly below the new Nyquist frequency
    taps = 511  # number of filter taps (must be odd)
    b = scipy.signal.firwin(taps, cF)  # build the LPF
    # filtering
    data = scipy.signal.lfilter(b, 1, data)
    # decimation
downData = []
for i in range(0,len(data),decimationSampleNum+1):
downData.append(data[i])
return (downData,fs/conversion_rate) | add6768d32cc7675eaecf1e37c5d901f1244702a | 1,232 |
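Usage sketch: decimating a 1 kHz sine sampled at 48 kHz by a factor of 4 (all values are arbitrary test parameters).
import numpy as np

fs = 48000
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 1000 * t)

down, new_fs = downsampling(4, x, fs)
print(len(x), len(down), new_fs)  # 48000 12000 12000.0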
from typing import Union
def get_client_cache_key(
request_or_attempt: Union[HttpRequest, AccessBase], credentials: dict = None
) -> str:
"""
Build cache key name from request or AccessAttempt object.
:param request_or_attempt: HttpRequest or AccessAttempt object
:param credentials: credentials containing user information
:return cache_key: Hash key that is usable for Django cache backends
"""
if isinstance(request_or_attempt, AccessBase):
username = request_or_attempt.username
ip_address = request_or_attempt.ip_address
user_agent = request_or_attempt.user_agent
else:
username = get_client_username(request_or_attempt, credentials)
ip_address = get_client_ip_address(request_or_attempt)
user_agent = get_client_user_agent(request_or_attempt)
filter_kwargs_list = get_client_parameters(username, ip_address, user_agent)
return make_cache_key_list(filter_kwargs_list) | 8d9a128b326a8ab7c320f73f49a313f30d6cd268 | 1,233 |
import numpy as np
def loadMaterials(matFile):
"""
Loads materials into Tom's code from external file of all applicable materials.
These are returned as a dictionary.
"""
mats = {}
    name, no, ne, lto, lte, mtype = np.loadtxt(matFile, dtype=str, unpack=True)
    no = np.array(list(map(float, no)))
    ne = np.array(list(map(float, ne)))
    lto = 1.0e-4 * np.array(list(map(float, lto)))
    lte = 1.0e-4 * np.array(list(map(float, lte)))
for (i,n) in enumerate(name):
mats[n] = tm.material(no[i], ne[i], lto[i], lte[i], n, mtype[i])
return mats | 650b04a27741777e5e344696e123d4fd669a5b28 | 1,234 |
def prepend_with_baseurl(files, base_url):
"""prepend url to beginning of each file
Parameters
------
files (list): list of files
base_url (str): base url
Returns
------
list: a list of files with base url pre-pended
"""
return [base_url + file for file in files] | 4c29b3e9230239c1ff8856c707253608ce2503cd | 1,235 |
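Usage sketch (the URL is a placeholder):
files = ["2020.csv", "2021.csv"]
print(prepend_with_baseurl(files, "https://example.com/data/"))
# ['https://example.com/data/2020.csv', 'https://example.com/data/2021.csv']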
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[start:stop]
if not include_right_boundary:
right_index = result.index.get_slice_bound(stop, 'left', 'loc')
result = result.iloc[:right_index]
return result | 1cc3c2b507ed18d18659fb097765ab450972c05a | 1,237 |
def compare_system_and_attributes_faulty_systems(self):
"""compare systems and associated attributes"""
# compare - systems / attributes
self.assertTrue(System.objects.filter(system_name='system_csv_31_001').exists())
self.assertTrue(System.objects.filter(system_name='system_csv_31_003').exists())
self.assertTrue(System.objects.filter(system_name='system_csv_31_006').exists())
# compare - systems / attributes
self.assertEqual(
System.objects.get(system_name='system_csv_31_001').analysisstatus,
Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_003').analysisstatus,
Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_006').analysisstatus,
Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_001').systemstatus,
Systemstatus.objects.get(systemstatus_name='systemstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_003').systemstatus,
Systemstatus.objects.get(systemstatus_name='systemstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_006').systemstatus,
Systemstatus.objects.get(systemstatus_name='systemstatus_1'),
)
# return to test function
return self | 459b853eba3a2705450ac9a33fe14844940bf4c8 | 1,238 |
from Bio import Restriction
def get_regions(contig, enzymes):
"""return loci with start and end locations"""
out_sites = []
enz_1 = [enz for enz in Restriction.AllEnzymes if "%s"%enz == enzymes[0]][0]
enz_2 = [enz for enz in Restriction.AllEnzymes if "%s"%enz == enzymes[1]][0]
enz_1_sites = enz_1.search(contig.seq)
enz_2_sites = enz_2.search(contig.seq)
combined_sites = sorted(enz_1_sites + enz_2_sites)
for i in range(len(combined_sites)):
site_A = combined_sites[i]
try:
site_B = combined_sites[i+1]
except IndexError:
break
if site_B - site_A < 30:
continue
if site_A in enz_1_sites and site_B in enz_2_sites:
out_sites.append((site_A + 1, site_B - len(enz_2.site)))
elif site_A in enz_2_sites and site_B in enz_1_sites:
out_sites.append((site_A + 1, site_B - len(enz_1.site)))
return out_sites | b34cab04e0b790b01418555c74c4d810b7184e47 | 1,239 |
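Usage sketch with a synthetic contig, assuming Biopython is installed; the enzyme pair and spacer lengths are chosen only so that one EcoRI/MseI fragment longer than 30 bp exists.
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

seq = Seq("A" * 10 + "GAATTC" + "C" * 40 + "TTAA" + "A" * 10)  # EcoRI ... MseI
contig = SeqRecord(seq, id="contig1")

print(get_regions(contig, ("EcoRI", "MseI")))  # one (start, end) locus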
import numpy as np
def getHighContrast(j17, j18, d17, d18):
"""
contrast enhancement through stacking
"""
summer = j17 + j18
summer = summer / np.amax(summer)
winter = d17 + d18
winter = winter / np.amax(winter)
diff = winter * summer
return diff | cd462aac5c0568f84d64f3020718ec601063044f | 1,240 |
def get_bounding_box(dataframe, dataIdentifier):
"""Returns the rectangle in a format (min_lat, max_lat, min_lon, max_lon)
which bounds all the points of the ´dataframe´.
Parameters
----------
dataframe : pandas.DataFrame
the dataframe with the data
dataIdentifier : DataIdentifier
the identifier of the dataframe to be used
"""
b_box = (getattr(dataframe, dataIdentifier.latitude).min(),
getattr(dataframe, dataIdentifier.latitude).max(),
getattr(dataframe, dataIdentifier.longitude).min(),
getattr(dataframe, dataIdentifier.longitude).max())
return b_box | 6989118af8db36cc38fd670f5cd7506859d2150e | 1,241 |
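Usage sketch; a namedtuple stands in for the project's DataIdentifier, which only needs to expose the latitude/longitude column names as attributes.
from collections import namedtuple

import pandas as pd

DataIdentifier = namedtuple("DataIdentifier", ["latitude", "longitude"])
df = pd.DataFrame({"lat": [48.1, 48.3, 47.9], "lon": [11.5, 11.7, 11.4]})

print(get_bounding_box(df, DataIdentifier("lat", "lon")))
# (47.9, 48.3, 11.4, 11.7)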
def get_file_download_response(dbfile):
"""
Create the HttpResponse for serving a file.
The file is not read our output - instead, by setting `X-Accel-Redirect`-
header, the web server (nginx) directly serves the file.
"""
mimetype = dbfile.mimeType
response = HttpResponse(content_type=mimetype)
response["Content-Disposition"] = "inline; filename={0}".format(
to_safe_name(dbfile.name)
)
response['X-Accel-Redirect'] = "/{0}".format(dbfile.path)
return response | 93f7e57daaec6a11e5241682ba976e8d68a91acf | 1,242 |
import time
def keyWait():
"""Waits until the user presses a key.
Then returns a L{KeyDown} event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
@rtype: L{KeyDown}
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
return KeyDown('F4', '', True, False, True, False, False)
time.sleep(.001) | 60ee6ad29c215585aef03237a23f15581deb8f5e | 1,243 |
from datetime import datetime
def create_comentarios_instancia(id_instancia):
"""
    @returns "ok" if the operation was carried out successfully
    @except status 500 if an error occurs
"""
if request.method == 'POST':
try:
values = json.loads( request.data.decode('8859') )
mensaje = values['com_mensaje']
autor = values['com_usuario']
fecha = datetime.today()
comentario = comentarios_instancia_curso(instancias_curso_id = id_instancia , mensaje = mensaje , autor = autor, fecha = fecha)
session.add(comentario)
session.commit()
        except Exception:
session.rollback()
return "Operacion No se pudo llevar a cabo", 500
return "ok"
else:
return "Operacion No se pudo llevar a cabo", 500 | 58a49f4c76976bf0a13f07f6e6de73f358f34e4a | 1,244 |
from random import choice
async def osfrog(msg, mobj):
"""
Patch 7.02: help string was removed from Captain's Mode
"""
osfrogs = [
"Added Monkey King to the game",
"Reduced Lone Druid's respawn talent -50s to -40s",
]
return await client.send_message(mobj.channel, choice(osfrogs)) | f1b5907cad42d7e6d6021e447ab8cd6dd91429e5 | 1,246 |
import numpy as np
from stl.mesh import Mesh
def _add_normalizing_vector_point(mesh, minpt, maxpt):
"""
This function allows you to visualize all meshes in their size relative to each other
It is a quick simple hack: by adding 2 vector points at the same x coordinates at the
extreme left and extreme right of the largest .stl mesh, all the meshes are displayed
with the same scale.
input: [mesh], minpoint coordinates, maxpoint coordinates
output: [mesh] with 2 added coordinate points
"""
newmesh = Mesh(np.zeros(mesh.vectors.shape[0]+2, dtype=Mesh.dtype))
# newmesh.vectors = np.vstack([mesh.vectors,
# np.array([ [[0,maxpt,0], [0,maxpt,0], [0,maxpt,0]],
# [[0,minpt,0], [0,minpt,0], [0,minpt,0]] ], float) ])
newmesh.vectors = np.vstack([mesh.vectors,
np.array([ [[0,0,maxpt], [0,0,maxpt], [0,0,maxpt]],
[[0,0,minpt], [0,0,minpt], [0,0,minpt]] ], float) ])
return newmesh | a60e1f0dd4bc6c60e40096cb4412b47a5a3d139a | 1,247 |
import numpy
import pygeos
def radii_ratio(collection):
"""
The Flaherty & Crumplin (1992) index, OS_3 in Altman (1998).
The ratio of the radius of the equi-areal circle to the radius of the MBC
"""
ga = _cast(collection)
r_eac = numpy.sqrt(pygeos.area(ga) / numpy.pi)
r_mbc = pygeos.minimum_bounding_radius(ga)
return r_eac / r_mbc | 16008df64f999b615f855e92f727638813434e98 | 1,248 |
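Usage sketch for a unit square, assuming _cast (the module's internal coercion helper) accepts a list of pygeos geometries: the equi-areal radius is sqrt(1/pi) ≈ 0.564 and the minimum bounding circle radius is sqrt(2)/2 ≈ 0.707, so the ratio is roughly 0.80.
import pygeos

square = pygeos.box(0, 0, 1, 1)
print(radii_ratio([square]))  # approximately [0.7979]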
import datetime
import jwt
def create_jwt(project_id, private_key_file, algorithm):
"""Create a JWT (https://jwt.io) to establish an MQTT connection."""
token = {
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
'aud': project_id
}
with open(private_key_file, 'r') as f:
private_key = f.read()
print('Creating JWT using {} from private key file {}'.format(algorithm, private_key_file))
return jwt.encode(token, private_key, algorithm=algorithm) | e365cc67a3587a64b38276ce95e8e0c389e54314 | 1,249 |
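Usage sketch; the project id and key path are placeholders, and the call only succeeds if the RSA private key file exists (PyJWT with a cryptography backend handles RS256).
password = create_jwt("my-gcp-project", "rsa_private.pem", "RS256")
print(str(password)[:20], "...")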
def games(engine1, engine2, number_of_games):
"""Let engine1 and engine2 play several games against each other.
Each begin every second game."""
engine1_wins = 0
engine2_wins = 0
draws = 0
for n in range(number_of_games):
if n % 2:
result = game(engine1, engine2, True)
else:
result = game(engine1, engine2, False)
if result == "engine1":
engine1_wins += 1
elif result == "engine2":
engine2_wins += 1
else:
draws += 1
return ("engine1 wins: " + str(engine1_wins) +
" engine2 wins: " + str(engine2_wins) + " draws: " + str(draws)) | f520a08214d1ad063f747b01582b2dbfc94d5d9e | 1,250 |
import nibabel as nb
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def tissue2line(data, line=None):
"""tissue2line
    Project tissue probability maps to the line by calculating the probability of each
    tissue type in each voxel of the 16x720 beam and then averaging these to get a
    1x720 line. Discrete tissues are assigned by means of the highest probability of a
    particular tissue type.
Parameters
----------
data: list,numpy.ndarray,str
for tissue data: list of three numpy array/nifti images/strings describing the probability of white matter/gray matter and CSF
line: str,nibabel.Nifti1Image,numpy.ndarray
used for the direction of the line and should have the same dimensions as `data`. Generally this is the output from create_line_from_slice
Returns
----------
numpy.ndarray
(1,720) array of your `data` in the line
"""
# load in reference line data
if isinstance(line, str):
ref = nb.load(line).get_fdata()
elif isinstance(line, nb.Nifti1Image):
ref = line.get_fdata()
elif isinstance(line, np.ndarray):
ref = line
else:
raise ValueError("Unknown input type for line; should be a string, nifti-image, or numpy array")
if isinstance(data, list):
# we have receive a list, assuming tissue probability maps.
if len(data) > 3:
raise ValueError(f'Data contains {len(data)} items, this should be three: 1) WM prob, 2) GM prob, 3) CSF prob')
if isinstance(data[0], str):
input = [nb.load(i).get_fdata() for i in data]
elif isinstance(data[0], nb.Nifti1Image):
input = [i.get_fdata() for i in data]
elif isinstance(data[0], np.ndarray):
input = data
# remove existing 4th dimension
        input = [np.squeeze(i, axis=3) if len(i.shape) == 4 else i for i in input]
for i in input:
if i.shape != ref.shape:
raise ValueError(f"Dimensions of line [{ref.shape}] do not match dimension of input seg [{i.shape}]")
# put wm/gm/csf in three channels of a numpy array
prob_stack = np.dstack([input[0],input[1],input[2]])
prob_stack_avg = np.average(prob_stack, axis=1)
# normalize averages between 0-1
scaler = MinMaxScaler()
scaler.fit(prob_stack_avg)
avg_norm = scaler.transform(prob_stack_avg)
output = []
lut = {'wm':2,'gm':1,'csf':0}
# avg_norm has 3 columns; 1st = WM, 2nd = GM, 3rd = CSF
for i,r in enumerate(avg_norm):
max_val = np.amax(r)
# check tissue type only if non-zero value. If all probabilities are 0 is should be set to zero regardless
if max_val == 0:
output.append(lut['csf'])
else:
# make list of each row for nicer indexing
idx = list(r).index(max_val)
if idx == 0:
# type = 'wm' = '1' in nighres segmentation
output.append(lut['wm'])
elif idx == 1:
# type = 'gm' = '2' in nighres segmentation
output.append(lut['gm'])
elif idx == 2:
# type = 'csf' = '0' in nighres segmentation
output.append(lut['csf'])
output = np.array(output)[:,np.newaxis]
return output | 3966f789e1093e11e4b31deda0f7d43f753007b0 | 1,251 |
def get_version(pyngrok_config=None):
"""
Get a tuple with the ``ngrok`` and ``pyngrok`` versions.
:param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,
overriding :func:`~pyngrok.conf.get_default()`.
:type pyngrok_config: PyngrokConfig, optional
:return: A tuple of ``(ngrok_version, pyngrok_version)``.
:rtype: tuple
"""
if pyngrok_config is None:
pyngrok_config = conf.get_default()
ngrok_version = process.capture_run_process(pyngrok_config.ngrok_path, ["--version"]).split("version ")[1]
return ngrok_version, __version__ | 256216926a6c91a8000f8b823202cde576af3a67 | 1,252 |
def stat_cleaner(stat: str) -> int:
"""Cleans and converts single stat.
Used for the tweets, followers, following, and likes count sections.
Args:
stat: Stat to be cleaned.
Returns:
A stat with commas removed and converted to int.
"""
return int(stat.replace(",", "")) | cb6b6035ab21871ca5c00d5d39d9efe87e0acc89 | 1,253 |
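Usage sketch:
print(stat_cleaner("1,234,567"))  # 1234567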