content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
def change_log_root_key():
"""Root key of an entity group with change log."""
# Bump ID to rebuild the change log from *History entities.
return ndb.Key('AuthDBLog', 'v1') | 465ab46b7e884d7f5e217861d6c451c491e04f07 | 100 |
import numpy
from astropy.io import fits
def load_file(filename):
"""Loads a TESS *spoc* FITS file and returns TIME, PDCSAP_FLUX"""
hdu = fits.open(filename)
time = hdu[1].data['TIME']
flux = hdu[1].data['PDCSAP_FLUX']
flux[flux == 0] = numpy.nan
return time, flux | 50c777903b26c658c828346af53e3a659d1eb46b | 101 |
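A brief usage sketch for load_file above; the filename is illustrative and assumes the astropy import shown:
time, flux = load_file("tess_spoc_lightcurve.fits")
good = ~numpy.isnan(flux)
normalized_flux = flux[good] / numpy.nanmedian(flux[good])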
def create_insight_id_extension(
insight_id_value: str, insight_system: str
) -> Extension:
"""Creates an extension for an insight-id with a valueIdentifier
The insight id extension is defined in the IG at:
https://alvearie.io/alvearie-fhir-ig/StructureDefinition-insight-id.html
Args:
insight_id_value - the value of the insight id
insight_system - urn for the system used to create the insight
Returns: The insight id extension
Example:
>>> ext = create_insight_id_extension("insight-1", "urn:id:alvearie.io/patterns/QuickUMLS_v1.4.0")
>>> print(ext.json(indent=2))
{
"url": "http://ibm.com/fhir/cdm/StructureDefinition/insight-id",
"valueIdentifier": {
"system": "urn:id:alvearie.io/patterns/QuickUMLS_v1.4.0",
"value": "insight-1"
}
}
"""
insight_id_ext = Extension.construct()
insight_id_ext.url = alvearie_ext_url.INSIGHT_ID_URL
insight_id = Identifier.construct()
insight_id.system = insight_system
insight_id.value = insight_id_value
insight_id_ext.valueIdentifier = insight_id
return insight_id_ext | 72b000cdc3903308ca8692c815e562511fd50b91 | 102 |
def ReadNotifyResponseHeader(payload_size, data_type, data_count, sid, ioid):
"""
Construct a ``MessageHeader`` for a ReadNotifyResponse command.
Read value of a channel. Sent over TCP.
Parameters
----------
payload_size : integer
Size of DBR formatted data in payload.
data_type : integer
Payload format.
data_count : integer
Payload element count.
sid : integer
SID of the channel.
ioid : integer
IOID of this operation.
"""
struct_args = (15, payload_size, data_type, data_count, sid, ioid)
# If payload_size or data_count cannot fit into a 16-bit integer, use the
# extended header.
return (ExtendedMessageHeader(*struct_args)
if any((payload_size > 0xffff, data_count > 0xffff, ))
else MessageHeader(*struct_args)) | 5d088416f5fca6e0aeb3ef1f965ca7c00d8a6c90 | 103 |
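A quick sketch of how the 16-bit overflow check above picks the header class; the field values are illustrative and the header classes are assumed to come from the surrounding module:
small = ReadNotifyResponseHeader(payload_size=8, data_type=6, data_count=1, sid=7, ioid=3)        # -> MessageHeader
large = ReadNotifyResponseHeader(payload_size=0x10000, data_type=6, data_count=1, sid=7, ioid=3)  # -> ExtendedMessageHeader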
def substitute_T5_cols(c, cols, nlu_identifier=True):
"""
rename cols with base name either <t5> or if not unique <t5_<task>>
"""
new_cols = {}
new_base_name = 't5' if nlu_identifier=='UNIQUE' else f't5_{nlu_identifier}'
for col in cols :
if '_results' in col : new_cols[col] = new_base_name
elif '_beginnings' in col : new_cols[col] = f'{new_base_name}_begin'
elif '_endings' in col : new_cols[col] = f'{new_base_name}_end'
elif '_embeddings' in col : continue # Token never stores Embeddings new_cols[col] = f'{new_base_name}_embedding'
elif '_types' in col : continue # new_cols[col] = f'{new_base_name}_type'
elif 'meta' in col:
if '_sentence' in col : new_cols[col] = f'{new_base_name}_origin_sentence' # maps to which sentence token comes from
else : logger.info(f'Dropping unmatched metadata_col={col} for c={c}')
# new_cols[col]= f"{new_base_name}_confidence"
return new_cols | 2820706faff786011abbd896551026c65bb0d848 | 104 |
def _get_trigger_func(name, trigger_name):
"""
Given a valid vulnerability name, get the trigger function corresponding
to the vulnerability name and trigger name.
If the trigger function isn't found, raise NotFound.
"""
try:
return get_trigger(name, trigger_name)
except AttributeError:
raise NotFound() | a93207cd7319aa10bfe853fe21bbb04a8c22f3c2 | 105 |
def _get_rotated_bounding_box(size, quaternion):
"""Calculates the bounding box of a rotated 3D box.
Args:
size: An array of length 3 specifying the half-lengths of a box.
quaternion: A unit quaternion specifying the box's orientation.
Returns:
An array of length 3 specifying the half-lengths of the bounding box of
the rotated box.
"""
corners = ((size[0], size[1], size[2]),
(size[0], size[1], -size[2]),
(size[0], -size[1], size[2]),
(-size[0], size[1], size[2]))
rotated_corners = tuple(
transformations.quat_rotate(quaternion, corner) for corner in corners)
return np.amax(np.abs(rotated_corners), axis=0) | 7c70ea23051dacdf2447a252488456c5b4d23901 | 106 |
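A self-contained NumPy sketch of the same bounding-box idea, with a plain rotation-matrix stand-in for transformations.quat_rotate; the [w, x, y, z] quaternion convention is an assumption:
import numpy as np

def quat_rotate_sketch(quat, vec):
    # Rotate vec by the unit quaternion quat = [w, x, y, z] via its rotation matrix.
    w, x, y, z = quat
    rot = np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
    ])
    return rot @ np.asarray(vec)

half_lengths = np.array([0.5, 0.2, 0.1])
quat = np.array([np.cos(np.pi / 8), 0.0, 0.0, np.sin(np.pi / 8)])  # 45 degrees about z
corners = [( half_lengths[0],  half_lengths[1],  half_lengths[2]),
           ( half_lengths[0],  half_lengths[1], -half_lengths[2]),
           ( half_lengths[0], -half_lengths[1],  half_lengths[2]),
           (-half_lengths[0],  half_lengths[1],  half_lengths[2])]
# Half-lengths of the axis-aligned bounding box of the rotated box.
bbox_half_lengths = np.amax(np.abs([quat_rotate_sketch(quat, c) for c in corners]), axis=0)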
def Cos(
a: float = 1.,
b: float = 1.,
c: float = 0.) -> InternalLayer:
"""Affine transform of `Cos` nonlinearity, i.e. `a cos(b*x + c)`.
Args:
a: output scale.
b: input scale.
c: input phase shift.
Returns:
`(init_fn, apply_fn, kernel_fn)`.
"""
return Sin(a=a, b=b, c=c + np.pi / 2) | 7327c68bf5a182cf90fa1d0b0280128c99f323b3 | 107 |
def key_make_model(chip):
""" Given a chip, return make and model string.
Make and model are extracted from chip.misc using the keys "make" and
"model". If they are missing it returns None for that value. If misc
missing or not a dictionary, (None, None) is returned.
Args:
chip: A chip named tuple
Returns:
string: "make_model" from the chip. The string "None" may be returned
for one of the positions (or both) if it is missing in the chip.
"""
output = [None, None]
# Ensure we have a misc dictionary
if hasattr(chip, "misc"):
misc = chip.misc
if hasattr(misc, "get"):
output[0] = misc.get("make", None)
output[1] = misc.get("model", None)
return tuple_to_string(output) | 8729d95537700be1a468411aa1b51b094a4bb34f | 108 |
from typing import Sequence
def alternating_epsilons_actor_core(
policy_network: EpsilonPolicy, epsilons: Sequence[float],
) -> actor_core_lib.ActorCore[EpsilonActorState, None]:
"""Returns actor components for alternating epsilon exploration.
Args:
policy_network: A feedforward action selecting function.
epsilons: epsilons to alternate per-episode for epsilon-greedy exploration.
Returns:
A feedforward policy.
"""
epsilons = jnp.array(epsilons)
def apply_and_sample(params: networks_lib.Params,
observation: networks_lib.Observation,
state: EpsilonActorState):
random_key, key = jax.random.split(state.rng)
actions = policy_network(params, key, observation, state.epsilon)
return (actions.astype(jnp.int32),
EpsilonActorState(rng=random_key, epsilon=state.epsilon))
def policy_init(random_key: networks_lib.PRNGKey):
random_key, key = jax.random.split(random_key)
epsilon = jax.random.choice(key, epsilons)
return EpsilonActorState(rng=random_key, epsilon=epsilon)
return actor_core_lib.ActorCore(
init=policy_init, select_action=apply_and_sample,
get_extras=lambda _: None) | 9a247d5e98cef7e653a074cb7007f5823ec686c7 | 109 |
from typing import List
from typing import Optional
from typing import Dict
def get_keywords(
current_user: models.User = Depends(deps.get_current_active_user),
controller_client: ControllerClient = Depends(deps.get_controller_client),
labels: List = Depends(deps.get_personal_labels),
q: Optional[str] = Query(None, description="query keywords"),
offset: int = Query(0),
limit: Optional[int] = Query(None),
) -> Dict:
"""
Get keywords and aliases
"""
filter_f = partial(filter_keyword, q) if q else None
items = list(labels_to_keywords(labels, filter_f))
if settings.REVERSE_KEYWORDS_OUTPUT:
items.reverse()
res = {"total": len(items), "items": paginate(items, offset, limit)}
return {"result": res} | c835c55464e16debdfcb7c79f63cd7501a9a6e76 | 110 |
def _compile_unit(i):
"""Append gas to unit and update CO2e for pint/iam-unit compatibility"""
if " equivalent" in i["unit"]:
return i["unit"].replace("CO2 equivalent", "CO2e")
if i["unit"] in ["kt", "t"]:
return " ".join([i["unit"], i["gas"]])
else:
return i["unit"] | 0692167e95159d08b306a241baf4eadefdc29b35 | 111 |
def get_fdfs_url(file):
"""
上传文件或图片到FastDFS
:param file: 文件或图片对象,二进制数据或本地文件
:return: 文件或图片在FastDFS中的url
"""
# 创建FastDFS连接对象
fdfs_client = Fdfs_client(settings.FASTDFS_CONF_PATH)
"""
client.upload_by_filename(文件名),
client.upload_by_buffer(文件bytes数据)
"""
# 上传文件或图片到fastDFS
if isinstance(file, InMemoryUploadedFile):
result = fdfs_client.upload_by_buffer(file.read())
else:
result = fdfs_client.upload_by_filename(file)
"""
result = {
'Group name': 'group1', # FastDFS服务端Storage组名
'Remote file_id': 'group1/M00/00/00/wKgThF0LMsmATQGSAAExf6lt6Ck10.jpeg', # 文件存储的位置(索引),可用于下载
'Status': 'Upload successed.', # 文件上传结果反馈
'Local file name': '/home/python/Desktop/upload_Images/02.jpeg', # 所上传文件的真实路径
'Uploaded size': '76.00KB', # 文件大小
'Storage IP': '192.168.19.132'} # FastDFS服务端Storage的IP
"""
# 判断是否上传成功,result为一个字典
if result['Status'] != 'Upload successed.':
return Response(status=403)
# Get the path (file_id) of the uploaded file or image
file_url = result['Remote file_id']
return file_url | ba23e4f416b4b418706b3253cb26ff63b8e62fc6 | 112 |
from typing import Any
from typing import List
def get_all_edge_detects(clip: vs.VideoNode, **kwargs: Any) -> List[vs.VideoNode]:
"""Allows you to get all masks inheriting from EdgeDetect.
Args:
clip (vs.VideoNode):
Source clip.
kwargs:
Arguments passed to EdgeDetect().get_mask
Returns:
List[vs.VideoNode]: List of masks.
Example:
from vardefunc.mask import get_all_edge_detects
clip.set_output(0)
for i, mask in enumerate(get_all_edge_detects(get_y(clip)), start=1):
mask.set_output(i)
"""
masks = [
edge_detect().get_mask(clip, **kwargs).text.Text(edge_detect.__name__) # type: ignore
for edge_detect in EdgeDetect.__subclasses__()
]
return masks | 7324568af5e6ff27f8f7d14b27a25e19666903a4 | 113 |
from datetime import datetime
import pytz
def get_ustz(localdt, timezone):
"""
Returns the timezone associated to a local datetime and an IANA timezone.
There are two common timezone conventions. One is the Olson/IANA and
the other is the Microsoft convention. For example, the closest IANA
timezone for Boston, MA is America/New_York. More commonly, this is
known as Eastern time zone. The goal of this function is to return the
common name for a timezone in the contiguous US.
Note that Arizona has its own IANA timezone and does not observe daylight
savings. So depending on the time of year, the offset for Arizona will
correspond to either Pacific or Mountain time.
Parameters
----------
localdt : datetime
The local datetime instance.
timezone : str
The IANA timezone associated with `localdt`. This should be a timezone
for the contiguous US.
Note that the time of day in `localdt` is ignored and noon is used instead.
This is nice for quick checks, but undesirable when accurate timezone
identification is needed late at night or early in the morning.
Returns
-------
tz : str
The common name for the timezone. This will be one of Pacific, Mountain,
Central, or Eastern.
"""
# Use noon to guarantee that we have the same day in each timezone.
# This is desirable in the sense that we don't want someone's tweet jumping
# from Eastern to Central, for example, at the end of daylight savings time.
localdt = datetime(localdt.year, localdt.month, localdt.day, 12)
timezone = pytz.timezone(timezone)
dt = timezone.localize(localdt)
for tz, tz_ref in TIMEZONES:
dt_new = dt.astimezone(tz_ref)
if dt_new.utcoffset() == dt.utcoffset():
return tz | c08d7407a8025522baf559becb649bde7aad3fa0 | 114 |
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6,
**kwargs):
"""
Plots the rolling Sharpe ratio versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_window : int, optional
The days window over which to compute the sharpe ratio.
**kwargs, optional
Passed to plotting function.
Returns
-------
rolling_sharpe_ts : pd.Series
Rolling Sharpe ratio time series computed over ``rolling_window``.
"""
rolling_sharpe_ts = timeseries.rolling_sharpe(
returns, rolling_window)
return rolling_sharpe_ts | e9a3ebcbfc46c403c82cf2b65502e30f3526ee15 | 115 |
def ResolveNamespace(namespace):
"""Validate app namespace, providing a default.
If the argument is None, namespace_manager.get_namespace() is substituted.
Args:
namespace: The namespace argument value to be validated.
Returns:
The value of namespace, or the substituted default.
Always a non-empty string or None.
Raises:
BadArgumentError if the value is not a string.
"""
if namespace is None:
namespace = namespace_manager.get_namespace()
else:
namespace_manager.validate_namespace(
namespace, datastore_errors.BadArgumentError)
return namespace | c84e30250a7b1eed200de0bd9a59e77df05829e1 | 116 |
import math
def Calculo_por_etapas(Diccionario):
"""Calculo de la hornilla por etapas"""
Lista_Contenido=[]
Lista_columnas=[]
# Normalization of the furnace capacity
#Mem_dias=float(Diccionario['¿Cada cuantos días quiere moler? (días)'])
#Mem_Temp=Normalizar_Capacidad(float(Diccionario['Capacidad estimada de la hornilla']),Mem_dias)
#print(float(Diccionario['Capacidad estimada de la hornilla']))
#print(Mem_Temp)
Etapas=int(float(Diccionario['Etapas']))#Mem_Temp[1]
#Etapas=12
# Saturator: a minimum of two stages is required
if (Etapas>2):
Factor_Division=Etapas-2
else:
Factor_Division=2
Etapas=2
# Characteristics of the cells in each column (Lista_columnas)
# Row 0: initial solids concentration
# Row 1: final solids concentration
# Row 2: average concentration
# Row 3: mass of incoming juice
# Row 4: specific heat of the juice at constant pressure
# Row 5: juice density
# Row 6: juice volume (kg basis)
# Row 7: juice volume in L
# Row 8: inlet temperature
# Row 9: outlet temperature
# Row 10: vaporization enthalpy
# Row 11: mass of water to evaporate
# Row 12: required heat computed per stage
for i in range(13):
for j in range (Etapas):
Lista_columnas.append(float(i+j))
Lista_Contenido.append(Lista_columnas)
Lista_columnas=[]
Lista_Contenido[0][0]=float(Diccionario['CSS del jugo pos-evaporación']) #Concentracion_solidos_inicial (CSS02)
Lista_Contenido[1][0]=float(Diccionario['CSS panela']) #Concentracion_solidos_final (CSSF1)
Lista_Contenido[0][Etapas-1]=float(Diccionario['CSS del jugo de Caña']) #Concentracion_solidos_inicial (CSS01)
Lista_Contenido[1][Etapas-1]=float(Diccionario['CSS del jugo clarificado']) #Concentracion_solidos_final (CSSF1)
if(Etapas>2):
ite=0
for i in range(Etapas-2,0,-1):
Lista_Contenido[0][i]=Lista_Contenido[1][i+1]
if(ite==0):
Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][i])/Factor_Division)+Lista_Contenido[0][i]
ite=ite+1
else:
Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][Etapas-2])/Factor_Division)+Lista_Contenido[0][i]
for i in range(Etapas-1,-1,-1):
# Average concentration = (initial solids concentration + final solids concentration) / 2
Lista_Contenido[2][i]=(Lista_Contenido[0][i]+Lista_Contenido[1][i])/2
if(i==Etapas-1):
# Mass of incoming juice
Lista_Contenido[3][i]=float(Diccionario['A clarificación'])
else:
# Incoming juice mass = (previous-stage juice mass * previous-stage initial CSS) / previous-stage final CSS
Lista_Contenido[3][i]=Lista_Contenido[3][i+1]*Lista_Contenido[0][i+1]/Lista_Contenido[1][i+1]
# Specific heat of the juice at constant pressure = 4.18*(1-(0.006*average concentration))
Lista_Contenido[4][i]=4.18*(1-(0.006*Lista_Contenido[2][i]))
# Juice density = 997.39+(4.46*average concentration)
Lista_Contenido[5][i]=997.39+(4.46*Lista_Contenido[2][i])
# Juice volume = incoming juice mass / juice density
Lista_Contenido[6][i]=Lista_Contenido[3][i]/Lista_Contenido[5][i]
# Juice volume in L = juice volume * 1000
Lista_Contenido[7][i]=Lista_Contenido[6][i]*1000.0
if(i==Etapas-1):
# Inlet temperature = ambient temperature
Lista_Contenido[8][i]=float(Diccionario['Temperatura del ambiente'])
else:
# Inlet temperature = water boiling temperature + 0.2209*math.exp(0.0557*initial solids concentration)
Lista_Contenido[8][i]=Lista_Contenido[9][i+1]
# Outlet temperature = G37 + 0.2209*math.exp(0.0557*final solids concentration)
Lista_Contenido[9][i]=float(Diccionario['Temperatura de ebullición del agua'])+0.2209*math.exp(0.0557*Lista_Contenido[1][i])
# Vaporization enthalpy = (2492.9-(2.0523*inlet temperature))-(0.0030752*(inlet temperature**2))
Lista_Contenido[10][i]=(2492.9-(2.0523*Lista_Contenido[8][i]))-(0.0030752*(Lista_Contenido[8][i]**2))
# Mass of water to evaporate = incoming juice mass - (incoming juice mass*initial solids concentration/final solids concentration)
Lista_Contenido[11][i]=Lista_Contenido[3][i]-(Lista_Contenido[3][i]*Lista_Contenido[0][i]/Lista_Contenido[1][i])
# Heat per stage = (incoming juice mass*specific heat*(outlet temperature-inlet temperature)+mass of water to evaporate*vaporization enthalpy)/3600
Lista_Contenido[12][i]=(Lista_Contenido[3][i]*Lista_Contenido[4][i]*(Lista_Contenido[9][i]-Lista_Contenido[8][i])+Lista_Contenido[11][i]*Lista_Contenido[10][i])/3600.0
# Round to 3 decimal places
for j in range (13):
for i in range (Etapas):
Lista_Contenido[j][i]=round(Lista_Contenido[j][i],3)
# Move the output (position of the "punteo" pan) to pan 3 or 4
Lista_contenido_2=[]
L_aux=[]
for i in Lista_Contenido:
inio=3
if (Etapas!=7):
L_aux.append(i[2])
L_aux.append(i[1])
L_aux.append(i[0])
inio=3
else:
L_aux.append(i[3])
L_aux.append(i[2])
L_aux.append(i[1])
L_aux.append(i[0])
inio=4
for t in range(inio,len(i)):
L_aux.append(i[t])
Lista_contenido_2.append(L_aux)
L_aux=[]
Lista_Contenido=Lista_contenido_2
Etiquetas=[
'Concentracion de Solidos Inicial [ºBrix]',
'Concentracion de Solidos Final [ºBrix]',
'Concentracion de Solidos Promedio [ºBrix]',
'Masa de Jugo Entrada [Kg]',
'Calor Especifico P Cte jugo [kJ/Kg °C]',
'Densidad del Jugo [kg/m3]',
'Volumen de jugo [m^3/kg]',
'Volumen de jugo [L]',
'Temperatura de Entrada [ºC]',
'Temperatura de Salida [ºC]',
'Entalpia de Vaporización [kJ/kg]',
'Masa de Agua a Evaporar [kg]',
'Calor Nece Calc por Etapa [kW]'
]
Dict_aux=dict(zip(Etiquetas,Lista_Contenido))
Dict_aux_2=dict(zip(['Etapas'],[Etapas]))
Dict_aux.update(Dict_aux_2)
return Dict_aux | c3b531e1b3fbb3491a9d7a5521c216e5ce5c5b38 | 117 |
def venus_equ_proportional_minute(x, y, e, R):
"""
Venus equation proportional minute
:param x: true argument (av) in degree
:param y: mean center (cm) in degree
:param e: eccentricity
:param R: radius of the epicycle
:return: minutes proportional in degree
"""
return utils.minuta_proportionalia(x, R, e, y) | 5e1dfc8bdd00cbced144d8d4fdf3c62bfe12346b | 118 |
def e2_cond(p, m, sigma, alpha, mu):
"""
This function depends on the gamma function.
Conditional mean of the square of the normal distribution.
See the article for more information.
Parameters
----------
p : float
Proportion of persistent species.
m : float
Mean of the persistent species.
sigma : float
Root mean square of the persistent species.
alpha : float
Parameter of the model - Interaction strength.
mu : float
Parameter of the model - Interaction drift.
Returns
-------
float
Conditional mean associated to the system.
"""
# The value delta is similar in the article.
delta = alpha/(sigma*np.sqrt(p))*(1+mu*p*m)
p_1 = np.exp(-delta**2/2)
p_2 = 1-stats.norm.cdf(-delta)
return (1/np.sqrt(2*np.pi))*-delta*p_1/p_2+1 | a39dfe45e94dbb33c99ffa1c21b7cd0eb2032fd9 | 119 |
from collections import defaultdict
def concline_generator(matches, idxs, df, metadata,
add_meta, category, fname, preserve_case=False):
"""
Get all conclines
:param matches: a list of formatted matches
:param idxs: their (sent, word) idx
"""
conc_res = []
# potential speedup: turn idxs into dict
mdict = defaultdict(list)
# if remaking idxs here, don't need to do it earlier
idxs = list(matches.index)
for mid, (s, i) in zip(matches, idxs):
#for s, i in matches:
mdict[s].append((i, mid))
# shorten df to just relevant sents to save lookup time
df = df.loc[list(mdict.keys())]
# don't look up the same sentence multiple times
for s, tup in sorted(mdict.items()):
sent = df.loc[s]
if not preserve_case:
sent = sent.str.lower()
meta = metadata[s]
sname = meta.get('speaker', 'none')
for i, mid in tup:
if not preserve_case:
mid = mid.lower()
ix = '%d,%d' % (s, i)
start = ' '.join(sent.loc[:i-1].values)
end = ' '.join(sent.loc[i+1:].values)
lin = [ix, category, fname, sname, start, mid, end]
if add_meta:
for k, v in sorted(meta.items()):
if k in ['speaker', 'parse', 'sent_id']:
continue
if isinstance(add_meta, list):
if k in add_meta:
lin.append(v)
elif add_meta is True:
lin.append(v)
conc_res.append(lin)
return conc_res | b0f9cc9039f78996b38ed87f5faf3b725226a7dd | 120 |
import numpy as np
from tqdm import tqdm
def match_detections(predicted_data, gt_data, min_iou):
"""Carry out matching between detected and ground truth bboxes.
:param predicted_data: List of predicted bboxes
:param gt_data: List of ground truth bboxes
:param min_iou: Min IoU value to match bboxes
:return: List of matches
"""
all_matches = {}
total_gt_bbox_num = 0
matched_gt_bbox_num = 0
frame_ids = gt_data.keys()
for frame_id in tqdm(frame_ids, desc='Matching detections'):
if frame_id not in predicted_data.keys():
all_matches[frame_id] = []
continue
gt_bboxes = gt_data[frame_id]
predicted_bboxes = predicted_data[frame_id]
total_gt_bbox_num += len(gt_bboxes)
similarity_matrix = calculate_similarity_matrix(gt_bboxes, predicted_bboxes)
matches = []
for _ in range(len(gt_bboxes)):
best_match_pos = np.unravel_index(similarity_matrix.argmax(), similarity_matrix.shape)
best_match_value = similarity_matrix[best_match_pos]
if best_match_value <= min_iou:
break
gt_id = best_match_pos[0]
predicted_id = best_match_pos[1]
similarity_matrix[gt_id, :] = 0.0
similarity_matrix[:, predicted_id] = 0.0
matches.append((gt_id, predicted_id))
matched_gt_bbox_num += 1
all_matches[frame_id] = matches
print('Matched gt bbox: {} / {} ({:.2f}%)'
.format(matched_gt_bbox_num, total_gt_bbox_num,
100. * float(matched_gt_bbox_num) / float(max(1, total_gt_bbox_num))))
return all_matches | 1030ddd34d174cca343f0c888502f191d6cf9af4 | 121 |
def map_string(affix_string: str, punctuation: str, whitespace_only: bool = False) -> str:
"""Turn affix string into type char representation. Types are 'w' for non-whitespace char,
and 's' for whitespace char.
:param affix_string: a string
:type affix_string: str
:param punctuation: the set of characters to treat as punctuation
:type punctuation: str
:param whitespace_only: whether to treat only whitespace as word boundary or also include (some) punctuation
:type whitespace_only: bool
:return: the type char representation
:rtype: str
"""
if whitespace_only:
return "".join(["s" if char == " " else "w" for char in affix_string])
else:
return "".join(["s" if char == " " or char in punctuation else "w" for char in affix_string]) | 6258f9e57a9081a1c791ec7c22f855079a99cdfb | 122 |
import yaml
def config_loads(cfg_text, from_cfg=None, whitelist_keys=None):
"""Same as config_load but load from a string
"""
try:
cfg = AttrDict(yaml.safe_load(cfg_text))
except TypeError:
# empty string
cfg = AttrDict()
if from_cfg:
if not whitelist_keys:
whitelist_keys = []
_validate_config(cfg, from_cfg, whitelist_keys)
return from_cfg + cfg
return cfg | 02a5aa3713590fb6f1fcd0665e55698b857f6f1c | 123 |
def align_column ( table , index , align = 'left') :
"""Aling the certain column of the table
>>> aligned = align_column ( table , 1 , 'left' )
"""
nrows = [ list ( row ) for row in table ]
lmax = 0
for row in nrows :
if index < len ( row ) :
item = decolorize ( row [ index ] )
lmax = max ( lmax , len ( item ) )
if not lmax : return table
aleft = align.lower() in left
aright = not aleft and align.lower() in right
new_table = []
for row in nrows :
if index < len ( row ) :
item = decolorize ( row [ index ] )
nspace = lmax - len ( item )
if aleft :
item = row [ index ] + nspace * ' '
elif aright:
item = nspace * ' ' + row [ index ]
else :
sl = nspace // 2
sr = nspace - sl
item = sl * ' ' + row [ index ] + sr * ' '
row[ index ] = item
new_table.append ( row )
return [ tuple ( row ) for row in new_table ] | 3042d885b902ad188a919e1aea3d285a889b7935 | 124 |
def make_form(domain, parent, data, existing=None):
"""simulate a POST payload from the location create/edit page"""
location = existing or Location(domain=domain, parent=parent)
def make_payload(k, v):
if hasattr(k, '__iter__'):
prefix, propname = k
prefix = 'props_%s' % prefix
else:
prefix, propname = 'main', k
return ('%s-%s' % (prefix, propname), v)
payload = dict(make_payload(k, v) for k, v in data.items())
return LocationForm(location, payload) | 2db1d21e808e8c94ca7dbfae85fff29974b63c52 | 125 |
from typing import Iterable
def score_auroc(y_true: Iterable[int], y_prob: Iterable[Iterable[float]]) -> float:
"""
Computes the Area Under ROC curve (AUROC).
Parameters
----------
y_true : List
TODO
y_prob : List
TODO
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] `Analyzing a portion of the ROC curve. McClish, 1989
<https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
"""
assert len(y_true) == len(y_prob)
tpr, fpr = roc_curve_multiclass(y_true, y_prob)
fpr_diffs = [fpr[i] - fpr[i - 1] for i in range(1, len(fpr))]
tpr_means = [(tpr[i] + tpr[i - 1]) / 2.0 for i in range(1, len(tpr))]
return sum([tpr_i * fpr_i for tpr_i, fpr_i in zip(tpr_means, fpr_diffs)]) | f81e011d8aba0c196cf1eaed33901a359fbbe271 | 126 |
import numpy as np
import torch
from sklearn.metrics import pairwise_distances
def abs_densitye_seed(model, inputs, args, tokenizer, **kwargs):
"""Maximum density sampling by calculating information density for
example when passed through [model]"""
# print('getting embedding_a')
X_a = load_and_embed_examples(args, model, tokenizer, evaluate=True, text = 'text_a')
# print('getting embedding_b')
X_b = load_and_embed_examples(args, model, tokenizer, evaluate=True, text = 'text_b')
X = np.absolute(X_a - X_b)
similarity_mtx = 1 / (1 + pairwise_distances(X, X, metric='euclidean'))
scores = torch.tensor(similarity_mtx.mean(axis=1))
return scores | 098bf5cb6afdc23f6e22a40c90d2a93165be4c8a | 127 |
import os
def read_capacity_from_file(experiment_name):
""" Read and return the min capacity, max capacity, interpolation, gamma as a tuple if the capacity
is variable. Otherwise return the constant capacity as is.
TODO: This is a bit brittle at the moment - We should take a look at fixing this for static beta later.
Parameters
----------
experiment_name : str
The name of the experiment, which is the name of the folder that the model is expected to be in.
"""
meta_data = load_metadata(os.path.join(RES_DIR, experiment_name))
min_capacity = meta_data['betaB_initC']
max_capacity = meta_data['betaB_finC']
interp_capacity = meta_data['betaB_stepsC']
gamma = meta_data['betaB_G']
return (min_capacity, max_capacity, interp_capacity, gamma) | 9421eb39d2c88cda6e2ab32511c23b87bab74017 | 128 |
def gains2utvec(g):
"""Converts a vector into an outer product matrix and vectorizes its upper
triangle to obtain a vector in same format as the CHIME visibility matrix.
Parameters
----------
g : 1d array
gain vector
Returns
-------
1d array with vectorized form of upper triangle for the outer product of g
"""
n = len(g)
G = np.dot(g.reshape(n, 1), g.conj().reshape(1, n))
return mat2utvec(G) | 3782bd1c4215b97e6a398700ef8e7f7bf65b0416 | 129 |
from typing import List
from typing import Dict
def get_user_surveys(user: User) -> List[Dict]:
"""
Returns a list of all surveys created by
specific user with survey secret.
"""
return list(map(Survey.get_api_brief_result_with_secrets, db.get_all_surveys(user))) | 76bc202bfc770814467f8ff7dc35a829c8bde9f0 | 130 |
def combine_mpgs(objs, cls=None):
"""
Combine multiple multipart geometries into a single multipart geometry of
geometry collection.
"""
# Generate new list of individual geometries
new = []
for obj in objs:
if isinstance(obj, shapely.geometry.base.BaseMultipartGeometry):
new.extend(list(obj))
elif isinstance(obj, shapely.geometry.base.BaseGeometry):
new.extend([obj])
else:
raise TypeError("Invalid geometry type")
# Convert list to geometry collection or provided class
if cls is None:
new = shapely.geometry.collection.GeometryCollection(new)
else:
new = cls(new)
return new | de3050005152c9da3072e76e24c0087931473d61 | 131 |
def get_polygon_point_dist(poly, pt):
"""Returns the distance between a polygon and point.
Parameters
----------
poly : libpysal.cg.Polygon
A polygon to compute distance from.
pt : libpysal.cg.Point
a point to compute distance from
Returns
-------
dist : float
The distance between ``poly`` and ``point``.
Examples
--------
>>> poly = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
>>> pt = Point((2, 0.5))
>>> get_polygon_point_dist(poly, pt)
1.0
>>> pt2 = Point((0.5, 0.5))
>>> get_polygon_point_dist(poly, pt2)
0.0
"""
if get_polygon_point_intersect(poly, pt) is not None:
dist = 0.0
else:
part_prox = []
for vertices in poly._vertices:
vx_range = range(-1, len(vertices) - 1)
seg = lambda i: LineSegment(vertices[i], vertices[i + 1])
_min_dist = min([get_segment_point_dist(seg(i), pt)[0] for i in vx_range])
part_prox.append(_min_dist)
dist = min(part_prox)
return dist | a3a6feff77440bd9d35029f8976564774e4f4cc1 | 132 |
def score_bearing(
wanted: LocationReferencePoint,
actual: PointOnLine,
is_last_lrp: bool,
bear_dist: float
) -> float:
"""Scores the difference between expected and actual bearing angle.
A difference of 0° will result in a 1.0 score, while 180° will cause a score of 0.0."""
bear = compute_bearing(wanted, actual, is_last_lrp, bear_dist)
return score_angle_sector_differences(wanted.bear, bear) | 3027edd5fd2055ade160e20b1d2b01c25aa32a30 | 133 |
import json
from networkx.readwrite import json_graph
def load_graph(graph_path):
"""
load a graph from JSON
"""
with open(graph_path) as f:
data = json.load(f)
graph = json_graph.node_link_graph(data, directed=True)
return graph | 7f012360861410803edbd628d8ba685e1a9ee936 | 134 |
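A round-trip sketch for load_graph: write a small directed graph as node-link JSON, then load it back; the file path is illustrative:
import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3)])
with open("graph.json", "w") as f:
    json.dump(json_graph.node_link_data(g), f)
loaded = load_graph("graph.json")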
import pandas as pd
def assign_bond_states_to_dataframe(df: pd.DataFrame) -> pd.DataFrame:
"""
Takes a ``PandasPDB`` atom dataframe and assigns bond states to each atom based on:
Atomic Structures of all the Twenty Essential Amino Acids and a Tripeptide, with Bond Lengths as Sums of Atomic Covalent Radii
Heyrovska, 2008
First, maps atoms to their standard bond states (:const:`~graphein.protein.resi_atoms.DEFAULT_BOND_STATE`).
Second, maps non-standard bonds states (:const:`~graphein.protein.resi_atoms.RESIDUE_ATOM_BOND_STATE`).
Fills NaNs with standard bond states.
:param df: Pandas PDB dataframe
:type df: pd.DataFrame
:return: Dataframe with added ``atom_bond_state`` column
:rtype: pd.DataFrame
"""
# Map atoms to their standard bond states
naive_bond_states = pd.Series(df["atom_name"].map(DEFAULT_BOND_STATE))
# Create series of bond states for the non-standard states
ss = (
pd.DataFrame(RESIDUE_ATOM_BOND_STATE)
.unstack()
.rename_axis(("residue_name", "atom_name"))
.rename("atom_bond_state")
)
# Map non-standard states to the dataframe based on the residue and atom name
df = df.join(ss, on=["residue_name", "atom_name"])
# Fill the NaNs with the standard states
df = df.fillna(value={"atom_bond_state": naive_bond_states})
return df | 6c8b204a6d4ca30b1fac46dc08b74ba47d7089be | 135 |
def lastero(f, B=None):
"""
Last erosion.
y = lastero(f, B=None)
`lastero` creates the image y by computing the last erosion by
the structuring element B of the image f . The objects found in
y are the objects of the erosion by nB that can not be
reconstructed from the erosion by (n+1)B , where n is a generic
non negative integer. The image y is a proper subset of the
morphological skeleton by B of f .
Parameters
----------
f : Binary image.
B : Structuring Element (default: 3x3 elementary cross).
Returns
-------
y : Binary image.
"""
assert isbinary(f),'pymorph.lastero: can only process binary images'
if B is None: B = secross()
dt = dist(f,B)
return regmax(dt,B) | 094cd5f93959d82487fb9b4518d751763fa79901 | 136 |
def Sparse2Raster(arr, x0, y0, epsg, px, py, filename="", save_nodata_as=-9999):
"""
Write a scipy.sparse array to a tiled GeoTIFF raster, using save_nodata_as as the NoData value.
"""
BS = 256
geotransform = (x0, px, 0.0, y0, 0.0, -(abs(py)))
srs = osr.SpatialReference()
srs.ImportFromEPSG(int("%s" % (epsg)))
projection = srs.ExportToWkt()
if issparse(arr):
m, n = arr.shape
if m > 0 and n > 0:
dtype = str(arr.dtype)
if dtype in ["uint8"]:
fmt = gdal.GDT_Byte
elif dtype in ["uint16"]:
fmt = gdal.GDT_UInt16
elif dtype in ["uint32"]:
fmt = gdal.GDT_UInt32
elif dtype in ["float32"]:
fmt = gdal.GDT_Float32
elif dtype in ["float64"]:
fmt = gdal.GDT_Float64
else:
fmt = gdal.GDT_Float64
CO = ["BIGTIFF=YES", "TILED=YES", "BLOCKXSIZE=256", "BLOCKYSIZE=256", 'COMPRESS=LZW']
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(filename, n, m, 1, fmt, CO)
if (geotransform != None):
dataset.SetGeoTransform(geotransform)
if (projection != None):
dataset.SetProjection(projection)
band = dataset.GetRasterBand(1)
band.SetNoDataValue(save_nodata_as)
for i in range(0, m, BS):
for j in range(0, n, BS):
BY = min(m - i, BS)
BX = min(n - j, BS)
a = arr[i:i + BY, j:j + BX].todense()
if save_nodata_as==0 and (np.isnan(a)).all():
#do nothing
pass
else:
band.WriteArray(a, j, i)
dataset = None
return filename
return None | 23e23c0c1ea59f37fbf52a97b8d8e70933c1cd55 | 137 |
def modularity(modules, G, L):
""" calculate modularity
modularity = [list of nx.Graph objects]
G = graph
L = num of links
"""
N_m = len(modules)
M = 0.0
for s in range(N_m):
l_s = 0.0
d_s = 0
for i in modules[s]:
l_s += float(modules[s].degree(i))
d_s += float(G.degree(i))
M += (l_s / L) - (d_s / (2.0 * L))**2
return M | fc818a1f8cda14c04f90c94b699853465da11797 | 138 |
def nonan_compstat_tstat_scan(dist, aInd, bInd, returnMaxInds = False):
"""
For local sieve analysis, compare A and B group for each site using a max t-statistic over a parameter space
filteredDist: [ptid x sites x params] ndarray
Returns tstat array [sites]
aInd, bInd: Boolean row index for the two groups
"""
a = dist[aInd]
b = dist[bInd]
aN = aInd.sum()
bN = bInd.sum()
tstat = tstatistic(a, b, axis = 0, equal_var = False)
"""se = np.sqrt((aN-1)*np.var(a,axis=0)/((aN+bN) - 2) + (bN-1)*np.var(b,axis=0)/((aN+bN) - 2))
tstat = (np.mean(a,axis=0) - np.mean(b,axis=0)) / se"""
"""Even in the nonan cases, the tstat can be nan if there is no variation in either group (divide by zero)"""
sitesNani = np.all(np.isnan(tstat), axis=1)
"""For sites with all nans across params, set all to 0. this makes maxi = 0"""
tstat[sitesNani,:] = 0
"""Zeros are better than returning nan because if this perm produces a nan
result then it is not as extreme as observed (which is probably also nan)"""
maxi = np.nanargmax(np.abs(tstat), axis=1)
inds = np.ravel_multi_index((np.arange(maxi.shape[0]), maxi), tstat.shape)
if not returnMaxInds:
return tstat.flat[inds]
else:
return tstat.flat[inds], maxi | eae9c1c045e4ebda7372d8c23fbb447cd2c7a4cf | 139 |
import os
def path_to_key(path, base):
"""Return the relative path that represents the absolute path PATH under
the absolute path BASE. PATH must be a path under BASE. The returned
path has '/' separators."""
if path == base:
return ''
if base.endswith(os.sep) or base.endswith('/') or base.endswith(':'):
# Special path format on Windows:
# 'C:/' Is a valid root which includes its separator ('C:/file')
# 'C:' is a valid root which isn't followed by a separator ('C:file')
#
# In this case, we don't need a separator between the base and the path.
pass
else:
# Account for a separator between the base and the relpath we're creating
base += os.sep
assert path.startswith(base), "'%s' is not a prefix of '%s'" % (base, path)
return to_relpath(path[len(base):]) | 646f410475acfe649b11f361b20446d6c7073403 | 140 |
import re
def extract_digits_from_end_of_string(input_string):
"""
Gets digits at the end of a string
:param input_string: str
:return: int
"""
result = re.search(r'(\d+)$', input_string)
if result is not None:
return int(result.group(0)) | aae771a051a228c53c36062437de65ae4aa15d44 | 141 |
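A usage sketch for extract_digits_from_end_of_string:
print(extract_digits_from_end_of_string("build_42"))     # -> 42
print(extract_digits_from_end_of_string("42nd_street"))  # -> None (no trailing digits)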
import torch
def move_bdim_to_front(x, result_ndim=None):
"""
Returns a tensor with a batch dimension at the front. If a batch
dimension already exists, move it. Otherwise, create a new batch
dimension at the front. If `result_ndim` is not None, ensure that the
resulting tensor has rank equal to `result_ndim`.
"""
x_dim = len(x.shape)
x_bdim = x.bdim
if x_bdim is None:
x = torch.unsqueeze(x, 0)
else:
x = torch.movedim(x, x_bdim, 0)
if result_ndim is None:
return x
diff = result_ndim - x_dim - (x_bdim is None)
for _ in range(diff):
x = torch.unsqueeze(x, 1)
return x | 313a1837b6c3b451cebacaa7815f2631dfa387e5 | 142 |
def paginate(**options):
"""
Automatically force request pagination for endpoints
that shouldn't return all items in the database directly.
If this decorator is used, ``limit`` and ``offset`` request
arguments are automatically included in the request. The
burden is then on developers to do something with those
``limit`` and ``offset`` arguments. An example request header
set by this decorator is as follows:
.. code-block:: text
Link: <https://localhost/items?limit=50&offset=50>; rel="next",
<https://localhost/items?limit=50&offset=500>; rel="last"
Args:
limit (int): Number of entries to limit a query by.
total (int, callable): Number or callable for determining
the total number of records that can be returned
for the request. This is used in determining the
pagination header.
"""
if 'total' not in options:
raise AssertionError(
'`@paginate` decorator requires `total=` parameter '
'for determining total number of records to paginate. '
'See the documentation for more details.')
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
# only paginate on get requests
if request.method != 'GET':
return func(*args, **kwargs)
# format parameters
limit = request.args.get('limit', options.get('limit'))
offset = int(request.args.get('offset', options.get('offset', 0)))
total = options['total']() if callable(options['total']) else options['total']
url = options.get('url', request.base_url)
# config request parameters
request.args = request.args.copy()
request.args.setdefault('limit', limit)
request.args.setdefault('offset', offset)
# if no need to paginate, return without setting headers
if limit is None:
return func(*args, **kwargs)
limit = int(limit)
# add next page link
headers = {}
next_page = '<{}?limit={}&offset={}>'.format(url, limit, offset + limit)
headers['Link'] = '{}; rel="next"'.format(next_page)
# add last page link and header
if options['total'] is not None:
total = options['total']() if callable(options['total']) else options['total']
last_page = '<{}?limit={}&offset={}>'.format(url, limit, (max(total - 1, 0) // limit) * limit)  # offset of the final page
headers['Link'] += ', {}; rel="last"'.format(last_page)
headers['X-Total-Count'] = str(total)
# call the function and create response
response = func(*args, **kwargs)
# if a specific response has already been crafted, use it
if isinstance(response, Response):
return response
# normalize response data
if not isinstance(response, tuple):
response = [response]
response = list(response)
if hasattr(response[0], 'json'):
content_length = len(response[0].json)
else:
content_length = len(response[0])
if len(response) == 1:
response.append(200)
if len(response) == 2:
response.append({})
# if the response data is equal to the pagination, it's
# truncated and needs updated headers/status
if content_length == limit:
response[1] = 206
response[2].update(headers)
return tuple(response)
return inner
return decorator | f2d7f38007c235507dd5f2eaed737292679440b9 | 143 |
import datasets
def fetch_basc_vascular_atlas(n_scales='scale007',
target_affine=np.diag((5, 5, 5))):
""" Fetch the BASC brain atlas given its resolution.
Parameters
----------
n_scales : str, BASC dataset name; possible values are: 'scale007',
'scale012', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325',
'scale444'
target_affine : np.array, (default=np.diag((5, 5, 5))), affine matrix for
the produced Nifti images
Returns
-------
mask_full_brain : Nifti Image, full mask brain
atlas_rois : Nifti Image, ROIs atlas
"""
if n_scales not in valid_scales:
raise ValueError(f"n_scales should be in {valid_scales}, "
f"got '{n_scales}'")
basc_dataset = datasets.fetch_atlas_basc_multiscale_2015(version='sym')
atlas_rois_fname = basc_dataset[n_scales]
atlas_to_return = image.load_img(atlas_rois_fname)
atlas_to_return = image.resample_img(atlas_to_return, target_affine,
interpolation='nearest')
brain_mask = image_nilearn.binarize_img(atlas_to_return, threshold=0)
return brain_mask, atlas_to_return | 86e6ded04118f5a1a3dc76f5f5a29d241e5071dc | 144 |
def _get_time_slices(
window_start,
window,
projection, # Defer calling until called by test code
resampling_scale,
lag = 1,
):
"""Extracts the time slice features.
Args:
window_start: Start of the time window over which to extract data.
window: Length of the window (in days).
projection: projection to reproject all data into.
resampling_scale: length scale to resample data to.
lag: Number of days before the fire to extract the features.
Returns:
A list of the extracted EE images.
"""
image_collections, time_sampling = _get_all_image_collections()
window_end = window_start.advance(window, 'day')
drought = image_collections['drought'].filterDate(
window_start.advance(-lag - time_sampling['drought'], 'day'),
window_start.advance(
-lag, 'day')).median().reproject(projection).resample('bicubic')
vegetation = image_collections['vegetation'].filterDate(
window_start.advance(-lag - time_sampling['vegetation'], 'day'),
window_start.advance(
-lag, 'day')).median().reproject(projection).resample('bicubic')
weather = image_collections['weather'].filterDate(
window_start.advance(-lag - time_sampling['weather'], 'day'),
window_start.advance(-lag, 'day')).median().reproject(
projection.atScale(resampling_scale)).resample('bicubic')
fire = image_collections['fire'].filterDate(window_start, window_end).map(
ee_utils.remove_mask).max()
detection = fire.clamp(6, 7).subtract(6).rename('detection')
return [drought, vegetation, weather, fire, detection] | 8350cc7fcd61b5aa53863ad00463d0eb3cc9d89e | 145 |
def hash_parameters(keys, minimize=True, to_int=None):
"""
Calculates the parameters for a perfect hash. The result is returned
as a HashInfo tuple which has the following fields:
t
The "table parameter". This is the minimum side length of the
table used to create the hash. In practice, t**2 is the maximum
size of the output hash.
slots
The original inputs mapped to a vector. This is the hash
function.
r
The displacement vector. This is the displacement of the given
row in the result vector. To find a given value, use
``x + r[y]``.
offset
The amount by which to offset all values (once converted to ints)
to_int
A function that converts the input to an int (if given).
Keyword parameters:
``minimize``
Whether or not offset all integer keys internally by the minimum
value. This typically results in smaller output.
``to_int``
A callable that converts the input keys to ints. If not
specified, all keys should be given as ints.
>>> hash_parameters([1, 5, 7], minimize=False)
HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
>>> hash_parameters([1, 5, 7])
HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> phash = hash_parameters(l)
>>> phash.slots
(18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)
For some values, the displacement vector will be rather empty:
>>> hash_parameters('Andrea', to_int=ord).r
(1, None, None, None, 0, -3, 4, None)
"""
# If to_int is not assigned, simply use the identity function.
if to_int is None:
to_int = __identity
key_to_original = {to_int(original): original for original in keys}
# Create a set of all items to be hashed.
items = list(key_to_original.keys())
if minimize:
offset = 0 - min(items)
items = frozenset(x + offset for x in items)
else:
offset = 0
# 1. Start with a square array (not stored) that is t units on each side.
# Choose a t such that t * t >= max(S)
t = choose_best_t(items)
assert t * t > max(items) and t * t >= len(items)
# 2. Place each key K in the square at location (x,y), where
# x = K mod t, y = K / t.
row_queue = place_items_in_square(items, t)
# 3. Arrange rows so that they'll fit into one row and generate a
# displacement vector.
final_row, displacement_vector = arrange_rows(row_queue, t)
# Translate the internal keys to their original items.
slots = tuple(key_to_original[item - offset] if item is not None else None
for item in final_row)
# Return the parameters
return HashInfo(
t=t,
slots=slots,
r=displacement_vector,
offset=offset,
to_int=to_int if to_int is not __identity else None
) | 899657596669de4852936737efbfecd9f7b4734a | 146 |
import base64
import binascii
def Base64EncodeHash(digest_value):
"""Returns the base64-encoded version of the input hex digest value."""
return base64.encodebytes(binascii.unhexlify(digest_value)).rstrip(b'\n') | d1fa662c6bacbde84413edb8272b445bed26de90 | 147 |
def init_node(node_name, publish_topic):
"""
Init the node.
Parameters
----------
node_name
Name assigned to the node
publish_topic
Name of the publisher topic
"""
rospy.init_node(node_name, anonymous=True)
publisher = rospy.Publisher(publish_topic, Int16MultiArray, queue_size=10)
return publisher | 47371f1617937842991db80dea3f2bc15fee4b43 | 148 |
import astropy.io.fits as pf
def fits_checkkeyword(fitsfile, keyword, ext=0, silent=False):
"""
Check the keyword value of a FITS extension.
Parameters
----------
fitsfile : str
Path to the FITS file.
keyword : str
The keyword to check.
ext : int or str
Extension index (int) or key (str).
Returns
-------
Header key value
If both the specified extension and keyword exist.
``None``
If a ``KeyError`` exception would have been raised and ``silent=True``
is set.
Raises
------
KeyError
If either the specified extension or the keyword cannot be found, and
``silent=False``, a KeyError exception will be raised.
OSError
If the specified file cannot be found, astropy.io.fits will raise
OSError.
"""
fh = pf.open(fitsfile)
try:
return fh[ext].header[keyword]
except KeyError as e:
if silent:
return None
else:
print('The specified extension or keyword is not found.')
raise e | 7c60d410bcfed6c6fdfece6f7bfec173b6cbbd9a | 149 |
def arr_ds(time=True, var='tmp'):
"""
Read in a saved dataset containing lat, lon, and values
:param time: (boolean) - whether to return dataset with time
:param var: (str) - variable type (only tmp/rh currently)
:return ds: (xr.dataset) - dataset
"""
if time:
if var == 'tmp':
path = pre.join_cwd('data/air.sig995.1948.nc')
if var == 'rh':
path = pre.join_cwd('data/rhum.sig995.1948.nc')
else:
path = pre.join_cwd('data/slp.nc')
return xr.open_dataset(path) | 0f8afcf09eae925247a0b174ac2247713ef63377 | 150 |
from django_toolkit.datetime_util import quarter as datetime_quarter
from datetime import datetime
def quarter(d):
"""
Return start/stop datetime for the quarter as defined by dt.
"""
first_date, last_date = datetime_quarter(datetime(d.year, d.month, d.day))
return first_date.date(), last_date.date() | 9c5510e4b2c131715a1bde8233fd50b7241b1d39 | 151 |
def _fp(yhat, ytrue):
"""
Class wise false positive count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * (1. - ytrue), axis=0) | 3a9ac128ac3a845183d219e029f93452bb94c3b7 | 152 |
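A tiny numerical sketch for _fp; it assumes numpy is imported as np alongside the function above, and the arrays are illustrative:
import numpy as np

yhat = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
ytrue = np.array([[1., 0.], [1., 0.], [0., 1.]])
print(_fp(yhat, ytrue))  # -> [1. 1.]  (one false positive per class)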
import os
def rawmap(k2i, file):
"""
Map index to raw data from file
Arguments
k2i: key-to-index map
file: file containing raw data map
Returns
raw: index-to-raw map if file exists else identity map
"""
raw = {0: ''}
if os.path.isfile(file):
with open(file, "r") as f:
for line in f.readlines():
line = line.split("\t")
k, rw = line[0].strip(), line[1].strip()
raw[k2i[k]] = rw
else:
for k in k2i: raw[k2i[k]] = k2i[k]
return raw | 2f9c9d9ef8c4606eef61feb03cbd01c7ba88f716 | 153 |
import random
def out_flag():
"""Either -o or --outfile"""
return '-o' if random.randint(0, 1) else '--outfile' | 129e7a493618ca7457fab271a396023807fd2f38 | 154 |
def guess_from_peak(y, x, negative=False):
"""Estimate starting values from 1D peak data and return (height,center,sigma).
Parameters
----------
y : array-like
y data
x : array-like
x data
negative : bool, optional
determines if peak height is positive or negative, by default False
Returns
-------
(height, center, sigma) : (float, float, float)
Estimates of 1 gaussian line parameters.
"""
sort_increasing = np.argsort(x)
x = x[sort_increasing]
y = y[sort_increasing]
# find the max/min values of x and y, and the x value at max(y)
maxy, miny = max(y), min(y)
maxx, minx = max(x), min(x)
height = maxy - miny
# set a backup sigma, and center in case using the halfmax calculation doesn't work.
# The backup sigma = 1/6 the full x range and the backup center is the
# location of the maximum
sig = (maxx - minx) / 6.0
cen = x[np.argmax(y)]
# the explicit conversion to a NumPy array is to make sure that the
# indexing on line 65 also works if the data is supplied as pandas.Series
# find the x positions where y is above (ymax+ymin)/2
x_halfmax = np.array(x[y > (maxy + miny) / 2.0])
if negative:
height = -(maxy - miny)
# backup center for if negative.
cen = x[np.argmin(y)]
x_halfmax = x[y < (maxy + miny) / 2.0]
# calculate sigma and center based on where y is above half-max:
if len(x_halfmax) > 2:
sig = (x_halfmax[-1] - x_halfmax[0]) / 2.0
cen = x_halfmax.mean()
return height, cen, sig | b78f42ba0fed1a1a696000d223c42bd9972409f4 | 155 |
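A quick sketch of guess_from_peak on synthetic data; it assumes numpy is imported as np alongside the function above:
import numpy as np

x = np.linspace(-5, 5, 201)
y = 3.0 * np.exp(-(x - 1.0) ** 2 / (2 * 0.5 ** 2))
height, center, sigma = guess_from_peak(y, x)
# height ~ 3.0, center ~ 1.0, sigma ~ 0.55 (half-width at half-maximum on this grid)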
def get_default_extension():
"""
return the default view extension
"""
return rawData.Visualization | 0539866dee782b7cb605c2c54e1896375b31cd95 | 156 |
def getNewPluginManager() -> pluginManager.ArmiPluginManager:
"""
Return a new plugin manager with all of the hookspecs pre-registered.
"""
pm = pluginManager.ArmiPluginManager("armi")
pm.add_hookspecs(ArmiPlugin)
return pm | dac7694587528d3d7213294eb7f9fccbc1dca7b2 | 157 |
def get_utterances_from_stm(stm_file):
"""
Return list of entries containing phrase and its start/end timings
:param stm_file:
:return:
"""
res = []
with io.open(stm_file, "r", encoding='utf-8') as f:
for stm_line in f:
if re.match ("^;;",stm_line) is None :
tokens = stm_line.split()
start_time = float(tokens[3])
end_time = float(tokens[4])
filename = tokens[0]
if tokens[2] != "inter_segment_gap":
transcript = " ".join(t for t in tokens[6:]).strip().encode("utf-8", "ignore").decode("utf-8", "ignore")
if transcript != "ignore_time_segment_in_scoring" and transcript.strip() !="": # if the transcription not empty and not equal to ignore_time_segment_in_scoring
res.append({"start_time": start_time, "end_time": end_time, "filename": filename, "transcript": transcript })
return res | e8c7329ec04824570071994b6d6b05609f68b7a4 | 158 |
def lookup_quo_marks(lang='en-US', map_files=MAP_FILES, encoding='utf-8'):
"""Looks up quotation marks for a language.
Arguments:
``lang`` (``str``):
An RFC 5646-ish language code (e.g., "en-US", "pt-BR",
"de", "es"). Defines the language the quotation marks
of which to look up. Default: 'en-US'.
``maps`` (sequence of ``str`` instances):
A List of possible locations of mappsings of RFC 5646-like
language codes to lists of quotation marks.
Default: ``MAP_FILES`` (module constant).
``encoding`` (``str``):
The encoding of those files. Defaults to 'utf-8'.
If ``lang`` contains a country code, but no quotation marks have
been defined for that country, the country code is discarded and
the quotation marks for the language simpliciter are looked up.
For example, 'de-DE' will find 'de'.
If ``lang`` does not contain a country code or if that code has been
discarded and no quotation marks have been defined for that language
simpliciter, but quotation marks have been defined for variants of that
language as they are spoken in a particular country, the quotation
marks of the variant that has been defined first are used. For example,
'en' will find 'en-US'.
Returns (``QuoMarks``):
The quotation marks of that language.
Raises:
``QuoMarkUnknownLanguageError``:
If no quotation marks have been defined for ``lang``.
All exceptions ``load_quotation_maps`` and
``QuoMarks.__init__`` raise.
"""
map_ = load_maps(map_files, encoding=encoding)
for i in range(3):
try:
return QuoMarks(*map_[lang])
except KeyError:
if i == 0:
lang = lang.split('-')[0]
elif i == 1:
for j in map_:
if not isinstance(j, basestring): # pylint: disable=E0602
continue
if j.startswith(lang):
lang = j
break
else:
break
raise QuoMarkUnknownLangError(lang=lang) | e97d245faae256184809b61da1646c607042db3c | 159 |
import sys
def extractVars(*names,**kw):
"""Extract a set of variables by name from another frame.
:Parameters:
- `*names`: strings
One or more variable names which will be extracted from the caller's
frame.
:Keywords:
- `depth`: integer (0)
How many frames in the stack to walk when looking for your variables.
Examples:
In [2]: def func(x):
...: y = 1
...: print extractVars('x','y')
...:
In [3]: func('hello')
{'y': 1, 'x': 'hello'}
"""
depth = kw.get('depth',0)
callerNS = sys._getframe(depth+1).f_locals
return dict((k,callerNS[k]) for k in names) | cf9f75a23869e66c52e4bbe16aeb22b3dffc1096 | 160 |
def replace_unwanted_xml_attrs(body):
"""
Method to return the transformed string after replacing escaped XML entities (&amp;, &lt;, &gt;) in the given xml body with their literal characters
:param body:
:return:
"""
return body.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>') | 6f7dde06590bc8b8ad8477e7cee284ae38568b42 | 161 |
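A usage sketch, assuming the entity-decoding reading of replace_unwanted_xml_attrs above:
print(replace_unwanted_xml_attrs("&lt;name&gt;Tom &amp; Jerry&lt;/name&gt;"))  # -> <name>Tom & Jerry</name>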
def protocol_version_to_kmip_version(value):
"""
Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.
Args:
value (ProtocolVersion): A ProtocolVersion struct to be converted into
a KMIPVersion enumeration.
Returns:
KMIPVersion: The enumeration equivalent of the struct. If the struct
cannot be converted to a valid enumeration, None is returned.
"""
if not isinstance(value, ProtocolVersion):
return None
if value.major == 1:
if value.minor == 0:
return enums.KMIPVersion.KMIP_1_0
elif value.minor == 1:
return enums.KMIPVersion.KMIP_1_1
elif value.minor == 2:
return enums.KMIPVersion.KMIP_1_2
elif value.minor == 3:
return enums.KMIPVersion.KMIP_1_3
elif value.minor == 4:
return enums.KMIPVersion.KMIP_1_4
else:
return None
elif value.major == 2:
if value.minor == 0:
return enums.KMIPVersion.KMIP_2_0
else:
return None
else:
return None | 6180f1eed3411e5257a989fdbb2fda48d4c59277 | 162 |
def UniversalInput(i, o, *args, **kwargs):
"""
Returns the most appropriate input UI element, based on available keys
of input devices present. For now, always returns UI elements configured
for character input.
TODO: document arguments (most of them are passed through, like "name" or "message")
"""
charmap = kwargs.pop("charmap", "full")
# Determining which input is necessary, according to the charmap requested
numpadinputs = {"full":NumpadCharInput, "number":NumpadNumberInput, "hex":NumpadHexInput, "password":NumpadPasswordInput}
numpadinput_cls = numpadinputs[charmap]
# What goes here for NumpadKeyboardInput
arrowkeyinput_maps = {"full":['][S', '][c', '][C', '][s', '][n'], "number":['][n'], "hex":['][h']}
arrowkeyinput_maps["password"] = arrowkeyinput_maps["full"]
arrowkeyinput_map = arrowkeyinput_maps[charmap]
# First, checking if any of the drivers with None as available_keys is present
if None in i.available_keys.values():
# HID driver (or other driver with "any key is possible" is likely used
# Let's use the most fully-functional input available at the moment
return numpadinput_cls(i, o, *args, **kwargs)
all_available_keys = sum(i.available_keys.values(), [])
ascii_keys = ["KEY_{}".format(c.upper()) for c in list("abcdefghijklmnopqrstuvwxyz123456789") + ["SPACE"]]
ascii_keys_available = all([ascii_key in all_available_keys for ascii_key in ascii_keys])
action_keys = ["KEY_F1", "KEY_F2"]
action_keys_available = all([action_key in all_available_keys for action_key in action_keys])
if ascii_keys_available and action_keys_available:
# All required ASCII and action keys are supported
return NumpadKeyboardInput(i, o, *args, **kwargs)
number_keys = ["KEY_{}".format(x) for x in range(10)]
number_keys.append("KEY_*")
number_keys.append("KEY_#")
number_keys_available = all([number_key in all_available_keys for number_key in number_keys ])
if number_keys_available and action_keys_available:
# All number and action keys are supported
return numpadinput_cls(i, o, *args, **kwargs)
# Fallback - only needs five primary keys
return CharArrowKeysInput(i, o, allowed_chars=arrowkeyinput_map, *args, **kwargs) | e425adec01521e3b34691490ec604868baa0f385 | 163 |
def valid_template(template):
"""Is this a template that returns a valid URL?"""
if template.name.lower() == "google books" and (
template.has("plainurl") or template.has("plain-url")
):
return True
if template.name.lower() == "billboardurlbyname":
return True
return False | 51191d6b60af23265dc6cb4ff87c520e80bac59f | 164 |
def build_get_complex_item_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get array of complex type with null item [{'integer': 1 'string': '2'}, null, {'integer': 5,
'string': '6'}].
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
accept = "application/json"
# Construct URL
url = '/array/complex/itemnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
) | 96866e7b35c925459d594698759d67979e6d0d6f | 165 |
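# Hedged usage sketch: the builder only assembles an azure.core HttpRequest;
# it is then sent through a generated client's `send_request` (the `client`
# below is a hypothetical placeholder, so those lines stay commented).
request = build_get_complex_item_null_request()
# response = client.send_request(request)
# response.raise_for_status()
# items = response.json()  # e.g. [{"integer": 1, "string": "2"}, None, ...]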
def get_choice():
""" Gets and returns choice for mode to use when running minimax """
choice = input(
"Please enter a number (1 - 4)\n 1. Both players use minimax correctly at every turn\n 2. The starting player (X) is an expert and the opponent (0) only has a 50% chance to use minimax\n\t at each turn\n 3. The starting player (X) only has a 50% chance to use minimax at each turn and the opponent (0)\n\t is an expert.\n 4. Both players only have a 50% chance to use minimax at each turn.\n"
)
while (choice != '1' and choice != '2' and choice != '3' and choice != '4'):
choice = input("Not a choice. Go agane: (1 - 4)\n")
return choice | d79278acc9bc0a36480c1067b81e64c5512dd586 | 166 |
def _arange_ndarray(arr, shape, axis, reverse=False):
"""
Create an ndarray of `shape` with increments along specified `axis`
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
shape : tuple of ints
Shape of desired array. Should be equivalent to `arr.shape` except
`shape[axis]` which may have any positive value.
axis : int
Axis to increment along.
reverse : bool
If False, increment in a positive fashion from 1 to `shape[axis]`,
inclusive. If True, the bounds are the same but the order reversed.
Returns
-------
padarr : ndarray
Output array sized to pad `arr` along `axis`, with linear range from
1 to `shape[axis]` along specified `axis`.
Notes
-----
The range is deliberately 1-indexed for this specific use case. Think of
this algorithm as broadcasting `np.arange` to a single `axis` of an
arbitrarily shaped ndarray.
"""
initshape = tuple(1 if i != axis else shape[axis]
for (i, x) in enumerate(arr.shape))
if not reverse:
padarr = np.arange(1, shape[axis] + 1)
else:
padarr = np.arange(shape[axis], 0, -1)
padarr = padarr.reshape(initshape)
for i, dim in enumerate(shape):
if padarr.shape[i] != dim:
padarr = padarr.repeat(dim, axis=i)
return padarr | f869bcd0d51ec7dc7858570337019e00948eaee9 | 167 |
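# Quick illustration: the 1-indexed ramp is broadcast along the requested axis.
import numpy as np

arr = np.zeros((2, 3))
print(_arange_ndarray(arr, (2, 4), axis=1))
# [[1 2 3 4]
#  [1 2 3 4]]
print(_arange_ndarray(arr, (2, 4), axis=1, reverse=True))
# [[4 3 2 1]
#  [4 3 2 1]]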
def rotate_pt(x_arr, y_arr, theta_deg, xoff=0, yoff=0):
"""
Rotate an array of points (x_arr, y_arr) by theta_deg offsetted
from a center point by (xoff, yoff).
"""
# TODO: use opencv acceleration if available
a_arr = x_arr - xoff
b_arr = y_arr - yoff
cos_t = np.cos(np.radians(theta_deg))
sin_t = np.sin(np.radians(theta_deg))
ap = (a_arr * cos_t) - (b_arr * sin_t)
bp = (a_arr * sin_t) + (b_arr * cos_t)
return np.asarray((ap + xoff, bp + yoff)) | 65caadd763d5e79986a45026562a5f0c05b70484 | 168 |
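# Example: rotating (1, 0) by 90 degrees about the origin lands on (0, 1),
# up to floating-point noise.
import numpy as np

xr, yr = rotate_pt(np.array([1.0]), np.array([0.0]), 90.0)
print(np.round(xr, 6), np.round(yr, 6))  # [0.] [1.]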
from typing import Union
def get_frames(data: Union[sc.DataArray, sc.Dataset], **kwargs) -> sc.Dataset:
"""
For a supplied instrument chopper cascade and detector positions, find
the locations in microseconds of the WFM frames.
TODO: Currently, only the analytical (time-distance) method has been tested
and is enabled.
The peak-finding method is temporarily disabled.
"""
# if data is not None:
# return frames_peakfinding(data=data,
# instrument=instrument,
# plot=plot,
# **kwargs)
# else:
return frames_analytical(data=data, **kwargs) | 197bdbcd3daf68c3165c71c8e5042de114068dd2 | 169 |
from typing import Any
from typing import Type
def _map_nonlinearities(
element: Any, nonlinearity_mapping: Type[NonlinearityMapping] = NonlinearityMapping
) -> Any:
"""Checks whether a string input specifies a PyTorch layer.
The method checks if the input is a string.
If the input is a string, it is preprocessed and then mapped to
a corresponding PyTorch activation layer.
If the input is not a string it is returned unchanged.
Parameters
----------
element : Any
Arbitrary input to this function.
Returns
-------
Any
Returns either a callable activation or normalization layer
or the input element.
"""
nonlinearities = nonlinearity_mapping()
return _map_call_dict(nonlinearities, element) | 0ea5972471f1c766bb5303a60eee4e50df14c9d5 | 170 |
import os
def confirm(text, app, version, services=None, default_yes=False):
"""Asks a user to confirm the action related to GAE app.
Args:
text: actual text of the prompt.
app: instance of Application.
version: version or a list of versions to operate upon.
services: list of services to operate upon (or None for all).
Returns:
True on approval, False otherwise.
"""
print(text)
print(' Directory: %s' % os.path.basename(app.app_dir))
print(' App ID: %s' % app.app_id)
print(' Version: %s' % version)
print(' Services: %s' % ', '.join(services or app.services))
if default_yes:
return raw_input('Continue? [Y/n] ') not in ('n', 'N')
else:
return raw_input('Continue? [y/N] ') in ('y', 'Y') | 3fc0aa29b73fff6af6fd06c96add9d5dede1a5fc | 171 |
def _get_streamflow(product, feature_id, s_date, s_time, e_date, lag):
"""Downloads streamflow time series for a given river.
Downloads streamflow time series for a given river feature using
the HydroShare archive and Web service. Units are in cubic feet per
second as returned by HydroShare. For the API description, see
https://apps.hydroshare.org/apps/nwm-data-explorer/api/
Args:
product: String indicating model product. Valid values are:
analysis_assim, short_range, medium_range, long_range
feature_id: String identifier of the river feature.
s_date: (String or Date) Valid date for the model simulation.
s_time: (String) Two digit simulation hour, e.g., '06'.
e_date: (String or Date) End date of data to retrieve. Valid
for analysis_assim only.
lag: (String) Lag argument for URI. This is an escaped comma
delimited list of long_range forecast simulation hours,
e.g., 00z%2C06z%2C12z%2C18z.
Returns:
A list of dicts representing time series. Each series includes
name, datetimes, and values. For example:
{'name': 'Member 1 t00z',
'dates': ['2016-06-02 01:00:00+00:00', '2016-06-02 02:...']
'values': [257.2516, 1295.7293]}
Raises:
HTTPError: An error occurred accessing data from HydroShare.
ValueError: Service request returned no data, likely due to
invalid input arguments.
"""
if 'long_range' in product:
product = 'long_range'
s_date = date_parser.parse(str(s_date)).strftime('%Y-%m-%d')
if e_date:
e_date = date_parser.parse(str(e_date)).strftime('%Y-%m-%d')
uri_template = (
HS_API_URI + 'get-netcdf-data?config={0}&geom=channel_rt&'
'variable=streamflow&COMID={1}&'
'startDate={2}&time={3}&endDate={4}&lag={5}')
uri = uri_template.format(product, feature_id, s_date, s_time, e_date, lag)
response = urlopen(uri)
json_data = _get_netcdf_data_response_to_json(uri, response)
series_list = _unpack_series(json_data, product)
return series_list | 4485b29f3a34862cd674243314296010e10d0847 | 172 |
import functools
def debug(func):
"""Debug the decorated function"""
@functools.wraps(func)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args]
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
print(f"Calling {func.__name__}({signature})")
value = func(*args, **kwargs)
print(f"{func.__name__!r} returned {value!r}")
return value
return wrapper_debug | 60d15174feb30aeb80a0ea1b9b191fba3462a957 | 173 |
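# Example: decorate a small function and watch the call/return trace.
@debug
def add(a, b):
    return a + b

add(2, b=3)
# Calling add(2, b=3)
# 'add' returned 5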
def get_profanity(text: str, duplicates=False) -> list:
"""Gets all profane words and returns them in a list"""
text: str = text.lower()
additional: list = []
profane: list = [word for word in PROFANE_WORD_LIST if word in text]
if duplicates:
for word in profane:
c: int = text.count(word)
if c > 1:
x: list = [word for _ in range(c - 1)]
additional.extend(list(x))
profane.extend(additional)
return profane | 332b8ac355e974d0f750ad003a907e22b4f1b552 | 174 |
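# Sketch only: PROFANE_WORD_LIST is a module-level constant not shown here,
# so the call below is illustrative rather than runnable as-is.
# PROFANE_WORD_LIST = ["darn", "heck"]          # hypothetical contents
# get_profanity("Darn it, heck, darn!", duplicates=True)
# -> ['darn', 'heck', 'darn']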
def build_stats(train_result, eval_result, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
train_result: The final loss at training time.
eval_result: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
time_callback: Time tracking callback instance.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_result:
stats['eval_loss'] = eval_result[0]
stats['eval_acc'] = eval_result[1]
stats['train_loss'] = train_result[0]
stats['train_acc'] = train_result[1]
if time_callback:
timestamp_log = time_callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = time_callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
time_callback.batch_size * time_callback.log_steps *
(len(time_callback.timestamp_log) - 1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats | 959c4ac9b1ed9aabb41a329dba6e06384d4492a7 | 175 |
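# Example with the optional time callback omitted (the falsy check above skips it).
stats = build_stats(train_result=[0.41, 0.87], eval_result=[0.48, 0.83], time_callback=None)
print(stats)
# {'eval_loss': 0.48, 'eval_acc': 0.83, 'train_loss': 0.41, 'train_acc': 0.87}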
def multiplicative(v1, v2, alpha=1, beta=1):
"""
    Weighted elementwise multiplication; the composed vector's row label is
    the two input row words joined by a space.
"""
compword = str(v1.row2word[0]) + " " + str(v2.row2word[0])
comp = (alpha * v1) * (beta * v2)
comp.row2word = [compword]
return comp | 9305291f4e0a43a47d578962f205797ead5fcf04 | 176 |
def is_information(status_code, **options):
"""
gets a value indicating that given status code is a information code.
if returns True if the provided status code is
from `InformationResponseCodeEnum` values.
:param int status_code: status code to be checked.
:keyword bool strict_status: specifies that it should only consider
the status code as information if it is from
`InformationResponseCodeEnum` values. otherwise
all codes from `INFORMATION_CODE_MIN` to
`INFORMATION_CODE_MAX` will be considered as
information. defaults to True if not provided.
:rtype: bool
"""
return get_component(ResponseStatusPackage.COMPONENT_NAME).is_information(status_code,
**options) | 45bce315e582a93a76f34d3a027a4dd041655aba | 177 |
from typing import Any
from typing import Optional
import time
def create_application_registration(
onefuzz_instance_name: str, name: str, approle: OnefuzzAppRole, subscription_id: str
) -> Any:
"""Create an application registration"""
app = get_application(
display_name=onefuzz_instance_name, subscription_id=subscription_id
)
if not app:
raise Exception("onefuzz app registration not found")
resource_access = [
{"id": role["id"], "type": "Scope"}
for role in app["appRoles"]
if role["value"] == approle.value
]
params = {
"isDeviceOnlyAuthSupported": True,
"displayName": name,
"publicClient": {
"redirectUris": ["https://%s.azurewebsites.net" % onefuzz_instance_name]
},
"isFallbackPublicClient": True,
"requiredResourceAccess": (
[
{
"resourceAccess": resource_access,
"resourceAppId": app["appId"],
}
]
if len(resource_access) > 0
else []
),
}
registered_app = query_microsoft_graph(
method="POST",
resource="applications",
body=params,
subscription=subscription_id,
)
logger.info("creating service principal")
service_principal_params = {
"accountEnabled": True,
"appRoleAssignmentRequired": False,
"servicePrincipalType": "Application",
"appId": registered_app["appId"],
}
def try_sp_create() -> None:
error: Optional[Exception] = None
for _ in range(10):
try:
query_microsoft_graph(
method="POST",
resource="servicePrincipals",
body=service_principal_params,
subscription=subscription_id,
)
return
except GraphQueryError as err:
# work around timing issue when creating service principal
# https://github.com/Azure/azure-cli/issues/14767
if (
"service principal being created must in the local tenant"
not in str(err)
):
raise err
                logger.warning(
                    "creating service principal failed with an error that occurs "
                    "due to AAD race conditions"
                )
                error = err
                time.sleep(60)
if error is None:
raise Exception("service principal creation failed")
else:
raise error
try_sp_create()
registered_app_id = registered_app["appId"]
app_id = app["appId"]
def try_authorize_application(data: Any) -> None:
authorize_application(
UUID(registered_app_id),
UUID(app_id),
subscription_id=subscription_id,
)
retry(try_authorize_application, "authorize application")
def try_assign_instance_role(data: Any) -> None:
assign_instance_app_role(onefuzz_instance_name, name, subscription_id, approle)
    retry(try_assign_instance_role, "assign role")
return registered_app | 9f7a6f52e4e07437eec655c2d51bb5cdc9659a21 | 178 |
import os
def get_all_stocks():
"""获取本地文件已有数据的股票列表"""
stock_files = os.listdir(PATH_STOCK)
stocks = [ file.split('_')[0] for file in stock_files if file.endswith('csv')]
return stocks | c30edb0590fdbe3903721c0929a6d205c18def14 | 179 |
def from_mel(
mel_,
sr=16000,
n_fft=2048,
n_iter=32,
win_length=1000,
hop_length=100,
):
"""
Change melspectrogram into waveform using Librosa.
Parameters
----------
spectrogram: np.array
Returns
--------
result: np.array
"""
return librosa.feature.inverse.mel_to_audio(
mel_,
sr=sr,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window='hann',
center=True,
pad_mode='reflect',
power=1.0,
n_iter=n_iter,
) | 939791db3df6a5099b548abc14033a8463c39ed6 | 180 |
def expand(arg):
"""
sp.expand currently has no matrix support
"""
if isinstance(arg, sp.Matrix):
return arg.applyfunc(sp.expand)
else:
return sp.expand(arg) | d71fa3f2747cb3cdaa4c5b844037f1a1c4fa7480 | 181 |
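# With SymPy the wrapper applies expand() entry-wise when handed a Matrix.
import sympy as sp

x = sp.Symbol('x')
print(expand((x + 1)**2))                            # x**2 + 2*x + 1
print(expand(sp.Matrix([[(x + 1)**2, x*(x + 2)]])))  # Matrix([[x**2 + 2*x + 1, x**2 + 2*x]])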
def ElementTreeToDataset(element_tree, namespaces, csv_path, load_all_data):
"""Convert an ElementTree tree model into a DataSet object.
Args:
element_tree: ElementTree.ElementTree object containing complete data from
DSPL XML file
namespaces: A list of (namespace_id, namespace_url) tuples
csv_path: Directory where CSV files associated with dataset can be found
load_all_data: Boolean indicating whether all CSV data should be loaded
Returns:
dspl_model.DataSet object
"""
dspl_dataset = dspl_model.DataSet()
# Fill in basic info
dspl_dataset.namespace = element_tree.getroot().get(
_DSPL_SCHEMA_PREFIX + 'targetNamespace', default='')
for namespace_id, namespace_url in namespaces:
if namespace_id:
dspl_dataset.AddImport(
dspl_model.Import(namespace_id=namespace_id,
namespace_url=namespace_url))
info_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'info')
if info_element is not None:
dspl_dataset.name = _GetValue(
info_element.find(_DSPL_SCHEMA_PREFIX + 'name'))
dspl_dataset.description = (
_GetValue(info_element.find(_DSPL_SCHEMA_PREFIX + 'description')))
dspl_dataset.url = (
_GetValue(info_element.find(_DSPL_SCHEMA_PREFIX + 'url')))
provider_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'provider')
if provider_element is not None:
dspl_dataset.provider_name = _GetValue(
provider_element.find(_DSPL_SCHEMA_PREFIX + 'name'))
dspl_dataset.provider_url = (
_GetValue(provider_element.find(_DSPL_SCHEMA_PREFIX + 'url')))
# Get topics
topics_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'topics')
if topics_element is not None:
topic_elements = topics_element.findall(_DSPL_SCHEMA_PREFIX + 'topic')
for topic_element in topic_elements:
dspl_dataset.AddTopic(ElementToTopic(topic_element))
# Get concepts
concepts_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'concepts')
if concepts_element is not None:
concept_elements = concepts_element.findall(_DSPL_SCHEMA_PREFIX + 'concept')
for concept_element in concept_elements:
dspl_dataset.AddConcept(ElementToConcept(concept_element))
# Get slices
slices_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'slices')
if slices_element is not None:
slice_elements = slices_element.findall(_DSPL_SCHEMA_PREFIX + 'slice')
for slice_element in slice_elements:
dspl_dataset.AddSlice(ElementToSlice(slice_element, dspl_dataset))
# Get tables
tables_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'tables')
if tables_element is not None:
table_elements = tables_element.findall(_DSPL_SCHEMA_PREFIX + 'table')
for table_element in table_elements:
dspl_dataset.AddTable(
ElementToTable(table_element, csv_path, load_all_data))
return dspl_dataset | 20c5d2ad06971a994a47e5d8317c3a81c0895a06 | 182 |
def load_config(path):
"""
load the config of LSTMLM
"""
if path.rfind('.ckpt') != -1:
path_name = path[0: path.rfind('.ckpt')]
else:
path_name = path
with open(path_name + '.config', 'rt') as f:
name = f.readline().split()[0]
config = wb.Config.load(f)
return config | 595e6c73f45d94f7d691a64e88cd81d67d4ad1aa | 183 |
def box_plot_stats(
## arguments / inputs
x, ## input array of values
coef = 1.5 ## positive real number
## (determines how far the whiskers extend from the iqr)
):
"""
calculates box plot five-number summary: the lower whisker extreme, the
lower ‘hinge’ (observed value), the median, the upper ‘hinge’, and upper
whisker extreme (observed value)
returns a results dictionary containing 2 items: "stats" and "xtrms"
1) the "stats" item contains the box plot five-number summary as an array
2) the "xtrms" item contains values which lie beyond the box plot extremes
functions much the same as R's 'boxplot.stats()' function for which this
Python implementation was predicated
ref:
The R Project for Statistical Computing. (2019). Box Plot Statistics.
http://finzi.psych.upenn.edu/R/library/grDevices/html/boxplot.stats.html.
Tukey, J. W. (1977). Exploratory Data Analysis. Section 2C.
McGill, R., Tukey, J.W. and Larsen, W.A. (1978). Variations of Box Plots.
The American Statistician, 32:12-16. http://dx.doi.org/10.2307/2683468.
Velleman, P.F. and Hoaglin, D.C. (1981). Applications, Basics and
Computing of Exploratory Data Analysis. Duxbury Press.
Emerson, J.D. and Strenio, J. (1983). Boxplots and Batch Comparison.
Chapter 3 of Understanding Robust and Exploratory Data Analysis,
eds. D.C. Hoaglin, F. Mosteller and J.W. Tukey. Wiley.
Chambers, J.M., Cleveland, W.S., Kleiner, B. and Tukey, P.A. (1983).
Graphical Methods for Data Analysis. Wadsworth & Brooks/Cole.
"""
## quality check for coef
if coef <= 0:
raise ValueError("cannot proceed: coef must be greater than zero")
## convert input to numpy array
x = np.array(x)
## determine median, lower ‘hinge’, upper ‘hinge’
median = np.quantile(a = x, q = 0.50, interpolation = "midpoint")
first_quart = np.quantile(a = x, q = 0.25, interpolation = "midpoint")
third_quart = np.quantile(a = x, q = 0.75, interpolation = "midpoint")
## calculate inter quartile range
intr_quart_rng = third_quart - first_quart
## calculate extreme of the lower whisker (observed, not interpolated)
lower = first_quart - (coef * intr_quart_rng)
lower_whisk = np.compress(x >= lower, x)
lower_whisk_obs = np.min(lower_whisk)
## calculate extreme of the upper whisker (observed, not interpolated)
upper = third_quart + (coef * intr_quart_rng)
upper_whisk = np.compress(x <= upper, x)
upper_whisk_obs = np.max(upper_whisk)
## store box plot results dictionary
boxplot_stats = {}
boxplot_stats["stats"] = np.array([lower_whisk_obs,
first_quart,
median,
third_quart,
upper_whisk_obs])
## store observations beyond the box plot extremes
boxplot_stats["xtrms"] = np.array(x[(x < lower_whisk_obs) |
(x > upper_whisk_obs)])
## return dictionary
return boxplot_stats | 4bc56d85103f6ba9c2267685e5d64c51ab5e1101 | 184 |
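# Example on a small sample with one clear upper outlier.
import numpy as np

result = box_plot_stats([1, 2, 3, 4, 5, 6, 7, 100])
print(result["stats"])  # [1.  2.5 4.5 6.5 7. ]  (whisker, hinge, median, hinge, whisker)
print(result["xtrms"])  # [100]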
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_multinomial, exp
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False) | 7069035c449e9969907dbb5854b30da9701c194a | 185 |
import pickle
def unpickle_tokens(filepath):
"""Unpickle the tokens into memory."""
try:
with open(filepath+'_tokens.pickle', 'rb') as f:
tokens = pickle.load(f)
except FileNotFoundError:
tokens = tokenize_and_tag(filepath)
pickle_tokens(tokens, filepath)
return tokens | 5c68a4f4ba05983577e7de65ad0f47e61528dc38 | 186 |
def make_cov(df, columns=["parallax", "pmra", "pmdec"]):
"""Generate covariance matrix from Gaia data
columns : list
list of columns to calculate covariance.
Must be a subset of 'ra', 'dec' 'parallax', 'pmra', 'pmdec'.
Returns
-------
numpy.array
(N, number of columns) array of covariance matrices
"""
gaia_order = ["ra", "dec", "parallax", "pmra", "pmdec"]
N = len(np.atleast_1d(df[columns[0] + "_error"])) # N could be 1
n = len(columns)
C = np.zeros([N, n, n])
for i, j in zip(*np.triu_indices(n)):
if i == j:
C[:, [i], [j]] = np.atleast_1d(
df[f"{columns[i]}_error"] * df[f"{columns[j]}_error"]
)[:, None]
else:
corr_name = (
"_".join(
sorted([columns[i], columns[j]], key=lambda x: gaia_order.index(x))
)
+ "_corr"
)
C[:, [i, j], [j, i]] = np.atleast_1d(
df[f"{columns[i]}_error"] * df[f"{columns[j]}_error"] * df[corr_name]
)[:, None]
return C.squeeze() | c246151b68b744b1f1c1fa03276d098d4683409f | 187 |
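# Sketch with one synthetic source; the column names follow the Gaia archive
# convention the function assumes (per-column errors plus *_corr columns).
import pandas as pd

row = pd.DataFrame({
    "parallax_error": [0.1], "pmra_error": [0.2], "pmdec_error": [0.3],
    "parallax_pmra_corr": [0.1], "parallax_pmdec_corr": [-0.2],
    "pmra_pmdec_corr": [0.3],
})
print(make_cov(row))  # 3x3 covariance matrix for (parallax, pmra, pmdec)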
from functools import reduce
def kinetics(request, section='', subsection=''):
"""
The RMG database homepage.
"""
# Make sure section has an allowed value
if section not in ['libraries', 'families', '']:
raise Http404
# Load the kinetics database, if necessary
database.load('kinetics', section)
# Determine which subsection we wish to view
db = None
try:
db = database.get_kinetics_database(section, subsection)
except ValueError:
pass
if db is not None:
# A subsection was specified, so render a table of the entries in
# that part of the database
is_group_database = False
# Sort entries by index
if db.top is not None and len(db.top) > 0:
# If there is a tree in this database, only consider the entries
# that are in the tree
entries0 = getDatabaseTreeAsList(db, db.top)
tree = '<ul class="kineticsTree">\n{0}\n</ul>\n'.format(getKineticsTreeHTML(db, section, subsection, db.top))
else:
# If there is not a tree, consider all entries
entries0 = list(db.entries.values())
if any(isinstance(item, list) for item in entries0):
# if the entries are lists
entries0 = reduce(lambda x, y: x+y, entries0)
# Sort the entries by index and label
entries0.sort(key=lambda entry: (entry.index, entry.label))
tree = ''
entries = []
for entry0 in entries0:
if isinstance(entry0.data, str):
data_format = 'Link'
else:
data_format = entry0.data.__class__.__name__
entry = {
'index': entry0.index,
'label': entry0.label,
'dataFormat': data_format,
}
if isinstance(db, KineticsGroups):
is_group_database = True
entry['structure'] = getStructureInfo(entry0.item)
entry['parent'] = entry0.parent
entry['children'] = entry0.children
elif 'rules' in subsection:
if isinstance(entry0.item, list):
# if the reactants are not group objects, then this rate rule came from
# the averaging step, and we don't want to show all of the averaged nodes
# in the web view. We only want to show nodes with direct values or
# training rates that became rate rules.
continue
else:
entry['reactants'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.reactants])
entry['products'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.products])
entry['arrow'] = '⇔' if entry0.item.reversible else '→'
else:
entry['reactants'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.reactants])
entry['products'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.products])
entry['arrow'] = '⇔' if entry0.item.reversible else '→'
entries.append(entry)
return render(request, 'kineticsTable.html', {'section': section, 'subsection': subsection, 'databaseName': db.name, 'databaseDesc': db.long_desc, 'entries': entries, 'tree': tree, 'isGroupDatabase': is_group_database})
else:
# No subsection was specified, so render an outline of the kinetics
# database components
kinetics_libraries = [(label, library) for label, library in database.kinetics.libraries.items() if subsection in label]
kinetics_libraries.sort()
# If this is a subsection, but not the main kinetics page,
# we don't need to iterate through the entire database, as this takes a long time to load.
try:
families_to_process = [database.kinetics.families[subsection]]
except KeyError: # if main kinetics page, or some other error
families_to_process = database.kinetics.families.values()
for family in families_to_process:
for i in range(0, len(family.depositories)):
if 'untrained' in family.depositories[i].name:
family.depositories.pop(i)
family.depositories.append(getUntrainedReactions(family))
kinetics_families = [(label, family) for label, family in database.kinetics.families.items() if subsection in label]
kinetics_families.sort()
return render(request, 'kinetics.html', {'section': section, 'subsection': subsection, 'kineticsLibraries': kinetics_libraries, 'kineticsFamilies': kinetics_families}) | 5a6d53282ff462912e9a9ba899e71aadaa0c7392 | 188 |
import xml
def cot_to_cot(craft: dict, known_craft: dict = {}) -> str:
"""
    Given an input CoT XML Event with an ICAO hex as the UID, transforms the Event's name, callsign & CoT Event
    Type based on a known-craft database (CSV file) and returns the result serialized as XML bytes.
"""
return xml.etree.ElementTree.tostring(cot_to_cot_xml(craft, known_craft)) | 54fd916e9c0d32aec57c38ac40c2aee97491314d | 189 |
def update_export(module, export, filesystem, system):
""" Create new filesystem or update existing one"""
assert export
changed = False
name = module.params['name']
client_list = module.params['client_list']
if client_list:
if set(map(transform, unmunchify(export.get_permissions()))) \
!= set(map(transform, client_list)):
if not module.check_mode:
export.update_permissions(client_list)
changed = True
return changed | d7f122c63fc892c05215d2ed8c5c9e5d227209ca | 190 |
import typing
def create_kdf(kdf_type: str) -> typing.Type[KDF]:
"""Returns the class corresponding to the given key derivation function
type name.
Args:
kdf_type
The name of the OpenSSH private key key derivation function type.
Returns:
The subclass of :py:class:`KDF` corresponding to the key derivation
function type name.
Raises:
KeyError: There is no subclass of :py:class:`KDF` corresponding to
the given key derivation function type name.
"""
return _KDF_MAPPING[kdf_type] | 453b417534c87c71e73ed7b39b33ec3ba8d4e9af | 191 |
def promote(lhs, rhs, promote_option=True):
"""Promote two scalar dshapes to a possibly larger, but compatible type.
Examples
--------
>>> from datashape import int32, int64, Option
>>> x = Option(int32)
>>> y = int64
>>> promote(x, y)
Option(ty=ctype("int64"))
>>> promote(int64, int64)
ctype("int64")
Don't promote to option types.
>>> promote(x, y, promote_option=False)
ctype("int64")
Notes
----
This uses ``numpy.result_type`` for type promotion logic. See the numpy
documentation at
http://docs.scipy.org/doc/numpy/reference/generated/numpy.result_type.html
"""
if lhs == rhs:
return lhs
else:
left, right = getattr(lhs, 'ty', lhs), getattr(rhs, 'ty', rhs)
dtype = datashape.CType.from_numpy_dtype(
np.result_type(
datashape.to_numpy_dtype(left),
datashape.to_numpy_dtype(right),
),
)
if promote_option:
dtype = optionify(lhs, rhs, dtype)
return dtype | 8b197d631ad71bdbb7a4d4fcf1f6513aa4f5a41b | 192 |
import argparse
def validate_esc(esc):
"""Validate esc options\n
Give an error if the characters aren't '*?[]'
"""
esc = esc.replace("]", "[")
argset = set(esc)
charset = {"*", "?", "["}
if argset.difference(charset):
err = "input character is not '*?[]'"
raise argparse.ArgumentTypeError(err)
return "".join(argset) | 26e30eb8a5a9d62fc311d0c9b41adfbe2fd5f6cd | 193 |
def calc_amp_pop(eigenvecs, wave_func, nstates):
"""Calculates amplitudes and population from wave function, eigenvectors"""
pop = np.zeros(nstates)
amp = np.zeros((nstates), dtype=np.complex128)
for j in range(nstates):
amp[j] = np.dot(eigenvecs[:, j], wave_func)
pop[j] = np.real(bra_ket(amp[j], amp[j]))
return amp, pop | 7f092a9634bfff0e7e04582965667c9b7ecb0aaa | 194 |
from typing import Hashable
import typing
def reflect(cls, *args, **kwargs):
"""
Construct a funsor, populate ``._ast_values``, and cons hash.
This is the only interpretation allowed to construct funsors.
"""
if len(args) > len(cls._ast_fields):
# handle varargs
new_args = tuple(args[:len(cls._ast_fields) - 1]) + (args[len(cls._ast_fields) - 1 - len(args):],)
assert len(new_args) == len(cls._ast_fields)
_, args = args, new_args
# JAX DeviceArray has .__hash__ method but raise the unhashable error there.
cache_key = tuple(id(arg) if type(arg).__name__ == "DeviceArray" or not isinstance(arg, Hashable)
else arg for arg in args)
if cache_key in cls._cons_cache:
return cls._cons_cache[cache_key]
arg_types = tuple(typing.Tuple[tuple(map(type, arg))]
if (type(arg) is tuple and all(isinstance(a, Funsor) for a in arg))
else typing.Tuple if (type(arg) is tuple and not arg)
else type(arg) for arg in args)
cls_specific = (cls.__origin__ if cls.__args__ else cls)[arg_types]
result = super(FunsorMeta, cls_specific).__call__(*args)
result._ast_values = args
# alpha-convert eagerly upon binding any variable
result = _alpha_mangle(result)
cls._cons_cache[cache_key] = result
return result | 2b898de824f86460c8f7abbb4c0a9375e90ae1aa | 195 |
import argparse
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description='Semantic Segmentation')
# Data parameters.
parser.add_argument('--batch_size', type=int, default=1,
help='Number of images in one step.')
parser.add_argument('--use_lemniscate', type=str, default='',
help='Path to lemniscate embeddings.')
parser.add_argument('--data_dir', type=str, default='',
help='/path/to/dataset/.')
parser.add_argument('--input_size', type=str, default='336,336',
help='Comma-separated string with H and W of image.')
parser.add_argument('--random_seed', type=int, default=1234,
help='Random seed to have reproducible results.')
parser.add_argument('--num_gpu', type=int, default=2,
help='Number of gpus for training.')
# Training paramters.
parser.add_argument('--is_training', action='store_true',
help='Whether to updates weights.')
parser.add_argument('--use_global_status', action='store_true',
help='Whether to updates moving mean and variance.')
parser.add_argument('--learning_rate', type=float, default=2.5e-4,
help='Base learning rate.')
parser.add_argument('--momentum', type=float, default=0.9,
help='Momentum component of the optimiser.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Regularisation parameter for L2-loss.')
parser.add_argument('--num_classes', type=int, default=1000,
help='Number of classes to predict.')
parser.add_argument('--num_epochs', type=int, default=300,
help='Number of training steps.')
# parser.add_argument('--iter_size', type=int, default=10,
# help='Number of iteration to update weights')
parser.add_argument('--random_mirror', action='store_true',
help='Whether to randomly mirror the inputs.')
parser.add_argument('--random_crop', action='store_true',
help='Whether to randomly crop the inputs.')
parser.add_argument('--random_scale', action='store_true',
help='Whether to randomly scale the inputs.')
parser.add_argument('--num_loading_workers', type=int, default=10,
help='Number of workers to load imagenet.')
parser.add_argument('--schedule', type=int, nargs='+', default=[40],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--power', type=float, default=0.6,
                        help='Decay for poly learning rate policy.')
parser.add_argument('--decay', type=float, default=0.4,
                        help='Decay for exponential learning rate policy.')
parser.add_argument('--use_embed_preloaded', type=str, default="",
help='Path to preloaded numpy embeddings as torch tensor.')
# SegSort parameters.
parser.add_argument('--embedding_dim', type=int, default=32,
help='Dimension of the feature embeddings.')
# Misc paramters.
parser.add_argument('--restore_from', type=str, default='',
help='Where restore checkpoint/model parameters from.')
parser.add_argument('--save_pred_every', type=int, default=10000,
help='Save summaries and checkpoint every often.')
parser.add_argument('--update_tb_every', type=int, default=20,
help='Update summaries every often.')
parser.add_argument('--snapshot_dir', type=str, default='',
help='Where to save snapshots of the model.')
parser.add_argument('--not_restore_classifier', action='store_true',
help='Whether to not restore classifier layers.')
return parser.parse_args() | a191d1aa3b86cc47ae2ddff6a89007b166f9b767 | 196 |
import torch
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
"""Shift a point to the interior of a feasible region.
Each element of the returned vector is at least at a relative distance
`rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
"""
x_new = x.clone()
active = find_active_constraints(x, lb, ub, rstep)
lower_mask = torch.eq(active, -1)
upper_mask = torch.eq(active, 1)
if rstep == 0:
torch.nextafter(lb[lower_mask], ub[lower_mask], out=x_new[lower_mask])
torch.nextafter(ub[upper_mask], lb[upper_mask], out=x_new[upper_mask])
else:
x_new[lower_mask] = lb[lower_mask].add(lb[lower_mask].abs().clamp(1,None), alpha=rstep)
x_new[upper_mask] = ub[upper_mask].sub(ub[upper_mask].abs().clamp(1,None), alpha=rstep)
tight_bounds = (x_new < lb) | (x_new > ub)
x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
return x_new | cb38cf093b5459c3e32a1a02e4767dee6dae6637 | 197 |
def add_emails(request):
"""
Args:
request: Http Request (ignored in this function)
    Returns: Add operation status wrapped in a response object
"""
error_messages = []
success_messages = []
status = HTTP_200_OK
success, message = queries.add_emails(request.data)
if success:
success_messages.append(message)
else:
error_messages.append(message)
status = HTTP_403_FORBIDDEN
return create_response(error_messages=error_messages, success_messages=success_messages, status=status) | c4b8de45a5a233dd0e43febda4f973ecf64745d4 | 198 |
import math
def tanD(angle):
"""
    angle is the measure of an angle in degrees
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Returns the tangent of angle.
"""
return math.tan(math.radians(angle)) | 641e564fefcdf6d1b804507b672e0e6476144b48 | 199 |
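# Quick sanity check of the degree-based wrapper: tan(45°) is 1.
print(round(tanD(45), 10))  # 1.0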