content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---
import re
def ruru_old_log_checker(s):
"""
    Return True if the log is in the old format, False otherwise.
    :param s: parsed log page (e.g. a BeautifulSoup object)
    :return: bool
"""
time_data_regex = r'[0-9]{4}\/[0-9]{2}\/[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}'
    # In the new ruru-server log format, the timestamp can be read from the first div with class 'd12150'; in the old format it cannot.
time_data = re.search(time_data_regex, str(s.find('div', class_='d12150')))
return False if time_data else True | 54f6a94dab98ef6947496b8e1f95401d99424ee2 | 2,500 |
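# Hedged usage sketch for ruru_old_log_checker: assumes the third-party bs4
# package is installed; the HTML snippet below is illustrative, not a real log page.
from bs4 import BeautifulSoup
_soup = BeautifulSoup('<div class="d12150">2020/01/02 03:04:05</div>', 'html.parser')
print(ruru_old_log_checker(_soup))  # -> False: a timestamp was found, so this is the new format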
def get_user_map_best(beatmap_id, user, enabled_mods=0):
"""
gets users best play on map
:param beatmap_id: beatmap id
:param user: username
:param enabled_mods: mods used
:return: list of plays
"""
response = OSU_API.get('/get_scores', {"b": beatmap_id, "u": user, "mods": enabled_mods}).json()
if Config.debug:
Log.log(response)
# if len(response) == 0:
# raise NoScore("Couldn't find user score for this beatmap")
for i, j in enumerate(response):
response[i] = Play(j)
response[i].beatmap_id = beatmap_id
return response | 545ede7ef7d1133bbbcab9640a9e3edcdcca111f | 2,501 |
def scale_z_by_atom(z, scale, copy=True):
"""
Parameters
----------
    z : array, shape (n_trials, n_atoms, n_times - n_times_atom + 1)
Can also be a list of n_trials LIL-sparse matrix of shape
(n_atoms, n_times - n_times_atom + 1)
The sparse activation matrix.
    scale : array, shape = (n_atoms, )
        The scales to apply on z.
    copy : bool
        If True, copy the activations before scaling them.
    """
if is_list_of_lil(z):
n_trials, n_atoms, n_times_valid = get_z_shape(z)
assert n_atoms == len(scale)
if copy:
z = deepcopy(z)
for z_i in z:
for k in range(z_i.shape[0]):
z_i.data[k] = [zikt * scale[k] for zikt in z_i.data[k]]
else:
if copy:
z = z.copy()
z *= scale[None, :, None]
return z | b87368c1ea8dcd18fcbfd85ef8cde5450d5fcf33 | 2,502 |
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
if auth.is_logged_in():
# # if newly registered user is not in auth_membership add him as an administrator
if not db(db.auth_membership.user_id == auth.user_id).count() > 0:
auth.add_membership(auth.id_group(ADMIN), auth.user_id)
session.user_info = get_user_info()
response.user_info = session.user_info
if request.user_agent().is_mobile:
return response.render('../views/default/index-m.html')
else:
return response.render('../views/default/index.html') | d48d29ee65ddf064dc2f424f2be9f46da23cbd4a | 2,503 |
def compute_classification_metrics_at_ks(is_match, num_predictions, num_trgs, k_list=[5,10], meng_rui_precision=False):
"""
    :param is_match: a boolean np array with size [num_predictions]
    :param num_predictions: total number of predictions
    :param num_trgs: number of ground-truth targets
    :param k_list: list of cutoffs; each entry is an int, 'M' (use num_predictions) or 'G' (use max(num_trgs, num_predictions))
    :param meng_rui_precision: if True, use min(k, num_predictions) instead of k as the precision denominator
    :return: precision@k, recall@k, f1@k, num_matches@k and num_predictions@k for every k in k_list
"""
assert is_match.shape[0] == num_predictions
#topk.sort()
if num_predictions == 0:
precision_ks = [0] * len(k_list)
recall_ks = [0] * len(k_list)
f1_ks = [0] * len(k_list)
num_matches_ks = [0] * len(k_list)
num_predictions_ks = [0] * len(k_list)
else:
num_matches = np.cumsum(is_match)
num_predictions_ks = []
num_matches_ks = []
precision_ks = []
recall_ks = []
f1_ks = []
for topk in k_list:
if topk == 'M':
topk = num_predictions
elif topk == 'G':
#topk = num_trgs
if num_predictions < num_trgs:
topk = num_trgs
else:
topk = num_predictions
if meng_rui_precision:
if num_predictions > topk:
num_matches_at_k = num_matches[topk-1]
num_predictions_at_k = topk
else:
num_matches_at_k = num_matches[-1]
num_predictions_at_k = num_predictions
else:
if num_predictions > topk:
num_matches_at_k = num_matches[topk - 1]
else:
num_matches_at_k = num_matches[-1]
num_predictions_at_k = topk
precision_k, recall_k, f1_k = compute_classification_metrics(num_matches_at_k, num_predictions_at_k, num_trgs)
precision_ks.append(precision_k)
recall_ks.append(recall_k)
f1_ks.append(f1_k)
num_matches_ks.append(num_matches_at_k)
num_predictions_ks.append(num_predictions_at_k)
return precision_ks, recall_ks, f1_ks, num_matches_ks, num_predictions_ks | 189a6491e1b5d8e3bf8869586b69667eb1b9d9c9 | 2,504 |
import math
def coordinatesOfPosition(shape, distance):
"""Compute the point at a given distance from the beginning of a shape.
The shape is a list of points. A point is a sequence of two floats.
The returned point is the x- and y-coordinate of the point that has
the given distance along the line of the shape from its starting point.
The shape must contain at least one point coordinate. If the distance
argument is larger than the length of the shape, the last point
of the shape is returned.
"""
prevPoint = shape[0]
currentDistance = 0.0
for point in shape:
diffX = point[0] - prevPoint[0]
diffY = point[1] - prevPoint[1]
sectionLength = math.sqrt(diffX * diffX + diffY * diffY)
if currentDistance + sectionLength > distance:
fraction = (distance - currentDistance) / sectionLength
return (prevPoint[0] + diffX * fraction,
prevPoint[1] + diffY * fraction)
        currentDistance += sectionLength
        prevPoint = point
if cliArgs.debug:
print "coordinatesOfPosition: Exceeded the shape."
return point | 7ae8c53468d5c8be8fe2daca1d0dfe358ad6c24e | 2,505 |
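# Usage sketch for coordinatesOfPosition: walk 5 units along an L-shaped polyline,
# 3 across and then up. This stays on the in-range path, so the module-level
# cliArgs debug flag is never consulted.
print(coordinatesOfPosition([(0.0, 0.0), (3.0, 0.0), (3.0, 4.0)], 5.0))  # -> (3.0, 2.0)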
def compute_dose_median_scores(null_dist_medians, dose_list):
"""
Align median scores per dose, this function return a dictionary,
with keys as dose numbers and values as all median scores for each dose
"""
median_scores_per_dose = {}
for dose in dose_list:
median_list = []
        for keys in null_dist_medians:
            dose_median_list = null_dist_medians[keys][dose-1]
median_list += dose_median_list
median_scores_per_dose[dose] = median_list
return median_scores_per_dose | ec23f186c10a6921cdae9d4965a51343dc78011e | 2,506 |
def generate_converter(name, taskdep, **options) :
"""
    taskdep is the task (or tasks) that must be executed before this one.
    task_html_generator is the task that produces the HTML; we need to extract its result.
    taskname is the name of the generated task.
"""
converter = options.get('converter',
Pandoc("-f", "html", "-t", "markdown", "--wrap=none"))
flowdep = options.get('flowdep', taskdep[0])
return lift_process_to_task(name, converter, taskdep, flowdep=flowdep) | 3e60abfcdabfb0c35ff8b9692b21b27af2300da8 | 2,507 |
def symmetric_product(tensor):
"""
Symmetric outer product of tensor
"""
shape = tensor.size()
idx = list(range(len(shape)))
idx[-1], idx[-2] = idx[-2], idx[-1]
return 0.5 * (tensor + tensor.permute(*idx)) | 4f96ab5f0bd41080352b1e5e806b6a73b3506950 | 2,508 |
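# Usage sketch (assumes PyTorch is installed): symmetrizes the last two dimensions.
import torch
t = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print(symmetric_product(t))  # -> tensor([[1.0, 2.5], [2.5, 4.0]])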
import torch
def prep_image(img, inp_dim):
"""
Function:
Prepare image for inputting to the neural network.
Arguments:
        img -- the image itself
inp_dim -- dimension for resize the image (input dimension)
Return:
img -- image after preparing
"""
img = (letterbox_image(img, (inp_dim, inp_dim)))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
return img | 4f32717cb06b32cab2e0b92d3a24a9a665baf27b | 2,509 |
async def auth_check(request):
"""
No-op view to set the session cookie, this is used by websocket since the "Set-Cookie" header
doesn't work with 101 upgrade
"""
return json_response(status='ok') | 60419c8d32bbc41525ebf44d4d7bcabe8a117df0 | 2,510 |
def check_constraint(term_freq,top_terms,top_terms_test_freq):
"""
    Check the 12%-30% constraint on the test set.
    term_freq is the dictionary of all term frequencies
    top_terms is the list of terms we care about (first 300?)
    top_terms_test_freq is an array of frequencies of the top terms in the test set.
RETURN
True if constraint satisfied, False otherwise
"""
return check_constraint_12pc(term_freq,top_terms,top_terms_test_freq) and check_constraint_30pc(term_freq,top_terms,top_terms_test_freq) | d2b31c68d1a8cd1a7d8471818cc46de943496aaa | 2,511 |
import os
# Assumed imports for this snippet (the original module may import Keras differently):
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, RepeatVector, TimeDistributed, Dense
def create_LSTM_model(patient_idx, time_steps, save_model=False, plot_loss=False):
"""
Trains an LSTM model over a patient
@param patient_idx: number
@param time_steps: number of concatenated heartbeats per datapoint
@param save_model: whether to save the model to h5 file
@param plot_loss: whether to plot the loss during training
@return:
"""
orig_data = np.load(os.path.join("Working_Data/Normalized_Fixed_Dim_HBs_Idx" + str(patient_idx) + ".npy"))
data = orig_data[0:1000, :, :]
# print(data[0:10].reshape(10000,4).shape)
X, y = create_lstm_datapoints(data, time_steps)
model = Sequential()
model.add(LSTM(30, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(rate=0.2))
model.add(RepeatVector(X.shape[1]))
model.add(LSTM(30, return_sequences=True))
model.add(Dropout(rate=0.2))
model.add(TimeDistributed(Dense(X.shape[2])))
model.compile(optimizer='adam', loss='mse')
model.summary()
history = model.fit(X, X, epochs=100, batch_size=1, validation_split=0.1,
callbacks=[keras.callbacks.EarlyStopping(monitor='loss', patience=3, mode='min')],
shuffle=False)
if save_model:
model.save(f"Working_Data/LSTM_Model_Idx{patient_idx}.h5")
if plot_loss:
# plot the loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig("Working_Data/lstm_loss.png")
plt.show()
print("loss of the model is: ")
print(history.history['loss'])
print(f"Created LSTM model for patient {patient_idx}")
return model | 8a32e6106602f949000c57445317d7f0ec83102b | 2,512 |
def get_supported_providers() -> list[str]:
"""
Return the list of supported discussion providers
TODO: Load this from entry points?
"""
providers = [
'legacy',
'piazza',
]
return providers | bea650b51447734ae83949f0d792f850aa3d0aa3 | 2,513 |
import pytz
import pandas as pd
from datetime import timedelta
def predict_split(history, prediction_length=7*24, hyperparameters={}):
"""
    This function predicts a time series of gas prices by splitting it into a
    trend and a residual, applying a feature pipeline to each, and predicting
    each of them individually.
Keyword arguments:
history -- the time series to split up
prediction_length -- the number of time steps to predict (default 7*24)
hyperparameters -- values used for the prediction model (default {})
Return value:
2 time series predicted: trend and residual
"""
#extract parameters
r = hyperparameters["r"] if "r" in hyperparameters else default_hyperparameters["r"]
#split data
trend, res = split_trend(history)
#create index for prediction time series
index_pred = pd.date_range(
start=history.index.max() + timedelta(hours=1),
end=history.index.max() + timedelta(hours=prediction_length),
freq="1H",
tz=pytz.utc
)
#predict the trend
trend_pred = predict_ts(
(trend - trend.shift(1)).fillna(0.),
get_feature_pipeline("trend", hyperparameters),
index_pred,
hyperparameters=hyperparameters
).cumsum() + trend.iloc[-1]
#compute residual prediction
res_pred = predict_ts(
res.iloc[-r:],
get_feature_pipeline("res", hyperparameters),
index_pred,
hyperparameters=hyperparameters
)
#alternative: using AR from statsmodels
#res_model = AR(res)
#res_results = res_model.fit(disp=-1, maxlag=p)
#res_pred = res_results.predict(len(res), len(res) + prediction_length)
#return result
return trend_pred, res_pred | 025d4b753754ad18a04b46f95002f9ab54ccd9bd | 2,514 |
def SFRfromLFIR(LFIR):
"""
    Kennicutt (1998) calibration
    to get the star formation rate from LFIR (8-1000um).
    LFIR in erg s-1
    SFR in Msun / year
"""
SFR = 4.5E-44 * LFIR
return SFR | 4adf401bbf2c6547cea817b52eb881531db8c798 | 2,515 |
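# Example under the Kennicutt (1998) calibration above:
print(SFRfromLFIR(1e45))  # L_FIR = 1e45 erg/s -> ~45 Msun/yr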
def inc_group_layers(n_list, d_list, c_list):
"""
Helper function for inc_tmm. Groups and sorts layer information.
See coh_tmm for definitions of n_list, d_list.
c_list is "coherency list". Each entry should be 'i' for incoherent or 'c'
for 'coherent'.
A "stack" is a group of one or more consecutive coherent layers. A "stack
index" labels the stacks 0,1,2,.... The "within-stack index" counts the
coherent layers within the stack 1,2,3... [index 0 is the incoherent layer
before the stack starts]
An "incoherent layer index" labels the incoherent layers 0,1,2,...
An "alllayer index" labels all layers (all elements of d_list) 0,1,2,...
Returns info about how the layers relate:
* stack_d_list[i] = list of thicknesses of each coherent layer in the i'th
stack, plus starting and ending with "inf"
* stack_n_list[i] = list of refractive index of each coherent layer in the
i'th stack, plus the two surrounding incoherent layers
* all_from_inc[i] = j means that the layer with incoherent index i has
alllayer index j
* inc_from_all[i] = j means that the layer with alllayer index i has
incoherent index j. If j = nan then the layer is coherent.
* all_from_stack[i1][i2] = j means that the layer with stack index i1 and
within-stack index i2 has alllayer index j
* stack_from_all[i] = [j1 j2] means that the layer with alllayer index i is
part of stack j1 with withinstack-index j2. If stack_from_all[i] = nan
then the layer is incoherent
* inc_from_stack[i] = j means that the i'th stack comes after the layer
with incoherent index j, and before the layer with incoherent index j+1.
* stack_from_inc[i] = j means that the layer with incoherent index i comes
immediately after the j'th stack. If j=nan, it is not immediately
following a stack.
* num_stacks = number of stacks
* num_inc_layers = number of incoherent layers
* num_layers = number of layers total
"""
if (d_list.ndim != 1):
raise ValueError("Problem with n_list or d_list!")
if (d_list[0] != np.inf) or (d_list[-1] != np.inf):
raise ValueError('d_list must start and end with inf!')
if (c_list[0] != 'i') or (c_list[-1] != 'i'):
raise ValueError('c_list should start and end with "i"')
if not len(n_list) == d_list.size == len(c_list):
raise ValueError('List sizes do not match!')
inc_index = 0
stack_index = 0
stack_d_list = []
stack_n_list = []
all_from_inc = []
inc_from_all = []
all_from_stack = []
stack_from_all = []
inc_from_stack = []
stack_from_inc = []
stack_in_progress = False
for alllayer_index in range(len(n_list)):
if c_list[alllayer_index] == 'c': # coherent layer
inc_from_all.append(np.nan)
if not stack_in_progress: # this layer is starting new stack
stack_in_progress = True
ongoing_stack_d_list = [np.inf, d_list[alllayer_index]]
ongoing_stack_n_list = [n_list[alllayer_index - 1],
n_list[alllayer_index]]
stack_from_all.append([stack_index, 1])
all_from_stack.append([alllayer_index - 1, alllayer_index])
inc_from_stack.append(inc_index - 1)
within_stack_index = 1
else: # another coherent layer in the same stack
ongoing_stack_d_list.append(d_list[alllayer_index])
ongoing_stack_n_list.append(n_list[alllayer_index])
within_stack_index += 1
stack_from_all.append([stack_index, within_stack_index])
all_from_stack[-1].append(alllayer_index)
elif c_list[alllayer_index] == 'i': # incoherent layer
stack_from_all.append(np.nan)
inc_from_all.append(inc_index)
all_from_inc.append(alllayer_index)
if not stack_in_progress: # previous layer was also incoherent
stack_from_inc.append(np.nan)
else: # previous layer was coherent
stack_in_progress = False
stack_from_inc.append(stack_index)
ongoing_stack_d_list.append(np.inf)
stack_d_list.append(ongoing_stack_d_list)
ongoing_stack_n_list.append(n_list[alllayer_index])
stack_n_list.append(ongoing_stack_n_list)
all_from_stack[-1].append(alllayer_index)
stack_index += 1
inc_index += 1
else:
raise ValueError("Error: c_list entries must be 'i' or 'c'!")
return {'stack_d_list': stack_d_list,
'stack_n_list': stack_n_list,
'all_from_inc': all_from_inc,
'inc_from_all': inc_from_all,
'all_from_stack': all_from_stack,
'stack_from_all': stack_from_all,
'inc_from_stack': inc_from_stack,
'stack_from_inc': stack_from_inc,
'num_stacks': len(all_from_stack),
'num_inc_layers': len(all_from_inc),
'num_layers': len(n_list)} | 1b25975169839e54feae58f98b5de98916c51541 | 2,516 |
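# Minimal sketch: one coherent stack (two layers) between two incoherent
# semi-infinite media. Assumes numpy is imported as np, as in the original module.
import numpy as np
layer_info = inc_group_layers(n_list=[1.0, 1.5, 2.0, 1.0],
                              d_list=np.array([np.inf, 100, 200, np.inf]),
                              c_list=['i', 'c', 'c', 'i'])
print(layer_info['num_stacks'], layer_info['num_inc_layers'])  # -> 1 2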
def get_heater_device_json():
""" returns information about the heater in json """
return '{\n "state" : "' + _pretty_state_identifier(brew_logic.heater_state) + '",\n "overridden" : "' + str(brew_logic.heater_override).lower() + '"\n }' | 3997e9eee7cbb058adf4900b571c8458e2464e19 | 2,517 |
def rfc_deploy():
"""This function trains a Random Forest classifier and outputs the
out-of-sample performance from the validation and test sets
"""
df = pd.DataFrame()
for pair in pairs:
# retrieving the data and preparing the features
dataset = gen_feat(pair)
dataset.drop(['Open', 'High', 'Low', 'Close', 'volume'], axis=1, inplace=True)
# selecting the features to train on
cols = list(dataset.columns)
feats = cols[2:]
#splitting into training, validation and test sets
df_train = dataset.iloc[:-100,:]
train = df_train.copy()
df_test = dataset.iloc[-100:,:]
test = df_test.copy()
train_f = train.iloc[:-100,:]
valid = train.iloc[-100:,:]
#training the algorithm
m = rfc(train_f[feats], train_f['dir'])
# test sets
test_pred = m.predict(test[feats])
test_proba = m.predict_proba(test[feats])
df1 = pd.DataFrame(test_pred,columns=['prediction'], index=test.index)
proba_short = []
proba_long = []
for x in range(len(test_proba)):
proba_short.append(test_proba[x][0])
proba_long.append(test_proba[x][-1])
proba = {'proba_short': proba_short,
'proba_long': proba_long}
df2 = pd.DataFrame(proba, index=test.index)
df1['probability'] = np.where(df1['prediction'] == 1, df2['proba_long'],
np.where(df1['prediction'] == -1, df2['proba_short'], 0))
df1['signal'] = np.where((df1['probability'] >= .7) & (df1['prediction'] == 1), 'Go Long',
np.where((df1['probability'] >= 0.7) & (df1['prediction'] == -1), 'Go Short', 'Stand Aside'))
df1.reset_index(inplace=True)
df1['pair'] = pair
df1.set_index('pair', inplace=True)
entry_sig = df1[['probability', 'signal']].iloc[-1:]
# Merge
df = pd.concat([df, entry_sig], axis=0)
#output
return df | 86c4aa5f44d23cce83f6cc9993c0e10cd124c423 | 2,518 |
def get_block(block_name):
"""Get block from BLOCK_REGISTRY based on block_name."""
    if block_name not in BLOCK_REGISTRY:
raise Exception(NO_BLOCK_ERR.format(
block_name, BLOCK_REGISTRY.keys()))
block = BLOCK_REGISTRY[block_name]
return block | 10b86c5045496a865907ef2617b2994d03f1312d | 2,519 |
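# Toy registry to illustrate the lookup pattern; BLOCK_REGISTRY and NO_BLOCK_ERR
# normally live elsewhere in the project, so the values below are hypothetical.
BLOCK_REGISTRY = {"conv": "ConvBlock"}
NO_BLOCK_ERR = "Block {} not found. Available blocks: {}"
print(get_block("conv"))  # -> ConvBlock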
from pathlib import Path
import yaml
from yaml import SafeLoader
def _determine_role_name(var_file: Path) -> str:
"""
Lookup role name from directory or galaxy_info.
"""
if var_file.is_file():
role_path: Path = var_file.parent / ".."
name = str(role_path.resolve().name)
meta_path: Path = role_path / 'meta' / 'main.yml'
if (meta_path.is_file()):
with open(str(meta_path), 'r') as f:
meta = yaml.load(f, Loader=SafeLoader)
try:
role_name = meta['galaxy_info']['role_name']
name = role_name
except BaseException:
pass
return name | 59e6d60234cc7988fe6c3005176f1c89cac5b60d | 2,520 |
import six
import os
def load_table(file_path, metadata_ext='.pklmetadata'):
"""
Loads a pickled DataFrame from a file along with its metadata.
This function loads a DataFrame from a file stored in pickle format.
Further, this function looks for a metadata file with the same file name
    but with an extension given by the user (defaults to '.pklmetadata'). If the
    metadata file is present, the function will update the metadata for that
DataFrame in the catalog.
Args:
file_path (string): The file path to load the file from.
metadata_ext (string): The metadata file extension (defaults to
'.pklmetadata') that should be used to generate metadata file name.
Returns:
If the loading is successful, the function will return a pandas
DataFrame read from the file. The catalog will be updated with the
metadata read from the metadata file (if the file was present).
Raises:
AssertionError: If `file_path` is not of type string.
AssertionError: If `metadata_ext` is not of type string.
Examples:
>>> A = em.load_table('./A.pkl')
>>> A = em.load_table('./A.pkl', metadata_ext='.pklmeta')
See Also:
:meth:`~py_entitymatching.save_table`
Note:
This function is different from read_csv_metadata in two aspects.
First, this function currently does not support reading in candidate
set tables, where there are more metadata such as ltable,
rtable than just 'key', and conceptually the user is expected to
provide ltable and rtable information while calling this function. (
this support will be added shortly). Second, this function loads the
table stored in a pickle format.
"""
# Validate input parameters
validate_object_type(file_path, six.string_types, error_prefix='Input file path')
validate_object_type(metadata_ext, six.string_types)
# Load the object from the file path. Note that we use a generic load
# object to load in the DataFrame too.
data_frame = load_object(file_path)
# Load metadata from file path
# # Check if the meta data file is present
if ps._is_metadata_file_present(file_path, extension=metadata_ext):
# Construct the metadata file name, and read it from the disk.
# # Get the file name used to load the DataFrame
file_name, _ = os.path.splitext(file_path)
# # Construct the metadata file name
metadata_filename = file_name + metadata_ext
# # Load the metadata from the disk
metadata_dict = load_object(metadata_filename)
# Update the catalog with the properties read from the disk
for property_name, property_value in six.iteritems(metadata_dict):
if property_name == 'key':
# If the property_name is key call set_key as the function
# will check for the integrity of key before setting it in
# the catalog
cm.set_key(data_frame, property_value)
else:
cm.set_property(data_frame, property_name, property_value)
else:
# If the metadata file is not present then issue a warning
logger.warning('There is no metadata file')
# Return the DataFrame
return data_frame | ca931b895474e83877d17ca4a9e4054a595acf40 | 2,521 |
import os
import uvicorn
import logging
def main() -> int:
"""Ensure runtime environment is ready, and start the server."""
app.utils.setup_runtime_environment()
for safety_check in (
app.utils.ensure_supported_platform, # linux only at the moment
app.utils.ensure_local_services_are_running, # mysql (if local)
app.utils.ensure_directory_structure, # .data/ & achievements/ dir structure
app.utils.ensure_dependencies_and_requirements, # submodules & oppai-ng built
):
if (exit_code := safety_check()) != 0:
return exit_code
""" Server should be safe to start """
# install any debugging hooks from
# _testing/runtime.py, if present
app.utils._install_debugging_hooks()
# check our internet connection status
if not app.utils.check_connection(timeout=1.5):
log("No internet connection available.", Ansi.LYELLOW)
# show info & any contextual warnings.
app.utils.display_startup_dialog()
# the server supports both inet and unix sockets.
if (
app.utils.is_valid_inet_address(app.settings.SERVER_ADDR)
and app.settings.SERVER_PORT is not None
):
server_arguments = {
"host": app.settings.SERVER_ADDR,
"port": app.settings.SERVER_PORT,
}
elif (
app.utils.is_valid_unix_address(app.settings.SERVER_ADDR)
and app.settings.SERVER_PORT is None
):
server_arguments = {
"uds": app.settings.SERVER_ADDR,
}
# make sure the socket file does not exist on disk and can be bound
# (uvicorn currently does not do this for us, and will raise an exc)
if os.path.exists(app.settings.SERVER_ADDR):
if (
app.utils.processes_listening_on_unix_socket(app.settings.SERVER_ADDR)
!= 0
):
log(
f"There are other processes listening on {app.settings.SERVER_ADDR}.\n"
f"If you've lost it, gulag can be killed gracefully with SIGINT.",
Ansi.LRED,
)
return 1
else:
os.remove(app.settings.SERVER_ADDR)
else:
raise ValueError(
"%r does not appear to be an IPv4, IPv6 or Unix address"
% app.settings.SERVER_ADDR,
) from None
# run the server indefinitely
uvicorn.run(
"app.api.init_api:asgi_app",
reload=app.settings.DEBUG,
log_level=logging.WARNING,
server_header=False,
date_header=False,
# TODO: uvicorn calls .lower() on the key & value,
# but i would prefer Gulag-Version to keep
# with standards. perhaps look into this.
headers=(("gulag-version", app.settings.VERSION),),
**server_arguments,
)
return 0 | 6aa408cc6e3ef223a63ca073ddad3b3e5325df85 | 2,522 |
def coco17_category_info(with_background=True):
"""
Get class id to category id map and category id
to category name map of COCO2017 dataset
Args:
with_background (bool, default True):
whether load background as class 0.
"""
clsid2catid = {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 13,
13: 14,
14: 15,
15: 16,
16: 17,
17: 18,
18: 19,
19: 20,
20: 21,
21: 22,
22: 23,
23: 24,
24: 25,
25: 27,
26: 28,
27: 31,
28: 32,
29: 33,
30: 34,
31: 35,
32: 36,
33: 37,
34: 38,
35: 39,
36: 40,
37: 41,
38: 42,
39: 43,
40: 44,
41: 46,
42: 47,
43: 48,
44: 49,
45: 50,
46: 51,
47: 52,
48: 53,
49: 54,
50: 55,
51: 56,
52: 57,
53: 58,
54: 59,
55: 60,
56: 61,
57: 62,
58: 63,
59: 64,
60: 65,
61: 67,
62: 70,
63: 72,
64: 73,
65: 74,
66: 75,
67: 76,
68: 77,
69: 78,
70: 79,
71: 80,
72: 81,
73: 82,
74: 84,
75: 85,
76: 86,
77: 87,
78: 88,
79: 89,
80: 90
}
catid2name = {
0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush'
}
if not with_background:
clsid2catid = {k - 1: v for k, v in clsid2catid.items()}
return clsid2catid, catid2name | f64be8c09b3372ad75826a6bfdc8a2f0bc4f9e25 | 2,523 |
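# Quick check of the zero-based class ids when the background class is excluded:
clsid2catid, catid2name = coco17_category_info(with_background=False)
print(clsid2catid[0], catid2name[clsid2catid[0]])  # -> 1 person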
def _parse_parameters(paramdoc):
"""Parse parameters and return list of (name, full_doc_string)
    It is needed to remove multiple entries for the same parameter,
    as can happen when parameters are added from the parent class.
    It assumes that the parameters were previously unwrapped, so their
    documentation starts at the beginning of the string, as it should
    after _split_out_parameters.
    """
entries = __re_spliter1.split(paramdoc)
result = [(__re_spliter2.split(e)[0].strip(), e)
for e in entries if e != '']
if __debug__:
debug('DOCH', 'parseParameters: Given "%s", we split into %s' %
(paramdoc, result))
return result | 0a580476cf7aa7c84f2081af7b3bbc224f9c3390 | 2,524 |
def example_miller_set(example_crystal):
"""Generate an example miller set."""
ms = miller.set(
crystal_symmetry=example_crystal.get_crystal_symmetry(),
indices=flex.miller_index([(1, 1, 1)] * 8 + [(2, 2, 2)]),
anomalous_flag=False,
)
return ms | 516eca404544d8f8af8dd664488006c67bef03b8 | 2,525 |
async def get(req):
"""
Get a complete analysis document.
"""
db = req.app["db"]
analysis_id = req.match_info["analysis_id"]
document = await db.analyses.find_one(analysis_id)
if document is None:
return not_found()
sample = await db.samples.find_one({"_id": document["sample"]["id"]}, {"quality": False})
if not sample:
return bad_request("Parent sample does not exist")
read, _ = virtool.samples.utils.get_sample_rights(sample, req["client"])
if not read:
return insufficient_rights()
await virtool.subtractions.db.attach_subtraction(db, document)
if document["ready"]:
document = await virtool.analyses.format.format_analysis(req.app, document)
return json_response(virtool.utils.base_processor(document)) | e52598d27b73dd9ef5d24aba196f97f85fb47214 | 2,526 |
from typing import Union
from typing import Mapping
from typing import Any
def get_cube_point_indexes(cube: xr.Dataset,
points: Union[xr.Dataset, pd.DataFrame, Mapping[str, Any]],
dim_name_mapping: Mapping[str, str] = None,
index_name_pattern: str = DEFAULT_INDEX_NAME_PATTERN,
index_dtype=np.float64,
cube_asserted: bool = False) -> xr.Dataset:
"""
Get indexes of given point coordinates *points* into the given *dataset*.
:param cube: The cube dataset.
:param points: A mapping from column names to column data arrays, which must all have the same length.
:param dim_name_mapping: A mapping from dimension names in *cube* to column names in *points*.
:param index_name_pattern: A naming pattern for the computed indexes columns.
Must include "{name}" which will be replaced by the dimension name.
:param index_dtype: Numpy data type for the indexes. If it is a floating point type (default),
then *indexes* will contain fractions, which may be used for interpolation.
For out-of-range coordinates in *points*, indexes will be -1 if *index_dtype* is an integer type, and NaN,
    if *index_dtype* is a floating point type.
:param cube_asserted: If False, *cube* will be verified, otherwise it is expected to be a valid cube.
:return: A dataset containing the index columns.
"""
if not cube_asserted:
assert_cube(cube)
dim_name_mapping = dim_name_mapping if dim_name_mapping is not None else {}
dim_names = _get_cube_data_var_dims(cube)
col_names = [dim_name_mapping.get(dim_name, dim_name) for dim_name in dim_names]
_validate_points(points, col_names, param_name="points")
indexes = []
for dim_name, col_name in zip(dim_names, col_names):
col = points[col_name]
coord_indexes = get_dataset_indexes(cube, dim_name, col, index_dtype=index_dtype)
indexes.append((index_name_pattern.format(name=dim_name),
xr.DataArray(coord_indexes, dims=[INDEX_DIM_NAME])))
return xr.Dataset(dict(indexes)) | b1f5eb134ab7119589b54c45b95065c2f57348dc | 2,527 |
def auto_add():
"""
    Automatically register amis pages:
    1. find all amis files
    2. update the records
    3. group the records by app into a dict
    4. generate auto_urls.py for each app
:return:
"""
amis_json_file_list = get_amis_files()
cnt = update_rcd(amis_json_file_list)
aml_app_dict = get_rcd_by_app_name()
add_needed_auto_urls(aml_app_dict)
add_urls_needed(aml_app_dict)
return cnt | 17b6026f56793f3a6f76446145b7f65a6fe29a5a | 2,528 |
from pathlib import Path
from typing import Optional
def _get_configs(cli_args: CLIArgs, project_root: Path) -> Configs:
"""
Deal with extra configs for 3rd party tool.
Parameters
----------
cli_args
Commandline arguments passed to nbqa
project_root
Root of repository, where .git / .hg / .nbqa.ini file is.
Returns
-------
Configs
Taken from CLI (if given), else from .nbqa.ini.
"""
cli_config: Configs = Configs.parse_from_cli_args(cli_args)
file_config: Optional[Configs] = config_parser.parse_config_from_file(
cli_args, project_root
)
if file_config is not None:
cli_config = cli_config.merge(file_config)
return cli_config | d9ef190a99b06f2d17bbc336ace86061ea215d97 | 2,529 |
import os
def run_gx_test(dataset_path, output_dir, dist_types, ex_config, mp_args):
"""
    Runs one Genex experiment set per distance type over the archive datasets.
    :param mp_args: the configuration of the multiprocess backend,
    go to this site https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark-configure.html for
    the correct Spark configuration with AWS; you only need to worry about the configs that are exposed to you ->
    that is: the number of workers, the max driver memory and the max result size
    :param dataset_path: the path to the archive datasets
    :param output_dir: the path to which the result csv's will be saved
:param dist_types: a list of strings, must contain at least one item. Items must be ones of the following: eu,ch,ma
:param ex_config: a dict contains hyper-parameters for the experiment. They are
'num_sample': int, number of samples to consider in each dataset, set this to math.inf for complete experiment
'query_split': float, a fraction of the dataset to be taken as queries, use 0.2 for the time being
'_lb_opt': bool, whether to turn of lower-bounding optimization for DTW, leave it False in not otherwise specified
'radius': int, the length radius for Genex Query, leave it being 1 if not otherwise specified
'use_spark': bool, whether to use the Spark backend, leave it being True if not otherwise specified
'loi_range': float, only consider sequences within a percentage length of the longest sequence, use 0.1 for the time being
'st': float, hyper-parameters that determines the cluster boundary in genex.build, leave it being True if not otherwise specified
'paa_seg': the n segment of PAA, use 3 as a heuristic approach
"""
valid_dt = ['eu', 'ch', 'ma']
try:
assert os.path.isdir(dataset_path)
assert os.path.isdir(output_dir)
assert 0 < len(dist_types) <= 3
assert np.all([x in valid_dt for x in dist_types])
except AssertionError:
raise Exception('Assertion failed in checking parameters')
exp_set_list = [generate_ex_set_GENEX(dataset_path, output_dir, dt) for dt in dist_types]
return [run_exp_set_GENEX(es, mp_args, **ex_config) for es in exp_set_list] | 4d581dfc979a02d429ac02156e0b7b6cf3d108b5 | 2,530 |
import os
import ctypes
def _load_lib():
"""Load libary in build/lib."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../build/lib/')
path_to_so_file = os.path.join(lib_path, "libc_runtime_api.so")
lib = ctypes.CDLL(path_to_so_file, ctypes.RTLD_GLOBAL)
_check_functions(lib, DNNL_LIB)
return lib | e7f41afbad42ee485c3a65acc9c9d91f4ab56f97 | 2,531 |
from sklearn.preprocessing import RobustScaler
def robust_standardize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the RobustScaler from the module sklearn.preprocessing by
removing the median and scaling the data according to the quantile
range (IQR). This transformation is robust to outliers.
Note: In case multiple dataframes are used (i.e., several partitions of
the dataset in training and testing), make sure that all of them will
be passed to this method at once, and as one single dataframe. Otherwise,
the normalization will be carried out on local (as opposed to global)
extrema, hence unrepresentative IQR. This is a bad practice.
:param df: The dataframe to be normalized.
:param excluded_colnames: The name of non-numeric (e.g., TimeStamp,
ID etc.) that must be excluded before normalization takes place.
They will be added back to the normalized data.
:return: The same dataframe as input, with the label column unchanged,
except that now the numerical values are transformed into new range
determined by IQR.
"""
excluded_colnames = excluded_colnames if excluded_colnames else []
colnames_original_order = list(df)
# Separate data (numeric) from those to be excluded (ids and class_labels)
included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
# Exclude all non-numeric columns
df_numeric = df[included_cnames].select_dtypes(include=np.number)
# set-difference between the original and numeric columns
excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
df_excluded = df[excluded_cnames]
# prepare normalizer and normalize
scaler = RobustScaler()
res_ndarray = scaler.fit_transform(df_numeric)
df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)
    # Reset the indices (so that they match)
    df_excluded = df_excluded.reset_index(drop=True)
    df_numeric = df_numeric.reset_index(drop=True)
# Add the excluded columns back
df_norm = df_excluded.join(df_numeric)
# Restore the original oder of columns
df_norm = df_norm[colnames_original_order]
return df_norm | 0727ce390e773405a221c6fb3248ddd5d40445b2 | 2,532 |
import math
def meanStdDev( valueList, scale ):
"""Compute the mean and standard deviation of a *non-empty* list of numbers."""
numElements = len(valueList)
if numElements == 0:
return(None, 0.0)
mean = float(sum(valueList)) / numElements
variance = 0
for value in valueList:
variance += math.pow( value - mean, 2 )
variance = variance / float(numElements)
return (scale * mean, scale * math.sqrt(variance)) | 2970ae1e4382092eb67219373aa26b9ca75226a3 | 2,533 |
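# Example: population mean and standard deviation of a small sample, with unit scale:
print(meanStdDev([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0], 1.0))  # -> (5.0, 2.0)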
def audience_filter(digest, audience):
"""Check whether the current audience level should include that digest."""
return get_split(
digest,
[
{
"key": "audience_{}".format(idx),
"size": 1.0
} for idx in range(0, 100)
]
) < audience | 811e4e94e68901bfeaedabfec5e16a30de55408c | 2,534 |
def request_specific_data2num(batch_data):
"""
    Input: next_batch_requestable request_specific_data[slot].
    Converts the data into a type that TensorFlow can process.
    :param batch_data: one batch of training data
    :return: arrays ready to be fed directly into the request-specific tracker model
"""
batchsize_request = len(batch_data)
x_usr = np.zeros((batchsize_request, max_length, embedding_dim))
x_usr_len = np.zeros((batchsize_request), dtype='int32')
x_slot = np.zeros((batchsize_request, embedding_dim))
for batch_id, data in enumerate(batch_data):
for word_id, word in enumerate(data[1]):
if word in vocab_dict:
x_usr[batch_id, word_id, :] = embedding_table[word]
else:
x_usr[batch_id, word_id, :] = embedding_table['unk']
x_usr_len[batch_id] = len(data[1])
x_slot[batch_id, :] = embedding_table[data[2]]
return x_usr, x_usr_len, x_slot | e8a5b414f00e43755719dfadc0b089177cb67152 | 2,535 |
def points_from_x0y0x1y1(xyxy):
"""
Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1]
"""
[x0, y0, x1, y1] = xyxy
return "%s,%s %s,%s %s,%s %s,%s" % (
x0, y0,
x1, y0,
x1, y1,
x0, y1
) | 8a7d766145dc31e6619b290b8d96a95983f9cc01 | 2,536 |
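# Example: a 2x3 axis-aligned rectangle given as [x0, y0, x1, y1]:
print(points_from_x0y0x1y1([0, 0, 2, 3]))  # -> "0,0 2,0 2,3 0,3"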
def get_columns(invoice_list, additional_table_columns):
"""return columns based on filters"""
columns = [
_("Invoice") + ":Link/Sales Invoice:120", _("Posting Date") + ":Date:80", _("Status") + "::80",
_("Customer") + ":Link/Customer:120", _("Sales Person") + ":Link/Sales Person:100",
_("AR Status") + "::75", _("Territory") + ":Link/Territory:100",
_("SKU") + ":Link/Item:100", _("Qty") + ":Float:50", _("Price List") + ":Currency/currency:120",
_("Discount") + ":Currency/currency:120", _("Net Price") + ":Currency/currency:120",
_("Amount") + ":Currency/currency:120"
]
columns = columns + [_("Outstanding Amount") + ":Currency/currency:120"]
return columns | c9849e62d401ec5cc8de52d266a39eccf4b4dbe8 | 2,537 |
def one_norm(a):
"""
Return the one-norm of the matrix.
References:
[0] https://www.mathworks.com/help/dsp/ref/matrix1norm.html
Arguments:
a :: ndarray(N x N) - The matrix to compute the one norm of.
Returns:
one_norm_a :: float - The one norm of a.
"""
return anp.max(anp.sum(anp.abs(a), axis=0)) | c3e1c83d3776dda8ffa82b9b36d26866f390f6cc | 2,538 |
def remove_nan_inf(df, reindex=True):
"""
Removes all rows that have NaN, inf or -inf as a value, and then optionally
reindexes the dataframe.
Parameters
----------
df : pd.DataFrame
Dataframe to remove NaNs and Infs from.
reindex : bool, optional
Reindex the dataframe so that there are no missing indices.
Returns
-------
df : pd.DataFrame
Dataframe with all the NaNs and Infs removed.
"""
df = df.replace([np.inf, -np.inf], np.nan).dropna()
if reindex is True:
df = df.reset_index(drop=True)
return df | 3b9339f2ee1315eac458925e5be5279e147d5c7d | 2,539 |
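# Usage sketch (numpy and pandas are assumed to be imported in the original module):
import numpy as np
import pandas as pd
df = pd.DataFrame({"a": [1.0, np.nan, 3.0, np.inf], "b": [4.0, 5.0, 6.0, 7.0]})
print(remove_nan_inf(df))  # rows 1 and 3 are dropped and the index is rebuilt as 0, 1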
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
"""Return the contingency table for all regions in matched segmentations.
Parameters
----------
seg : np.ndarray, int type, arbitrary shape
A candidate segmentation.
gt : np.ndarray, int type, same shape as `seg`
The ground truth segmentation.
ignore_seg : list of int, optional
Values to ignore in `seg`. Voxels in `seg` having a value in this list
will not contribute to the contingency table. (default: [0])
ignore_gt : list of int, optional
Values to ignore in `gt`. Voxels in `gt` having a value in this list
will not contribute to the contingency table. (default: [0])
norm : bool, optional
Whether to normalize the table so that it sums to 1.
Returns
-------
cont : scipy.sparse.csc_matrix
A contingency table. `cont[i, j]` will equal the number of voxels
labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such voxels
if `norm=True`.)
"""
segr = seg.ravel()
gtr = gt.ravel()
ij = np.vstack((segr, gtr))
    selector = np.ones(segr.shape, bool)
data = np.ones(len(gtr))
for i in ignore_seg:
selector[segr == i] = 0
for j in ignore_gt:
selector[gtr == j] = 0
ij = ij[:, selector]
data = data[selector]
cont = sparse.coo_matrix((data, ij)).tocsc()
if norm:
cont /= float(cont.sum())
return cont | 47284bb5aaa492b6cbc50794c8ccd8a1e63676b4 | 2,540 |
def get_basic_track_info(track):
"""
Given a track object, return a dictionary of track name, artist name,
album name, track uri, and track id.
"""
# Remember that artist and album artist have different entries in the
# spotify track object.
name = track["name"]
artist = track['artists'][0]['name']
album = track['album']['name']
uri = track["uri"]
track_id = track['id']
output = {"name": name, "artist": artist, "album": album, "uri": uri,
"id": track_id}
return output | 925f7bb00482e946ad7a6853bac8b243d24145c7 | 2,541 |
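# Sketch with a hand-built dict carrying only the fields read above; a real
# Spotify track object has many more keys.
_track = {"name": "Song", "artists": [{"name": "Artist"}],
          "album": {"name": "Album"}, "uri": "spotify:track:xyz", "id": "xyz"}
print(get_basic_track_info(_track)["artist"])  # -> Artist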
def demander_nombre(mini: int = None, maxi: int = None) -> int:
"""
    Ask the user for a number between mini and maxi.
    :param mini: the minimum
    :param maxi: the maximum
    :return: the number entered by the user
"""
message = 'Veuillez rentrer un nombre:'
if mini is not None and maxi is not None:
message = f'Veuillez rentrer un nombre entre {mini} et {maxi}:'
elif mini is not None and maxi is None:
message = f'Veuillez rentrer un nombre supérieur à {mini}:'
while True:
nombre = input(message + '\n> ')
        # Make sure the user actually entered a number
try:
            # Convert it to a base-10 integer
nombre = int(nombre)
except ValueError:
print('Valeur incorrecte.')
continue
        # The value is now an integer. Check that it falls within the min/max bounds
if mini is not None and nombre < mini:
print(f'Le nombre entré est trop petit. Il doit valoir au moins {mini}')
elif maxi is not None and nombre > maxi:
print(f'Le nombre entré est trop grand. Il doit valoir au maximum {maxi}')
else:
return nombre | ac5b949af1ad4ede2f956c7da5d4c2136dc47b15 | 2,542 |
from typing import OrderedDict
import yaml
def ordered_load(stream, merge_duplicate_keys=False):
"""
Parse the first YAML document in a stream and produce the corresponding
Python object, using OrderedDicts instead of dicts.
If merge_duplicate_keys is True, merge the values of duplicate mapping keys
into a list, as the uWSGI "dumb" YAML parser would do.
Otherwise, following YAML 1.2 specification which says that "each key is
    unique in the association", raise a ConstructorError exception.
"""
def construct_mapping(loader, node, deep=False):
loader.flatten_mapping(node)
mapping = OrderedDict()
merged_duplicate = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
if not merge_duplicate_keys:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found duplicated key (%s)" % key, key_node.start_mark)
log.debug("Merging values for duplicate key '%s' into a list", key)
if merged_duplicate.get(key):
mapping[key].append(value)
else:
mapping[key] = [mapping[key], value]
merged_duplicate[key] = True
else:
mapping[key] = value
return mapping
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
OrderedLoader.add_constructor('!include', OrderedLoader.include)
return yaml.load(stream, OrderedLoader) | d863d1026e1a8b1048668a6e9e95e732fc361ffa | 2,543 |
from datetime import datetime
def temporal_filter(record_date_time, time_or_period, op):
"""
Helper function to perform temporal filters on feature set
:param record_date_time: datetime field value of a feature
:type record_date_time: :class:`datetime.datetime`
:param time_or_period: the time instant or time span to use as a filter
:type time_or_period: :class:`datetime.datetime` or a tuple of two
datetimes or a tuple of one datetime and one
:class:`datetime.timedelta`
:param op: the comparison operation
:type op: str
:return: a comparison expression result
:rtype: bool
"""
d = datetime.strptime(record_date_time, "%Y-%m-%dT%H:%M:%SZ")
result = None
# perform before and after operations
if op in ['BEFORE', 'AFTER']:
query_date_time = datetime.strptime(
time_or_period.value, "%Y-%m-%dT%H:%M:%SZ")
if op == 'BEFORE':
return d <= query_date_time
elif op == 'AFTER':
return d >= query_date_time
# perform during operation
elif 'DURING' in op:
low, high = time_or_period
low = datetime.strptime(low.value, "%Y-%m-%dT%H:%M:%SZ")
high = datetime.strptime(high.value, "%Y-%m-%dT%H:%M:%SZ")
result = d >= low and d <= high
if 'BEFORE' in op:
result = d <= high
elif 'AFTER' in op:
result = d >= low
return result | 9f76d6a6eb96da9359c4bbb80f6cfb1dfdcb4159 | 2,544 |
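# Usage sketch: in the original this is called with filter-AST nodes that expose a
# `.value` attribute; SimpleNamespace is used here only to mimic that interface.
from types import SimpleNamespace
instant = SimpleNamespace(value="2020-01-01T00:00:00Z")
print(temporal_filter("2019-06-01T00:00:00Z", instant, "BEFORE"))  # -> True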
from typing import Union
from pathlib import Path
import os
import shutil
def package_conda_env(folder: Union[str, Path]) -> Path:
"""Creates a .rar file of the current conda environment for use in jobs.
For efficiency, existing tarred env are not updated.
Parameter
---------
folder: str/Path
folder where the environment must be dumped
Returns
-------
Path
        Path of the created .tar file
"""
# TODO(lowik): could be faster to create tar locally, then copy it
folder = Path(folder).expanduser().absolute()
env_key = "CONDA_DEFAULT_ENV"
if env_key not in os.environ:
raise RuntimeError(
"This executor requires to be executed from a conda environment. Check out README for help."
)
name = os.environ[env_key]
env_path = Path(os.environ["CONDA_PREFIX"])
_check_python_inside(env_path)
tarred_env = (folder / name).with_suffix(".tar")
if tarred_env.exists():
os.remove(str(tarred_env))
output = shutil.make_archive(str(tarred_env.with_suffix("")), "tar", str(env_path))
return Path(output) | 6f711ef5fed7c81b87fee0b82d19f8ca660b8960 | 2,545 |
import os
def get_couchbase_superuser_password(manager, plaintext: bool = True) -> str:
"""Get Couchbase superuser's password from file (default to
``/etc/gluu/conf/couchbase_superuser_password``).
To change the location, simply pass ``GLUU_COUCHBASE_SUPERUSER_PASSWORD_FILE`` environment variable.
:params manager: An instance of :class:`~pygluu.containerlib.manager._Manager`.
:params plaintext: Whether to return plaintext or encoded password.
:returns: Plaintext or encoded password.
"""
password_file = os.environ.get(
"GLUU_COUCHBASE_SUPERUSER_PASSWORD_FILE", "/etc/gluu/conf/couchbase_superuser_password"
)
with open(password_file) as f:
password = f.read().strip()
if not plaintext:
password = encode_text(password, manager.secret.get("encoded_salt")).decode()
return password | 4103733c39c8ac63c022179a19ea988093380604 | 2,546 |
import sys
import os
def GetAvailableDialogs():
"""Returns available dialogs in a list"""
list_path = sys.path
found = 0
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/dialogs"):
found = 1
break
if found == 0:
print ("Could not find /dialogs directory!")
raise IOError
return None
list_dlg = os.listdir(list_path[i]+"/dialogs")
remove = []
for i in range(len(list_dlg)):
list_dlg[i] = "/dialogs/"+list_dlg[i]
if not list_dlg[i].endswith(".csv") and not list_dlg[i].endswith(".dlg"):
remove.append(i)
    ## remove files that are neither .csv nor .dlg
remove.reverse()
for i in remove:
list_dlg.pop(i)
return list_dlg | e5e0b5aeb8ff04b0c640527d8273fc443e046270 | 2,547 |
def convert_rgb2gray(image, convert_dic):
"""convert rgb image to grayscale
Parameters
----------
image: array
RGB image. Channel order should be RGB.
convert_dic: dict
dictionary key is str(rgb list), value is grayscale value
Returns
-------
image_gray: array
Grayscale image
"""
image_r = image[:, :, 0]
image_g = image[:, :, 1]
image_b = image[:, :, 2]
im_shape = image_r.shape
image_gray = np.zeros(im_shape)
for i in range(im_shape[0]):
for j in range(im_shape[1]):
image_gray[i, j] = convert_dic[str([image_r[i, j], image_g[i, j], image_b[i, j]])]
return image_gray | 0132719ef88d139d1d3da4e52312faef25443efd | 2,548 |
def get_external_storage_path():
"""Returns the external storage path for the current app."""
return _external_storage_path | a33704c5b3267a7211c94b5a3a8d8d73b3889d68 | 2,549 |
def blur(old_img):
"""
:param old_img: a original image
:return: a blurred image
"""
blur_img = SimpleImage.blank(old_img.width, old_img.height)
for x in range(old_img.width):
for y in range(old_img.height):
if x == 0 and y == 0: # Upper left corner
old_pixel_00 = old_img.get_pixel(x, y) # Reference point
old_pixel_s = old_img.get_pixel(x, y + 1) # South
old_pixel_e = old_img.get_pixel(x + 1, y) # East
old_pixel_se = old_img.get_pixel(x + 1, y + 1) # Southeast
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_00.red + old_pixel_s.red + old_pixel_e.red + old_pixel_se.red) // 4
blur_pixel.green = (old_pixel_00.green + old_pixel_s.green + old_pixel_e.green + old_pixel_se.green) \
// 4
blur_pixel.blue = (old_pixel_00.blue + old_pixel_s.blue + old_pixel_e.blue + old_pixel_se.blue) // 4
elif x == 0 and y == old_img.height - 1: # Bottom left
old_pixel_0h = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1) # North
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1) # Northeast
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_0h.red + old_pixel_n.red + old_pixel_e.red + old_pixel_ne.red) // 4
blur_pixel.green = (old_pixel_0h.green + old_pixel_n.green + old_pixel_e.green + old_pixel_ne.green) \
// 4
blur_pixel.blue = (old_pixel_0h.blue + old_pixel_n.blue + old_pixel_e.blue + old_pixel_ne.blue) // 4
elif x == old_img.width - 1 and y == 0: # Upper right corner
old_pixel_w0 = old_img.get_pixel(x, y)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_w = old_img.get_pixel(x - 1, y) # West
old_pixel_sw = old_img.get_pixel(x - 1, y + 1) # Southwest
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_w0.red + old_pixel_s.red + old_pixel_w.red + old_pixel_sw.red) // 4
blur_pixel.green = (old_pixel_w0.green + old_pixel_s.green + old_pixel_w.green + old_pixel_sw.green) \
// 4
blur_pixel.blue = (old_pixel_w0.blue + old_pixel_s.blue + old_pixel_w.blue + old_pixel_sw.blue) // 4
elif x == old_img.width - 1 and y == old_img.height - 1: # Bottom right corner
old_pixel_wh = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1) # Northwest
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_wh.red + old_pixel_n.red + old_pixel_w.red + old_pixel_nw.red) // 4
blur_pixel.green = (old_pixel_wh.green + old_pixel_n.green + old_pixel_w.green + old_pixel_nw.green) \
// 4
blur_pixel.blue = (old_pixel_wh.blue + old_pixel_n.blue + old_pixel_w.blue + old_pixel_nw.blue) // 4
elif x == 0 and y != 0 and y != old_img.height - 1: # Left side except for head and tail
old_pixel_0y = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_0y.red + old_pixel_n.red + old_pixel_s.red + old_pixel_ne.red +
old_pixel_e.red + old_pixel_se.red) // 6
blur_pixel.green = (old_pixel_0y.green + old_pixel_n.green + old_pixel_s.green + old_pixel_ne.green +
old_pixel_e.green + old_pixel_se.green) // 6
blur_pixel.blue = (old_pixel_0y.blue + old_pixel_n.blue + old_pixel_s.blue + old_pixel_ne.blue +
old_pixel_e.blue + old_pixel_se.blue) // 6
elif y == 0 and x != 0 and x != old_img.width - 1: # Top except for head and tail
old_pixel_x0 = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_x0.red + old_pixel_w.red + old_pixel_s.red + old_pixel_sw.red +
old_pixel_e.red + old_pixel_se.red) // 6
blur_pixel.green = (old_pixel_x0.green + old_pixel_w.green + old_pixel_s.green + old_pixel_sw.green +
old_pixel_e.green + old_pixel_se.green) // 6
blur_pixel.blue = (old_pixel_x0.blue + old_pixel_w.blue + old_pixel_s.blue + old_pixel_sw.blue +
old_pixel_e.blue + old_pixel_se.blue) // 6
elif x == old_img.width - 1 and y != 0 and y != old_img.height - 1: # right side except for head and tail
old_pixel_wy = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_wy.red + old_pixel_n.red + old_pixel_s.red + old_pixel_nw.red +
old_pixel_w.red + old_pixel_sw.red) // 6
blur_pixel.green = (old_pixel_wy.green + old_pixel_n.green + old_pixel_s.green + old_pixel_nw.green +
old_pixel_w.green + old_pixel_sw.green) // 6
blur_pixel.blue = (old_pixel_wy.blue + old_pixel_n.blue + old_pixel_s.blue + old_pixel_nw.blue +
old_pixel_w.blue + old_pixel_sw.blue) // 6
elif y == old_img.height - 1 and x != 0 and x != old_img.width - 1: # Bottom except for head and tail
old_pixel_xh = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_xh.red + old_pixel_w.red + old_pixel_nw.red + old_pixel_n.red +
old_pixel_e.red + old_pixel_ne.red) // 6
blur_pixel.green = (old_pixel_xh.green + old_pixel_w.green + old_pixel_nw.green + old_pixel_n.green +
old_pixel_e.green + old_pixel_ne.green) // 6
blur_pixel.blue = (old_pixel_xh.blue + old_pixel_w.blue + old_pixel_nw.blue + old_pixel_n.blue +
old_pixel_e.blue + old_pixel_ne.blue) // 6
else: # middle parts having 8 neighbors
old_pixel_xy = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_xy.red + old_pixel_w.red + old_pixel_nw.red + old_pixel_n.red +
old_pixel_e.red + old_pixel_ne.red + old_pixel_s.red + old_pixel_sw.red +
old_pixel_se.red) // 9
blur_pixel.green = (old_pixel_xy.green + old_pixel_w.green + old_pixel_nw.green + old_pixel_n.green +
old_pixel_e.green + old_pixel_ne.green + old_pixel_s.green + old_pixel_sw.green +
old_pixel_se.green) // 9
blur_pixel.blue = (old_pixel_xy.blue + old_pixel_w.blue + old_pixel_nw.blue + old_pixel_n.blue +
old_pixel_e.blue + old_pixel_ne.blue + old_pixel_s.blue + old_pixel_sw.blue +
old_pixel_se.blue) // 9
return blur_img | 771a6e906ea8b485d4166de311c17a441f469158 | 2,550 |
import re
def generate_table_row(log_file, ancestry, official_only, code):
""" Takes an imported log and ancestry and converts it into a properly formatted pandas table.
Keyword arguments:
log_file -- output from import_log()
ancestry -- a single ancestry code
official_only -- a boolean indicating if all fields should be imported
into the table, or only the official ones.
Returns:
dict of arguments: new values
"""
# verify that ancestry is correct
matches = [l for l in log_file if re.search('Searching for ancestry: ' + \
ancestry, l)]
if len(matches) == 0:
raise ValueError('ALERT: Incorrect ancestry passed in for code ' + code +
'. Passed in value: ' + ancestry)
dict_of_vals = {'ancestry': ancestry, 'phenotype_code': code}
nrow_orig = num_cols = None
for line in log_file:
nrow_orig = _parse_single_term(nrow_orig, 'Original number of rows: ([0-9]*)',
line, int)
num_cols = _parse_single_term(num_cols, 'Found ([0-9]*) ancestry specific columns:',
line, int)
dict_of_vals.update({'original_nrow': nrow_orig,
'ancestry_specific_ncols': num_cols})
if dict_of_vals['ancestry_specific_ncols'] != 0:
tf_boundary = [idx for idx, l in enumerate(log_file) if re.search('Now running LDSC in (vanilla|stratified) mode.',l)]
log_file_official = log_file[(tf_boundary[0]+1):(len(log_file)+1)]
log_file_unofficial = log_file[0:tf_boundary[0]]
if not official_only:
unofficial_dict = _parse_unofficial_log(log_file_unofficial)
dict_of_vals.update(unofficial_dict)
official_dict, error_str = _parse_official_log(log_file_official)
else:
if not official_only:
unofficial_dict = _parse_unofficial_log(log_file)
dict_of_vals.update(unofficial_dict)
official_dict, _ = _parse_official_log(log_file)
error_str = 'No ' + ancestry + '-specific columns found.'
dict_of_vals.update(official_dict)
if error_str is not None:
dict_of_vals.update({'missing_data_note': error_str})
return pd.DataFrame(dict_of_vals, index=[ancestry + ':' + code]) | c72bdef2aafbc15c54b80337c80f03ae8d8f1e00 | 2,551 |
def read_preflib_file(filename, setsize=1, relative_setsize=None, use_weights=False):
"""Reads a single preflib file (soi, toi, soc or toc).
Parameters:
filename: str
Name of the preflib file.
setsize: int
Number of top-ranked candidates that voters approve.
In case of ties, more than `setsize` candidates are approved.
        Parameter `setsize` is ignored if `relative_setsize` is used.
relative_setsize: float in (0, 1]
Indicates which proportion of candidates of the ranking
are approved (rounded up). In case of ties, more
candidates are approved.
E.g., if a voter has 10 approved candidates and `relative_setsize` is 0.75,
then the approval set contains the top 8 candidates.
use_weights: bool
If False, treat vote count in preflib file as the number of duplicate ballots,
i.e., the number of voters that have this approval set.
If True, treat vote count as weight and use this weight in class Voter.
Returns:
profile: abcvoting.preferences.Profile
Preference profile extracted from preflib file,
including names of candidates
"""
if setsize <= 0:
raise ValueError("Parameter setsize must be > 0")
if relative_setsize and (relative_setsize <= 0.0 or relative_setsize > 1.0):
raise ValueError("Parameter relative_setsize not in interval (0, 1]")
with open(filename, "r") as f:
line = f.readline()
num_cand = int(line.strip())
candidate_map = {}
for _ in range(num_cand):
parts = f.readline().strip().split(",")
candidate_map[int(parts[0].strip())] = ",".join(parts[1:]).strip()
parts = f.readline().split(",")
try:
voter_count, _, unique_orders = [int(p.strip()) for p in parts]
except ValueError:
raise PreflibException(
f"Number of voters ill specified ({str(parts)}), should be triple of integers"
)
approval_sets = []
lines = [line.strip() for line in f.readlines() if line.strip()]
if len(lines) != unique_orders:
raise PreflibException(
f"Expected {unique_orders} lines that specify voters in the input, "
f"encountered {len(lines)}"
)
for line in lines:
parts = line.split(",")
if len(parts) < 1:
continue
try:
count = int(parts[0])
except ValueError:
raise PreflibException(f"Each ranking must start with count/weight ({line})")
ranking = parts[1:] # ranking starts after count
if len(ranking) == 0:
raise PreflibException("Empty ranking: " + str(line))
if relative_setsize:
num_appr = int(ceil(len(ranking) * relative_setsize))
else:
num_appr = setsize
approval_set = _approval_set_from_preflib_datastructures(num_appr, ranking, candidate_map)
approval_sets.append((count, approval_set))
# normalize candidates to 0, 1, 2, ...
cand_names = []
normalize_map = {}
for cand in candidate_map.keys():
cand_names.append(candidate_map[cand])
normalize_map[cand] = len(cand_names) - 1
profile = Profile(num_cand, cand_names=cand_names)
for count, approval_set in approval_sets:
normalized_approval_set = []
for cand in approval_set:
normalized_approval_set.append(normalize_map[cand])
if use_weights:
profile.add_voter(Voter(normalized_approval_set, weight=count))
else:
profile.add_voters([normalized_approval_set] * count)
if use_weights:
if len(profile) != unique_orders:
raise PreflibException("Number of voters wrongly specified in preflib file.")
else:
if len(profile) != voter_count:
raise PreflibException("Number of voters wrongly specified in preflib file.")
return profile | 6feec6e786e47cdc11021021ffa91a1f96597567 | 2,552 |
def get_row(client, instance, file_=None):
"""Get one row of a family table.
Args:
client (obj):
creopyson Client.
instance (str):
Instance name.
`file_` (str, optional):
File name. Defaults is currently active model.
Returns:
(dict):
colid (str):
Column ID.
value (depends on datatype):
Cell value.
datatype (str):
Data type.
coltype (str):
Column Type; a string corresponding to the Creo column type.
"""
data = {"instance": instance}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
return client._creoson_post("familytable", "get_row", data, "columns") | c8e8c90a81d553d06ce9f78eb1be582e5b034ac6 | 2,553 |
def hospitalization_to_removed(clip_low=2, clip_high=32.6, mean=8.6, std=6.7):
"""
Returns the time for someone to either get removed after being
hospitalized in days within range(clip_low, clip_high),
of a truncated_norm(mean, std).
"""
return sample_truncated_norm(clip_low, clip_high, mean, std) | e1da5283e32b5734927436af72fdbd002c0844b1 | 2,554 |
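# The sample_truncated_norm() helper is not defined in this snippet. Below is a
# minimal sketch of what it could look like, assuming scipy is available; it is
# an illustration, not the original implementation.
from scipy.stats import truncnorm
def sample_truncated_norm(clip_low, clip_high, mean, std):
    # truncnorm expects the clip bounds expressed in standard deviations from the mean
    a, b = (clip_low - mean) / std, (clip_high - mean) / std
    return truncnorm.rvs(a, b, loc=mean, scale=std)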
from nose.tools import eq_, ok_
def test_inheritance():
"""
test inheritance from different module
"""
# test module
test_data = doc.MatObject.matlabify('test_data')
test_submodule = test_data.getter('test_submodule')
sfdm = test_submodule.getter('super_from_diff_mod')
ok_(isinstance(sfdm, doc.MatClass))
eq_(sfdm.bases,['MyAbstractClass', 'MyHandleClass'])
bases = sfdm.getter('__bases__')
eq_(bases['MyAbstractClass'].module, 'test_data')
eq_(bases['MyHandleClass'].module, 'test_data')
return sfdm | 0f29de2ef67318010feed25ea0ffc08e2dc44162 | 2,555 |
from collections import Counter
def split_mon_unmon(data, labels):
"""
Splits into monitored and unmonitored data
If a data point only happens once, we also consider it unmonitored
@return monitored_data, monitored_label, unmonitored_data
"""
occurence = Counter(labels)
monitored_data, unmonitored_data = [], []
monitored_label = []
for d, l in zip(data, labels):
if l == UNKNOWN_WEBPAGE or occurence[l] == 1:
unmonitored_data.append(d)
else:
monitored_data.append(d)
monitored_label.append(l)
return monitored_data, monitored_label, unmonitored_data | b1d0cac2e12f4386bf04eb355811f230b18f38ca | 2,556 |
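# Example usage of split_mon_unmon(); UNKNOWN_WEBPAGE is assumed to be the
# module-level sentinel label for unmonitored traces (taken here as -1).
# With UNKNOWN_WEBPAGE = -1:
#   data = [[1, 2], [3, 4], [5, 6], [7, 8]]
#   labels = [10, 10, -1, 42]          # 42 occurs only once -> unmonitored
#   split_mon_unmon(data, labels)
#   -> ([[1, 2], [3, 4]], [10, 10], [[5, 6], [7, 8]])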
import contextlib
import os
def supress_stdout(func):
"""Wrapper, makes a function non-verbose.
Args:
func: function to be silenced
"""
def wrapper(*a, **ka):
with open(os.devnull, "w") as devnull:
with contextlib.redirect_stdout(devnull):
func(*a, **ka)
return wrapper | a617f776df873086c3033416f6ce7bc783fd640b | 2,557 |
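# Example: silencing a chatty function with the decorator above. Note that the
# wrapper discards the wrapped function's return value.
@supress_stdout
def _noisy_demo():
    print("this line never reaches stdout")
# _noisy_demo()  # runs silently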
def sum_and_count(x, y):
"""A function used for calculating the mean of a list from a reduce.
>>> from operator import truediv
>>> l = [15, 18, 2, 36, 12, 78, 5, 6, 9]
>>> truediv(*reduce(sum_and_count, l)) == 20.11111111111111
True
>>> truediv(*fpartial(sum_and_count)(l)) == 20.11111111111111
True
"""
try:
return (x[0] + y, x[1] + 1)
except TypeError:
return ((x or 0) + (y or 0), len([i for i in [x, y] if i is not None])) | d43cc8dc39fb35afae4f6a4e32d34221d525f5d3 | 2,558 |
def animTempCustom():
"""
Temporarily play a custom animation for a set amount of time.
    API should expect a full `desc` object in JSON alongside a time limit, in ms.
"""
colorList = request.form.get('colors').split(',')
colorsString = ""
for colorName in colorList:
c = Color(colorName)
colorsString += "[{},{},{}],".format(
int(255*c.red), int(255*c.green), int(255*c.blue)
)
colorsString = colorsString[0:-1]
print(colorsString)
colors = colorsString
bp.newAnim(
'$bpa.strip.Twinkle',
colors
)
return "Animation animation set to RGB!" | 2d9cea92d7c1c93d73fcf9b325b7b58225b4ba13 | 2,559 |
from unittest.mock import Mock
import pytest
import stripe
@pytest.fixture
def mock_stripe_invoice(monkeypatch):
    """Fixture to monkeypatch stripe.Invoice.* methods"""
mock = Mock()
monkeypatch.setattr(stripe, "Invoice", mock)
return mock | a88923ba6d4a6dda9bf3b2fcda3bb717efe36cee | 2,560 |
from ruamel.yaml import YAML  # assumed source of the round-trip YAML loader used below
def read_project(output_dir):
"""Read existing project data
"""
try:
yaml = YAML()
with open(project_yaml_file(output_dir), encoding='utf-8') as project:
project_data = yaml.load(project)
for key, value in project_data.items():
            if value is None:
project_data[key] = []
except FileNotFoundError:
project_data = {
'name': "Test Project",
'scenario_sets': [],
'narrative_sets': [],
'region_definitions': [],
'interval_definitions': [],
'units': [],
'scenarios': [],
'narratives': []
}
return project_data | 90cfd7d143176925d8a99f4d577bc7de9eb360e2 | 2,561 |
def bytes_to_int(byte_array: bytes) -> int:
""" Bytes to int """
return int.from_bytes(byte_array, byteorder='big') | 442452faeb1a4e7c346b6f4355095f179701f8f1 | 2,562 |
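# Quick check of bytes_to_int(): the byte string is read as a big-endian integer.
assert bytes_to_int(b"\x01\x00") == 256
assert bytes_to_int(b"\xff") == 255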
def ask(query, default=None):
"""Ask a question."""
if default:
default_q = ' [{0}]'.format(default)
else:
default_q = ''
inp = input("{query}{default_q}: ".format(query=query, default_q=default_q)).strip()
if inp or default is None:
return inp
else:
return default | 54564ee00749ddb2e5c409d781552f3ca5fcd847 | 2,563 |
def _clean_kwargs(keep_name=False, **kwargs):
"""
Sanatize the arguments for use with shade
"""
if "name" in kwargs and not keep_name:
kwargs["name_or_id"] = kwargs.pop("name")
return __utils__["args.clean_kwargs"](**kwargs) | 326d849b030a11ebc21e364f6a05eef9ab019637 | 2,564 |
import os
import globus_sdk
def transfer_file(user, source_endpoint, source_path,
dest_endpoint, dest_path, label):
"""
:param user: Must be a Django user with permissions to initiate the
transfer
:param source_endpoint: Source Endpoint UUID
:param source_path: Source path, including the filename
:param dest_endpoint: Destination Endpoint UUID
:param dest_path: Destination path, including the filename
:param label: Label to use for the transfer
:return: A globus SDK task object.
"""
log.debug('transferring {}:{} to {}'.format(source_endpoint, source_path,
dest_endpoint))
tc = load_transfer_client(user)
tdata = globus_sdk.TransferData(tc, source_endpoint, dest_endpoint,
label=label, sync_level="checksum")
tdata.add_item(source_path,
os.path.join(dest_path, os.path.basename(source_path))
)
return tc.submit_transfer(tdata) | cf3fe2031fac2b06766c213c8c6d5cad477320a9 | 2,565 |
def calculate_pnl_per_equity(df_list):
"""Method that calculate the P&L of the strategy per equity and returns a list of P&L"""
pnl_per_equity = [] # initialize the list of P&L per equity
for df in df_list: # iterates over the dataframes of equities
pnl = df['Strategy Equity'].iloc[-1] - df['Buy and Hold Equity'].iloc[-1] # calculating the difference at the last point
pnl_per_equity.append(pnl)
return pnl_per_equity | 4f6ac1b9f6a949215c6b805f05a65897393f3288 | 2,566 |
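# Example usage of calculate_pnl_per_equity() with two toy equity curves.
import pandas as pd
_df_a = pd.DataFrame({'Strategy Equity': [100, 110], 'Buy and Hold Equity': [100, 105]})
_df_b = pd.DataFrame({'Strategy Equity': [100, 95], 'Buy and Hold Equity': [100, 102]})
assert calculate_pnl_per_equity([_df_a, _df_b]) == [5, -7]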
import http  # assumed to be a requests-like wrapper exposing .get(); the stdlib http module has no such API
def search(q_str: str) -> dict:
"""search in genius
Args:
q_str (str): query string
Returns:
dict: search response
"""
data = {'songs': [], 'lyric': []}
response = http.get(
'https://genius.com/api/search/multi?per_page=5', params={'q': q_str}, headers=headers).json()
sections = response['response']['sections']
    if len(sections[1]['hits']) == 0 and len(sections[2]['hits']) == 0:
return False
for section in response['response']['sections'][1:3]:
if section['type'] == 'song':
for song in section['hits']:
music = song['result']
# print(music)
if len(data['songs']) == 0:
data['songs'].append(dict_builder(music))
if data['songs'][-1]['api_path'] != music['api_path']:
data['songs'].append(dict_builder(music))
elif section['type'] == 'lyric':
for lyric in section['hits']:
music = lyric['result']
if len(data['lyric']) == 0:
data['lyric'].append(dict_builder(music))
if data['lyric'][-1]['api_path'] != music['api_path']:
                    data['lyric'].append(dict_builder(music))
return data | 7421220e43415fb17b29db26f1fc6902e88144a4 | 2,567 |
def build_shed_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary. Also setup "global" reference
to sqlalchemy database context for tool shed database.
"""
log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
# TODO: Simplify global_conf to match Galaxy above...
simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication(**simple_kwargs)
log.info("Embedded Toolshed application started")
global tool_shed_context
tool_shed_context = app.model.context
return app | 4443d24f4f4341baa3aea22d6f30c6b38eb8af72 | 2,568 |
def geocode(level=None, names=None, countries=None, states=None, counties=None, scope=None) -> NamesGeocoder:
"""
Create a `Geocoder`. Allows to refine ambiguous request with `where()` method,
scope that limits area of geocoding or with parents.
Parameters
----------
level : {'country', 'state', 'county', 'city'}
The level of administrative division. Autodetection by default.
names : list or str
Names of objects to be geocoded.
For 'state' level: 'US-48' returns continental part of United States (48 states)
in a compact form.
countries : list
Parent countries. Should have same size as names. Can contain strings or `Geocoder` objects.
states : list
Parent states. Should have same size as names. Can contain strings or `Geocoder` objects.
counties : list
Parent counties. Should have same size as names. Can contain strings or `Geocoder` objects.
scope : str or `Geocoder`
Limits area of geocoding. If parent country is set then error will be generated.
If type is a string - geoobject should have geocoded scope in parents.
If type is a `Geocoder` - geoobject should have geocoded scope in parents.
Scope should contain only one entry.
Returns
-------
`NamesGeocoder`
Geocoder object specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 5
from IPython.display import display
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
states = geocode('state').scope('Italy').get_boundaries(6)
display(states.head())
ggplot() + geom_map(data=states)
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 5, 8
from IPython.display import display
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
states = geocode(level='state', scope='US').get_geocodes()
display(states.head())
names = ['York'] * len(states.state)
cities = geocode(names=names, states=states.state).ignore_not_found().get_centroids()
display(cities.head())
ggplot() + \\
geom_livemap() + \\
geom_point(data=cities, tooltips=layer_tooltips().line('@{found name}'))
"""
return NamesGeocoder(level, names) \
.scope(scope) \
.countries(countries) \
.states(states) \
.counties(counties) | 25ab4ff7887d09a41c19b6ec8ee9057151483b2e | 2,569 |
def fpAbs(x):
"""
Returns the absolute value of the floating point `x`. So:
a = FPV(-3.2, FSORT_DOUBLE)
b = fpAbs(a)
b is FPV(3.2, FSORT_DOUBLE)
"""
return abs(x) | d69f5f07b651ed4466ff768601c77f90232b8827 | 2,570 |
from io import StringIO
import json
def volumes(container:str) -> list:
"""
Return list of 'container' volumes (host,cont)
"""
buf = StringIO()
_exec(
docker, 'inspect', '-f', "'{{json .Mounts}}'", container, _out=buf
)
res = buf.getvalue().strip()
vols_list = json.loads(res[1:-1])
# vols = {d['Source']:d['Destination'] for d in vols_list}
vols = [(d['Source'],d['Destination']) for d in vols_list]
return vols | 5191df9ab4aa58a80fba90872da6091bc58f8be2 | 2,571 |
def names():
"""Return stock summary information"""
helper = SQLHelper()
conn = helper.getConnection()
repo = robinhoodRepository(conn)
stockInfo = repo.getAllStocks()
return json_response(stockInfo, 200) | d543ab5254e95e903e8b74db1ab5b0266859b083 | 2,572 |
def get_bot_group_config(bot_id):
"""Returns BotGroupConfig for a bot with given ID.
Returns:
BotGroupConfig or None if not found.
Raises:
BadConfigError if there's no cached config and the current config at HEAD is
not passing validation.
"""
cfg = _fetch_bot_groups()
gr = cfg.direct_matches.get(bot_id)
if gr is not None:
return gr
for prefix, gr in cfg.prefix_matches:
if bot_id.startswith(prefix):
return gr
return cfg.default_group | 025b2a9a91f2a744668fd6c438db0f5c4edd0a98 | 2,573 |
def add_utm(url_, campaign, source='notification', medium='email'):
"""Add the utm_* tracking parameters to a URL."""
return urlparams(
url_, utm_campaign=campaign, utm_source=source, utm_medium=medium) | d428daf58db7b0b5d5dabfd4bac6f70e900bd311 | 2,574 |
import networkx as nx
def is_forest(G):
"""Return True if the input graph is a forest
Parameters
----------
G : NetworkX Graph
An undirected graph.
Returns
-------
True if the input graph is a forest
Notes
-----
For undirected graphs only.
"""
for graph in nx.connected_component_subgraphs(G):
if not nx.is_tree(graph):
return False
return True | 6aade3d2407b8af1cd8662b9efdc604d304341fe | 2,575 |
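# Example (assumes an older networkx release that still provides
# connected_component_subgraphs, which is_forest() relies on):
#   G = nx.Graph([(0, 1), (1, 2), (3, 4)])   # two disjoint trees -> a forest
#   is_forest(G)       # True
#   G.add_edge(2, 0)   # introduces a cycle
#   is_forest(G)       # False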
def package_to_pretty_string(package):
""" Given a PackageMetadata instance, returns a pretty string."""
template = "{0.name} {0.version}"
constraint_kinds = (
(ConstraintKinds.install_requires, package.install_requires),
(ConstraintKinds.conflicts, package.conflicts),
(ConstraintKinds.provides, package.provides),
)
for constraint_kind, constraints in constraint_kinds:
# FIXME: perhaps 'provides' just shouldn't include the package name
if constraint_kind == ConstraintKinds.provides:
constraints = tuple((dist, disjunction)
for dist, disjunction in constraints
if dist != package.name)
if len(constraints) > 0:
string = ', '.join(constraints_to_pretty_strings(constraints))
template += "; {} ({})".format(constraint_kind.value, string)
return template.format(package) | e3c2432560309057996dcef2296b18572df82a35 | 2,576 |
from urllib.parse import parse_qs
def parse_uri(uri):
""" This implies that we are passed a uri that looks something like:
proto://username:password@hostname:port/database
In most cases, you can omit the port and database from the string:
proto://username:password@hostname
Also, in cases with no username, you can omit that:
proto://:password@hostname:port/database
Also supports additional arguments:
proto://hostname:port/database?arg1=val&arg2=vals
:param str uri: URI to parse
:rtype: dict
:returns: Dictionary with parsed URL components
.. note::
This function may move, as the currently location may not
be optimal. Location will be finalized by 1.0.0 stable release.
"""
proto = uri.split('://')[0]
uri = uri.split('://')[1]
_host = uri.split('@')[-1]
_host = _host.split(':')
if len(_host) == 2:
host = _host[0]
if '/' in _host[1]:
port = int(_host[1].split('/')[0])
else:
port = int(_host[1])
else:
host = _host[0]
if '/' in host:
host = host.split('/')[0]
port = None
if "@" in uri:
_cred = uri[0:uri.rfind(':'.join(_host)) - 1]
_cred = _cred.split(':')
if len(_cred) == 2:
_user = _cred[0]
_pass = _cred[1]
else:
_user = _cred[0]
_pass = None
else:
_user = None
_pass = None
database = uri.split('/')
if len(database) >= 2:
database = database[1]
if '?' in database:
_db = database.split('?')
database = _db[0]
args = parse_qs(_db[1], keep_blank_values = True)
else:
args = None
else:
database = None
args = None
return {
"protocol": proto,
"resource": uri,
"host": host,
"port": port,
"username": _user,
"password": _pass,
"database": database,
"args": args,
"uri": "{}://{}".format(proto, uri),
} | 5204d803a5d0f6995c49883a892bc6b22cef9443 | 2,577 |
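# Example: parsing a full URI with credentials, port, database and query args.
# parse_uri("mysql://user:secret@db.example.com:3306/mydb?charset=utf8") returns:
#   {
#       "protocol": "mysql",
#       "resource": "user:secret@db.example.com:3306/mydb?charset=utf8",
#       "host": "db.example.com",
#       "port": 3306,
#       "username": "user",
#       "password": "secret",
#       "database": "mydb",
#       "args": {"charset": ["utf8"]},
#       "uri": "mysql://user:secret@db.example.com:3306/mydb?charset=utf8",
#   }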
from django.utils.crypto import get_random_string  # assumed source of get_random_string()
def pwgen(pw_len=16):
"""Generate a random password with the given length.
Allowed chars does not have "I" or "O" or letters and
digits that look similar -- just to avoid confusion.
"""
return get_random_string(
pw_len, 'abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789'
) | 3c5a07440a6d3eee7c1bc9162089c434cfe6c45d | 2,578 |
import os
def absolute_path(secured_filename: str, curr_file: str = __file__) -> str:
"""
Prepend `secured_filename` with the current path.
Args:
secured_filename (str): Safe file name. Can be a sub path without the first '/'.
curr_file (str): File name of the module.
Returns:
str: String which contains the full path to ``secured_filename``.
"""
return os.path.join(os.path.dirname(os.path.realpath(curr_file)), secured_filename) | 6378df580550438e727240924ad2092167ca5394 | 2,579 |
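# Example (hypothetical paths): called from /opt/app/config.py,
# absolute_path("settings/prod.yml", __file__) returns "/opt/app/settings/prod.yml".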
import networkx as nx
import numpy as np
def compute_tree_distances(tree):
"""
Computes the matrix of pairwise distances between leaves of the tree
"""
num_leaves = len(get_leaves(tree)) - 1
distances = np.zeros([num_leaves, num_leaves])
for leaf in range(num_leaves):
distance_dictionary, tmp = nx.multi_source_dijkstra(tree.to_undirected(), [leaf], weight = 'time')
for target_leaf in range(num_leaves):
distances[leaf, target_leaf] = distance_dictionary[target_leaf]
return distances | b4bdd81e0f4c8d5577813f6e68ece9f0a8992e19 | 2,580 |
import numpy as np
import pandas as pd
def create_rndm_backgr_selections(annotations, files, length, num, no_overlap=False, trim_table=False):
""" Create background selections of uniform length, randomly distributed across the
data set and not overlapping with any annotations, including those labelled 0.
The random sampling is performed without regard to already created background
selections. Therefore, it is in principle possible that some of the created
selections will overlap, although in practice this will only occur with very
small probability, unless the number of requested selections (num) is very
large and/or the (annotation-free part of) the data set is small in size.
To avoid any overlap, set the 'no_overlap' to True, but note that this can
lead to longer execution times.
Args:
annotations: pandas DataFrame
Annotation table.
files: pandas DataFrame
Table with file durations in seconds.
Should contain columns named 'filename' and 'duration'.
length: float
Selection length in seconds.
num: int
Number of selections to be created.
no_overlap: bool
If True, randomly selected segments will have no overlap.
trim_table: bool
Keep only the columns prescribed by the Ketos annotation format.
Returns:
table_backgr: pandas DataFrame
Output selection table.
Example:
>>> import pandas as pd
>>> import numpy as np
>>> from ketos.data_handling.selection_table import select
>>>
>>> #Ensure reproducible results by fixing the random number generator seed.
>>> np.random.seed(3)
>>>
>>> #Load and inspect the annotations.
>>> df = pd.read_csv("ketos/tests/assets/annot_001.csv")
>>> print(df)
filename start end label
0 file1.wav 7.0 8.1 1
1 file1.wav 8.5 12.5 0
2 file1.wav 13.1 14.0 1
3 file2.wav 2.2 3.1 1
4 file2.wav 5.8 6.8 1
5 file2.wav 9.0 13.0 0
>>>
>>> #Standardize annotation table format
>>> df, label_dict = standardize(df, return_label_dict=True)
>>> print(df)
start end label
filename annot_id
file1.wav 0 7.0 8.1 2
1 8.5 12.5 1
2 13.1 14.0 2
file2.wav 0 2.2 3.1 2
1 5.8 6.8 2
2 9.0 13.0 1
>>>
>>> #Enter file durations into a pandas DataFrame
>>> file_dur = pd.DataFrame({'filename':['file1.wav','file2.wav','file3.wav',], 'duration':[18.,20.,15.]})
>>>
>>> #Create randomly sampled background selection with fixed 3.0-s length.
>>> df_bgr = create_rndm_backgr_selections(df, files=file_dur, length=3.0, num=12, trim_table=True)
>>> print(df_bgr.round(2))
start end label
filename sel_id
file1.wav 0 1.06 4.06 0
1 1.31 4.31 0
2 2.26 5.26 0
file2.wav 0 13.56 16.56 0
1 14.76 17.76 0
2 15.50 18.50 0
3 16.16 19.16 0
file3.wav 0 2.33 5.33 0
1 7.29 10.29 0
2 7.44 10.44 0
3 9.20 12.20 0
4 10.94 13.94 0
"""
# compute lengths, and discard segments shorter than requested length
c = files[['filename','duration']]
if 'offset' in files.columns.names: c['offset'] = files['offset']
else: c['offset'] = 0
c.reset_index(drop=True, inplace=True)
c['length'] = c['duration'] - length
c = c[c['length'] >= 0]
# cumulative length
cs = c['length'].cumsum().values.astype(float)
cs = np.concatenate(([0],cs))
# output
filename, start, end = [], [], []
    # randomly sample
df = pd.DataFrame()
while (len(df) < num):
times = np.random.random_sample(num) * cs[-1]
for t in times:
idx = np.argmax(t < cs) - 1
row = c.iloc[idx]
fname = row['filename']
start = t - cs[idx] + row['offset']
end = start + length
q = query(annotations, filename=fname, start=start, end=end)
if len(q) > 0: continue
if no_overlap and len(df) > 0:
q = query(df.set_index(df.filename), filename=fname, start=start, end=end)
if len(q) > 0: continue
x = {'start':start, 'end':end}
y = files[files['filename']==fname].iloc[0].to_dict()
z = {**x, **y}
df = df.append(z, ignore_index=True)
if len(df) == num: break
# sort by filename and offset
df = df.sort_values(by=['filename','start'], axis=0, ascending=[True,True]).reset_index(drop=True)
# re-order columns
col_names = ['filename','start','end']
if not trim_table:
names = df.columns.values.tolist()
for name in col_names: names.remove(name)
col_names += names
df = df[col_names]
df['label'] = 0 #add label
# transform to multi-indexing
df = use_multi_indexing(df, 'sel_id')
return df | 01eac8bc0a624b56d419ce3cb75744792af1472f | 2,581 |
import methylprep
import pandas as pd
import time
from pathlib import Path
def to_BED(stats, manifest_or_array_type, save=True, filename='', genome_build=None, columns=None):
"""Converts & exports manifest and probe p-value dataframe to BED format.
- https://en.wikipedia.org/wiki/BED_(file_format)
- BED format: [ chromosome number | start position | end position | p-values]
Where p-values are the output from diff_meth_pos() comparing probes across two or more
groups of samples for genomic differences in methylation.
This output is required for combined-pvalues library to read and annotate manhattan plots
with the nearest Gene(s) for each significant CpG cluster.
manifest_or_array_type:
either pass in a Manifest instance from methylprep, or a string that defines which
manifest to load. One of {'27k', '450k', 'epic', 'epic+', 'mouse'}.
genome_build:
pass in 'OLD' to use the older genome build for each respective manifest array type.
note: if manifest has probes that aren't mapped to genome, they are omitted in BED file.
TODO: incorporate STRAND and OLD_STRAND in calculations.
returns a BED formatted dataframe if save is False, or the saved filename if save is True.
"""
array_types = {'27k', '450k', 'epic', 'epic+', 'mouse'}
manifest = None
if isinstance(manifest_or_array_type, str) and manifest_or_array_type not in array_types:
raise ValueError(f"Specify array type as one of: {array_types}")
if isinstance(manifest_or_array_type, str) and manifest_or_array_type in array_types:
manifest = methylprep.Manifest(methylprep.ArrayType(manifest_or_array_type))
if not manifest and hasattr(manifest_or_array_type, 'data_frame'):
manifest = manifest_or_array_type
if not manifest:
raise ValueError("Either provide a manifest or specify array_type.")
if not isinstance(stats, pd.DataFrame):
raise TypeError("stats should be a dataframe with either a PValue or a FDR_QValue column")
if not isinstance(manifest.data_frame, pd.DataFrame):
raise AttributeError("Expected manifest_or_array_type to be a methylprep manifest with a data_frame attribute but this does not have one.")
if "FDR_QValue" in stats:
pval = stats['FDR_QValue']
elif "PValue" in stats:
pval = stats['PValue']
else:
raise IndexError("stats did not contain either a PValue or a FDR_QValue column.")
# an unfinished, internal undocumented way to change the column names, if exactly 5 columns in list provided in same order.
if columns is None:
columns = ['chrom','chromStart','chromEnd','pvalue','name']
renamer = {}
else:
renamer = dict(zip(['chrom','chromStart','chromEnd','pvalue','name'],columns))
pval = pval.rename("pvalue")
genes = manifest_gene_map(manifest, genome_build=genome_build)
# finally, inner join and save/return the combined BED data frame.
BED = pd.merge(genes[['chrom','chromStart','chromEnd']], pval, left_index=True, right_index=True, how='inner')
BED = BED.sort_values(['chrom','chromStart'], ascending=True)
BED = BED.reset_index().rename(columns={'index':'name'})
BED = BED[['chrom','chromStart','chromEnd','pvalue','name']] # order matters, so be explicit
# omit unmapped CpGs
unmapped = len(BED[ BED['chromStart'].isna() ])
BED = BED[ ~BED['chromStart'].isna() ]
if renamer != {}:
BED = BED.rename(columns=renamer)
# cpv / combined-pvalues needs a tab-separated .bed file
timestamp = int(time.time())
if save:
if isinstance(filename, type(None)):
BED.to_csv(f"{timestamp}.bed", index=False, sep='\t')
return f"{timestamp}.bed"
if not isinstance(filename, Path):
filename = f"{filename}.bed"
# otherwise, use as is, assuming it is a complete path/filename
BED.to_csv(filename, index=False, sep='\t')
return filename
return BED | 6ce2272c6a20c69e4a0e6e5568d2105a8e2dd82b | 2,582 |
def GetPartition(partition_name, target_os):
"""Return the partition to install to.
Args:
partition_name: partition name from command-line
{'primary', 'secondary', 'other'}
target_os: 'fiberos' or 'android'
Returns:
0 or 1
Raises:
Fatal: if no partition could be determined
"""
if partition_name == 'other':
if target_os == GetOs():
boot = GetBootedPartition()
else:
boot = GetActivePartitionFromHNVRAM(target_os)
assert boot in [None, 0, 1]
if boot is None:
# Policy decision: if we're booted from NFS, install to secondary
return 1
else:
return boot ^ 1
elif partition_name in ['primary', 0]:
return 0
elif partition_name in ['secondary', 1]:
return 1
else:
raise Fatal('--partition must be one of: primary, secondary, other') | b3f030779bd29bbe695ba3769372f4af700d7cb7 | 2,583 |
import aiohttp
import json
async def call_dialogflow(message, config, lang=DEFAULT_LANGUAGE):
"""Call the Dialogflow api and return the response."""
async with aiohttp.ClientSession() as session:
payload = {
"v": DIALOGFLOW_API_VERSION,
"lang": lang,
"sessionId": message.connector.name,
"query": message.text,
}
headers = {
"Authorization": "Bearer " + config["access-token"],
"Content-Type": "application/json",
}
resp = await session.post(
DIALOGFLOW_API_ENDPOINT, data=json.dumps(payload), headers=headers
)
result = await resp.json()
_LOGGER.info(_("Dialogflow response - %s"), json.dumps(result))
return result | e670748dc4d0318d047b0f0ded6d857597112d49 | 2,584 |
def gettgd(sat, eph, type=0):
""" get tgd: 0=E5a, 1=E5b """
sys = gn.sat2prn(sat)[0]
if sys == uGNSS.GLO:
return eph.dtaun * rCST.CLIGHT
else:
return eph.tgd[type] * rCST.CLIGHT | c7231769b0e9be5287b2b2f76c8dcdc7bd409a89 | 2,585 |
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
Samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) | b942253c14438c72c19d648a0d0358d8cd280bd0 | 2,586 |
from cvxopt import matrix  # assumed: cvxopt dense matrix constructor matching the call below
def ones_v(n):
"""
Return the column vector of ones of length n.
"""
return matrix(1, (n,1), 'd') | 46936660025c1b5bd533b78143301d1218b568d7 | 2,587 |
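# Example (assuming the cvxopt import above): ones_v(3) builds a 3x1 dense
# column vector of double-precision ones, equivalent to matrix([1.0, 1.0, 1.0]).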
def test_encrypt_and_decrypt_two(benchmark: BenchmarkFixture) -> None:
"""Benchmark encryption and decryption run together."""
primitives.decrypt = pysodium.crypto_aead_xchacha20poly1305_ietf_decrypt
primitives.encrypt = pysodium.crypto_aead_xchacha20poly1305_ietf_encrypt
def encrypt_and_decrypt() -> bytes:
token = version2.encrypt(MESSAGE, KEY, FOOTER)
return version2.decrypt(token, KEY, FOOTER)
plain_text = benchmark(encrypt_and_decrypt)
assert plain_text == MESSAGE | 1b632ae28f147fa4d98dcdda982bf3d17b2c17dd | 2,588 |
import os
def truncate(fh, length):
"""Implementation of perl $fh->truncate method"""
global OS_ERROR, TRACEBACK, AUTODIE
try:
if hasattr(fh, 'truncate'):
fh.truncate(length)
else:
os.truncate(fh, length)
return True
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
if isinstance(fh, str):
cluck(f"truncate({fh}, {length}) failed: {OS_ERROR}",skip=2)
else:
cluck(f"truncate to {length} failed: {OS_ERROR}",skip=2)
if AUTODIE:
raise
return None | c18ede642efcb37de905bf2a88bb3e1be023c1d7 | 2,589 |
def get_fy_parent_nucl(fy_lib):
"""Gets the list of fission parents from a fission yield dictionnary.
Parameters
----------
fy_lib: dict
A fission yield dictionnary
"""
fy_nucl = get_fy_nucl(fy_lib)
fy_parent = []
sample_zamid = fy_nucl[0]
sample = fy_lib[sample_zamid]
for fission_parent in sample:
fy_parent.append(fission_parent)
return fy_parent | feb2ec2adfda4d9df4993cc89545564e4c0d1a54 | 2,590 |
from functools import partial
import array
def initialize ( is_test, no_cam ) :
"""job machine Tableをもとに個体、世代の初期設定"""
jmTable = getJmTable ( is_test )
MAX_JOBS = jmTable.getJobsCount()
MAX_MACHINES = jmTable.getMachinesCount()
    # minimize makespan
creator.create ( "FitnessMin", base.Fitness, weights=(-1.0,) )
    # an individual is a list of job numbers
#creator.create ( "Individual", list, fitness=creator.FitnessMin )
creator.create ( "Individual", array.array, typecode='b', fitness=creator.FitnessMin ) # 'b' is signed char
toolbox = base.Toolbox()
    # define an individual as a random sequence of values from 0 to MAX_MACHINES-1, each appearing MAX_JOBS times
gen_ind = partial ( initIndividual, MAX_JOBS, MAX_MACHINES )
toolbox.register ( "individual", tools.initIterate, creator.Individual, gen_ind )
    # register the function that generates the initial population; the population is a list of Individuals
toolbox.register ( "population", tools.initRepeat, list, toolbox.individual )
    # register the evaluation function
toolbox.register ( "evaluate", schedule.eval, jmTable )
    # register the crossover function
toolbox.register ( "mate", schedule.crossover )
    # register the mutation operator
toolbox.register ( "mutate", schedule.mutation )
    # register roulette-wheel selection
toolbox.register ( "select", tools.selRoulette )
    # register the replacement operation
if no_cam :
        # standard replacement operation
toolbox.register ( "getArgWorst", schedule.getArgWorst )
else :
        # replacement operation using the cluster averaging method (CAM)
toolbox.register ( "getArgWorst", schedule.getArgWorstCAM )
return toolbox, jmTable | f306cf9b5400ea92b92709bc6986d6b87ea909b2 | 2,591 |
def perform_variants_query(job, **kwargs):
"""Query for variants.
:param job: API to interact with the owner of the variants.
:type job: :class:`cibyl.sources.zuul.transactions.JobResponse`
:param kwargs: See :func:`handle_query`.
:return: List of retrieved variants.
:rtype: list[:class:`cibyl.sources.zuul.transactions.VariantResponse`]
"""
return job.variants().get() | c779080e2ef8c1900c293f70996e17bae932b142 | 2,592 |
import torch
import torch.nn as nn
def get_model(share_weights=False, upsample=False): # pylint: disable=too-many-statements
""" Return a network dict for the model """
block0 = [{'conv1_1': [3, 64, 3, 1, 1]},
{'conv1_2': [64, 64, 3, 1, 1]}, {'pool1_stage1': [2, 2, 0]},
{'conv2_1': [64, 128, 3, 1, 1]},
{'conv2_2': [128, 128, 3, 1, 1]}, {'pool2_stage1': [2, 2, 0]},
{'conv3_1': [128, 256, 3, 1, 1]},
{'conv3_2': [256, 256, 3, 1, 1]},
{'conv3_3': [256, 256, 3, 1, 1]},
{'conv3_4': [256, 256, 3, 1, 1]}, {'pool3_stage1': [2, 2, 0]},
{'conv4_1': [256, 512, 3, 1, 1]},
{'conv4_2': [512, 512, 3, 1, 1]}]
if share_weights:
print("defining network with shared weights")
network_dict = get_shared_network_dict()
else:
network_dict = get_network_dict()
def define_base_layers(block, layer_size):
layers = []
for i in range(layer_size):
one_ = block[i]
for k, v in zip(one_.keys(), one_.values()):
if 'pool' in k:
layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
layers += [conv2d, nn.ReLU(inplace=True)]
return layers
def define_stage_layers(cfg_dict):
layers = define_base_layers(cfg_dict, len(cfg_dict) - 1)
one_ = cfg_dict[-1].keys()
k = list(one_)[0]
v = cfg_dict[-1][k]
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
layers += [conv2d]
return nn.Sequential(*layers)
# create all the layers of the model
base_layers = define_base_layers(block0, len(block0))
pre_stage_layers = define_base_layers(network_dict['block_pre_stage'], len(network_dict['block_pre_stage']))
blocks = {'block0': nn.Sequential(*base_layers),
'block_pre_stage': nn.Sequential(*pre_stage_layers)}
if share_weights:
shared_layers_s1 = define_base_layers(network_dict['block1_shared'], len(network_dict['block1_shared']))
shared_layers_s2 = define_base_layers(network_dict['block2_shared'], len(network_dict['block2_shared']))
blocks['block1_shared'] = nn.Sequential(*shared_layers_s1)
blocks['block2_shared'] = nn.Sequential(*shared_layers_s2)
for k, v in zip(network_dict.keys(), network_dict.values()):
if 'shared' not in k and 'pre_stage' not in k:
blocks[k] = define_stage_layers(v)
class PoseModel(nn.Module):
""" Pose Model class """
def __init__(self, model_dict, upsample=False):
super(PoseModel, self).__init__()
self.upsample = upsample
self.basemodel = model_dict['block0']
self.pre_stage = model_dict['block_pre_stage']
if share_weights:
self.stage1_shared = model_dict['block1_shared']
self.stage1_1 = model_dict['block1_1']
self.stage2_1 = model_dict['block2_1']
# self.stage3_1 = model_dict['block3_1']
# self.stage4_1 = model_dict['block4_1']
# self.stage5_1 = model_dict['block5_1']
# self.stage6_1 = model_dict['block6_1']
if share_weights:
self.stage2_shared = model_dict['block2_shared']
self.stage1_2 = model_dict['block1_2']
self.stage2_2 = model_dict['block2_2']
# self.stage3_2 = model_dict['block3_2']
# self.stage4_2 = model_dict['block4_2']
# self.stage5_2 = model_dict['block5_2']
# self.stage6_2 = model_dict['block6_2']
def forward(self, *inputs):
out1_vgg = self.basemodel(inputs[0])
out1 = self.pre_stage(out1_vgg)
if share_weights:
out1_shared = self.stage1_shared(out1)
else:
out1_shared = out1
out1_1 = self.stage1_1(out1_shared)
out1_2 = self.stage1_2(out1_shared)
out2 = torch.cat([out1_1, out1_2, out1], 1)
if share_weights:
out2_shared = self.stage2_shared(out2)
else:
out2_shared = out2
out2_1 = self.stage2_1(out2_shared)
out2_2 = self.stage2_2(out2_shared)
# out3 = torch.cat([out2_1, out2_2, out1], 1)
# out3_1 = self.stage3_1(out3)
# out3_2 = self.stage3_2(out3)
# out4 = torch.cat([out3_1, out3_2, out1], 1)
#
# out4_1 = self.stage4_1(out4)
# out4_2 = self.stage4_2(out4)
# out5 = torch.cat([out4_1, out4_2, out1], 1)
#
# out5_1 = self.stage5_1(out5)
# out5_2 = self.stage5_2(out5)
# out6 = torch.cat([out5_1, out5_2, out1], 1)
#
# out6_1 = self.stage6_1(out6)
# out6_2 = self.stage6_2(out6)
if self.upsample:
# parameters to check for up-sampling: align_corners = True, mode='nearest'
upsampler = nn.Upsample(scale_factor=2, mode='bilinear')
out2_1_up = upsampler(out2_1)
out2_2_up = upsampler(out2_2)
return out1_1, out1_2, out2_1, out2_2, out2_1_up, out2_2_up
return out1_1, out1_2, out2_1, out2_2
model = PoseModel(blocks, upsample=upsample)
return model | 364050799adc3312e4a46081e4a82338407f177b | 2,593 |
def bootstrap(config_uri, request=None, options=None):
""" Load a WSGI application from the PasteDeploy config file specified
by ``config_uri``. The environment will be configured as if it is
currently serving ``request``, leaving a natural environment in place
to write scripts that can generate URLs and utilize renderers.
This function returns a dictionary with ``app``, ``root``, ``closer``,
``request``, and ``registry`` keys. ``app`` is the WSGI app loaded
(based on the ``config_uri``), ``root`` is the traversal root resource
of the Pyramid application, and ``closer`` is a parameterless callback
that may be called when your script is complete (it pops a threadlocal
stack).
.. note::
Most operations within :app:`Pyramid` expect to be invoked within the
context of a WSGI request, thus it's important when loading your
application to anchor it when executing scripts and other code that is
not normally invoked during active WSGI requests.
.. note::
For a complex config file containing multiple :app:`Pyramid`
applications, this function will setup the environment under the context
of the last-loaded :app:`Pyramid` application. You may load a specific
application yourself by using the lower-level functions
:meth:`pyramid.paster.get_app` and :meth:`pyramid.scripting.prepare` in
conjunction with :attr:`pyramid.config.global_registries`.
``config_uri`` -- specifies the PasteDeploy config file to use for the
interactive shell. The format is ``inifile#name``. If the name is left
off, ``main`` will be assumed.
``request`` -- specified to anchor the script to a given set of WSGI
parameters. For example, most people would want to specify the host,
scheme and port such that their script will generate URLs in relation
to those parameters. A request with default parameters is constructed
for you if none is provided. You can mutate the request's ``environ``
later to setup a specific host/port/scheme/etc.
``options`` Is passed to get_app for use as variable assignments like
{'http_port': 8080} and then use %(http_port)s in the
config file.
See :ref:`writing_a_script` for more information about how to use this
function.
"""
app = get_app(config_uri, options=options)
env = prepare(request)
env['app'] = app
return env | 608629eb380765ebafa4009946a30b9f46de6ff9 | 2,594 |
def readSegy(filename) :
"""
    Data,SegyHeader,SegyTraceHeaders=readSegy(filename)
"""
printverbose("readSegy : Trying to read "+filename,0)
data = open(filename).read()
filesize=len(data)
SH=getSegyHeader(filename)
bps=getBytePerSample(SH)
ntraces = (filesize-3600)/(SH['ns']*bps+240)
# ntraces = 100
printverbose("readSegy : Length of data : " + str(filesize),2)
SH["ntraces"]=ntraces;
ndummy_samples=240/bps
printverbose("readSegy : ndummy_samples="+str(ndummy_samples),6)
printverbose("readSegy : ntraces=" + str(ntraces) + " nsamples="+str(SH['ns']),2)
# GET TRACE
index=3600;
nd=(filesize-3600)/bps
    # READ ALL SEGY TRACE HEADERS
SegyTraceHeaders = getAllSegyTraceHeaders(SH,data)
printverbose("readSegy : reading segy data",2)
# READ ALL DATA EXCEPT FOR SEGY HEADER
#Data = zeros((SH['ns'],ntraces))
revision=SH["SegyFormatRevisionNumber"]
if (revision==100):
revision=1
dsf=SH["DataSampleFormat"]
DataDescr=SH_def["DataSampleFormat"]["descr"][revision][dsf]
printverbose("readSegy : SEG-Y revision = "+str(revision),1)
printverbose("readSegy : DataSampleFormat="+str(dsf)+"("+DataDescr+")",1)
if (SH["DataSampleFormat"]==1):
printverbose("readSegy : Assuming DSF=1, IBM FLOATS",2)
Data1 = getValue(data,index,'ibm',endian,nd)
elif (SH["DataSampleFormat"]==2):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 32bit INT",2)
Data1 = getValue(data,index,'l',endian,nd)
elif (SH["DataSampleFormat"]==3):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 16bit INT",2)
Data1 = getValue(data,index,'h',endian,nd)
elif (SH["DataSampleFormat"]==5):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", IEEE",2)
Data1 = getValue(data,index,'float',endian,nd)
elif (SH["DataSampleFormat"]==8):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 8bit CHAR",2)
Data1 = getValue(data,index,'B',endian,nd)
else:
printverbose("readSegy : DSF=" + str(SH["DataSampleFormat"]) + ", NOT SUPORTED",2)
Data = Data1[0]
printverbose("readSegy : - reshaping",2)
Data=reshape(Data,(ntraces,SH['ns']+ndummy_samples))
printverbose("readSegy : - stripping header dummy data",2)
Data=Data[:,ndummy_samples:(SH['ns']+ndummy_samples)]
printverbose("readSegy : - transposing",2)
Data=transpose(Data)
    # SOMEONE NEEDS TO IMPLEMENT A NICER WAY TO DEAL WITH DSF=8
if (SH["DataSampleFormat"]==8):
for i in arange(ntraces):
for j in arange(SH['ns']):
if Data[i][j]>128:
Data[i][j]=Data[i][j]-256
printverbose("readSegy : read data",2)
return Data,SH,SegyTraceHeaders | 5e3920255aa49c70e0e898b2d3915c05afc7f869 | 2,595 |
def planar_transform(imgs, masks, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a):
"""transforms imgs, masks and computes dmaps according to planar transform.
Args:
imgs: are L X [...] X C, typically RGB images per layer
masks: L X [...] X 1, indicating which layer pixels are valid
pixel_coords_trg: [...] X H_t X W_t X 3;
pixel (u,v,1) coordinates of target image pixels.
k_s: intrinsics for source cameras, are [...] X 3 X 3 matrices
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: L X [...] X 1 X 3, plane normal w.r.t source camera frame
a: L X [...] X 1 X 1, plane equation displacement
Returns:
imgs_transformed: L X [...] X C images in trg frame
masks_transformed: L X [...] X 1 masks in trg frame
dmaps_trg: L X [...] X 1, indicating per pixel inverse depth
Assumes the first dimension corresponds to layers.
"""
with tf.name_scope('planar_transform'):
n_layers = imgs.get_shape().as_list()[0]
rot_rep_dims = [n_layers]
rot_rep_dims += [1 for _ in range(len(k_s.get_shape()))]
cds_rep_dims = [n_layers]
cds_rep_dims += [1 for _ in range(len(pixel_coords_trg.get_shape()))]
k_s = tf.tile(tf.expand_dims(k_s, axis=0), rot_rep_dims)
k_t = tf.tile(tf.expand_dims(k_t, axis=0), rot_rep_dims)
t = tf.tile(tf.expand_dims(t, axis=0), rot_rep_dims)
rot = tf.tile(tf.expand_dims(rot, axis=0), rot_rep_dims)
pixel_coords_trg = tf.tile(
tf.expand_dims(pixel_coords_trg, axis=0), cds_rep_dims)
ndims_img = len(imgs.get_shape())
imgs_masks = tf.concat([imgs, masks], axis=ndims_img - 1)
imgs_masks_trg = homography.transform_plane_imgs(
imgs_masks, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a)
imgs_trg, masks_trg = tf.split(imgs_masks_trg, [3, 1], axis=ndims_img - 1)
dmaps_trg = homography.trg_disp_maps(pixel_coords_trg, k_t, rot, t, n_hat,
a)
return imgs_trg, masks_trg, dmaps_trg | 18f90706b996ee9ba81ab7142313dcaa761cf773 | 2,596 |
def convertDynamicRenderStates(data, builder):
"""
Converts dynamic render states. The data map is expected to contain the following elements:
- lineWidth: float width for the line. Defaults to 1.
- depthBiasConstantFactor: float value for the depth bias constant factor. Defaults to 0.
- depthBiasClamp: float value for the depth bias clamp. Defaults to 0.
- depthBiasSlopeFactor: float value for the depth bias slope factor. Defaults to 0.
- blendConstants: array of 4 floats for the blend color. Defaults to [0, 0, 0, 0].
- depthBounds: array of 2 floats for the min and max depth value. Defaults to [0, 1].
- stencilCompareMask: int compare mask for both the front and back stencil. Defaults to
0xFFFFFFFF.
- frontStencilCompareMask: int compare mask for just the front stencil.
- backStencilCompareMask: int compare mask for just the back stencil.
- stencilWriteMask: int write mask for both the front and back stencil. Defaults to 0.
- frontStencilWriteMask: int write mask for just the front stencil.
- backStencilWriteMask: int write mask for just the back stencil.
- stencilReference: int reference for both the front and back stencil. Defaults to 0.
- frontStencilReference: int reference for just the front stencil.
- backStencilReference: int reference for just the back stencil.
"""
def readFloat(value, name):
try:
return float(value)
except:
raise Exception('Invalid ' + name + ' float value "' + str(value) + '".')
def readUInt(value, name):
try:
intVal = int(value)
if intVal < 0:
raise Exception()
return intVal
except:
raise Exception('Invalid ' + name + ' unsigned int value "' + str(value) + '".')
lineWidth = readFloat(data.get('lineWidth', 1.0), 'line width')
depthBiasConstantFactor = readFloat(data.get('depthBiasConstantFactor', 0.0),
'depth bias constant factor')
depthBiasClamp = readFloat(data.get('depthBiasClamp', 0.0), 'depth bias clamp')
depthBiasSlopeFactor = readFloat(data.get('depthBiasSlopeFactor', 0.0),
'depth bias slope factor')
colorValue = data.get('blendConstants', [0.0, 0.0, 0.0, 0.0])
try:
if len(colorValue) != 4:
raise Exception()
except:
raise Exception('Blend constants value must be an array of 4 floats.')
blendConstants = []
for c in colorValue:
blendConstants.append(readFloat(c, 'blend constant'))
depthBoundsValue = data.get('depthBounds', [0.0, 1.0])
try:
if len(depthBoundsValue) != 2:
raise Exception()
except:
raise Exception('Depth bounds value must be an array of 2 floats.')
depthBounds = []
for b in depthBoundsValue:
depthBounds.append(readFloat(b, 'depth bounds'))
stencilCompareMask = data.get('stencilCompareMask', 0xFFFFFFFF)
frontStencilCompareMask = readUInt(data.get('frontStencilCompareMask', stencilCompareMask),
'stencil compare mask')
backStencilCompareMask = readUInt(data.get('backStencilCompareMask', stencilCompareMask),
'stencil compare mask')
stencilWriteMask = data.get('stencilWriteMask', 0)
frontStencilWriteMask = readUInt(data.get('frontStencilWriteMask', stencilWriteMask),
'stencil write mask')
backStencilWriteMask = readUInt(data.get('backStencilWriteMask', stencilWriteMask),
'stencil write mask')
stencilReference = data.get('stencilReference', 0)
frontStencilReference = readUInt(data.get('frontStencilReference', stencilReference),
'stencil reference')
backStencilReference = readUInt(data.get('backStencilReference', stencilReference),
'stencil reference')
DynamicRenderStates.Start(builder)
DynamicRenderStates.AddLineWidth(builder, lineWidth)
DynamicRenderStates.AddDepthBiasConstantFactor(builder, depthBiasConstantFactor)
DynamicRenderStates.AddDepthBiasClamp(builder, depthBiasClamp)
DynamicRenderStates.AddDepthBiasSlopeFactor(builder, depthBiasSlopeFactor)
DynamicRenderStates.AddBlendConstants(builder, CreateColor4f(builder, *blendConstants))
DynamicRenderStates.AddDepthBounds(builder, CreateVector2f(builder, *depthBounds))
DynamicRenderStates.AddFrontStencilCompareMask(builder, frontStencilCompareMask)
DynamicRenderStates.AddBackStencilCompareMask(builder, backStencilCompareMask)
DynamicRenderStates.AddFrontStencilWriteMask(builder, frontStencilWriteMask)
DynamicRenderStates.AddBackStencilWriteMask(builder, backStencilWriteMask)
DynamicRenderStates.AddFrontStencilReference(builder, frontStencilReference)
DynamicRenderStates.AddBackStencilReference(builder, backStencilReference)
return DynamicRenderStates.End(builder) | 5c27ebd4401d8b6c0388bfe6f1973c137404ddf5 | 2,597 |
def binary_search(a, search_value):
    """
    @name binary_search
    @param a sorted array
    @param search_value value to search for
    @return index of search_value in a, or -1 if it is not present

    Relies on a helper binary_search_iteration(a, l, r, search_value) that
    returns a narrowed (l, r) window while the search continues, and the found
    index (or None when the value is absent) once the search terminates.
    """
    l = 0
    r = len(a) - 1
    while True:
        try:
            result = binary_search_iteration(a, l, r, search_value)
            # still searching: the helper returned a narrowed (l, r) window
            l, r = result
        except TypeError:
            # unpacking failed: the helper returned a scalar index or None
            return -1 if result is None else result | 5fc2748a76d89c2559cda8bc9dacd16d90b2aa5e | 2,598
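# binary_search_iteration() is not defined in this snippet. A minimal sketch of
# a helper satisfying the contract assumed above -- a narrowed (l, r) window
# while searching, the found index on a hit, None when the window is empty:
def binary_search_iteration(a, l, r, search_value):
    if l > r:
        return None                  # exhausted: value not present
    mid = (l + r) // 2
    if a[mid] == search_value:
        return mid                   # scalar index ends the caller's loop
    if a[mid] < search_value:
        return mid + 1, r            # continue in the right half
    return l, mid - 1                # continue in the left half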
from typing import Dict
from typing import Any
from typing import cast
def _key_match(d1: Dict[str, Any], d2: Dict[str, Any], key: str) -> bool:
"""
>>> _key_match({"a": 1}, {"a": 2}, "a")
False
>>> _key_match({"a": 1}, {"a": 2}, "b")
True
>>> _key_match({"a": 2}, {"a": 1}, "a")
False
>>> _key_match({"a": 1}, {"a": 1}, "a")
True
>>> _key_match({"a": 2}, {"b": 1}, "a")
False
>>> _key_match({"b": 2}, {"a": 1}, "a")
False
"""
try:
return (key not in d1 and key not in d2) or cast(bool, d1[key] == d2[key])
except KeyError:
return False | 8e76ee70c6209b357b13890a9fcf2b0b7d770c1b | 2,599 |