content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def update_t_new_docker_image_names(main, file):
""" Updates the names of the docker images from lasote to conanio
"""
docker_mappings = {
"lasote/conangcc49": "conanio/gcc49",
"lasote/conangcc5": "conanio/gcc5",
"lasote/conangcc6": "conanio/gcc6",
"lasote/conangcc7": "conanio/gcc7",
"lasote/conangcc8": "conanio/gcc8",
"lasote/conanclang39": "conanio/clang39",
"lasote/conanclang40": "conanio/clang40",
"lasote/conanclang50": "conanio/clang50",
"lasote/conanclang60": "conanio/clang60",
}
found_old_name = False
for old, new in docker_mappings.items():
if main.file_contains(file, old):
main.replace_in_file(file, old, new)
found_old_name = True
if found_old_name:
main.output_result_update(title="Travis: Update Docker image names from lasote/ to conanio/")
return True
return False | 6d1a1dd0f254252cf73d7a89c926dc2476fc89e8 | 2,200 |
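A minimal self-contained sketch of the same renaming done with plain file I/O; the `main` helper used above (with `file_contains`, `replace_in_file`, and `output_result_update`) belongs to the surrounding update tooling and is not shown, so this is an assumed stand-in rather than the project's actual API.
def update_docker_image_names_plain(path, mappings):
    # Read the file, apply every old -> new mapping, and rewrite it only if something changed.
    with open(path) as handle:
        original = handle.read()
    updated = original
    for old, new in mappings.items():
        updated = updated.replace(old, new)
    if updated != original:
        with open(path, "w") as handle:
            handle.write(updated)
        return True
    return False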
import numpy as np
def fit(kern, audio, file_name, max_par, fs):
"""Fit kernel to data """
# time vector for kernel
n = kern.size
xkern = np.linspace(0., (n - 1.) / fs, n).reshape(-1, 1)
# initialize parameters
if0 = gpitch.find_ideal_f0([file_name])[0]
init_f, init_v = gpitch.init_cparam(y=audio, fs=fs, maxh=max_par, ideal_f0=if0, scaled=False)[0:2]
init_l = np.array([0., 1.])
# optimization
p0 = np.hstack((init_l, init_v, init_f)) # initialize params
pstar = optimize_kern(x=xkern, y=kern, p0=p0)
# compute initial and learned kernel
kern_init = approximate_kernel(p0, xkern)
kern_approx = approximate_kernel(pstar, xkern)
# get kernel hyperparameters
npartials = (pstar.size - 2) // 2  # integer division so the slices below get integer bounds
lengthscale = pstar[1]
variance = pstar[2: npartials + 2]
frequency = pstar[npartials + 2:]
params = [lengthscale, variance, frequency]
return params, kern_init, kern_approx | e4b6519f1d9439e3d8ea20c545664cf152ce6a89 | 2,201 |
def find_next_open_date(location_pid, date):
"""Finds the next day where this location is open."""
location = current_app_ils.location_record_cls.get_record_by_pid(
location_pid
)
_infinite_loop_guard = date + timedelta(days=365)
while date < _infinite_loop_guard:
if _is_open_on(location, date):
return date
date += _ONE_DAY_INCREMENT
# Termination is normally guaranteed if there is at least one weekday open
raise IlsException(
description="Cannot find any date for which the location %s is open after the given date %s."
"Please check opening/closures dates."
% (location_pid, date.isoformat())
) | 74a38c39d2e03a2857fa6bbc7e18dc45c0f4e48a | 2,202 |
import functools
def check_dimension(units_in=None, units_out=None):
"""Check dimensions of inputs and ouputs of function.
Will check that all inputs and outputs have the same dimension
than the passed units/quantities. Dimensions for inputs and
outputs expects a tuple.
Parameters
----------
units_in : quantity_like or tuple of quantity_like
quantity_like means a Quantity object or a
numeric value (that will be treated as a dimensionless Quantity).
The input dimensions will be checked against units_in.
Defaults to None to skip any check.
units_out : quantity_like or tuple of quantity_like
quantity_like means a Quantity object or a
numeric value (that will be treated as a dimensionless Quantity).
The output dimensions will be checked against units_out.
Defaults to None to skip any check.
Returns
-------
func:
decorated function with dimension-checked inputs and outputs.
See Also
--------
Other decorators (TODO)
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a Greek symbol like :math:`\omega` inline.
Examples (written in doctest format)
--------
>>> def add_meter(x): return x + 1*m
>>> add_meter = check_dimension((m), (m))(add_meter)
>>> add_meter(1*m)
2 m
>>> add_meter(1*s)
raise DimensionError
"""
# reading args and making them iterable
if units_in:
units_in = _iterify(units_in)
if units_out:
units_out = _iterify(units_out)
# define the decorator
def decorator(func):
# create a decorated func
@functools.wraps(func)
def decorated_func(*args, **kwargs):
# Checking dimension of inputs
args = _iterify(args)
if units_in:
for arg, unit_in in zip(args, units_in):
# make everything dimensions
dim_check_in = dimensionify(unit_in)
dim_arg = dimensionify(arg)
# and checking dimensions
if not dim_arg == dim_check_in:
raise DimensionError(dim_arg, dim_check_in)
# Compute outputs and iterify it
ress = _iterify(func(*args, **kwargs))
# Checking dimension of outputs
if units_out:
for res, unit_out in zip(ress, units_out):
# make everything dimensions
dim_check_out = dimensionify(unit_out)
dim_res = dimensionify(res)
# and checking dimensions
if not dim_res == dim_check_out:
raise DimensionError(dim_res, dim_check_out)
# still return the function outputs
return tuple(ress) if len(ress) > 1 else ress[0]
return decorated_func
return decorator | 68fb2bf7e0858824e5a1e3bdcef69834b08142ac | 2,203 |
import numpy as np
def _rowcorr(a, b):
"""Correlations between corresponding matrix rows"""
cs = np.zeros((a.shape[0]))
for idx in range(a.shape[0]):
cs[idx] = np.corrcoef(a[idx], b[idx])[0, 1]
return cs | 21df87b6f3bba58285cac0242b7c9e72b534f762 | 2,204 |
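A small usage sketch for `_rowcorr` with hypothetical random arrays (assumes NumPy):
import numpy as np

a = np.random.rand(4, 10)
b = a + 0.1 * np.random.rand(4, 10)  # rows of b are noisy copies of rows of a
print(_rowcorr(a, b))                # four Pearson correlations, each close to 1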
def gff_to_dict(f_gff, feat_type, idattr, txattr, attributes, input_type):
"""
It reads only exonic features because not all GFF files contain gene and transcript features. From the exonic
features it extracts gene names, biotypes, start and end positions. If any of these attributes do not exist
then they are set to NA.
"""
annotation = defaultdict(lambda: defaultdict(lambda: 'NA'))
exon_pos = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
tx_info = defaultdict(lambda: defaultdict(str))
with open(f_gff) as gff_handle:
for rec in GFF.parse(gff_handle, limit_info=dict(gff_type=[feat_type]), target_lines=1):
for sub_feature in rec.features:
start = sub_feature.location.start
end = sub_feature.location.end
strand = strandardize(sub_feature.location.strand)
try:
geneid = sub_feature.qualifiers[idattr][0]
except KeyError:
print("No '" + idattr + "' attribute found for the feature at position "
+ rec.id + ":" + str(start) + ":" + str(end) + ". Please check your GTF/GFF file.")
continue
annotation[geneid]['chr'] = rec.id
annotation[geneid]['strand'] = strand
if annotation[geneid]['start'] == 'NA' or start <= int(annotation[geneid]['start']):
annotation[geneid]['start'] = start
if annotation[geneid]['end'] == 'NA' or end >= int(annotation[geneid]['end']):
annotation[geneid]['end'] = end
for attr in attributes:
if attr in annotation[geneid]:
continue
try:
annotation[geneid][attr] = sub_feature.qualifiers[attr][0]
except KeyError:
annotation[geneid][attr] = 'NA'
# extract exon information only in case of dexseq output
if input_type != "dexseq":
continue
try:
txid = sub_feature.qualifiers[txattr][0]
tx_info[txid]['chr'] = rec.id
tx_info[txid]['strand'] = strand
exon_pos[txid][int(start)][int(end)] = 1
except KeyError:
print("No '" + txattr + "' attribute found for the feature at position " + rec.id + ":" + str(
start) + ":" + str(end) + ". Please check your GTF/GFF file.")
pass
bed_entries = []
# create BED lines only for dexseq output
if input_type == "dexseq":
for txid in exon_pos.keys():
starts = sorted(exon_pos[txid])
strand = tx_info[txid]['strand']
if strand == '-':
starts = reversed(starts)
for c, start in enumerate(starts, 1):
ends = sorted(exon_pos[txid][start])
if strand == '-':
ends = reversed(ends)
for end in ends:
bed_entries.append('\t'.join([tx_info[txid]['chr'], str(start), str(end),
txid + ':' + str(c), '0', strand]))
return annotation, bed_entries | f1574cabb40f09f4f7a3b6a4f0e1b0a11d5c585d | 2,205 |
def _subtract_the_mean(point_cloud):
"""
Subtract the mean in point cloud and return its zero-mean version.
Args:
point_cloud (numpy.ndarray of size [N,3]): point cloud
Returns:
(numpy.ndarray of size [N,3]): point cloud with zero-mean
"""
point_cloud = point_cloud - np.mean(point_cloud, axis=0)
return point_cloud | 94866087a31b8268b06250d052fd12983222a066 | 2,206 |
from .load import url_load_list
def ncnr_load(filelist=None, check_timestamps=True):
"""
Load a list of nexus files from the NCNR data server.
**Inputs**
filelist (fileinfo[]): List of files to open.
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (refldata[]): All entries of all files in the list.
2016-06-29 Brian Maranville
| 2017-08-21 Brian Maranville Change to refldata, force cache invalidate
| 2018-06-18 Brian Maranville Change to nexusref to ignore areaDetector
| 2018-12-10 Brian Maranville get_plottable routines moved to python data container from js
| 2020-03-03 Paul Kienzle Just load. Don't even compute divergence
"""
# NB: used mainly to set metadata for processing, so keep it minimal
# TODO: make a metadata loader that does not send all data to browser
# NB: Fileinfo is a structure with
# { path: "location/on/server", mtime: timestamp }
datasets = []
for data in url_load_list(filelist, check_timestamps=check_timestamps):
datasets.append(data)
return datasets | 97452707da94c356c93d18618f8b43bc8459a63b | 2,207 |
def load_source_dataframe(method, sourcename, source_dict,
download_FBA_if_missing, fbsconfigpath=None):
"""
Load the source dataframe. Data can be a FlowbyActivity or
FlowBySector parquet stored in flowsa, or a FlowBySector
formatted dataframe from another package.
:param method: dictionary, FBS method
:param sourcename: str, The datasource name
:param source_dict: dictionary, The datasource parameters
:param download_FBA_if_missing: Bool, if True will download FBAs from
Data Commons. Default is False.
:param fbsconfigpath, str, optional path to an FBS method outside flowsa
repo
:return: df of identified parquet
"""
if source_dict['data_format'] == 'FBA':
# if yaml specifies a geoscale to load, use parameter
# to filter dataframe
if 'source_fba_load_scale' in source_dict:
geo_level = source_dict['source_fba_load_scale']
else:
geo_level = None
vLog.info("Retrieving Flow-By-Activity for datasource %s in year %s",
sourcename, str(source_dict['year']))
flows_df = flowsa.getFlowByActivity(
datasource=sourcename,
year=source_dict['year'],
flowclass=source_dict['class'],
geographic_level=geo_level,
download_FBA_if_missing=download_FBA_if_missing)
elif source_dict['data_format'] == 'FBS':
vLog.info("Retrieving flowbysector for datasource %s", sourcename)
flows_df = flowsa.getFlowBySector(sourcename)
elif source_dict['data_format'] == 'FBS_outside_flowsa':
vLog.info("Retrieving flowbysector for datasource %s", sourcename)
fxn = source_dict.get("FBS_datapull_fxn")
if callable(fxn):
flows_df = fxn(source_dict, method, fbsconfigpath)
elif fxn:
raise flowsa.exceptions.FBSMethodConstructionError(
error_type='fxn_call')
else:
raise flowsa.exceptions.FBSMethodConstructionError(
message="Data format not specified in method "
f"file for {sourcename}")
return flows_df | 9a928441d790bb35acd0d32efeea105eeb3082c8 | 2,208 |
import json
def unpack_nwchem_basis_block(data):
"""Unserialize a NWChem basis data block and extract components
@param data: a JSON of basis set data, perhaps containing many types
@type data : str
@return: unpacked data
@rtype : dict
"""
unpacked = json.loads(data)
return unpacked | dfa920f80ae8f0caf15441c354802410c8add690 | 2,209 |
def starify(name):
"""
Replace any ints in a dotted key with stars. Used when applying defaults and widgets to fields
"""
newname = []
for key in name.split('.'):
if is_int(key):
newname.append('*')
else:
newname.append(key)
name = '.'.join(newname)
return name | f7c8baf602ecc4cd12088d4ba70524b2a316875e | 2,210 |
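A usage sketch for `starify`; the `is_int` helper is not shown above, so a minimal assumed version is included here.
def is_int(key):
    # Assumed helper: report whether the key is an integer string.
    try:
        int(key)
        return True
    except ValueError:
        return False

print(starify("fields.0.widget"))  # -> fields.*.widget
print(starify("fields.name"))      # -> fields.name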
import os
def render(states, actions, instantaneous_reward_log, cumulative_reward_log, critic_distributions, target_critic_distributions, projected_target_distribution, bins, loss_log, episode_number, filename, save_directory, time_log, SPOTNet_sees_target_log):
"""
TOTAL_STATE = [relative_x, relative_y, relative_vx, relative_vy, relative_angle, relative_angular_velocity, chaser_x, chaser_y, chaser_theta, target_x, target_y, target_theta, chaser_vx, chaser_vy, chaser_omega, target_vx, target_vy, target_omega] *# Relative pose expressed in the chaser's body frame; everything else in the Inertial frame #*
"""
# Load in a temporary environment, used to grab the physical parameters
temp_env = Environment()
# Checking if we want the additional reward and value distribution information
extra_information = temp_env.ADDITIONAL_VALUE_INFO
# Unpacking state
chaser_x, chaser_y, chaser_theta = states[:,6], states[:,7], states[:,8]
target_x, target_y, target_theta = states[:,9], states[:,10], states[:,11]
# Extracting physical properties
LENGTH = temp_env.LENGTH
DOCKING_PORT_MOUNT_POSITION = temp_env.DOCKING_PORT_MOUNT_POSITION
DOCKING_PORT_CORNER1_POSITION = temp_env.DOCKING_PORT_CORNER1_POSITION
DOCKING_PORT_CORNER2_POSITION = temp_env.DOCKING_PORT_CORNER2_POSITION
ARM_MOUNT_POSITION = temp_env.ARM_MOUNT_POSITION
SHOULDER_POSITION = temp_env.SHOULDER_POSITION
ELBOW_POSITION = temp_env.ELBOW_POSITION
WRIST_POSITION = temp_env.WRIST_POSITION
END_EFFECTOR_POSITION = temp_env.END_EFFECTOR_POSITION
########################################################
# Calculating spacecraft corner locations through time #
########################################################
# All the points to draw of the chaser (except the front-face)
chaser_points_body = np.array([[ LENGTH/2,-LENGTH/2],
[-LENGTH/2,-LENGTH/2],
[-LENGTH/2, LENGTH/2],
[ LENGTH/2, LENGTH/2],
[ARM_MOUNT_POSITION[0],ARM_MOUNT_POSITION[1]],
[SHOULDER_POSITION[0],SHOULDER_POSITION[1]],
[ELBOW_POSITION[0],ELBOW_POSITION[1]],
[WRIST_POSITION[0],WRIST_POSITION[1]],
[END_EFFECTOR_POSITION[0],END_EFFECTOR_POSITION[1]]]).T
# The front-face points on the target
chaser_front_face_body = np.array([[[ LENGTH/2],[ LENGTH/2]],
[[ LENGTH/2],[-LENGTH/2]]]).squeeze().T
# Rotation matrix (body -> inertial)
C_Ib_chaser = np.moveaxis(np.array([[np.cos(chaser_theta), -np.sin(chaser_theta)],
[np.sin(chaser_theta), np.cos(chaser_theta)]]), source = 2, destination = 0) # [NUM_TIMESTEPS, 2, 2]
# Rotating body frame coordinates to inertial frame
chaser_body_inertial = np.matmul(C_Ib_chaser, chaser_points_body) + np.array([chaser_x, chaser_y]).T.reshape([-1,2,1])
chaser_front_face_inertial = np.matmul(C_Ib_chaser, chaser_front_face_body) + np.array([chaser_x, chaser_y]).T.reshape([-1,2,1])
# All the points to draw of the target (except the front-face)
target_points_body = np.array([[ LENGTH/2,-LENGTH/2],
[-LENGTH/2,-LENGTH/2],
[-LENGTH/2, LENGTH/2],
[ LENGTH/2, LENGTH/2],
[DOCKING_PORT_MOUNT_POSITION[0], LENGTH/2], # artificially adding this to make the docking cone look better
[DOCKING_PORT_MOUNT_POSITION[0],DOCKING_PORT_MOUNT_POSITION[1]],
[DOCKING_PORT_CORNER1_POSITION[0],DOCKING_PORT_CORNER1_POSITION[1]],
[DOCKING_PORT_CORNER2_POSITION[0],DOCKING_PORT_CORNER2_POSITION[1]],
[DOCKING_PORT_MOUNT_POSITION[0],DOCKING_PORT_MOUNT_POSITION[1]]]).T
# The front-face points on the target
target_front_face_body = np.array([[[ LENGTH/2],[ LENGTH/2]],
[[ LENGTH/2],[-LENGTH/2]]]).squeeze().T
# Rotation matrix (body -> inertial)
C_Ib_target = np.moveaxis(np.array([[np.cos(target_theta), -np.sin(target_theta)],
[np.sin(target_theta), np.cos(target_theta)]]), source = 2, destination = 0) # [NUM_TIMESTEPS, 2, 2]
# Rotating body frame coordinates to inertial frame
target_body_inertial = np.matmul(C_Ib_target, target_points_body) + np.array([target_x, target_y]).T.reshape([-1,2,1])
target_front_face_inertial = np.matmul(C_Ib_target, target_front_face_body) + np.array([target_x, target_y]).T.reshape([-1,2,1])
#######################
# Plotting the motion #
#######################
# Generating figure window
figure = plt.figure(constrained_layout = True)
figure.set_size_inches(5, 4, True)
if extra_information:
grid_spec = gridspec.GridSpec(nrows = 2, ncols = 3, figure = figure)
subfig1 = figure.add_subplot(grid_spec[0,0], aspect = 'equal', autoscale_on = False, xlim = (0, 3.5), ylim = (0, 2.4))
#subfig1 = figure.add_subplot(grid_spec[0,0], projection = '3d', aspect = 'equal', autoscale_on = False, xlim3d = (-5, 5), ylim3d = (-5, 5), zlim3d = (0, 10), xlabel = 'X (m)', ylabel = 'Y (m)', zlabel = 'Z (m)')
subfig2 = figure.add_subplot(grid_spec[0,1], xlim = (np.min([np.min(instantaneous_reward_log), 0]) - (np.max(instantaneous_reward_log) - np.min(instantaneous_reward_log))*0.02, np.max([np.max(instantaneous_reward_log), 0]) + (np.max(instantaneous_reward_log) - np.min(instantaneous_reward_log))*0.02), ylim = (-0.5, 0.5))
subfig3 = figure.add_subplot(grid_spec[0,2], xlim = (np.min(loss_log)-0.01, np.max(loss_log)+0.01), ylim = (-0.5, 0.5))
subfig4 = figure.add_subplot(grid_spec[1,0], ylim = (0, 1.02))
subfig5 = figure.add_subplot(grid_spec[1,1], ylim = (0, 1.02))
subfig6 = figure.add_subplot(grid_spec[1,2], ylim = (0, 1.02))
# Setting titles
subfig1.set_xlabel("X (m)", fontdict = {'fontsize': 8})
subfig1.set_ylabel("Y (m)", fontdict = {'fontsize': 8})
subfig2.set_title("Timestep Reward", fontdict = {'fontsize': 8})
subfig3.set_title("Current loss", fontdict = {'fontsize': 8})
subfig4.set_title("Q-dist", fontdict = {'fontsize': 8})
subfig5.set_title("Target Q-dist", fontdict = {'fontsize': 8})
subfig6.set_title("Bellman projection", fontdict = {'fontsize': 8})
# Changing around the axes
subfig1.tick_params(labelsize = 8)
subfig2.tick_params(which = 'both', left = False, labelleft = False, labelsize = 8)
subfig3.tick_params(which = 'both', left = False, labelleft = False, labelsize = 8)
subfig4.tick_params(which = 'both', left = False, labelleft = False, right = True, labelright = False, labelsize = 8)
subfig5.tick_params(which = 'both', left = False, labelleft = False, right = True, labelright = False, labelsize = 8)
subfig6.tick_params(which = 'both', left = False, labelleft = False, right = True, labelright = True, labelsize = 8)
# Adding the grid
subfig4.grid(True)
subfig5.grid(True)
subfig6.grid(True)
# Setting appropriate axes ticks
subfig2.set_xticks([np.min(instantaneous_reward_log), 0, np.max(instantaneous_reward_log)] if np.sign(np.min(instantaneous_reward_log)) != np.sign(np.max(instantaneous_reward_log)) else [np.min(instantaneous_reward_log), np.max(instantaneous_reward_log)])
subfig3.set_xticks([np.min(loss_log), np.max(loss_log)])
subfig4.set_xticks([bins[i*5] for i in range(round(len(bins)/5) + 1)])
subfig4.tick_params(axis = 'x', labelrotation = -90)
subfig4.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
subfig5.set_xticks([bins[i*5] for i in range(round(len(bins)/5) + 1)])
subfig5.tick_params(axis = 'x', labelrotation = -90)
subfig5.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
subfig6.set_xticks([bins[i*5] for i in range(round(len(bins)/5) + 1)])
subfig6.tick_params(axis = 'x', labelrotation = -90)
subfig6.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
else:
subfig1 = figure.add_subplot(1, 1, 1, aspect = 'equal', autoscale_on = False, xlim = (0, 3.5), ylim = (0, 2.4), xlabel = 'X Position (m)', ylabel = 'Y Position (m)')
# Defining plotting objects that change each frame
chaser_body, = subfig1.plot([], [], color = 'r', linestyle = '-', linewidth = 2) # Note, the comma is needed
chaser_front_face, = subfig1.plot([], [], color = 'k', linestyle = '-', linewidth = 2) # Note, the comma is needed
target_body, = subfig1.plot([], [], color = 'g', linestyle = '-', linewidth = 2)
target_front_face, = subfig1.plot([], [], color = 'k', linestyle = '-', linewidth = 2)
chaser_body_dot = subfig1.scatter(0., 0., color = 'r', s = 0.1)
if extra_information:
reward_bar = subfig2.barh(y = 0, height = 0.2, width = 0)
loss_bar = subfig3.barh(y = 0, height = 0.2, width = 0)
q_dist_bar = subfig4.bar(x = bins, height = np.zeros(shape = len(bins)), width = bins[1]-bins[0])
target_q_dist_bar = subfig5.bar(x = bins, height = np.zeros(shape = len(bins)), width = bins[1]-bins[0])
projected_q_dist_bar = subfig6.bar(x = bins, height = np.zeros(shape = len(bins)), width = bins[1]-bins[0])
time_text = subfig1.text(x = 0.2, y = 0.91, s = '', fontsize = 8, transform=subfig1.transAxes)
reward_text = subfig1.text(x = 0.0, y = 1.02, s = '', fontsize = 8, transform=subfig1.transAxes)
else:
time_text = subfig1.text(x = 0.1, y = 0.9, s = '', fontsize = 8, transform=subfig1.transAxes)
reward_text = subfig1.text(x = 0.62, y = 0.9, s = '', fontsize = 8, transform=subfig1.transAxes)
episode_text = subfig1.text(x = 0.4, y = 0.96, s = '', fontsize = 8, transform=subfig1.transAxes)
episode_text.set_text('Episode ' + str(episode_number))
# Function called repeatedly to draw each frame
def render_one_frame(frame, *fargs):
temp_env = fargs[0] # Extract environment from passed args
# Draw the chaser body
chaser_body.set_data(chaser_body_inertial[frame,0,:], chaser_body_inertial[frame,1,:])
# Draw the front face of the chaser body in a different colour
chaser_front_face.set_data(chaser_front_face_inertial[frame,0,:], chaser_front_face_inertial[frame,1,:])
# Draw the target body
target_body.set_data(target_body_inertial[frame,0,:], target_body_inertial[frame,1,:])
if SPOTNet_sees_target_log[frame]:
target_body.set_color('y')
else:
target_body.set_color('g')
# Draw the front face of the target body in a different colour
target_front_face.set_data(target_front_face_inertial[frame,0,:], target_front_face_inertial[frame,1,:])
# Drawing a dot in the centre of the chaser
chaser_body_dot.set_offsets(np.hstack((chaser_x[frame],chaser_y[frame])))
# Update the time text
time_text.set_text('Time = %.1f s' %(time_log[frame]))
# Update the reward text
reward_text.set_text('Total reward = %.1f' %cumulative_reward_log[frame])
try:
if extra_information:
# Updating the instantaneous reward bar graph
reward_bar[0].set_width(instantaneous_reward_log[frame])
# And colouring it appropriately
if instantaneous_reward_log[frame] < 0:
reward_bar[0].set_color('r')
else:
reward_bar[0].set_color('g')
# Updating the loss bar graph
loss_bar[0].set_width(loss_log[frame])
# Updating the q-distribution plot
for this_bar, new_value in zip(q_dist_bar, critic_distributions[frame,:]):
this_bar.set_height(new_value)
# Updating the target q-distribution plot
for this_bar, new_value in zip(target_q_dist_bar, target_critic_distributions[frame, :]):
this_bar.set_height(new_value)
# Updating the projected target q-distribution plot
for this_bar, new_value in zip(projected_q_dist_bar, projected_target_distribution[frame, :]):
this_bar.set_height(new_value)
except:
pass
#
# Since blit = True, must return everything that has changed at this frame
return chaser_body_dot, time_text, chaser_body, chaser_front_face, target_body, target_front_face
# Generate the animation!
fargs = [temp_env] # bundling additional arguments
animator = animation.FuncAnimation(figure, render_one_frame, frames = np.linspace(0, len(states)-1, len(states)).astype(int),
blit = False, fargs = fargs)
"""
frames = the int that is passed to render_one_frame. I use it to selectively plot certain data
fargs = additional arguments for render_one_frame
interval = delay between frames in ms
"""
# Save the animation!
if temp_env.SKIP_FAILED_ANIMATIONS:
try:
# Save it to the working directory [have to], then move it to the proper folder
animator.save(filename = filename + '_episode_' + str(episode_number) + '.mp4', fps = 30, dpi = 100)
# Make directory if it doesn't already exist
os.makedirs(os.path.dirname(save_directory + filename + '/videos/'), exist_ok=True)
# Move animation to the proper directory
os.rename(filename + '_episode_' + str(episode_number) + '.mp4', save_directory + filename + '/videos/episode_' + str(episode_number) + '.mp4')
except:
("Skipping animation for episode %i due to an error" %episode_number)
# Try to delete the partially completed video file
try:
os.remove(filename + '_episode_' + str(episode_number) + '.mp4')
except:
pass
else:
# Save it to the working directory [have to], then move it to the proper folder
animator.save(filename = filename + '_episode_' + str(episode_number) + '.mp4', fps = 30, dpi = 100)
# Make directory if it doesn't already exist
os.makedirs(os.path.dirname(save_directory + filename + '/videos/'), exist_ok=True)
# Move animation to the proper directory
os.rename(filename + '_episode_' + str(episode_number) + '.mp4', save_directory + filename + '/videos/episode_' + str(episode_number) + '.mp4')
del temp_env
plt.close(figure) | 5f41390e2ab0f92d648e80cd28e369d3eb06af10 | 2,211 |
def hydrogens(atom: Atom) -> int:
"""Total number of hydrogen atoms (int).
"""
return atom.GetTotalNumHs() | f36e04d2c67eaf81b031651f78bb64db3a1c614c | 2,212 |
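A hedged usage sketch, assuming RDKit is installed and the `Atom` objects come from a parsed molecule:
from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")  # ethanol
print([hydrogens(atom) for atom in mol.GetAtoms()])  # [3, 2, 1]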
def to_field(field_tuple):
"""Create a dataframe_field from a tuple"""
return dataframe_field(*field_tuple) | 6863cc33b8eea458223b9f4b1d4432104784d245 | 2,213 |
def compute_subjobs_for_build(build_id, job_config, project_type):
"""
Calculate subjobs for a build.
:type build_id: int
:type job_config: JobConfig
:param project_type: the project_type that the build is running in
:type project_type: project_type.project_type.ProjectType
:rtype: list[Subjob]
"""
# Users can override the list of atoms to be run in this build. If the atoms_override
# was specified, we can skip the atomization step and use those overridden atoms instead.
if project_type.atoms_override is not None:
atoms_string_list = project_type.atoms_override
atoms_list = [Atom(atom_string_value) for atom_string_value in atoms_string_list]
else:
atoms_list = job_config.atomizer.atomize_in_project(project_type)
# Group the atoms together using some grouping strategy
timing_file_path = project_type.timing_file_path(job_config.name)
grouped_atoms = _grouped_atoms(
atoms_list,
job_config.max_executors,
timing_file_path,
project_type.project_directory
)
# Generate subjobs for each group of atoms
subjobs = []
for subjob_id, subjob_atoms in enumerate(grouped_atoms):
# The atom id isn't calculated until the atom has been grouped into a subjob.
for atom_id, atom in enumerate(subjob_atoms):
atom.id = atom_id
subjobs.append(Subjob(build_id, subjob_id, project_type, job_config, subjob_atoms))
return subjobs | caa16899755fbb19530c27004db3515e3eeab9d6 | 2,214 |
def pymodbus_mocked(mocker):
"""Patch pymodbus to deliver results."""
class ResponseContent:
"""Fake a response."""
registers = [0]
class WriteStatus:
"""Mock a successful response."""
@staticmethod
def isError():
# pylint: disable=invalid-name,missing-function-docstring
return False
# Patch connection function
mocker.patch("pymodbus.client.sync.ModbusTcpClient.connect")
mocker.patch(
"pymodbus.client.sync.ModbusTcpClient.read_holding_registers",
return_value=ResponseContent,
)
mocker.patch(
"pymodbus.client.sync.ModbusTcpClient.write_registers", return_value=WriteStatus
) | fdee663d9a8a80496ab6678aacb0b820251c83e1 | 2,215 |
def user_can_view_assessments(user, **kwargs):
""" Return True iff given user is allowed to view the assessments """
return not appConfig.settings.LOGIN_REQUIRED or user.is_authenticated | 1ef3f41ee311a6504d6e0cff0f5cad68135e0527 | 2,216 |
from typing import List
def get_hashes(root_hash: str) -> List[str]:
""" Return a list with the commits since `root_hash` """
cmd = f"git rev-list --ancestry-path {root_hash}..HEAD"
proc = run(cmd)
return proc.stdout.splitlines() | c0fdb996cf43066b87040b6647b75f42e8f7360f | 2,217 |
import zipfile
def unzip_file(zip_src, dst_dir):
"""
Extract a zip archive.
:param zip_src: full path of the zip file
:param dst_dir: destination directory to extract into
:return:
"""
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, "r")
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
return "请上传zip类型压缩文件" | 8b89f41f38cc688f6e0473a77215ae72b163654a | 2,218 |
def abort_multipart_upload(resource, bucket_name, object_name, upload_id):
"""Abort in-progress multipart upload"""
mpupload = resource.MultipartUpload(bucket_name, object_name, upload_id)
return mpupload.abort() | 93535c2404db98e30bd29b2abbda1444ae4d0e8a | 2,219 |
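A hedged sketch assuming `resource` is a boto3 S3 service resource; the bucket, key, and upload id are placeholders for a multipart upload started elsewhere.
import boto3

s3 = boto3.resource("s3")
abort_multipart_upload(s3, "my-bucket", "big/object.bin", "example-upload-id")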
import os
import pandas as pd
def read_data(input_path):
"""Read pre-stored data
"""
train = pd.read_parquet(os.path.join(input_path, 'train.parquet'))
tournament = pd.read_parquet(os.path.join(input_path, 'tournament.parquet'))
return train, tournament | d55b7b645089af0af2b7b27ec13e0581bf76e21a | 2,220 |
def double(n):
"""
Takes a number n and doubles it
"""
return n * 2 | 8efeee1aa09c27d679fa8c5cca18d4849ca7e205 | 2,221 |
import torch
def Normalize(tensor, mean, std, inplace=False):
"""Normalize a float tensor image with mean and standard deviation.
This transform does not support PIL Image.
.. note::
This transform acts out of place by default, i.e., it does not mutates the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError(
'Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))
if not tensor.is_floating_point():
raise TypeError(
'Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))
if tensor.ndim < 3:
raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
'{}.'.format(tensor.size()))
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
if (std == 0).any():
raise ValueError(
'std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
if mean.ndim == 1:
mean = mean.view(-1, 1, 1)
if std.ndim == 1:
std = std.view(-1, 1, 1)
tensor.sub_(mean).div_(std)
return tensor | 1312955d0db28ae66f6dadd9c29f3034aa21e648 | 2,222 |
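A short usage sketch; the mean/std values are the common ImageNet statistics, used here purely for illustration.
import torch

img = torch.rand(3, 224, 224)  # float CHW image in [0, 1]
out = Normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(out.shape)               # torch.Size([3, 224, 224])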
from rfpipe import candidates, util
def reproduce_candcollection(cc, data=None, wisdom=None, spec_std=None,
sig_ts=[], kalman_coeffs=[]):
""" Uses candcollection to make new candcollection with required info.
Will look for cluster label and filter only for peak snr, if available.
Location (e.g., integration, dm, dt) of each is used to create
canddata for each candidate, if required.
Can calculates features not used directly for search (as defined in
state.prefs.calcfeatures).
"""
# set up output cc
st = cc.state
cc1 = candidates.CandCollection(prefs=st.prefs, metadata=st.metadata)
if len(cc):
if 'cluster' in cc.array.dtype.fields:
clusters = cc.array['cluster'].astype(int)
cl_rank, cl_count = candidates.calc_cluster_rank(cc)
calcinds = np.unique(np.where(cl_rank == 1)[0]).tolist()
logger.debug("Reproducing cands at {0} cluster peaks"
.format(len(calcinds)))
else:
logger.debug("No cluster field found. Reproducing all.")
calcinds = list(range(len(cc)))
# if candidates that need new feature calculations
if not all([f in cc.array.dtype.fields for f in st.features]):
logger.info("Generating canddata for {0} candidates"
.format(len(calcinds)))
candlocs = cc.locs
snrs = cc.snrtot
normprob = candidates.normprob(snrs, st.ntrials)
snrmax = snrs.max()
logger.info('Zscore/SNR for strongest candidate: {0}/{1}'
.format(normprob[np.where(snrs == snrmax)[0]][0], snrmax))
if ('snrk' in st.features and
'snrk' not in cc.array.dtype.fields and
(spec_std is None or not len(sig_ts) or not len(kalman_coeffs))):
# TODO: use same kalman calc for search as reproduce?
spec_std, sig_ts, kalman_coeffs = util.kalman_prep(data)
# reproduce canddata for each
for i in calcinds:
# TODO: check on best way to find max SNR with kalman, etc
snr = snrs[i]
candloc = candlocs[i]
# kwargs passed to canddata object for plotting/saving
kwargs = {}
if 'cluster' in cc.array.dtype.fields:
logger.info("Cluster {0}/{1} has {2} candidates and max detected SNR {3:.1f} at {4}"
.format(calcinds.index(i), len(calcinds)-1, cl_count[i],
snr, candloc))
# add supplementary plotting and cc info
kwargs['cluster'] = clusters[i]
kwargs['clustersize'] = cl_count[i]
else:
logger.info("Candidate {0}/{1} has detected SNR {2:.1f} at {3}"
.format(calcinds.index(i), len(calcinds)-1, snr,
candloc))
# reproduce candidate and get/calc features
data_corr = pipeline_datacorrect(st, candloc, data_prep=data)
for feature in st.features:
if feature in cc.array.dtype.fields: # if already calculated
kwargs[feature] = cc.array[feature][i]
else: # if desired, but not calculated here or from canddata
if feature == 'snrk':
if 'snrk' not in cc.array.dtype.fields:
spec = data_corr.real.mean(axis=3).mean(axis=1)[candloc[1]]
if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
significance_kalman = -kalman_significance(spec, spec_std,
sig_ts=sig_ts,
coeffs=kalman_coeffs)
snrk = (2*significance_kalman)**0.5
else:
logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
snrk = 0.
logger.info("Calculated snrk of {0} after detection. "
"Adding it to CandData.".format(snrk))
kwargs[feature] = snrk
cd = pipeline_canddata(st, candloc, data_corr, spec_std=spec_std,
sig_ts=sig_ts, kalman_coeffs=kalman_coeffs, **kwargs)
if st.prefs.saveplots:
candidates.candplot(cd, snrs=snrs) # snrs before clustering
# regenerate cc with extra features in cd
cc1 += candidates.cd_to_cc(cd)
# if candidates that do not need new featuers, just select peaks
else:
logger.info("Using clustering info to select {0} candidates"
.format(len(calcinds)))
cc1.array = cc.array.take(calcinds)
return cc1 | 6bd57054262bcc0ea5fe05dbe0b2a8f93cf9c7dc | 2,223 |
import numpy as np
import scipy.signal
def hilbert(signal, padding='nextpow'):
"""
Apply a Hilbert transform to a `neo.AnalogSignal` object in order to
obtain its (complex) analytic signal.
The time series of the instantaneous angle and amplitude can be obtained
as the angle (`np.angle` function) and absolute value (`np.abs` function)
of the complex analytic signal, respectively.
By default, the function will zero-pad the signal to a length
corresponding to the next higher power of 2. This will provide higher
computational efficiency at the expense of memory. In addition, this
circumvents a situation where, for some specific choices of the length of
the input, `scipy.signal.hilbert` function will not terminate.
Parameters
----------
signal : neo.AnalogSignal
Signal(s) to transform.
padding : int, {'none', 'nextpow'}, or None, optional
Defines whether the signal is zero-padded.
The `padding` argument corresponds to `N` in
`scipy.signal.hilbert(signal, N=padding)` function.
If 'none' or None, no padding.
If 'nextpow', zero-pad to the next length that is a power of 2.
If it is an `int`, directly specify the length to zero-pad to
(indicates the number of Fourier components).
Default: 'nextpow'
Returns
-------
neo.AnalogSignal
Contains the complex analytic signal(s) corresponding to the input
`signal`. The unit of the returned `neo.AnalogSignal` is
dimensionless.
Raises
------
ValueError:
If `padding` is not an integer or neither 'nextpow' nor 'none' (None).
Examples
--------
Create a sine signal at 5 Hz with increasing amplitude and calculate the
instantaneous phases:
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> import matplotlib.pyplot as plt
>>> from elephant.signal_processing import hilbert
>>> t = np.arange(0, 5000) * pq.ms
>>> f = 5. * pq.Hz
>>> a = neo.AnalogSignal(
... np.array(
... (1 + t.magnitude / t[-1].magnitude) * np.sin(
... 2. * np.pi * f * t.rescale(pq.s))).reshape(
... (-1,1)) * pq.mV,
... t_start=0*pq.s,
... sampling_rate=1000*pq.Hz)
...
>>> analytic_signal = hilbert(a, padding='nextpow')
>>> angles = np.angle(analytic_signal)
>>> amplitudes = np.abs(analytic_signal)
>>> print(angles)
[[-1.57079633]
[-1.51334228]
[-1.46047675]
...,
[-1.73112977]
[-1.68211683]
[-1.62879501]]
>>> plt.plot(t, angles)
"""
# Length of input signals
n_org = signal.shape[0]
# Right-pad signal to desired length using the signal itself
if isinstance(padding, int):
# User defined padding
n = padding
elif padding == 'nextpow':
# To speed up calculation of the Hilbert transform, make sure we change
# the signal to be of a length that is a power of two. Failure to do so
# results in computations of certain signal lengths to not finish (or
# finish in absurd time). This might be a bug in scipy (0.16), e.g.,
# the following code will not terminate for this value of k:
#
# import numpy
# import scipy.signal
# k=679346
# t = np.arange(0, k) / 1000.
# a = (1 + t / t[-1]) * np.sin(2 * np.pi * 5 * t)
# analytic_signal = scipy.signal.hilbert(a)
#
# For this reason, nextpow is the default setting for now.
n = 2 ** (int(np.log2(n_org - 1)) + 1)
elif padding == 'none' or padding is None:
# No padding
n = n_org
else:
raise ValueError("Invalid padding '{}'.".format(padding))
output = signal.duplicate_with_new_data(
scipy.signal.hilbert(signal.magnitude, N=n, axis=0)[:n_org])
# todo use flag once is fixed
# https://github.com/NeuralEnsemble/python-neo/issues/752
output.array_annotate(**signal.array_annotations)
return output / output.units | cf07ae43c928d9c8fdb323d40530d0353bc861ee | 2,224 |
def _tagged_mosc_id(kubeconfig, version, arch, private) -> str:
"""determine what the most recently tagged machine-os-content is in given imagestream"""
base_name = rgp.default_imagestream_base_name(version)
base_namespace = rgp.default_imagestream_namespace_base_name()
name, namespace = rgp.payload_imagestream_name_and_namespace(base_name, base_namespace, arch, private)
stdout, _ = exectools.cmd_assert(
f"oc --kubeconfig '{kubeconfig}' --namespace '{namespace}' get istag '{name}:machine-os-content'"
" --template '{{.image.dockerImageMetadata.Config.Labels.version}}'",
retries=3,
pollrate=5,
strip=True,
)
return stdout if stdout else None | 1fa4ebf736b763fb97d644c0d55aa3923020107d | 2,225 |
import pandas as pd
def load_household_size_by_municipality():
"""Return dataframe, index 'Gemeente', column 'HHsize'."""
dfhh = pd.read_csv('data/huishoudens_samenstelling_gemeentes.csv', comment='#')
dfhh.sort_values('Gemeente', inplace=True)
dfhh.set_index('Gemeente', inplace=True)
# remove rows for nonexistent municipalites
dfhh.drop(index=dfhh.index[dfhh['nHH'].isna()], inplace=True)
# rename municipalities
rename_muns = {
'Beek (L.)': 'Beek',
'Hengelo (O.)': 'Hengelo',
'Laren (NH.)': 'Laren',
'Middelburg (Z.)': 'Middelburg',
'Rijswijk (ZH.)': 'Rijswijk',
'Stein (L.)': 'Stein',
'Groningen (gemeente)': 'Groningen',
'Utrecht (gemeente)': 'Utrecht',
"'s-Gravenhage (gemeente)": "'s-Gravenhage",
}
dfhh.rename(index=rename_muns, inplace=True)
return dfhh | 9e78345a00209135ec4185a806a70deeb10d5bea | 2,226 |
from typing import Dict
from typing import Any
from typing import List
def gcp_iam_service_account_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Delete service account key.
Args:
client (Client): GCP API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
service_account_name = argToList(args.get('service_account_name'))
command_results_list: List[CommandResults] = []
for account in service_account_name:
try:
client.gcp_iam_service_account_delete_request(account)
command_results_list.append(CommandResults(
readable_output=f'Service account {account} deleted successfully.'
))
except Exception as exception:
error = CommandResults(
readable_output=f'An error occurred while trying to delete {account}.\n {exception}'
)
command_results_list.append(error)
return command_results_list | eeea44d6cc96b430c63168346849ac30a1247cad | 2,227 |
from typing import Union
from typing import Tuple
def couple_to_string(couple: Union[Span, Tuple[int, int]]) -> str:
"""Return a deduplicated string representation of the given couple or span.
Examples:
>>> couple_to_string((12, 15))
"12-15"
>>> couple_to_string((12, 12))
"12"
>>> couple_to_string(Span(12, 15))
"12-15"
"""
return f"{couple[0]}" + ("" if couple[0] == couple[1] else f"-{couple[1]}") | 8aaa0e2b7dfbdd58e4f9765a56f9057dd2b612f3 | 2,228 |
def create_study(X, y,
storage=None, # type: Union[None, str, storages.BaseStorage]
sample_method=None,
metrics=None,
study_name=None, # type: Optional[str]
direction='maximize', # type: str
load_cache=False, # type: bool
is_autobin=False,
bin_params=dict(),
sample_params=dict(),
trials_list=list(),
export_model_path=None,
precision=np.float64,
):
# type: (...) -> Study
"""Create a new :class:`~diego.study.Study`.
Args:
storage:
Database URL. If this argument is set to None, in-memory storage is used, and the
:class:`~diego.study.Study` will not be persistent.
sampler:
A sampler object that implements background algorithm for value suggestion. See also
:class:`~diego.samplers`.
study_name:
Study's name. If this argument is set to None, a unique name is generated
automatically.
is_autobin: whether to perform autobinning
bin_params: binning method
precision {[np.dtype]} -- precision:
np.dtypes, float16, float32, float64 for data precision to reduce memory size. (default: {np.float64})
Returns:
A :class:`~diego.study.Study` object.
"""
X, y = check_X_y(X, y, accept_sparse='csr')
storage = get_storage(storage)
try:
study_id = storage.create_new_study_id(study_name)
except basic.DuplicatedStudyError:
# In memory, studies should preferably not share the same name, and an existing Study can be loaded. The data lives in storage.
# if load_if_exists:
# assert study_name is not None
# logger = logging.get_logger(__name__)
# logger.info("Using an existing study with name '{}' instead of "
# "creating a new one.".format(study_name))
# study_id = storage.get_study_id_from_name(study_name)
# else:
raise
study_name = storage.get_study_name_from_id(study_id)
study = Study(
study_name=study_name,
storage=storage,
sample_method=sample_method,
is_autobin=is_autobin,
bin_params=bin_params,
export_model_path=export_model_path,
precision=precision,
metrics=metrics)
if direction == 'minimize':
_direction = basic.StudyDirection.MINIMIZE
elif direction == 'maximize':
_direction = basic.StudyDirection.MAXIMIZE
else:
raise ValueError(
'Please set either \'minimize\' or \'maximize\' to direction.')
if metrics in ['logloss']:
_direction = basic.StudyDirection.MINIMIZE
X = X.astype(dtype=precision, copy=False)
study.storage.direction = _direction
study.storage.set_train_storage(X, y)
return study | bf8ea9c5280c06c4468e9ba015e62401e65ad870 | 2,229 |
import os
import json
import logging
import logging.config
def setup_logging(path='log.config', key=None):
"""Setup logging configuration"""
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(key)
return logger | be4a4e94370f25ea232d5fc36b7d185bae2c5a1f | 2,230 |
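A usage sketch; when the config file is missing, the function falls back to basicConfig at DEBUG level.
logger = setup_logging(path="log.config", key=__name__)
logger.info("logging configured")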
import inspect
def test_no_access_to_class_property(db):
"""Ensure the implementation doesn't access class properties or declared
attrs while inspecting the unmapped model.
"""
class class_property:
def __init__(self, f):
self.f = f
def __get__(self, instance, owner):
return self.f(owner)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class ns:
is_duck = False
floats = False
class Witch(Duck):
@declared_attr
def is_duck(self):
# declared attrs will be accessed during mapper configuration,
# but make sure they're not accessed before that
info = inspect.getouterframes(inspect.currentframe())[2]
assert info[3] != "_should_set_tablename"
ns.is_duck = True
@class_property
def floats(self):
ns.floats = True
assert ns.is_duck
assert not ns.floats | ab24f6405675cba44af570282afa60bd1e600dcf | 2,231 |
import re
def get_gb_version(backbone_top_cmake_path):
"""
Find the game backbone version number by searching the top level CMake file
"""
with open(backbone_top_cmake_path, 'r') as file:
cmake_text = file.read()
regex_result = re.search(gb_version_regex, cmake_text)
return regex_result.group(1) | a4855c5fd82579b5b1f4f777ee0c040227432947 | 2,232 |
from typing import Optional
from typing import List
async def get_processes(name: Optional[str] = None) -> List[Process]:
"""
Get all processes.
Args:
name (Optional[str], optional): Filter by process name. Defaults to None.
Returns:
List[Process]: A list of processes.
"""
if name:
return get_processes_by_name(name)
return get_all_processes() | 5133a81dcda079e6b5ed649443f9befed72be953 | 2,233 |
def plot_day_of_activation(df, plotname):
"""
Plots Aggregate of Day of Activation.
"""
# todo sort order in logical day order
dotw = {0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday'}
df2 = df[df['adults_first_use'] == 1][['user_id', 'day_of_week']]
df2 = df2.groupby('user_id', as_index=False).mean()['day_of_week'].map(dotw).to_frame()
df2 = df2['day_of_week'].value_counts().to_frame()
# todo fix the X axis labeling so it's not hardcoded!
trace = go.Bar(x=['Tuesday', 'Wednesday', 'Friday', 'Thursday', 'Saturday', 'Sunday', 'Monday'],
y=df2.day_of_week,
marker=dict(color='#CC171D'))
layout = go.Layout(
title="Day of Firenze Card Activation",
xaxis=dict(
title='Day of the Week',
nticks=7,
ticks='outside',
),
yaxis=dict(
title='Number of Cards Activated',
ticks='outside',
)
)
fig = go.Figure(data=go.Data([trace]), layout=layout)
plot_url = py.iplot(fig, plotname, sharing='private', auto_open=False)
return df2, plot_url | 3c50196d1cd63972c3263bb695967f3552739fd1 | 2,234 |
def get_rucio_redirect_url(lfn, scope):
"""
get_rucio_redirect_url: assemble Rucio redirect URL
@params: lfn ... one filename
e.g. user.gangarbt.62544955._2108356106.log.tgz
scope ... scope of the file with lfn
e.g. user.gangarbt, or valid1
returns: the Rucio redirect URL
"""
redirectUrl = ''
### compose the redirecURL
redirectUrl = '%(redirecthost)s/redirect/%(scope)s/%(filename)s%(suffix)s' % \
{\
'redirecthost': get_rucio_redirect_host(), \
'scope': scope, \
'filename': lfn, \
'suffix': '' \
}
_logger.info('get_rucio_redirect_url: redirectUrl=(%s)' % redirectUrl)
### return the redirectURL
return redirectUrl | 26547c18d9699d0ab3d6d963382bf14c067b982f | 2,235 |
async def _getRequest(websession, url):
"""Send a GET request."""
async with websession.get(url, headers=HEADER) as response:
if response.status == 200:
data = await response.json(content_type=None)
else:
raise Exception('Bad response status code: {}'.format(response.status))
return data | 6511926b2ce753f5233778c702a11142e6cad5a3 | 2,236 |
def interval_seconds():
"""returns the time interval in seconds
Returns:
int
"""
return int(interval_to_milliseconds(interval())/1000) | 831c24cb113dab2b39fc068c397c41c3cc1131b5 | 2,237 |
import subprocess
def get_current_git_branch():
"""Get current git branch name.
Returns:
str: Branch name
"""
branch_name = "unknown"
try:
branch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('ascii').strip()
except subprocess.CalledProcessError:
pass
return branch_name | 6d677d0f4e15532c20774e479b49a23093dad09a | 2,238 |
from typing import Optional
from typing import Sequence
def get_autonomous_db_versions(compartment_id: Optional[str] = None,
db_workload: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetAutonomousDbVersionsFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutonomousDbVersionsResult:
"""
This data source provides the list of Autonomous Db Versions in Oracle Cloud Infrastructure Database service.
Gets a list of supported Autonomous Database versions.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_autonomous_db_versions = oci.database.get_autonomous_db_versions(compartment_id=var["compartment_id"],
db_workload=var["autonomous_db_version_db_workload"])
```
:param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
:param str db_workload: A filter to return only autonomous database resources that match the specified workload type.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['dbWorkload'] = db_workload
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getAutonomousDbVersions:getAutonomousDbVersions', __args__, opts=opts, typ=GetAutonomousDbVersionsResult).value
return AwaitableGetAutonomousDbVersionsResult(
autonomous_db_versions=__ret__.autonomous_db_versions,
compartment_id=__ret__.compartment_id,
db_workload=__ret__.db_workload,
filters=__ret__.filters,
id=__ret__.id) | e10770e9db891079dda251578f851bdb7a0ade8e | 2,239 |
import numpy as np
def azimuthal_average(image, center=None, stddev=True, binsize=0.5, interpnan=False):
"""
Calculate the azimuthally averaged radial profile.
Modified based on https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py
Parameters:
image (numpy 2-D array): image array.
center (list): [x, y] pixel coordinates. If None, use image center.
Note that x is horizontal and y is vertical, y, x = image.shape.
stddev (bool): if True, the stddev of the profile will also be returned.
binsize (float): size of the averaging bin. Can lead to strange results if
non-binsize factors are used to specify the center and the binsize is
too large.
interpnan (bool): Interpolate over NAN values, i.e. bins where there is no data?
Returns:
:
If `stddev == True`, it will return [radius, profile, stddev];
else, it will return [radius, profile].
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if center is None:
center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
r = np.hypot(x - center[0], y - center[1])
# The 'bins' as initially defined are lower/upper bounds for each bin
# so that values will be in [lower,upper)
nbins = int(np.round(r.max() / binsize) + 1)
maxbin = nbins * binsize
bins = np.linspace(0, maxbin, nbins + 1)
# We're probably more interested in the bin centers than their left or right sides...
bin_centers = (bins[1:] + bins[:-1]) / 2.0
# There are never any in bin 0, because the lowest index returned by digitize is 1
nr = np.histogram(r, bins)[0] # nr is how many pixels are within each bin
# Radial profile itself
nan_flag = np.isnan(image) # get rid of nan
#profile = np.histogram(r, bins, weights=image)[0] / nr
profile = np.histogram(r[~nan_flag], bins, weights=image[~nan_flag])[0] / nr
if interpnan:
profile = np.interp(bin_centers, bin_centers[~np.isnan(profile)],
profile[~np.isnan(profile)])
if stddev:
# Find out which radial bin each point in the map belongs to
# recall that bins are from 1 to nbins
whichbin = np.digitize(r.ravel(), bins)
profile_std = np.array([np.nanstd(image.ravel()[whichbin == b]) for b in range(1, nbins + 1)])
profile_std /= np.sqrt(nr) # Deviation of the mean!
return [bin_centers, profile, profile_std]
else:
return [bin_centers, profile] | f5b5e5b4b21af71c50f0a0a9947d3a5cc203bdf0 | 2,240 |
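A usage sketch on a synthetic radially symmetric image (assumes only NumPy):
import numpy as np

yy, xx = np.indices((101, 101))
r = np.hypot(xx - 50, yy - 50)
image = np.exp(-r / 20.0)          # brightness falls off with radius

radius, profile, profile_std = azimuthal_average(image, binsize=1.0)
print(radius[:3], profile[:3])     # the profile decays roughly as exp(-r/20)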
from typing import OrderedDict
def get_setindices(header, setnames):
"""From header like ---ID, coverage, set1_q-value set2_q-value---
this returns indices for different sets {'q-value': {'set1': 2, 'set2: 3}}
"""
setindices = OrderedDict()
for index, field in enumerate(header):
for setname in setnames:
if field.startswith('{}_'.format(setname)):
fieldname = field[len(setname) + 1:]
try:
setindices[fieldname][setname] = index
except KeyError:
setindices[fieldname] = {setname: index}
return setindices | 1bdbda0528098a55438b4cb24ca22358fae7e682 | 2,241 |
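A small usage sketch matching the docstring example:
header = ['ID', 'coverage', 'set1_q-value', 'set2_q-value']
print(get_setindices(header, ['set1', 'set2']))  # -> {'q-value': {'set1': 2, 'set2': 3}}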
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Example
-------
_base_and_stride('5Min') -> ('Min', 5)
"""
groups = opattern.match(freqstr)
if groups.lastindex != 2:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride) | a562b0a49f2e5a1d4f49119a424dbfd1588d9c97 | 2,242 |
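The snippet relies on a module-level `opattern` regex that is not shown; below is a minimal assumed definition and usage sketch (the real pandas pattern may differ).
import re

opattern = re.compile(r'^(\d*)([A-Za-z]+)$')  # assumed: optional integer multiple, then an alphabetic base

print(_base_and_stride('5Min'))  # ('Min', 5)
print(_base_and_stride('D'))     # ('D', 1)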
import numpy as np
def generate_label(input_x, threshold):
"""
generate label with input
:param input_x: shape of [batch_size, sequence_length]
:return: y: one-hot labels of shape [batch_size, 2]
"""
batch_size,sequence_length=input_x.shape
y=np.zeros((batch_size,2))
for i in range(batch_size):
input_single=input_x[i]
sum=np.sum(input_single)
if i == 0:print("sum:",sum,";threshold:",threshold)
y_single=1 if sum>threshold else 0
if y_single==1:
y[i]=[0,1]
else: # y_single=0
y[i]=[1,0]
return y | 0091fb0d4c92884af1cf07d8afd248e2afeb92b2 | 2,243 |
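A usage sketch with random binary input (assumes NumPy):
import numpy as np

x = np.random.randint(0, 2, size=(4, 10))  # batch of binary sequences
y = generate_label(x, threshold=5)
print(y)                                   # one-hot rows: [0, 1] wherever the row sum exceeds 5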
from skimage import io, transform
def sf_imread(
img_path,
plot=True,
):
"""
Thin wrapper around `skimage.io.imread` that rotates the image if it is
to be used for plotting, but does not if it is to be used for measurements.
Parameters
----------
img_path : str
Path to image
plot : bool
Determines whether or not image will be rotated 90 degrees
Returns
-------
np.array
"""
img_in = io.imread(img_path)
if plot:
img_in = transform.rotate(img_in, -90) # show images going left-right
return img_in | 3eb17fcb5bee144f7c822cfa23d5057c5fecc109 | 2,244 |
def test_plugin_ws_url_attributes(spf, path, query, expected_url):
"""Note, this doesn't _really_ test websocket functionality very well."""
app = spf._app
test_plugin = TestPlugin()
async def handler(request):
return text('OK')
test_plugin.websocket(path)(handler)
spf.register_plugin(test_plugin)
test_client = app.test_client
request, response = test_client.get(path + '?{}'.format(query))
try:
# Sanic 20.3.0 and above
p = test_client.port
except AttributeError:
p = testing.PORT or 0
assert request.url == expected_url.format(testing.HOST, str(p))
parsed = urlparse(request.url)
assert parsed.scheme == request.scheme
assert parsed.path == request.path
assert parsed.query == request.query_string
assert parsed.netloc == request.host | f6e1f28f1df1e712ab399db48c8a4e0058d11d11 | 2,245 |
def less_than(x, y, force_cpu=None, cond=None, name=None):
"""
${comment}
Args:
x(Tensor): ${x_comment}.
y(Tensor): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Tensor, optional): Optional output which can be any created Tensor
that meets the requirements to store the result of *less_than*.
if cond is None, a new Tensor will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_comment}.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
result = paddle.less_than(x, y)
print(result) # [True, False, False, False]
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_than")
if cond is not None:
check_type(cond, "cond", Variable, "less_than")
if force_cpu != None:
check_type(force_cpu, "force_cpu", bool, "less_than")
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_cpu is not None:
attrs['force_cpu'] = force_cpu
helper.append_op(
type='less_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond | 676cdc38b83c2155bbf166f01f50554c1751585e | 2,246 |
import re
def get_page_namespace(url_response):
"""
:type url_response: str
:rtype: int
"""
keyword = '"wgNamespaceNumber"'
text = url_response
if keyword in text:
beginning = text[text.find(keyword) + len(keyword):]
ending = beginning[:beginning.find(',')]
ints = re.findall(r'\d+', ending)
if len(ints) > 0:
return int(ints[0]) | f4e61d4a927401995f2435a94170ca691ff9119e | 2,247 |
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
h = x.reshape([-1, *x.shape[2:]]))
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)] | a39bf967110b802063a37260147c98ecdf8925bb | 2,248 |
def get_site_camera_data(site_no):
"""An orchestration method that fetches camera data and returns the site dictionary"""
json_raw = get_json_camera_data()
camera = json_raw_to_dictionary(json_raw)
return find_site_in_cameras(site_no, camera) | c9d9febebe8c80dd9de18ece8085853224140d3b | 2,249 |
import argparse
def get_args():
"""
Get arguments to the tool with argparse
:return: The arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("filename", action='store',
help='.xyz file(s) with optimised geometries from which to make .top and .gro files', nargs="+")
parser.add_argument('-id', type=str, default='AAA',
help="Three letter name of the residue/molecule e.g LYS")
parser.add_argument('-c', type=int, default=0,
help="Charge of the molecule Default: %(default)s")
parser.add_argument('-m', type=int, default=1,
help="Multiplicity of the molecule. Default: %(default)s")
parser.add_argument('-notrash', action='store_true', default=False,
help="Don't trash all the output files. Only .gro and .top will be left by default")
return parser.parse_args() | 0085922ad21776521bda2b2ce204983dd7181b89 | 2,250 |
def find_contam(df, contaminant_prevalence=0.5, use_mad_filter=False):
"""Flag taxa that occur in too many samples."""
taxa_counts = {}
for taxa in df['taxa_name']:
taxa_counts[taxa] = 1 + taxa_counts.get(taxa, 0)
thresh = max(2, contaminant_prevalence * len(set(df['sample_name'])))
contaminants = {taxa for taxa, count in taxa_counts.items() if count >= thresh}
if not use_mad_filter or df.shape[0] <= 2:
return df[~df['taxa_name'].isin(contaminants)]
return median_filter(df, contaminants) | 6f5976d97d7585d0dff60b90b2d6e5d6e22b6353 | 2,251 |
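# Hedged usage sketch (illustration only): with the default 0.5 prevalence threshold,
# taxon 'A' appears in all three samples and is therefore dropped as a contaminant.
import pandas as pd
df = pd.DataFrame({
    'sample_name': ['s1', 's1', 's2', 's2', 's3'],
    'taxa_name':   ['A',  'B',  'A',  'C',  'A'],
})
filtered = find_contam(df)
print(sorted(filtered['taxa_name'].unique()))  # ['B', 'C']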
def has_level_or_node(level: int, *auth_nodes: str) -> Rule:
"""
    :param level: required group permission level
    :param auth_nodes: required permission nodes
    :return: True if the group's permission level is at least the required level or the permission node is granted; a deny tag on the node rejects
"""
async def _has_level_or_node(bot: Bot, event: Event, state: T_State) -> bool:
auth_node = '.'.join(auth_nodes)
detail_type = event.dict().get(f'{event.get_type()}_type')
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
        # permission level check
if detail_type != 'group':
level_checker = False
else:
level_res = await DBGroup(group_id=group_id).permission_level()
if level_res.result >= level:
level_checker = True
else:
level_checker = False
        # permission node check
if detail_type == 'private':
user_auth = DBAuth(auth_id=user_id, auth_type='user', auth_node=auth_node)
user_tag_res = await user_auth.tags_info()
allow_tag = user_tag_res.result[0]
deny_tag = user_tag_res.result[1]
elif detail_type == 'group':
group_auth = DBAuth(auth_id=group_id, auth_type='group', auth_node=auth_node)
group_tag_res = await group_auth.tags_info()
allow_tag = group_tag_res.result[0]
deny_tag = group_tag_res.result[1]
else:
allow_tag = 0
deny_tag = 0
if allow_tag == 1 and deny_tag == 0:
return True
elif allow_tag == -2 and deny_tag == -2:
return level_checker
else:
return False
return Rule(_has_level_or_node) | a755b959eeb93caf113c157aa3551c45c1644216 | 2,252 |
import time
import random
import copy
def solve_problem(problem, max_iter_num=MAX_ITER_NUM, max_iter_num_without_adding=MAX_ITER_NUM_WITHOUT_ADDITIONS, iter_num_to_revert_removal=ITER_NUM_TO_REVERT_REMOVAL, remove_prob=ITEM_REMOVAL_PROBABILITY, consec_remove_prob=CONSECUTIVE_ITEM_REMOVAL_PROBABILITY, ignore_removed_item_prob=IGNORE_REMOVED_ITEM_PROBABILITY, modify_prob=PLACEMENT_MODIFICATION_PROBABILITY, calculate_times=False, return_value_evolution=False):
"""Find and return a solution to the passed problem, using an reversible strategy"""
# create an initial solution with no item placed in the container
solution = Solution(problem)
# determine the bounds of the container
min_x, min_y, max_x, max_y = get_bounds(problem.container.shape)
start_time = 0
sort_time = 0
item_discarding_time = 0
item_selection_time = 0
addition_time = 0
removal_time = 0
modification_time = 0
value_evolution_time = 0
if calculate_times:
start_time = time.time()
if return_value_evolution:
value_evolution = list()
else:
value_evolution = None
if calculate_times:
value_evolution_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# sort items by weight, to speed up their discarding (when they would cause the capacity to be exceeded)
items_by_weight = sorted(list(problem.items.items()), key=lambda index_item_tuple: index_item_tuple[1].weight)
if calculate_times:
sort_time += get_time_since(start_time)
iter_count_since_addition = 0
iter_count_since_removal = 0
solution_before_removal = None
if calculate_times:
start_time = time.time()
# discard the items that would make the capacity of the container to be exceeded
items_by_weight = items_by_weight[:get_index_after_weight_limit(items_by_weight, problem.container.max_weight)]
ignored_item_index = -1
if calculate_times:
item_discarding_time += get_time_since(start_time)
# placements can only be possible with capacity and valid items
if problem.container.max_weight and items_by_weight:
# try to add items to the container, for a maximum number of iterations
for i in range(max_iter_num):
if calculate_times:
start_time = time.time()
# perform a random choice of the next item to try to place
list_index, item_index = select_item(items_by_weight)
if calculate_times:
item_selection_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# try to add the item in a random position and with a random rotation; if it is valid, remove the item from the pending list
if solution.add_item(item_index, (random.uniform(min_x, max_x), random.uniform(min_y, max_y)), random.uniform(0, 360)):
if calculate_times:
addition_time += get_time_since(start_time)
# find the weight that can still be added
remaining_weight = problem.container.max_weight - solution.weight
# stop early if the capacity has been exactly reached
if not remaining_weight:
break
# remove the placed item from the list of pending items
items_by_weight.pop(list_index)
if calculate_times:
start_time = time.time()
# discard the items that would make the capacity of the container to be exceeded
items_by_weight = items_by_weight[:get_index_after_weight_limit(items_by_weight, remaining_weight)]
if calculate_times:
item_discarding_time += get_time_since(start_time)
# stop early if it is not possible to place more items, because all have been placed or all the items outside would cause the capacity to be exceeded
if not items_by_weight:
break
# reset the potential convergence counter, since an item has been added
iter_count_since_addition = 0
else:
if calculate_times:
addition_time += get_time_since(start_time)
# register the fact of being unable to place an item this iteration
iter_count_since_addition += 1
# stop early if there have been too many iterations without changes
if iter_count_since_addition == max_iter_num_without_adding:
break
if calculate_times:
start_time = time.time()
# if there are items in the container, try to remove an item with a certain probability (different if there was a recent removal)
if solution.weight > 0 and random.uniform(0., 1.) < (consec_remove_prob if solution_before_removal else remove_prob):
# if there is no solution prior to a removal with pending re-examination
if not solution_before_removal:
# save the current solution before removing, just in case in needs to be restored later
solution_before_removal = copy.deepcopy(solution)
# reset the counter of iterations since removal, to avoid reverting earlier than needed
iter_count_since_removal = 0
# get the index of the removed item, which is randomly chosen
removed_index = solution.remove_random_item()
# with a certain probability, only if not ignoring any item yet, ignore placing again the removed item until the operation gets reverted or permanently accepted
if ignored_item_index < 0 and items_by_weight and random.uniform(0., 1.) < ignore_removed_item_prob:
ignored_item_index = removed_index
# otherwise, add the removed item to the weight-sorted list of pending-to-add items
else:
items_by_weight.insert(get_index_after_weight_limit(items_by_weight, problem.items[removed_index].weight), (removed_index, problem.items[removed_index]))
# if there is a recent removal to be confirmed or discarded after some time
if solution_before_removal:
# re-examine a removal after a certain number of iterations
if iter_count_since_removal == iter_num_to_revert_removal:
# if the value in the container has improved since removal, accept the operation in a definitive way
if solution.value > solution_before_removal.value:
# if an item had been ignored, make it available for placement again
if ignored_item_index >= 0:
items_by_weight.insert(get_index_after_weight_limit(items_by_weight, problem.items[ignored_item_index].weight), (ignored_item_index, problem.items[ignored_item_index]))
# otherwise, revert the solution to the pre-removal state
else:
solution = solution_before_removal
# after reverting a removal, have some margin to try to add items
iter_count_since_addition = 0
# reset removal data
solution_before_removal = None
iter_count_since_removal = 0
ignored_item_index = -1
# the check will be done after more iterations
else:
iter_count_since_removal += 1
if calculate_times:
removal_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# if there are still items in the container (maybe there was a removal), modify existing placements with a certain probability
if solution.weight > 0 and random.uniform(0., 1.) < modify_prob:
# perform a random choice of the item to try to affect
_, item_index = select_item(items_by_weight)
# move to a random position of the container with a probability of 50%
if random.uniform(0., 1.) < 0.5:
solution.move_item_to(item_index, (random.uniform(min_x, max_x), random.uniform(min_y, max_y)))
# otherwise, perform a random rotation
else:
solution.rotate_item_to(item_index, random.uniform(0, 360))
if calculate_times:
modification_time += get_time_since(start_time)
if return_value_evolution:
if calculate_times:
start_time = time.time()
value_evolution.append(solution.value)
if calculate_times:
value_evolution_time += get_time_since(start_time)
# in the end, revert the last unconfirmed removal if it did not improve the container's value
if solution_before_removal and solution.value < solution_before_removal.value:
solution = solution_before_removal
if return_value_evolution:
if calculate_times:
start_time = time.time()
value_evolution[-1] = solution.value
if calculate_times:
value_evolution_time += get_time_since(start_time)
# encapsulate all times informatively in a dictionary
if calculate_times:
approx_total_time = sort_time + item_selection_time + item_discarding_time + addition_time + removal_time + modification_time + value_evolution_time
time_dict = {"Weight-sort": (sort_time, sort_time / approx_total_time), "Stochastic item selection": (item_selection_time, item_selection_time / approx_total_time), "Item discarding": (item_discarding_time, item_discarding_time / approx_total_time), "Addition (with geometric validation)": (addition_time, addition_time / approx_total_time), "Removal and reverting-removal": (removal_time, removal_time / approx_total_time), "Placement modification (with geometric validation)": (modification_time, modification_time / approx_total_time), "Keeping value of each iteration": (value_evolution_time, value_evolution_time / approx_total_time)}
if return_value_evolution:
return solution, time_dict, value_evolution
return solution, time_dict
if return_value_evolution:
return solution, value_evolution
return solution | b240e0129e35c4066ec46d9dde68b012a821e319 | 2,253 |
import PIL
from PIL import ImageDraw
def _pil_apply_edit_steps_mask(image, mask, edit_steps, inplace=False):
"""
Apply edit steps from unmasking method on a PIL image.
Args:
image (PIL.Image): The input image.
mask (Union[int, tuple[int, int, int], PIL.Image]): The mask to apply on the image, could be a single grey
            scale intensity [0, 255], an RGB tuple or a PIL Image.
edit_steps (list[EditStep]): Edit steps to be drawn.
inplace (bool): True to draw on the input image, otherwise draw on a cloned image.
Returns:
PIL.Image, the result image.
"""
if not inplace:
image = image.copy()
if isinstance(mask, PIL.Image.Image):
for step in edit_steps:
box = step.to_coord_box()
cropped = mask.crop(box)
image.paste(cropped, box=box)
else:
if isinstance(mask, int):
mask = (mask, mask, mask)
draw = ImageDraw.Draw(image)
for step in edit_steps:
draw.rectangle(step.to_coord_box(), fill=mask)
return image | c2bf05c282039ab5ff7eebefd8d9b3b635e9f74c | 2,254 |
def do_let_form(expressions, env):
"""Evaluate a let form."""
check_form(expressions, 2)
let_env = make_let_frame(expressions.first, env)
return eval_all(expressions.second, let_env) | e291880a21c99fcc05d8203dfd73dccc9084a72b | 2,255 |
import numpy as np
import pandas as pd
def bubble(n_categories=5, n=10, prefix='category', mode=None):
"""
Returns a DataFrame with the required format for
a bubble plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories}) | 77ba74f9bf6c09c49db8c4faa5934bd502995a5b | 2,256 |
from gensim.models.coherencemodel import CoherenceModel
def get_coherence(model, token_lists, measure='c_v'):
"""
Get model coherence from gensim.models.coherencemodel
:param model: Topic_Model object
:param token_lists: token lists of docs
:param measure: coherence metrics
:return: coherence score
"""
if model.method == 'LDA':
cm = CoherenceModel(model=model.ldamodel, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
else:
topics = get_topic_words(token_lists, model.cluster_model.labels_)
cm = CoherenceModel(topics=topics, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
return cm.get_coherence() | 1e8632eb901fc5219a4070d8e0b8e390612f7338 | 2,257 |
from uuid import uuid4
def run_vscode_command(
command_id: str,
*args: str,
wait_for_finish: bool = False,
return_command_output: bool = False,
):
"""Runs a VSCode command, using command server if available
Args:
command_id (str): The ID of the VSCode command to run
wait_for_finish (bool, optional): Whether to wait for the command to finish before returning. Defaults to False.
return_command_output (bool, optional): Whether to return the output of the command. Defaults to False.
Raises:
Exception: If there is an issue with the file-based communication, or
VSCode raises an exception
Returns:
Object: The response from the command, if requested.
"""
# NB: This is a hack to work around the fact that talon doesn't support
# variable argument lists
args = [x for x in args if x is not NotSet]
communication_dir_path = get_communication_dir_path()
if not communication_dir_path.exists():
if args or return_command_output:
raise Exception("Must use command-server extension for advanced commands")
print("Communication dir not found; falling back to command palette")
run_vscode_command_by_command_palette(command_id)
return
request_path = communication_dir_path / "request.json"
response_path = communication_dir_path / "response.json"
# Generate uuid that will be mirrored back to us by command server for
# sanity checking
uuid = str(uuid4())
request = Request(
command_id=command_id,
args=args,
wait_for_finish=wait_for_finish,
return_command_output=return_command_output,
uuid=uuid,
)
# First, write the request to the request file, which makes us the sole
# owner because all other processes will try to open it with 'x'
write_request(request, request_path)
# We clear the response file if it does exist, though it shouldn't
if response_path.exists():
print("WARNING: Found old response file")
robust_unlink(response_path)
# Then, perform keystroke telling VSCode to execute the command in the
# request file. Because only the active VSCode instance will accept
# keypresses, we can be sure that the active VSCode instance will be the
# one to execute the command.
actions.user.trigger_command_server_command_execution()
try:
decoded_contents = read_json_with_timeout(response_path)
finally:
# NB: We remove response file first because we want to do this while we
# still own the request file
robust_unlink(response_path)
robust_unlink(request_path)
if decoded_contents["uuid"] != uuid:
raise Exception("uuids did not match")
for warning in decoded_contents["warnings"]:
print(f"WARNING: {warning}")
if decoded_contents["error"] is not None:
raise Exception(decoded_contents["error"])
actions.sleep("25ms")
return decoded_contents["returnValue"] | 0bf1cfed5d2e02cf618ba8c7c9347d0408ef0ee3 | 2,258 |
import os
def clc_prepare(reference, outdir, source):
"""
create a CLC subset resampled to a reference image.
Parameters
----------
reference: str
the reference file with the target CRS and extent
outdir: str
the directory to write the new file to;
new files are named clc{index}.tif, e.g. clc1.tif.
source: str
the original product to be subsetted
Returns
-------
str
the name of the file written to `outdir`
"""
with Raster(reference) as ras:
xRes, yRes = ras.res
epsg = ras.epsg
ext = ras.extent
#########################################################################
warp_opts = {'options': ['-q'], 'format': 'GTiff', 'multithread': True,
'dstNodata': -99, 'resampleAlg': 'mode'}
if not os.path.isdir(outdir):
os.makedirs(outdir)
clc_subs = finder(outdir, ['clc[0-9].tif'], regex=True)
match = False
if len(clc_subs) > 0:
for j, sub in enumerate(clc_subs):
with Raster(sub) as ras:
if ras.extent == ext:
clc_sub = sub
match = True
if not match:
clc_sub = os.path.join(outdir, 'clc{}.tif'.format(len(clc_subs)))
print('creating', clc_sub)
warp_opts['dstSRS'] = 'EPSG:{}'.format(epsg)
warp_opts['xRes'] = xRes
warp_opts['yRes'] = yRes
warp_opts['outputBounds'] = (ext['xmin'], ext['ymin'],
ext['xmax'], ext['ymax'])
gdalwarp(src=source, dst=clc_sub, options=warp_opts)
return clc_sub | 0ebf57a5c11684e1575fb0eb3b69bde62d807431 | 2,259 |
from imucal.management import find_calibration_info_for_sensor # noqa: F401
from typing import Optional
from typing import Callable
from typing import List
from pathlib import Path
def find_calibrations_for_sensor(
sensor_id: str,
folder: Optional[path_t] = None,
recursive: bool = True,
filter_cal_type: Optional[str] = None,
custom_validator: Optional[Callable[["CalibrationInfo"], bool]] = None,
ignore_file_not_found: Optional[bool] = False,
) -> List[Path]:
"""Find possible calibration files based on the filename.
As this only checks the filenames, this might return false positives depending on your folder structure and naming.
Parameters
----------
sensor_id :
        The 4 letter/digit identifier of a sensor_type, as obtained from
:py:meth:`nilspodlib.header.Header.sensor_id`
folder :
Basepath of the folder to search. If None, tries to find a default calibration
recursive :
If the folder should be searched recursive or not.
filter_cal_type :
Whether only files obtain with a certain calibration type should be found.
This will look for the `CalType` inside the json file and could cause performance issues with many calibration
files.
If None, all found files will be returned.
For possible values, see the `imucal` library.
custom_validator :
A custom function that will be called with the CalibrationInfo object of each potential match.
This needs to load the json file of each match and could cause performance issues with many calibration files.
ignore_file_not_found :
If True this function will not raise an error, but rather return an empty list, if no
calibration files were found for the specific sensor_type.
Returns
-------
list_of_cals
List of paths pointing to available calibration objects.
"""
if not folder:
folder = _check_ref_cal_folder()
return find_calibration_info_for_sensor(
sensor_id=sensor_id,
folder=folder,
recursive=recursive,
filter_cal_type=filter_cal_type,
custom_validator=custom_validator,
ignore_file_not_found=ignore_file_not_found,
) | b28a6deb1348fd86c93ae6fd0b292626a99c2149 | 2,260 |
def reward_displacement(navenv):
""" Reward = distance to previous position"""
r = dist(navenv.current_pos, navenv.old_pos)
return r | f2d9f5bf78a93967c6e74a4fba0e25109fa1fb3b | 2,261 |
import uuid
def MakeLinuxFirmware(save=True, **kwargs):
"""Create and return a LinuxFirmware for test."""
defaults = {
'manufacturer': 'Lonovo',
'serial': 'blah',
'password': '123456789',
'machine_uuid': str(uuid.uuid4()).upper(),
'owner': 'someone',
'asset_tags': ['12345'],
'hostname': 'zerocool.example.com',
}
defaults.update(kwargs)
entity = firmware.LinuxFirmwarePassword(**defaults)
if save:
entity.put()
return entity | 2b4381035ae55ffc5996d06e5a9455c7ca148a85 | 2,262 |
def get_unity_filesystem_parameters():
"""This method provide parameters required for the ansible filesystem
module on Unity"""
return dict(
filesystem_name=dict(required=False, type='str'),
filesystem_id=dict(required=False, type='str'),
nas_server_name=dict(required=False, type='str'),
nas_server_id=dict(required=False, type='str'),
description=dict(required=False, type='str'),
pool_name=dict(required=False, type='str'),
pool_id=dict(required=False, type='str'),
size=dict(required=False, type='int'),
cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
is_thin=dict(required=False, type='bool'),
data_reduction=dict(required=False, type='bool'),
supported_protocols=dict(required=False, type='str',
choices=['NFS', 'CIFS', 'MULTIPROTOCOL']),
smb_properties=dict(type='dict', options=dict(
is_smb_sync_writes_enabled=dict(type='bool'),
is_smb_notify_on_access_enabled=dict(type='bool'),
is_smb_op_locks_enabled=dict(type='bool'),
is_smb_notify_on_write_enabled=dict(type='bool'),
smb_notify_on_change_dir_depth=dict(type='int')
)),
access_policy=dict(required=False, type='str',
choices=['NATIVE', 'UNIX', 'WINDOWS']),
locking_policy=dict(required=False, type='str',
choices=['ADVISORY', 'MANDATORY']),
tiering_policy=dict(required=False, type='str', choices=[
'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
snap_schedule_name=dict(required=False, type='str'),
snap_schedule_id=dict(required=False, type='str'),
quota_config=dict(required=False, type='dict', options=dict(
grace_period=dict(required=False, type='int'),
grace_period_unit=dict(required=False, type='str', choices=['minutes', 'hours', 'days']),
default_hard_limit=dict(required=False, type='int'),
default_soft_limit=dict(required=False, type='int'),
is_user_quota_enabled=dict(required=False, type='bool'),
quota_policy=dict(required=False, type='str', choices=['FILE_SIZE', 'BLOCKS']),
cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
), mutually_exclusive=[['is_user_quota_enabled', 'quota_policy']]),
state=dict(required=True, type='str', choices=['present', 'absent'])
) | 2cbbe284a8345341abf80948d659c2f2625b6e8f | 2,263 |
import numpy as np
import pandas as pd
def get_time_difference(row, start_col, end_col, start_format, end_format, unit='days'):
"""
    Returns the time difference between a row's start and end date columns.
    `unit` can be 'days' or 'years'.
"""
start_date = row[start_col]
end_date = row[end_col]
if pd.isnull(start_date) or pd.isnull(end_date):
return np.nan
else:
time_delta = get_time_delta(start_date, end_date, start_format, end_format)
if unit == 'days':
return time_delta.days
elif unit == 'years':
return float(time_delta.days)/365 | a73fea6bebc777ec8ff4ff89118e940f8dfdfcf1 | 2,264 |
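# Hedged usage sketch (illustration only): intended for row-wise use via DataFrame.apply;
# assumes the external helper `get_time_delta` parses the two strings and returns a timedelta.
import pandas as pd
df = pd.DataFrame({'admit': ['2020-01-01'], 'discharge': ['2020-01-11']})
df['los_days'] = df.apply(
    get_time_difference, axis=1,
    start_col='admit', end_col='discharge',
    start_format='%Y-%m-%d', end_format='%Y-%m-%d', unit='days')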
def get_account(account_name, password):
"""Displays account data from the wallet.
--- Definitions ---
{"name": "account_name", "prompt": "Alias of account", "default": "Myaccount"}
{"name": "password", "prompt": "Password to decrypt private key", "default": "Mypassword"}
"""
db = get_wallet_db()
account = db.execute(
'SELECT * FROM testaccount WHERE name = ?', (account_name,)
).fetchone()
if account is None:
return None
private_key = Account.decrypt(account["privatekey"], password)
acc = Account.from_key(private_key)
return acc | 09791696c4a6c3ddfc0ceae20e50abf7f15893f5 | 2,265 |
import numpy as np
def invert(img):
"""
Function to invert colors of an image
"""
r, g, b, a = colorsys_getRGBA(img) # Get r, g, b, a
r, g, b = 255 - r, 255 - g, 255 - b # Invert all colors
img_arr = np.dstack((r, g, b, a))
return img_arr | 013d708fb434b450404879346f25ad0d7088e3cf | 2,266 |
import pandas as pd
from scipy.stats import kendalltau, spearmanr
def spearman_kendall_test(df, item, alpha=0.05, increasing=True,
rank_in='Rank',
category_in='category',
dataset_in='dataset',
userid_in='userid'
):
"""
    Run Spearman's and Kendall's tests for an increasing or decreasing trend.
    :param df: dataframe; it must include both the target column `item` and the rank column
:param item: string, column of target's label
:param rank_in:string, column of rank's label
:param category_in: string, column of category's label
:param userid_in: string, column of userid's label
:param dataset_in: string, column of dataset's label
:param alpha: significant level
:param increasing: bool, test for increasing trend or decreasing trend
:return: dataframe filled in all test results
"""
category = sorted(list(set(df[category_in].tolist())))
dataset = sorted(list(set(df[dataset_in].tolist())))
test_result = []
for ds in dataset:
for cat in category:
count_sm, count_kd = 0, 0
df_temp = df[(df[dataset_in] == ds) & (df[category_in] == cat)]
ur_ds = df_temp[userid_in].unique().tolist()
for user in ur_ds:
rank = df_temp[df_temp[userid_in] == user][rank_in].tolist()
item_specify = df_temp[df_temp[userid_in] == user][item].tolist()
coef_sm, p_sm = spearmanr(rank, item_specify)
coef_kd, p_kd = kendalltau(rank, item_specify)
if increasing:
if (coef_sm > 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd > 0) & (p_kd < alpha):
count_kd += 1
else:
if (coef_sm < 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd < 0) & (p_kd < alpha):
count_kd += 1
test_result.append([ds, cat,
count_sm, count_sm / len(ur_ds),
count_kd, count_kd / len(ur_ds),
len(ur_ds)]
)
stats_test = pd.DataFrame(test_result, columns=[dataset_in,
category_in,
'SpN', 'SpP', 'Kn', 'Kp',
'total']
).sort_values([dataset_in, category_in])
return stats_test | d8c85f20866a68a7070be89c57cdb18d2b33c828 | 2,267 |
def array2string(array, _depth=0):
"""
Recursively create a initializer list style string from an iterable with
multiple dimensions.
Args:
array (iterable): input iterable which is expected to have elements that
can be converted to strings with `str()`.
_depth (int): variable tracking the current recursion depth
"""
if hasattr(array, 'name'):
return array.name
elif not hasattr(array, '__len__'):
return float_nsf(array)
else:
string = ''
array_len = len(array)
for i in range(array_len):
string += array2string(array[i], _depth=_depth + 1) + ', '
if (array_len > 1) or (_depth == 0) :
return '{' + string[0:-2] + '}'
else:
return string[0:-2] | 948bfe9e63f16c588001707125afe8d2867ff6b6 | 2,268 |
def rail_help_wrapper(prog):
""" So formatter_class's max_help_position can be changed. """
return RailHelpFormatter(prog, max_help_position=40) | aa821f68ea1587a051be59f52187dbf9b1dd2d91 | 2,269 |
def project_dashboard(request):
"""
    Renders the Project Dashboard page.
:param request:
:return:
"""
global all_vuln, \
total_web, \
all_high, \
total_network, \
all_medium, \
all_low, \
all_web_high, \
all_web_medium, \
all_network_medium, \
all_web_low, \
all_network_low, \
all_network_high
all_project = project_db.objects.all()
return render(request,
'project_dashboard.html',
{'all_project': all_project}) | 0459776f846a8f089fefd528605d2e850aeced5c | 2,270 |
def getSelfRole(store):
"""
Retrieve the Role which corresponds to the user to whom the given store
belongs.
"""
return getAccountRole(store, userbase.getAccountNames(store)) | d548ba6406ec92df7777498574f86cf44737ba8b | 2,271 |
import numpy as np
from sympy import Matrix, nsimplify, simplify
def forwardCOMDQ(robot, m = 0, symbolic = False):
"""
    Using Dual Quaternions, this function computes forward kinematics to the m-th center of mass, given joint positions in radians. The robot's kinematic parameters have to be set before using this function.
robot: object (robot.jointsPositions, robot.linksLengths)
m: int
"""
framesDQ, fkDQ = forwardDQ(robot, m = m, symbolic = symbolic)
# Initial conditions
framesCOMDQ = [np.array([[1], [0], [0], [0], [0], [0], [0], [0]]) if not symbolic else Matrix([1, 0, 0, 0, 0, 0, 0, 0])]
# Gets Denavit - Hartenberg Matrix
if not symbolic:
if not robot.dhParametersCOM:
comDH = dh.centersOfMass(robot)
else:
comDH = np.array(robot.dhParameters([float(q) for q in robot.jointsPositions], [float(Lcom) for Lcom in robot.centersOfMass]))
else:
comDH = robot.symbolicDHParametersCOM
i = 1
for frame in comDH[1 : , :]:
if i > m:
break
else:
if not symbolic:
# Center of Mass Homogeneous Transformation Matrix
COM = dq.leftOperator(dq.Rz(frame[0])).dot(dq.rightOperator(dq.Rx(frame[3]))).dot(dq.rightOperator(dq.Tx(frame[2]))).dot(dq.Tz(frame[1]))
# Rigid body's Dual Quaternion
B = dq.leftOperator(dq.conjugate(framesDQ[i - 1])).dot(framesDQ[i])
# Forward kinematics to Center of Mass
fkCOMDQ = dq.leftOperator(framesDQ[i]).dot(dq.rightOperator(COM)).dot(dq.conjugate(B))
else:
# Center of Mass Homogeneous Transformation Matrix
COM = dq.symbolicLeftOperator(dq.symbolicRz(frame[0])) * dq.symbolicRightOperator(dq.symbolicRx(frame[3])) * dq.symbolicRightOperator(dq.symbolicTx(frame[2])) * dq.symbolicTz(frame[1])
# Rigid body's Dual Quaternion
B = dq.symbolicLeftOperator(dq.symbolicConjugate(framesDQ[i - 1])) * framesDQ[i]
# Forward kinematics to Center of Mass
fkCOMDQ = nsimplify(simplify(dq.symbolicLeftOperator(framesDQ[i]) * dq.symbolicRightOperator(COM) * dq.symbolicConjugate(B)), tolerance = 1e-10, rational = False)
framesCOMDQ.append(fkCOMDQ)
i += 1
return framesCOMDQ, fkCOMDQ | e197839d98cdbb8b15449d33c8677adc5f9d3e8a | 2,272 |
def gen_mode():
"""获取玩家想要考试的模式"""
while True:
mode = input("如何考试?\n输入1顺序考试\n输入2乱序考试\n>>")
if mode in ("1", "2"):
return mode
else:
print()
print("非法输入,请输入\"1\"或\"2\"")
print("你不需要输入双引号")
print("--------------------------------") | eb3ff4a0812fe088f3acb1302730f5f48c6fbcda | 2,273 |
def find_movers(threshold, timeframe: Timeframe, increasing=True, decreasing=False, max_price=None):
"""
Return a dataframe with row index set to ASX ticker symbols and the only column set to
the sum over all desired dates for percentage change in the stock price. A negative sum
implies a decrease, positive an increase in price over the observation period.
"""
assert threshold >= 0.0
# NB: missing values will be imputed here, for now.
cip = company_prices(all_stocks(), timeframe, fields="change_in_percent", missing_cb=None)
movements = cip.sum(axis=0)
results = movements[movements.abs() >= threshold]
print("Found {} movers before filtering: {} {}".format(len(results), increasing, decreasing))
if not increasing:
results = results.drop(results[results > 0.0].index)
if not decreasing:
results = results.drop(results[results < 0.0].index)
#print(results)
if max_price is not None:
ymd = latest_quotation_date('ANZ')
stocks_lte_max_price = [q.asx_code for q in valid_quotes_only(ymd) if q.last_price <= max_price]
results = results.filter(stocks_lte_max_price)
print("Reporting {} movers after filtering".format(len(results)))
return results | ff1524d74dfe76630fb45b24119f57c3289ba355 | 2,274 |
from bigdl.nano.deps.automl.hpo_api import create_optuna_pl_pruning_callback
def create_pl_pruning_callback(*args, **kwargs):
"""Create PyTorchLightning Pruning Callback. Optuna Only."""
return create_optuna_pl_pruning_callback(*args, **kwargs) | 0698e800ed110d430422b7a3fcc72e100ed87658 | 2,275 |
import requests
def get_all_token_volume_by_direction(chain:str, direction:str):
"""
chain: Allowed: ethereum ┃ avalanche ┃ bsc ┃ polygon ┃ arbitrum ┃ fantom ┃ harmony ┃ boba ┃ optimism ┃ moonriver ┃ aurora
direction: Allowed: in ┃ out
"""
chain = chain.lower()
direction = direction.lower()
chains = ["ethereum", "avalanche", "bsc", "polygon", "arbitrum", "fantom", "harmony", "boba", "optimism", "moonriver", "aurora"]
directions = ["in", "out"]
params_ok = False
if chain in chains and direction in directions:
params_ok = True
if params_ok:
endpoint = f"{server}/api/v1/analytics/volume/{chain}/{direction}"
data = requests.get(endpoint)
if data.status_code == 200:
return data.json()["data"]
else:
print("request failed")
else:
print("wrong parameters") | be3285e4e4ac65075a98aeb853e68b1b5f6658cb | 2,276 |
def _interactively_fix_missing_variables(project, result):
"""Return True if we need to re-prepare."""
if project.problems:
return False
if not console_utils.stdin_is_interactive():
return False
# We don't ask the user to manually enter CONDA_PREFIX
# (CondaEnvRequirement) because it's a bizarre/confusing
# thing to ask.
can_ask_about = [status
for status in result.statuses
if (not status and isinstance(status.requirement, EnvVarRequirement) and not isinstance(
status.requirement, CondaEnvRequirement))]
if can_ask_about:
print("(Use Ctrl+C to quit.)")
start_over = False
values = dict()
for status in can_ask_about:
reply = console_utils.console_input("Value for " + status.requirement.env_var + ": ",
encrypted=status.requirement.encrypted)
if reply is None:
return False # EOF
reply = reply.strip()
if reply == '':
start_over = True
break
values[status.requirement.env_var] = reply
if len(values) > 0:
status = project_ops.set_variables(project, result.env_spec_name, values.items(), result)
if status:
return True
else:
console_utils.print_status_errors(status)
return False
else:
return start_over | 73393e453043132fc340be10411cb83569becf8c | 2,277 |
from itertools import islice, tee
def build_node_descr(levels, switch=False):
"""
Produces a node description of the above binary trees
"""
num_parents = sum([2**i for i in range(levels-1)])
parents, children = tee(character_iterator(switch))
next(children)
node_descr = []
for parent_ident in islice(parents, num_parents):
node_descr.append((parent_ident, next(children), "L"))
node_descr.append((parent_ident, next(children), "R"))
return node_descr | c682cf5e5946b614563dda31ace76f259df02d47 | 2,278 |
import random
def random_sources(xSize, ySize, zSize, number):
""" returns a list of random positions in the grid where the sources of nutrients (blood vessels) will be """
src = []
for _ in range(number):
x = random.randint(0, xSize-1)
y = random.randint(0, ySize-1)
z = random.randint(0, zSize-1)
if (x, y, z) not in src:
src.append((x,y,z))
return src | 17dab43ea2468a11e3720ff0f7eb33b605371496 | 2,279 |
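# Hedged usage sketch (illustration only): duplicate draws are skipped rather than redrawn,
# so at most `number` distinct voxel positions come back.
sources = random_sources(10, 10, 10, 5)
print(len(sources) <= 5, all(len(pos) == 3 for pos in sources))  # True True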
import numpy
import tensorflow as tf
def sqeuclidean(
x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
device = tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
with _get_tf_device(device):
return tf.reduce_sum(
(tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2
).numpy() | 3265a062d5a0eece85a3c83e99ad2f9ab27c62cb | 2,280 |
from typing import Optional
def get(*, db_session, tag_id: int) -> Optional[Tag]:
"""Gets a tag by its id."""
return db_session.query(Tag).filter(Tag.id == tag_id).one_or_none() | 752da542eef22ebd977b27c922341690bac2f5ab | 2,281 |
import matplotlib.pyplot as plt
import numpy as np
def plot_lines(axes, xdata, ydata, yerrors=None, cdata=None, cmap=None, line_spec='-o', *args, **kwargs):
"""
Plot lines on given matplotlib axes subplot
Uses matplotlib.plot or matplotlib.errorbar if yerrors is not None
:param axes: matplotlib figure or subplot axes, None uses current axes
:param xdata: array[n] data on x axis
:param ydata: list[n] of array[n] data on y axis
:param yerrors: list[m] of array[n] errors on y axis (or None)
:param cdata: list[n] of values to define line colour
:param cmap: name of colormap to generate colour variation in lines
:param line_spec: str or list[m] of str matplotlib.plot line_spec
:param args: additional arguments
:param kwargs: additional arguments
:return: output of plt.plot [line], or plt.errorbar [line, xerrors, yerrors]
"""
if axes is None:
axes = plt.gca()
nplots = len(ydata)
if xdata is None:
xdata = [range(len(y)) for y in ydata]
elif len(xdata) != nplots:
xdata = [xdata] * nplots
if yerrors is None:
yerrors = [None] * nplots
elif len(yerrors) != nplots:
yerrors = [yerrors] * nplots
if cmap is None:
cmap = 'viridis'
if cdata is None:
cdata = np.arange(nplots)
else:
cdata = np.asarray(cdata)
cnorm = cdata - cdata.min()
cnorm = cnorm / cnorm.max()
cols = plt.get_cmap(cmap)(cnorm)
line_spec = fn.liststr(line_spec)
if len(line_spec) != nplots:
line_spec = line_spec * nplots
print(axes)
print(len(xdata), xdata)
print(len(ydata), ydata)
print(len(yerrors), yerrors)
print(len(line_spec), line_spec)
print(len(cols), cols)
lines = []
for n in range(nplots):
lines += plot_line(axes, xdata[n], ydata[n], yerrors[n], line_spec[n], c=cols[n], *args, **kwargs)
return lines | 5c618745ba503a206d594bbac9cbe831d1124625 | 2,282 |
import math
import numpy as np
def conv_binary_prevent_overflow(array, structure):
"""
Make sure structure array has great enough positive bitdepth
to be convolved with binary primary array.
Parameters
----------
array : ndarray of bool or int, 2D
Primary integer array to convolve.
Must be a binary array of only zero/False and one/True.
structure : ndarray of bool or int, 2D
Secondary, smaller integer array to convolve with `array`.
Must be a binary array of only zero/False and one/True.
Returns
-------
structure : ndarray, possible uint cast of `structure`
Either the same `structure` array or a cast or `structure`
to a uint data type with more positive bitdepth than the
input array.
"""
# Get upper bound on minimum positive bitdepth for convolution.
conv_bitdepth_pos = math.log(np.prod(structure.shape)+1, 2)
dtype_bitdepths_pos = (1, 7, 8, 15, 16, 31, 32, 63, 64)
for b in dtype_bitdepths_pos:
if conv_bitdepth_pos <= b:
conv_bitdepth_pos = b
break
# Parse input array and structure data type for bitdepth.
input_bitdepth_pos = 0
for arr in (array, structure):
arr_dtype = arr.dtype
        if arr.dtype == np.bool_:
            arr_posbits = 1
        elif np.issubdtype(arr_dtype, np.signedinteger):
            arr_posbits = int(str(arr.dtype).replace('int', '')) - 1
        elif np.issubdtype(arr_dtype, np.unsignedinteger):
            arr_posbits = int(str(arr.dtype).replace('uint', ''))
elif np.issubdtype(arr_dtype, np.floating):
arr_posbits = np.inf
else:
arr_posbits = 0
input_bitdepth_pos = max(input_bitdepth_pos, arr_posbits)
if input_bitdepth_pos == 0:
# Handle unknown data type by casting structure to
# maximum possible bitdepth.
structure = structure.astype(np.uint64)
else:
# If maximum positive bitdepth from inputs is too low,
        # cast structure to minimum positive bitdepth for convolution.
if input_bitdepth_pos < conv_bitdepth_pos:
if (conv_bitdepth_pos % 2) != 0:
conv_bitdepth_pos += 1
structure = structure.astype(eval('np.uint{}'.format(conv_bitdepth_pos)))
return structure | dd82382c1109e2ce9d15bf0abd563f32b8e8b585 | 2,283 |
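# Hedged usage sketch (illustration only): a 3x3 binary structure can sum to 9,
# which needs more positive bits than bool provides, so it is cast up to uint8.
import numpy as np
primary = np.zeros((5, 5), dtype=bool)
structure = np.ones((3, 3), dtype=bool)
structure = conv_binary_prevent_overflow(primary, structure)
print(structure.dtype)  # uint8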
import hail as hl
def filter_freq_and_csq(mt: hl.MatrixTable, data_type: str, max_freq: float, least_consequence: str):
"""
Filters MatrixTable to include variants that:
1. Have a global AF <= `max_freq`
2. Have a consequence at least as severe as `least_consequence` (based on ordering from CSQ_ORDER)
:param MatrixTable mt: Input MT
:param str data_type: One of 'exomes' or 'genomes'
:param float max_freq: Max. AF to keep
:param str least_consequence: Least consequence to keep.
:return: Filtered MT
:rtype: MatrixTable
"""
vep_ht = hl.read_table(annotations_ht_path(data_type, 'vep'))
freq = hl.read_table(annotations_ht_path(data_type, 'frequencies'))
mt = mt.select_rows(
vep=vep_genes_expr(vep_ht[mt.row_key].vep, least_consequence),
af=hl.float32(freq[mt.row_key].freq[0].AF)
)
mt = mt.filter_rows(hl.is_defined(mt.vep) & (hl.len(mt.vep) > 0) & (mt.af > 0) & (mt.af <= max_freq))
mt = mt.explode_rows(mt.vep)
mt = mt.rename({'vep': 'gene_id'})
return mt | a20796ccb6fc4db1f93c91f07f5756f390099bee | 2,284 |
import numpy as np
def get_regular_intervals(
pre_sfes: list,
post_sfes: list,
pre_keep_flag: bool,
post_keep_flag: bool,
) -> list:
"""
Calculates the intervals for the "regular" egg laying epoch. If pre_keep_flag,
the "regular" epoch is the pre-breakpoint region. If post_keep_flag, the
"regular" epoch is the post-breakpoint region. If both flags are True,
the whole egg-laying trajectory is considered "regular".
Args:
pre_sfes (list): list of pre region SFES
post_sfes (list): list of post region SFES
pre_keep_flag (bool): True if the pre region intervals are considered regular
post_keep_flag (bool): True if the post region intervals are considered regular
Returns:
A list of intervals considered regular
"""
reg_intervals = []
if pre_keep_flag:
pre_sfes_sec = [(x * 60 * 60) for x in pre_sfes]
pre_intervals = np.diff(pre_sfes_sec, n=1)
pre_intervals = normalize_tiny_intervals(pre_intervals)
reg_intervals.extend(pre_intervals)
if post_keep_flag:
post_sfes_sec = [(x * 60 * 60) for x in post_sfes]
post_intervals = np.diff(post_sfes_sec, n=1)
post_intervals = normalize_tiny_intervals(post_intervals)
reg_intervals.extend(post_intervals)
return reg_intervals | 7d92006fc286e3b8c2977c023070388581f192fa | 2,285 |
async def async_setup(hass, config):
"""Platform setup, do nothing."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config[DOMAIN])
)
)
return True | be7c81bc7c91251c1c02696d8daa7eaac1f1d326 | 2,286 |
def get_new_bucket(target=None, name=None, headers=None):
"""
Get a bucket that exists and is empty.
Always recreates a bucket from scratch. This is useful to also
reset ACLs and such.
"""
if target is None:
target = targets.main.default
connection = target.connection
if name is None:
name = get_new_bucket_name()
# the only way for this to fail with a pre-existing bucket is if
# someone raced us between setup nuke_prefixed_buckets and here;
# ignore that as astronomically unlikely
bucket = connection.create_bucket(name, location=target.conf.api_name, headers=headers)
return bucket | d46156ee36304b6a13ebd2d467373c7873b8b075 | 2,287 |
import numpy as np
def angle_rms(ang, axis=None, period=2*np.pi):
"""returns the rms of angles, uses the property that rms(x)**2 = mean(x)**2 + std(x)**2"""
    # E[X**2] = E[X]**2 + E[(X - E[X])**2], so rms(x) = hypot(mean(x), std(x))
m,s = angle_mean_std(ang,axis,period)
return np.hypot(m, s) | 5b2c8fc865762b7856fc6a8c68e901dbf367690d | 2,288 |
import functools
from inspect import Parameter, signature
def check_units(*units_by_pos, **units_by_name):
"""Create a decorator to check units of function arguments."""
def dec(func):
# Match the signature of the function to the arguments given to the decorator
sig = signature(func)
bound_units = sig.bind_partial(*units_by_pos, **units_by_name)
# Convert our specified dimensionality (e.g. "[pressure]") to one used by
# pint directly (e.g. "[mass] / [length] / [time]**2). This is for both efficiency
# reasons and to ensure that problems with the decorator are caught at import,
# rather than runtime.
dims = {name: (orig, units.get_dimensionality(orig.replace('dimensionless', '')))
for name, orig in bound_units.arguments.items()}
defaults = {name: sig.parameters[name].default for name in sig.parameters
if sig.parameters[name].default is not Parameter.empty}
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Match all passed in value to their proper arguments so we can check units
bound_args = sig.bind(*args, **kwargs)
bad = list(_check_argument_units(bound_args.arguments, defaults, dims))
# If there are any bad units, emit a proper error message making it clear
# what went wrong.
if bad:
msg = f'`{func.__name__}` given arguments with incorrect units: '
msg += ', '.join(f'`{arg}` requires "{req}" but given "{given}"'
for arg, given, req in bad)
if 'none' in msg:
msg += ('\nAny variable `x` can be assigned a unit as follows:\n'
' from metpy.units import units\n'
' x = x * units.meter / units.second')
raise ValueError(msg)
return func(*args, **kwargs)
return wrapper
return dec | b7bd0c78d1339032a442c4e3a354cc1fa9e804b2 | 2,289 |
import numpy as np
def check_nifti_dim(fname, data, dim=4):
"""
Remove extra dimensions.
Parameters
----------
fname : str
The name of the file representing `data`
data : np.ndarray
The data which dimensionality needs to be checked
dim : int, optional
The amount of dimensions expected/desired in the data.
Returns
-------
np.ndarray
If `len(data.shape)` = `dim`, returns data.
If `len(data.shape)` > `dim`, returns a version of data without the
dimensions above `dim`.
Raises
------
ValueError
If `data` has less dimensions than `dim`
"""
if len(data.shape) < dim:
        raise ValueError(f'{fname} does not seem to be a {dim}D file. '
                         f'Please provide a {dim}D nifti file.')
if len(data.shape) > dim:
LGR.warning(f'{fname} has more than {dim} dimensions. Removing D > {dim}.')
for ax in range(dim, len(data.shape)):
data = np.delete(data, np.s_[1:], axis=ax)
return np.squeeze(data) | 0f814cc3eaca7242bf3393a1e749df5ebe7b128a | 2,290 |
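# Hedged usage sketch (illustration only): a well-formed 4D volume passes straight through;
# `LGR` is assumed to be the module-level logger used only by the warning branch.
import numpy as np
vol = np.random.rand(4, 4, 4, 10)
out = check_nifti_dim('func.nii.gz', vol, dim=4)
print(out.shape)  # (4, 4, 4, 10)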
import matplotlib.pyplot as plt
def bar_chart(x_data=None, y_data=None, title="Chart Title", x_label=None, y_label=None,
color="blue", figsize=(10,5)):
"""
This function requires two Pandas data series for x and y data.
Optionally: the x label, y label, color, title, and size may be set.
This function returns a bar chart with the specified parameters.
"""
if x_data is None or y_data is None:
print("No data passed.")
return None
if x_label is None:
x_label = x_data.name
if y_label is None:
y_label = y_data.name
fig = plt.figure(figsize=figsize) #Sets size of the bar chart.
plt.bar(x_data, y_data, color=color) #Plots x and y and set the color.
plt.title(title) #Sets title of the chart.
plt.xlabel(x_label) #Sets x-axis label.
plt.ylabel(y_label) #Sets y-axis label.
plt.xticks(x_data, rotation='45') #Setting x-tick labels and rotating 45 degrees.
return plt | 23b8d0c1a50ec4909d8b46c29ac75a483b2e221c | 2,291 |
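# Hedged usage sketch (illustration only): the Series names double as axis labels
# when x_label / y_label are not given.
import pandas as pd
x = pd.Series(['a', 'b', 'c'], name='category')
y = pd.Series([3, 7, 5], name='count')
chart = bar_chart(x, y, title='Counts per category')
# chart.show()  # uncomment to display the figure interactively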
def get_qtobject_for_uipath(pathstr):
""" Returns the QtObject for a Maya UI path.
Ensure that the path starts from the Maya main window and that there are no \
empty elements in it as this will fail.
"""
split_pathstr = pathstr.split("|")
return _find_qobject(get_maya_main_window(), split_pathstr) | fc39ffe792a25af33663c7843a934045df4a91b0 | 2,292 |
def parse_study_with_run(soup):
"""Given a BeautifulSoup object representing a study, parse out relevant
information.
:param soup: a BeautifulSoup object representing a study
:type soup: bs4.BeautifulSoup
:return: a dictionary containing study information and run information
:rtype: dict
"""
accession = soup.find('PRIMARY_ID', text=PROJECT_PARSER).text
title = soup.find('STUDY_TITLE').text
abstract = soup.find('STUDY_ABSTRACT').text
# Returns all of the runs associated with a study
runs = []
run_parsed = soup.find('ID', text=RUN_PARSER)
if run_parsed:
run_ranges = run_parsed.text.split(",")
for run_range in run_ranges:
if '-' in run_range:
runs += parse_run_range(run_range)
else:
runs.append(run_range)
else:
logger.warning(
'Failed to parse run information from ENA XML. Falling back to '
'ENA search...'
)
# Sometimes the SRP does not contain a list of runs (for whatever reason).
# A common trend with such projects is that they use ArrayExpress.
# In the case that no runs could be found from the project XML,
# fallback to ENA SEARCH.
runs = search_ena_study_runs(accession)
return {
'accession': accession,
'title': title,
'abstract': abstract,
'runlist': runs
} | 44405ed14b67c03a44fef876d3d9ed5afc703489 | 2,293 |
def create_embed(**kwargs) -> Embed:
"""Creates a discord embed object."""
embed_type = kwargs.get('type', Embed.Empty)
title = kwargs.get('title', Embed.Empty)
description = kwargs.get('description', Embed.Empty)
color = kwargs.get('color', get_default_color())
timestamp = kwargs.get('timestamp', Embed.Empty)
url = kwargs.get('url', Embed.Empty)
return Embed(
type=embed_type,
title=title,
description=description,
url=url,
color=color,
timestamp=timestamp
) | 4396d03eab15ccc05ceff7cc8cfcd1b93e85894a | 2,294 |
from typing import List
import time
import os
import pkgutil
import importlib
def validate_commit(commit: Commit, out_errors: List[str] = None, ignore_validators: List[str] = None) -> bool:
"""Validates a commit against all validators
:param commit: The commit to validate
:param out_errors: if not None, will populate with the list of errors given by the validators
:param ignore_validators: Optional list of CommitValidator classes to ignore, by class name
:return: True if there are no validation errors, and False otherwise
"""
failed_count = 0
passed_count = 0
start_time = time.time()
# Find all the validators in the validators package (recursively)
validator_classes = []
validators_dir = os.path.join(os.path.dirname(__file__), 'validators')
for _, module_name, is_package in pkgutil.iter_modules([validators_dir]):
if not is_package:
module = importlib.import_module('commit_validation.validators.' + module_name)
validator = module.get_validator()
if ignore_validators and validator.__name__ in ignore_validators:
print(f"Disabled validation for '{validator.__name__}'")
else:
validator_classes.append(validator)
error_summary = {}
# Process validators
for validator_class in validator_classes:
validator = validator_class()
validator_name = validator.__class__.__name__
error_list = []
passed = validator.run(commit, errors = error_list)
if passed:
passed_count += 1
print(f'{validator.__class__.__name__} PASSED')
else:
failed_count += 1
print(f'{validator.__class__.__name__} FAILED')
error_summary[validator_name] = error_list
end_time = time.time()
if failed_count:
print("VALIDATION FAILURE SUMMARY")
for val_name in error_summary.keys():
errors = error_summary[val_name]
if errors:
for error_message in errors:
first_line = True
for line in error_message.splitlines():
if first_line:
first_line = False
print(f'VALIDATOR_FAILED: {val_name} {line}')
else:
print(f' {line}') # extra detail lines do not need machine parsing
stats_strs = []
if failed_count > 0:
stats_strs.append(f'{failed_count} failed')
if passed_count > 0:
stats_strs.append(f'{passed_count} passed')
stats_str = ', '.join(stats_strs) + f' in {end_time - start_time:.2f}s'
print()
print(stats_str)
return failed_count == 0 | 6d02bef635b7100b66ba74cf7bc2b62d71d6607c | 2,295 |
from typing import Any
from typing import List
from typing import Dict
def _safe_types(
*, test_data: Any, cached_data: Any, key_rules: List[KeyRule],
) -> Dict:
"""Convert data and key_rules to safe data types for diffing.
Args:
test_data: data to compare
cached_data: data to compare
key_rules: list of key rules to apply
Returns:
Dict: safe keyword args for diff_with_rules
"""
wrapped_key_rules = []
for key_rule in key_rules:
if isinstance(cached_data, list):
key_rule.pattern = [_WRAP_KEY] + key_rule.pattern
wrapped_key_rules.append(key_rule)
return {
'old_dict': _wrap_data(cached_data),
'new_dict': _wrap_data(test_data),
'key_rules': wrapped_key_rules,
} | bf44aa091fcc751247fbc4ae92e826746c226cfc | 2,296 |
def _flatten_output(attr_dict, skip: list=[]):
"""
flaten output dict node
node_collection is a list to accumulate the nodes that not unfolded
:param skip: is a list of keys (format with parent_key.key) of Dict name that
will not collected into the json file.
For output nodes not being expanded, write down the uuid and datatype for future query.
"""
# do_not_unfold = ["band_parameters", "scf_parameters", "seekpath_parameters"]
for key, value in attr_dict.items():
if key in skip:
continue
if isinstance(value, AttributeDict):
# keep on unfold if it is a namespace
_flatten_output(value, skip)
elif isinstance(value, orm.Dict):
attr_dict[key] = value.get_dict()
elif isinstance(value, orm.Int):
attr_dict[key] = value.value
else:
# node type not handled attach uuid
attr_dict[key] = {
'uuid': value.uuid,
'datatype': type(value),
}
# print(archive_uuids)
return attr_dict | 75e2f41440b819ba939eb1e12c36a9ff6d894708 | 2,297 |
import tensorflow as tf
def get_tpu_estimator(
working_dir,
model_fn,
iterations_per_loop=320,
keep_checkpoint_max=20,
use_tpu=False,
train_batch_size=64):
"""Obtain an TPU estimator from a directory.
Args:
working_dir: the directory for holding checkpoints.
model_fn: an estimator model function.
iterations_per_loop: number of steps to run on TPU before outfeeding
metrics to the CPU. If the number of iterations in the loop would exceed
the number of train steps, the loop will exit before reaching
--iterations_per_loop. The larger this value is, the higher
the utilization on the TPU. For CPU-only training, this flag is equal to
`num_epochs * num_minibatches`.
keep_checkpoint_max: the maximum number of checkpoints to save in checkpoint
directory.
use_tpu: if True, training happens on TPU.
train_batch_size: minibatch size for training which is equal to total number
of data // number of batches.
Returns:
Returns a TPU estimator.
"""
# If `TPUConfig.per_host_input_for_training` is `True`, `input_fn` is
# invoked per host rather than per core. In this case, a global batch size
# is transformed a per-host batch size in params for `input_fn`,
# but `model_fn` still gets per-core batch size.
run_config = tf.estimator.tpu.RunConfig(
master=FLAGS.master,
evaluation_master=FLAGS.master,
model_dir=working_dir,
save_checkpoints_steps=iterations_per_loop,
save_summary_steps=iterations_per_loop,
keep_checkpoint_max=keep_checkpoint_max,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
per_host_input_for_training=True,
tpu_job_name=FLAGS.tpu_job_name))
return tf.estimator.tpu.TPUEstimator(
use_tpu=use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=train_batch_size) | 33f78f97aba4011fd8f637ff71a64e8716d5713a | 2,298 |
def rho_err(coeffs, rho, z, density_func):
"""
Returns the difference between the estimated and actual data
"""
soln = density_func(z, coeffs)
return rho - soln | 4a2d7c7243cad062d8568ab72599b4d8be26f874 | 2,299 |
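# Hedged usage sketch (illustration only): residuals of a linear density model rho(z) = a + b*z
# vanish when the coefficients match the data exactly.
import numpy as np
density_func = lambda z, coeffs: coeffs[0] + coeffs[1] * z
z = np.linspace(0.0, 1.0, 5)
rho = 2.0 + 3.0 * z
print(rho_err([2.0, 3.0], rho, z, density_func))  # [0. 0. 0. 0. 0.]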