content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def get_smallerI(x, i):
"""Return true if string x is smaller or equal to i. """
if len(x) <= i:
return True
else:
return False | 1588ef998f4914aa943a063546112766060a9cbf | 3,659,190 |
import re
def _ParseSourceContext(remote_url, source_revision):
"""Parses the URL into a source context blob, if the URL is a git or GCP repo.
Args:
remote_url: The remote URL to parse.
source_revision: The current revision of the source directory.
Returns:
An ExtendedSourceContext suitable for JSON.
"""
# Assume it's a Git URL unless proven otherwise.
context = None
# Now try to interpret the input as a Cloud Repo URL, and change context
# accordingly if it looks like one. Assume any seemingly malformed URL is
# a valid Git URL, since the inputs to this function always come from Git.
#
# A cloud repo URL can take three forms:
# 1: https://<hostname>/id/<repo_id>
# 2: https://<hostname>/p/<project_id>
# 3: https://<hostname>/p/<project_id>/r/<repo_name>
#
# There are two repo ID types. The first type is the direct repo ID,
# <repo_id>, which uniquely identifies a repository. The second is the pair
# (<project_id>, <repo_name>) which also uniquely identifies a repository.
#
# Case 2 is equivalent to case 3 with <repo_name> defaulting to "default".
match = re.match(_CLOUD_REPO_PATTERN, remote_url)
if match:
# It looks like a GCP repo URL. Extract the repo ID blob from it.
id_type = match.group('id_type')
if id_type == 'id':
raw_repo_id = match.group('project_or_repo_id')
# A GCP URL with an ID can't have a repo specification. If it has
# one, it's either malformed or it's a Git URL from some other service.
if not match.group('repo_name'):
context = {
'cloudRepo': {
'repoId': {
'uid': raw_repo_id
},
'revisionId': source_revision}}
elif id_type == 'p':
# Treat it as a project name plus an optional repo name.
project_id = match.group('project_or_repo_id')
repo_name = match.group('repo_name') or 'default'
context = {
'cloudRepo': {
'repoId': {
'projectRepoId': {
'projectId': project_id,
'repoName': repo_name}},
'revisionId': source_revision}}
# else it doesn't look like a GCP URL
if not context:
context = {'git': {'url': remote_url, 'revisionId': source_revision}}
return ExtendContextDict(context) | 3bb14066280e616f103d3aa55710706c967df432 | 3,659,191 |
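The regex `_CLOUD_REPO_PATTERN` and the helper `ExtendContextDict` are module-level names not shown in this snippet. As a purely illustrative sketch, a pattern with the named groups the function expects (`id_type`, `project_or_repo_id`, `repo_name`) could look like the following; the real SDK pattern may differ:
```python
import re

# Hypothetical stand-in for _CLOUD_REPO_PATTERN -- the actual pattern in the SDK may differ.
_CLOUD_REPO_PATTERN = re.compile(
    r'^https://(?P<hostname>[^/]+)/'
    r'(?P<id_type>p|id)/'
    r'(?P<project_or_repo_id>[^/?#]+)'
    r'(?:/r/(?P<repo_name>[^/?#]+))?/?$'
)

m = _CLOUD_REPO_PATTERN.match(
    'https://source.developers.google.com/p/my-project/r/my-repo')
print(m.group('id_type'), m.group('project_or_repo_id'), m.group('repo_name'))
# -> p my-project my-repo
```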
def decrypt_and_verify(message, sender_key, private_key):
"""
    Decrypts and verifies a message using the sender's public key.
Looks for the sender's public key in the public_keys/ directory.
Looks for your private key as private_key/private.asc
The ASN.1 specification for a FinCrypt message resides in asn1spec.py
Raises exceptions if key files are not found, or are malformed.
:param message: Message to decrypt (bytes)
:param private_key: Decrypter's private key (file like object)
:param sender_key: Sender's public key (file like object)
:return: Tuple (decrypted message (bytes), whether the message was verified (boolean))
If message was unable to be decrypted, the tuple will be (None, False)
"""
try:
decryption_key = read_private_key(private_key.read())
except Exception:
raise FinCryptDecodingError('Private key file is malformed.')
try:
sender_key = read_public_key(sender_key.read())
except Exception:
raise FinCryptDecodingError('Sender key file is malformed.')
try:
rsc = reedsolomon.RSCodec(8)
message = bytes(rsc.decode(message)[0])
decoded, _ = decode_ber(message, asn1Spec=FinCryptMessage())
decoded = encode_native(decoded)
except Exception:
return None, False
try:
decrypted_message = decrypt_message(decryption_key['k'], decoded['key'], decoded['message'])
except Exception:
decrypted_message = None
try:
authenticated = authenticate_message(sender_key['kx'], sender_key['ky'], decrypted_message,
decoded['signature'])
except Exception:
authenticated = False
return decrypted_message, authenticated | 9c3d43cc2ee01abd68416eaad4ea21fe066916a7 | 3,659,192 |
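A minimal usage sketch, assuming `ciphertext` already holds the raw FinCrypt message bytes; the key file locations follow the docstring above, and the open mode may need to be `'rb'` depending on how `read_public_key`/`read_private_key` expect their input:
```python
# Hypothetical paths and open mode, based on the docstring above.
with open('public_keys/alice.asc') as sender_key, \
        open('private_key/private.asc') as private_key:
    plaintext, verified = decrypt_and_verify(ciphertext, sender_key, private_key)

if plaintext is None:
    print('Could not decrypt the message')
else:
    print('Signature verified' if verified else 'Signature NOT verified')
    print(plaintext)
```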
def find_best_margin(args):
""" return `best_margin / 0.1` """
set_global_seeds(args['seed'])
dataset = DataLoader(args['dataset'], args)
X_train, X_test, X_val, y_train, y_test, y_val = dataset.prepare_train_test_val(args)
results = []
for margin in MARGINS:
model = Perceptron(feature_dim=X_train.shape[-1], margin=margin)
model.fit(X_train, y_train)
results.append(model.score(X_val, y_val))
return results | 40f3a80c56546e0fc9ae42c70cfc633dc83ba111 | 3,659,193 |
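Note that the function returns one validation score per entry of the module-level `MARGINS` list; a sketch of how a caller might recover the best margin (the `args` keys shown are hypothetical beyond the ones used above):
```python
args = {'seed': 0, 'dataset': 'my_dataset'}  # hypothetical; DataLoader may need more keys
scores = find_best_margin(args)
best_margin = MARGINS[max(range(len(scores)), key=scores.__getitem__)]
print('best margin:', best_margin)
```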
def unfold(raw_log_line):
"""Take a raw syslog line and unfold all the multiple levels of
newline-escaping that have been inflicted on it by various things.
Things that got python-repr()-ized, have '\n' sequences in them.
Syslog itself looks like it uses #012.
"""
lines = raw_log_line \
.replace('#012', '\n') \
.replace('\\n', '\n') \
.splitlines()
return lines | 9e23bdd82ac15086468a383a1ef98989aceee25e | 3,659,195 |
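A quick demonstration: both the syslog `#012` escape and a literal backslash-n get unfolded into real line breaks:
```python
raw = 'Traceback (most recent call last):#012  File "app.py", line 3\\nValueError: boom'
for line in unfold(raw):
    print(line)
# Traceback (most recent call last):
#   File "app.py", line 3
# ValueError: boom
```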
def _mcs_single(mol, mols, n_atms):
"""Get per-molecule MCS distance vector."""
dists_k = []
n_atm = float(mol.GetNumAtoms())
    n_incomp = 0  # Number of searches canceled by the timeout (incomplete)
for l in range(0, len(mols)):
# Set timeout to halt exhaustive search, which could take minutes
result = FindMCS([mol, mols[l]], completeRingsOnly=True,
ringMatchesRingOnly=True, timeout=10)
dists_k.append(1. - result.numAtoms /
((n_atm + n_atms[l]) / 2))
if result.canceled:
n_incomp += 1
return np.array(dists_k), n_incomp | fd2adf4ee9e3811acd4acb144f3b7861ac4b64ff | 3,659,197 |
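A hedged sketch of how this per-molecule helper might be combined into a full pairwise distance matrix, assuming RDKit molecules and that `FindMCS` above refers to `rdkit.Chem.rdFMCS.FindMCS`:
```python
import numpy as np
from rdkit import Chem

mols = [Chem.MolFromSmiles(s) for s in ('CCO', 'CCN', 'c1ccccc1O')]
n_atms = np.array([float(m.GetNumAtoms()) for m in mols])

rows, n_incomplete = [], 0
for mol in mols:
    dists_k, n_inc = _mcs_single(mol, mols, n_atms)
    rows.append(dists_k)
    n_incomplete += n_inc
dist_matrix = np.vstack(rows)  # symmetric MCS distance matrix, zeros on the diagonal
```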
def new_transaction():
"""
新的交易
:return:
"""
values = request.get_json()
    # Check that the required fields are in the POST'ed data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing values', 400
    # Create a new transaction
index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])
    response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201 | 06af06839e6afcaf4188cca724cebc7878455534 | 3,659,198 |
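The `@app.route` decorator is not shown; assuming the conventional `/transactions/new` POST endpoint used by this style of tutorial blockchain, a client call might look like:
```python
import requests

# The endpoint path is an assumption -- the route decorator is not included above.
resp = requests.post(
    'http://127.0.0.1:5000/transactions/new',
    json={'sender': 'address-a', 'recipient': 'address-b', 'amount': 5},
)
print(resp.status_code)  # 201 on success, 400 if a required field is missing
print(resp.json())       # {'message': 'Transaction will be added to Block <index>'}
```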
async def get_favicon():
"""Return favicon"""
return FileResponse(path="assets/kentik_favicon.ico", media_type="image/x-icon") | 8597f21ad240cd43f59703624d380e3b879a1a8a | 3,659,199 |
def geocentric_rotation(sphi, cphi, slam, clam):
"""
This rotation matrix is given by the following quaternion operations
qrot(lam, [0,0,1]) * qrot(phi, [0,-1,0]) * [1,1,1,1]/2
or
qrot(pi/2 + lam, [0,0,1]) * qrot(-pi/2 + phi , [-1,0,0])
where
qrot(t,v) = [cos(t/2), sin(t/2)*v[1], sin(t/2)*v[2], sin(t/2)*v[3]]
"""
M = np.zeros(9)
# Local X axis (east) in geocentric coords
M[0] = -slam; M[3] = clam; M[6] = 0;
# Local Y axis (north) in geocentric coords
M[1] = -clam * sphi; M[4] = -slam * sphi; M[7] = cphi;
# Local Z axis (up) in geocentric coords
M[2] = clam * cphi; M[5] = slam * cphi; M[8] = sphi;
return M | 83d37e79e35cab2fc309a640751fb85a9cab0177 | 3,659,200 |
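Read row-major, the columns of the reshaped matrix are the local east/north/up axes expressed in geocentric coordinates, so (under that reading, which matches the comments above) a local ENU vector can be rotated into geocentric XYZ like this:
```python
import numpy as np

lat, lon = np.radians(52.0), np.radians(13.4)   # example angles
M = geocentric_rotation(np.sin(lat), np.cos(lat), np.sin(lon), np.cos(lon))

R = M.reshape(3, 3)                  # rows filled as M[0:3], M[3:6], M[6:9]
v_local = np.array([1.0, 0.0, 0.0])  # unit vector pointing east
v_geocentric = R @ v_local           # same vector expressed in geocentric axes
```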
def get_price_sma(
ohlcv: DataFrame,
window: int = 50,
price_col: str = "close",
) -> Series:
"""
    Ratio of price to its simple moving average (SMA).
"""
return pd.Series(
ohlcv[price_col] / get_sma(ohlcv, window),
name="Price/SMA{}".format(window),
) | 7f356610462b9f0fbc13c02c6f093d5ec29d4e76 | 3,659,201 |
def map_to_closest(multitrack, target_programs, match_len=True, drums_first=True):
"""
    Keep the tracks closest to target_programs and map them to the corresponding
    programs available in target_programs.
    multitrack (pypianoroll.Multitrack): Track to normalize.
    target_programs (list): List of available programs.
    match_len (bool): If True, keep only the len(target_programs) closest tracks
    in multitrack.
    drums_first (bool): If True, place drum tracks first in the track list.
"""
new_multitrack = deepcopy(multitrack)
for track in new_multitrack.tracks:
min_dist = inf
for target in target_programs:
dist = abs(track.program - target)
if dist < min_dist:
min_dist = dist
track.program = target
track.min_dist = min_dist
if match_len:
length = len(target_programs)
new_multitrack.tracks.sort(key=lambda x: x.min_dist)
new_multitrack.tracks = new_multitrack.tracks[:length]
if drums_first:
new_multitrack.tracks.sort(key=lambda x: not x.is_drum)
return new_multitrack | 7fd91726fbc66dd3a3f233be9056c00b1b793f46 | 3,659,202 |
import time
def train_naive(args, X_train, y_train, X_test, y_test, rng, logger=None):
"""
Compute the time it takes to delete a specified number of
samples from a naive model sequentially.
"""
# initial naive training time
model = get_naive(args)
start = time.time()
model = model.fit(X_train, y_train)
before_train_time = time.time() - start
logger.info('\n[{}] before train time: {:.3f}s'.format('naive', before_train_time))
# predictive performance of the naive model
auc, acc, ap = exp_util.performance(model, X_test, y_test, logger=logger, name='naive')
# naive train after deleting data
delete_indices = rng.choice(np.arange(X_train.shape[0]), size=args.n_delete, replace=False)
new_X_train = np.delete(X_train, delete_indices, axis=0)
new_y_train = np.delete(y_train, delete_indices)
# after training time
model = get_naive(args)
start = time.time()
model = model.fit(new_X_train, new_y_train)
after_train_time = time.time() - start
logger.info('[{}] after train time: {:.3f}s'.format('naive', after_train_time))
# interpolate sequential updates
total_time = ((before_train_time + after_train_time) / 2) * args.n_delete
initial_utility = auc, acc, ap
return total_time, initial_utility | 0514df318219a9f69dbd49b65cad2664480e3031 | 3,659,203 |
def helperFunction():
"""A helper function created to return a value to the test."""
    return 10 > 0 | 2c4f2e5303aca2a50648860de419e8f94581fee7 | 3,659,204 |
def app_config(app_config):
"""Get app config."""
app_config['RECORDS_FILES_REST_ENDPOINTS'] = {
'RECORDS_REST_ENDPOINTS': {
'recid': '/files'
}
}
app_config['FILES_REST_PERMISSION_FACTORY'] = allow_all
app_config['CELERY_ALWAYS_EAGER'] = True
return app_config | 156f48cfd0937e5717de133fdfdf38c86e66ba71 | 3,659,205 |
from datetime import datetime
def ts(timestamp_string: str):
"""
Convert a DataFrame show output-style timestamp string into a datetime value
which will marshall to a Hive/Spark TimestampType
:param timestamp_string: A timestamp string in "YYYY-MM-DD HH:MM:SS" format
:return: A datetime object
"""
return datetime.strptime(timestamp_string, '%Y-%m-%d %H:%M:%S') | 1902e75ab70c7869686e3a374b22fa80a6dfcf1a | 3,659,206 |
def boxes(frame, data, f, parameters=None, call_num=None):
"""
Boxes places a rotated rectangle on the image that encloses the contours of specified particles.
Notes
-----
This method requires you to have used contours for the tracking and run boxes
in postprocessing.
Parameters
----------
cmap_type
Options are 'static' or 'dynamic'
cmap_column
Name of column containing data to specify colour in dynamic mode,
cmap_max
Specifies max data value for colour map in dynamic mode
cmap_scale
Scale factor for colour map
colour
Colour to be used for static cmap_type (B,G,R) values from 0-255
classifier_column
None selects all particles, column name of classifier values to specify subset of particles
classifier
The value in the classifier column which applies to subset (True or False)
thickness
Thickness of box. -1 fills the box in
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('boxes', call_num=call_num)
thickness = get_param_val(parameters[method_key]['thickness'])
subset_df = _get_class_subset(data, f, parameters, method=method_key)
box_pts = subset_df[['box_pts']].values
if np.shape(box_pts)[0] == 1:
df_empty = np.isnan(box_pts[0])
if np.all(df_empty):
#0 boxes
return frame
colours = colour_array(subset_df, f, parameters, method=method_key)
        for index, box in enumerate(box_pts):
            frame = _draw_contours(frame, box, col=colours[index],
                                   thickness=int(thickness))
return frame
except Exception as e:
raise BoxesError(e) | 813ef54a8c8b99d003b9ca74b28befc63da2c0b9 | 3,659,207 |
def process_states(states):
"""
Separate list of states into lists of depths and hand states.
:param states: List of states.
:return: List of depths and list of hand states; each pair is from the same state.
"""
depths = []
hand_states = []
for state in states:
depths.append(state[0])
hand_states.append(state[1])
depths = np.array(depths, dtype=np.float32)
hand_states = np.array(hand_states, dtype=np.int32)
return depths, hand_states | 6f71d2471a50a93a3dac6a4148a6e3c6c2aa61e8 | 3,659,208 |
import torch
import math
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes | c3e1d7eeb50b959fe0a075c742e23e5206730748 | 3,659,209 |
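A small usage check with a random clip and no boxes (shapes only, no external helpers needed):
```python
import torch

frames = torch.rand(8, 3, 256, 320)   # 8 frames, 3 channels, 256x320 pixels

# Center crop (spatial_idx=1) to 224x224 after resizing the short side to 256.
cropped, _ = uniform_crop(frames, size=224, spatial_idx=1, scale_size=256)
print(cropped.shape)                  # torch.Size([8, 3, 224, 224])
```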
from typing import Tuple
def plot_xr_complex_on_plane(
var: xr.DataArray,
marker: str = "o",
label: str = "Data on imaginary plane",
cmap: str = "viridis",
c: np.ndarray = None,
xlabel: str = "Real{}{}{}",
ylabel: str = "Imag{}{}{}",
legend: bool = True,
ax: object = None,
**kwargs,
) -> Tuple[Figure, Axes]:
"""Plots complex data on the imaginary plane. Points are colored by default
according to their order in the array.
Parameters
----------
var
1D array of complex data.
marker
Marker used for the scatter plot.
label
Data label for the legend.
cmap
The colormap to use for coloring the points.
c
Color of the points. Defaults to an array of integers.
xlabel
        Label of the x axis.
ylabel
        Label of the y axis.
legend
Calls :meth:`~matplotlib.axes.Axes.legend` if ``True``.
ax
The matplotlib axes. If ``None`` a new axes (and figure) is created.
"""
if ax is None:
_, ax = plt.subplots()
if c is None:
c = np.arange(0, len(var))
ax.scatter(var.real, var.imag, marker=marker, label=label, c=c, cmap=cmap, **kwargs)
unit_str = get_unit_from_attrs(var)
ax.set_xlabel(xlabel.format(" ", var.name, unit_str))
ax.set_ylabel(ylabel.format(" ", var.name, unit_str))
if legend:
ax.legend()
return ax.get_figure(), ax | 8702f14fe0fbc508c3cb5893cc5a4a73bfdd0b85 | 3,659,210 |
def recommend_with_rating(user, train):
"""
    Predict user u's rating for each item i not yet rated by u.
    :param user: the user
    :param train: the training set
    :return: the recommendation list (item -> predicted rating)
"""
rank = {}
ru = train[user]
for item in _movie_set:
if item in ru:
continue
rank[item] = __predict(user, item)
return rank.iteritems() | 372cd9f77d8123351f4b76eeea241aa8a3bcaf97 | 3,659,213 |
def nl_to_break( text ):
"""
Text may have newlines, which we want to convert to <br />
when formatting for HTML display
"""
text=text.replace("<", "<") # To avoid HTML insertion
text=text.replace("\r", "")
text=text.replace("\n", "<br />")
return text | d2baf1c19fae686ae2c4571416b4cad8be065474 | 3,659,214 |
import requests
import logging
def get_page_state(url):
"""
Checks page's current state by sending HTTP HEAD request
:param url: Request URL
:return: ("ok", return_code: int) if request successful,
("error", return_code: int) if error response code,
(None, error_message: str) if page fetching failed (timeout, invalid URL, ...)
"""
try:
response = requests.head(url, verify=False, timeout=10)
except requests.exceptions.RequestException as exception:
logging.error(exception)
return None, "Error fetching page"
if response.status_code >= 400:
return "error", response.status_code
return "ok", response.status_code | f7b7db656968bed5e5e7d332725e4d4707f2b14b | 3,659,215 |
def unique(list_, key=lambda x: x):
"""efficient function to uniquify a list preserving item order"""
seen = set()
result = []
for item in list_:
seenkey = key(item)
if seenkey in seen:
continue
seen.add(seenkey)
result.append(item)
return result | 57c82081d92db74a7cbad15262333053a2acd3a7 | 3,659,216 |
import dateutil
import pytz
def date_is_older(date_str1, date_str2):
"""
Checks to see if the first date is older than the second date.
    :param date_str1: the first date string
    :param date_str2: the second date string
    :return: True if the first date is older (earlier) than the second
"""
date1 = dateutil.parser.parse(date_str1)
date2 = dateutil.parser.parse(date_str2)
# set or normalize the timezone
target_tz = pytz.timezone('UTC')
if date1.tzinfo is None:
date1 = target_tz.localize(date1)
else:
date1 = target_tz.normalize(date1)
if date2.tzinfo is None:
date2 = target_tz.localize(date2)
else:
date2 = target_tz.normalize(date2)
return date1 < date2 | 48fcf26cde4276e68daa07c1250de33a739bc5cb | 3,659,217 |
def shorten_namespace(elements, nsmap):
"""
    Map a list of XML tag names onto the internal names (i.e. with shortened namespaces)
    :param elements: list of XML tags (or a single tag)
:param nsmap: XML nsmap
:return: List of mapped names
"""
names = []
_islist = True
if not isinstance(elements, (list, frozenset)):
elements = [elements]
_islist = False
for el in elements:
for key, value in nsmap.items():
if value in el:
if key == "cim":
name = el.split(value)[-1]
name = name[1:] if name.startswith("}") else name
elif "{"+value+"}" in el:
name = el.replace("{"+value+"}", key+"_")
else:
name = el.replace(value, key+"_")
names.append(name)
if el.startswith("#"):
names.append(el.split("#")[-1])
if not _islist and len(names) == 1:
names = names[0]
return names | 73dfc4f24a9b0a73cf7b6af7dae47b880faa3e27 | 3,659,218 |
def list_select_options_stream_points():
""" Return all data_points under data_stream """
product_uid = request.args.get('productID', type=str)
query = DataStream.query
if product_uid:
query = query.filter(DataStream.productID == product_uid)
streams_tree = []
data_streams = query.many()
for data_stream in data_streams:
data_points = []
for data_point in data_stream.dataPoints:
select_option = {
'label': data_point.dataPointName,
'value': data_point.dataPointID
}
data_points.append(select_option)
streams_tree.append({
'label': data_stream.streamName,
'value': data_stream.streamID,
'children': data_points
})
return jsonify(streams_tree) | 34ce7df0ecd241a009b1c8ef44bf33ec64e3d82d | 3,659,219 |
import math
def func2():
"""
:type: None
:rtype: List[float]
"""
return [math.pi, math.pi / 2, math.pi / 4, math.pi / 8] | 62984ba7d8c1efd55569449adbf507e73888a1b7 | 3,659,220 |
def get_playlist_name(pl_id):
"""returns the name of the playlist with the given id"""
sql = """SELECT * FROM playlists WHERE PlaylistId=?"""
cur.execute(sql, (pl_id,))
return cur.fetchone()[1] | 9488eb1c32db8b66f3239dcb454a08b8ea80b8b4 | 3,659,221 |
import random
def weight(collection):
"""Choose an element from a dict based on its weight and return its key.
Parameters:
- collection (dict): dict of elements with weights as values.
Returns:
string: key of the chosen element.
"""
# 1. Get sum of weights
    weight_sum = sum(collection.values())
# 2. Generate random number between 1 and sum of weights
random_value = random.randint(1, weight_sum)
# 3. Iterate through items
for key, value in collection.items():
# 4. Subtract weight of each item from random number
random_value -= value
# 5. Compare with 0, if <= 0, that item has been chosen
if random_value <= 0:
return key
# 6. Else continue subtracting
# Should not reach here.
raise ValueError("Invalid argument value.") | 383ddadd4a47fb9ac7be0292ecc079fcc59c4481 | 3,659,222 |
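A quick sanity check of the weighted draw: over many calls the keys come back roughly in proportion to their weights:
```python
from collections import Counter

weights = {'common': 70, 'uncommon': 25, 'rare': 5}
draws = Counter(weight(weights) for _ in range(10_000))
print(draws)  # e.g. ~7000 'common', ~2500 'uncommon', ~500 'rare'
```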
def knnsearch(y, x, k) :
""" Finds k closest points in y to each point in x.
Parameters
----------
x : (n,3) float array
A point cloud.
y : (m,3) float array
Another point cloud.
k : int
Number of nearest neighbors one wishes to compute.
Returns
-------
ordered_neighbors : (n,k) int array
List of k nearest neighbors to each point in x.
    dist : (n,k) float array
List of distances between each nearest neighbor and the corresponding point in x.
"""
x, y = map(np.asarray, (x, y))
tree =spatial.cKDTree(y)
ordered_neighbors = tree.query(x, k)[1] #sz x, k
ID = np.transpose(np.matlib.repmat(np.arange(np.shape(x)[0]), k,1))
dist = np.sum((x[ID,:]-y[ordered_neighbors,:])**2,axis=2)**.5
return ordered_neighbors, dist | 84cc1bf0f960e1fb44dd44ab95eccce1c424ec05 | 3,659,223 |
def segmentation_gaussian_measurement(
y_true,
y_pred,
gaussian_sigma=3,
measurement=keras.losses.binary_crossentropy):
""" Apply metric or loss measurement incorporating a 2D gaussian.
Only works with batch size 1.
Loop and call this function repeatedly over each sample
to use a larger batch size.
# Arguments
y_true: is assumed to be [label, x_img_coord, y_image_coord]
y_pred: is expected to be a 2D array of labels
with shape [1, img_height, img_width, 1].
"""
with K.name_scope(name='grasp_segmentation_gaussian_loss') as scope:
if keras.backend.ndim(y_true) == 4:
# sometimes the dimensions are expanded from 2 to 4
# to meet Keras' expectations.
# In that case reduce them back to 2
y_true = K.squeeze(y_true, axis=-1)
y_true = K.squeeze(y_true, axis=-1)
print('y_pred: ', y_pred)
print('y_true: ', y_true)
# y_true should have shape [batch_size, 3] here,
# label, y_height_coordinate, x_width_coordinate become shape:
# [batch_size, 1]
label = K.expand_dims(y_true[:, 0])
print('label: ', label)
y_height_coordinate = K.expand_dims(y_true[:, 1])
x_width_coordinate = K.expand_dims(y_true[:, 2])
# label = K.reshape(label, [1, 1])
print('label: ', label)
image_shape = tf.Tensor.get_shape(y_pred)
y_true_img = tile_vector_as_image_channels(label, image_shape)
y_true_img = K.cast(y_true_img, 'float32')
loss_img = measurement(y_true_img, y_pred)
y_pred_shape = K.int_shape(y_pred)
if len(y_pred_shape) == 3:
y_pred_shape = y_pred_shape[:-1]
if len(y_pred_shape) == 4:
y_pred_shape = y_pred_shape[1:3]
def batch_gaussian(one_y_true):
# def batch_gaussian(y_height_coord, x_width_coord):
# weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coord, x_width_coord), sigma=gaussian_sigma)
# weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coordinate, x_width_coordinate), sigma=gaussian_sigma)
return gaussian_kernel_2D(size=y_pred_shape, center=(one_y_true[0], one_y_true[1]), sigma=gaussian_sigma)
weights = K.map_fn(batch_gaussian, y_true)
loss_img = K.flatten(loss_img)
weights = K.flatten(weights)
weighted_loss_img = tf.multiply(loss_img, weights)
loss_sum = K.sum(weighted_loss_img)
loss_sum = K.reshape(loss_sum, [1, 1])
return loss_sum | 377f2fa7706c166756efdb3047937b8db2047674 | 3,659,224 |
import json
def request_pull(repo, requestid, username=None, namespace=None):
"""View a pull request with the changes from the fork into the project."""
repo = flask.g.repo
_log.info("Viewing pull Request #%s repo: %s", requestid, repo.fullname)
if not repo.settings.get("pull_requests", True):
flask.abort(404, description="No pull-requests found for this project")
request = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo.id, requestid=requestid
)
if not request:
flask.abort(404, description="Pull-request not found")
if request.remote:
repopath = pagure.utils.get_remote_repo_path(
request.remote_git, request.branch_from
)
parentpath = pagure.utils.get_repo_path(request.project)
else:
repo_from = request.project_from
parentpath = pagure.utils.get_repo_path(request.project)
repopath = parentpath
if repo_from:
repopath = pagure.utils.get_repo_path(repo_from)
repo_obj = pygit2.Repository(repopath)
orig_repo = pygit2.Repository(parentpath)
diff_commits = []
diff = None
# Closed pull-request
if request.status != "Open":
commitid = request.commit_stop
try:
for commit in repo_obj.walk(commitid, pygit2.GIT_SORT_NONE):
diff_commits.append(commit)
if commit.oid.hex == request.commit_start:
break
except KeyError:
# This happens when repo.walk() cannot find commitid
pass
if diff_commits:
            # Use the parent of the first commit in the PR as the diff base;
            # if it has no parent, point to the commit itself
start = diff_commits[-1].oid.hex
if diff_commits[-1].parents:
start = diff_commits[-1].parents[0].oid.hex
            # If the start and the end commits are the same, it means we are
            # dealing with one commit that has no parent, so just diff that
            # one commit
if start == diff_commits[0].oid.hex:
diff = diff_commits[0].tree.diff_to_tree(swap=True)
else:
diff = repo_obj.diff(
repo_obj.revparse_single(start),
repo_obj.revparse_single(diff_commits[0].oid.hex),
)
else:
try:
diff_commits, diff = pagure.lib.git.diff_pull_request(
flask.g.session, request, repo_obj, orig_repo
)
except pagure.exceptions.PagureException as err:
flask.flash("%s" % err, "error")
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
flask.flash(
"Could not update this pull-request in the database", "error"
)
if diff:
diff.find_similar()
form = pagure.forms.MergePRForm()
trigger_ci_pr_form = pagure.forms.TriggerCIPRForm()
# we need to leave out all members of trigger_ci_conf that have
# "meta" set to False or meta["requires_project_hook_attr"] condition
# defined and it's not met
trigger_ci_conf = pagure_config["TRIGGER_CI"]
if not isinstance(trigger_ci_conf, dict):
trigger_ci_conf = {}
trigger_ci = {}
# make sure all the backrefs are set properly on repo
pagure.lib.plugins.get_enabled_plugins(repo)
for comment, meta in trigger_ci_conf.items():
if not meta:
continue
cond = meta.get("requires_project_hook_attr", ())
if cond and not pagure.utils.project_has_hook_attr_value(repo, *cond):
continue
trigger_ci[comment] = meta
committer = False
if request.project_from:
committer = pagure.utils.is_repo_committer(request.project_from)
else:
committer = pagure.utils.is_repo_committer(request.project)
can_rebase_branch = not request.remote_git and committer
can_delete_branch = (
pagure_config.get("ALLOW_DELETE_BRANCH", True) and can_rebase_branch
)
return flask.render_template(
"repo_pull_request.html",
select="requests",
requestid=requestid,
repo=repo,
username=username,
repo_obj=repo_obj,
pull_request=request,
diff_commits=diff_commits,
diff=diff,
mergeform=form,
subscribers=pagure.lib.query.get_watch_list(flask.g.session, request),
tag_list=pagure.lib.query.get_tags_of_project(flask.g.session, repo),
can_rebase_branch=can_rebase_branch,
can_delete_branch=can_delete_branch,
trigger_ci=trigger_ci,
trigger_ci_pr_form=trigger_ci_pr_form,
flag_statuses_labels=json.dumps(pagure_config["FLAG_STATUSES_LABELS"]),
) | 7b83c83a236ed840ed5b2eefbf87859d5e120aac | 3,659,227 |
from typing import List
def make_matrix(points: List[float], degree: int) -> List[List[float]]:
"""Return a nested list representation of a matrix consisting of the basis
elements of the polynomial of degree n, evaluated at each of the points.
In other words, each row consists of 1, x, x^2, ..., x^n, where n is the degree,
and x is a value in points.
Preconditions:
- degree < len(points)
>>> make_matrix([1, 2, 3], 2)
[[1, 1, 1], [1, 2, 4], [1, 3, 9]]
"""
matrix = []
for point in points:
row = [point ** index for index in range(degree + 1)]
matrix.append(row)
return matrix | d8fbea3a0f9536cb681b001a852b07ac7b17f6c2 | 3,659,228 |
def verify(request, token, template_name='uaccounts/verified.html'):
"""Try to verify email address using given token."""
try:
verification = verify_token(token, VERIFICATION_EXPIRES)
except VerificationError:
return redirect('uaccounts:index')
if verification.email.profile != request.user.profile:
return redirect('uaccounts:index')
verification.email.verified = True
verification.email.save()
verification.delete()
return render(request, template_name) | 56971aca43d04d7909ea6015fd48b6f30fa5b0ab | 3,659,229 |
from typing import Tuple
import threading
def run_train(params: dict) -> Tuple[threading.Thread, threading.Thread]:
"""Train a network on a data generator.
params -> dictionary.
Required fields:
* model_name
* generator_name
* dataset_dir
* tile_size
* clf_name
* checkpoints_dir
* summaries_dir
Returns prefetch thread & model.fit thread"""
assert 'model_name' in params
assert 'generator_name' in params
Model = ModelFactory.get_model(params['model_name'])
Generator = GeneratorFactory.get_generator(params['generator_name'])
model = Model(**params)
feed = Generator(**params)
pf = PreFetch(feed)
t1 = threading.Thread(target=pf.fetch)
t2 = threading.Thread(target=model.fit, args=(pf,))
t1.start()
t2.start()
return t1,t2 | c3a96996c3d34c18bfeab89b14836d13829d183e | 3,659,230 |
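A hedged usage sketch; the dictionary values are hypothetical, and the model/generator names must match whatever the project's `ModelFactory`/`GeneratorFactory` have registered:
```python
params = {
    'model_name': 'unet',           # hypothetical registered model name
    'generator_name': 'tiles',      # hypothetical registered generator name
    'dataset_dir': '/data/tiles',
    'tile_size': 256,
    'clf_name': 'experiment_01',
    'checkpoints_dir': 'checkpoints/',
    'summaries_dir': 'summaries/',
}

fetch_thread, fit_thread = run_train(params)
fetch_thread.join()
fit_thread.join()
```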
def adjust_seconds_fr(samples_per_channel_in_frame,fs,seconds_fr,num_frame):
"""
Get the timestamp for the first sample in this frame.
Parameters
----------
samples_per_channel_in_frame : int
number of sample components per channel.
fs : int or float
sampling frequency.
seconds_fr : int or float
seconds for this frame (from frame header)
num_frame : int
frame number (from frame header).
Returns
-------
    time_first_sample : float
timestamp [s] corresponding to the first sample of this frame.
"""
seconds_per_frame=samples_per_channel_in_frame/float(fs)
time_first_sample=float(seconds_fr)+num_frame*seconds_per_frame
return(time_first_sample) | a19775db3ebcdbe66b50c30bc531e2980ca10082 | 3,659,232 |
def add_header(unicode_csv_data, new_header):
"""
Given row, return header with iterator
"""
final_iterator = [",".join(new_header)]
for row in unicode_csv_data:
final_iterator.append(row)
return iter(final_iterator) | 1fa50492d786aa28fba6062ac472f1c6470a6311 | 3,659,234 |
def get_most_energetic(particles):
"""Get most energetic particle. If no particle with a non-NaN energy is
found, returns a copy of `NULL_I3PARTICLE`.
Parameters
----------
    particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=true_filter, cmp_function=more_energetic,
) | b43e275183b0c2992cfd28239a7e038965b40ccf | 3,659,235 |
def stack(tup, axis=0, out=None):
"""Stacks arrays along a new axis.
Args:
tup (sequence of arrays): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: Stacked array.
.. seealso:: :func:`numpy.stack`
"""
return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out) | 5f97bed62c77f28415ae82402cbb379372b4708c | 3,659,237 |
def winning_pipeline(mydata,mytestdata,myfinalmodel,feature_selection_done = True,myfeatures =None,numerical_attributes = None):
"""
If feature _selection has not been performed:
Function performs Cross Validation (with scaling within folds) on the data passed through.
Scales the data with RobustScaler() and Imputes the data with IterativeImputer(). Additionally adds clusters for the cities latitude and longitude
Else:
Performs Cross-Validation given the estimator on a subset of the features of mydata which were passed through to myfeatures
Arguments
@myestimator: sklearn estimator
@mydata: training data with missing values and is not scaled)
@myfolds: number of folds for cross validation
@feature_selection_done: Boolean flag indicating if feature_selection has been done to the data in `mydata`
@myfeatures: list of informative features from features
@checknoise: Whether scoring for Cross-Validation should be Explained Variance
"""
# part 1 create location feature for training data using optics clustering
optics_df = mydata[['Latitude','Longitude']].copy()
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
clust.fit(optics_df)
#
optics_df['clust_label'] = clust.labels_
#
location_max = np.max(optics_df.clust_label.unique())
#optics labels noisy samples as -1 need to replace for successful onehotencoding
optics_df['clust_label'].replace([-1],location_max+1,inplace=True)
#one hot encoding and combining to mydata
enc = OneHotEncoder(categories='auto')
optics_df_1hot = enc.fit_transform(optics_df[['clust_label']])
location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
optics_df_1hot = pd.DataFrame(optics_df_1hot.todense(),index = optics_df.index,columns= location_labels )
    # part 1 done: cluster columns added
    mydata = pd.concat([mydata,optics_df_1hot],axis=1)
    # part 2: drop unnecessary columns
mydata_labels = mydata['med_rental_rate'].copy()
mydata = mydata.drop('med_rental_rate',axis =1)
if feature_selection_done:
mydata = mydata.loc[:,myfeatures].copy()
else:
mydata = mydata.drop(['city','Latitude','Longitude','change_hunits','studio_1000_1499', 'studio_1500_more',
'studio_750_999', 'onebed_1000_1499', 'onebed_1500_more',
'onebed_750_999', 'twobed_1000_1499', 'twobed_1500_more',
'twobed_750_999', 'threebed_1000_1499', 'threebed_1500_more',
'threebed_750_999'],axis=1)
imputer = IterativeImputer(max_iter = 10 ,random_state =22,min_value=0)
imputed_dat = imputer.fit_transform(mydata)
    # scale only the numerical attributes, i.e. everything except the cluster columns appended earlier
imputed_dat = pd.DataFrame(imputed_dat,columns=mydata.columns)
ct = ColumnTransformer(
[('scale1',RobustScaler(),numerical_attributes)],
remainder = 'passthrough')
X_train_prepped = ct.fit_transform(imputed_dat)
    # keep a copy of the processed training data (e.g. to pickle)
    processed_training_data = X_train_prepped.copy()
    # now prepare the test data
# part 1 create location feature for test data using optics clustering
optics_df = mytestdata[['Latitude','Longitude']].copy()
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
clust.fit(optics_df)
#
optics_df['clust_label'] = clust.labels_
#
location_max = np.max(optics_df.clust_label.unique())
#optics labels noisy samples as -1 need to replace for successful onehotencoding
optics_df['clust_label'].replace([-1],location_max+1,inplace=True)
#one hot encoding and combining to mydata
enc = OneHotEncoder(categories='auto')
optics_df_1hot = enc.fit_transform(optics_df[['clust_label']])
location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
optics_df_1hot = pd.DataFrame(optics_df_1hot.todense(),index = optics_df.index,columns= location_labels )
    # part 1 done: cluster columns added
    mytestdata = pd.concat([mytestdata,optics_df_1hot],axis=1)
    # part 2: drop unnecessary columns
mytest_data_labels = mytestdata['med_rental_rate'].copy()
mytestdata = mytestdata.drop('med_rental_rate',axis =1)
if feature_selection_done:
mytestdata = mytestdata.loc[:,myfeatures].copy()
else:
        mytestdata = mytestdata.drop(['city','Latitude','Longitude','change_hunits','studio_1000_1499', 'studio_1500_more',
        'studio_750_999', 'onebed_1000_1499', 'onebed_1500_more',
        'onebed_750_999', 'twobed_1000_1499', 'twobed_1500_more',
        'twobed_750_999', 'threebed_1000_1499', 'threebed_1500_more',
        'threebed_750_999'],axis=1)
    # impute and scale the test data with the transformers fitted on the training data
imputed_testdata = imputer.transform(mytestdata)
imputed_testdata = pd.DataFrame(imputed_testdata,columns=mytestdata.columns)
mytestdata_prepared = ct.transform(imputed_testdata)
    # keep a copy of the processed test data (e.g. to pickle)
processed_test_data = mytestdata_prepared.copy()
#make final predictions
myfinalmodel.fit(X_train_prepped,mydata_labels)
final_predictions = myfinalmodel.predict(mytestdata_prepared)
final_mse = mean_squared_error(mytest_data_labels,final_predictions)
final_rmse = np.sqrt(final_mse)
final_expvar = explained_variance_score(mytest_data_labels,final_predictions)
return {'final_rmse':final_rmse,'final_predictions':final_predictions,'final_expvar':final_expvar,'myfinalmodel':myfinalmodel,
'processed_training_data':processed_training_data,'processed_test_data':processed_test_data} | 636d922e405842ea338f774dd45b5ff78158bfdf | 3,659,238 |
def calc_single_d(chi_widths, chis, zs, z_widths, z_SN, use_chi=True):
"""Uses single_m_convergence with index starting at 0 and going along the entire line of sight.
Inputs:
chi_widths -- the width of the comoving distance bins.
chis -- the mean comoving distances of each bin.
zs -- the mean redshift of each bin, for the scale factor.
z_SN -- the reshift of the SN.
use_chi -- boolean that determined whether equal comoving distance or redshift bins are used.
"""
comoving_to_SN = b_comoving(0, z_SN)
chi_SN = comoving_to_SN[-1]
convergence = np.linspace(0, 0, len(chis))
mass = MSOL * 10 ** 15
for i in range(0, len(chis)):
if use_chi:
convergence[i] = single_d_convergence(chi_widths, chis, zs, i, 1, chi_SN)
else:
convergence[i] = single_d_convergence_z(z_widths, chis, zs, i, 1, chi_SN)
return convergence | 6cafe9d8d1910f113fdcd8a3417e127f4f1cf5e6 | 3,659,239 |
def ppo(
client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, matype=0
):
"""This will return a dataframe of Percentage Price Oscillator for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
matype (int): moving average type (0-sma)
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
ppo = t.PPO(df[col].values, fastperiod, slowperiod, matype)
return pd.DataFrame({col: df[col].values, "ppo": ppo}) | 0b6c48408b810131370500921a7ab2addbccea8b | 3,659,240 |
import random
def getRandomCoin():
""" returns a randomly generated coin """
coinY = random.randrange(20, int(BASEY * 0.6))
coinX = SCREENWIDTH + 100
return [
{'x': coinX, 'y': coinY},
] | 44a5ea7baddc77f8d1b518c3d1adcccd28935108 | 3,659,241 |
def is_success(msg):
"""
    Whether the message reports success.
    :param msg: message dict with a 'status' field
    :return: True if msg['status'] == 'success'
"""
return msg['status'] == 'success' | 43ecbf3c7ac8d03ce92ab059e7ec902e51505d0a | 3,659,242 |
import httplib2
from bs4 import BeautifulSoup, SoupStrainer
def scrape_data(url):
"""
scrapes relevant data from given url
@param {string} url
@return {dict} {
url : link
links : list of external links
title : title of the page
description : sample text
}
"""
http = httplib2.Http()
try:
status, response = http.request(url)
except Exception as e:
return None
# get links
links = []
for link in BeautifulSoup(response, "html.parser", parse_only=SoupStrainer('a')):
if link.has_attr('href'):
links.append(link['href'])
# get description
soup = BeautifulSoup(response, "html.parser")
description = soup.find('meta', attrs={'name':'og:description'}) or soup.find('meta', attrs={'property':'description'}) or soup.find('meta', attrs={'name':'description'})
if description:
description = description.get('content')
# return dictionary
return {
"url" : url,
"links" : links,
"title" : BeautifulSoup(response, "html.parser"),
"description" : description
} | 4ab640aad73506e74e3a899467a90c2ddec34308 | 3,659,243 |
def list_strip_comments(list_item: list, comment_denominator: str = '#') -> list:
"""
Strips all items which are comments from a list.
:param list_item: The list object to be stripped of comments.
:param comment_denominator: The character with which comment lines start with.
:return list: A cleaned list object.
"""
_output = list()
for _item in list_item:
        if not _item.startswith(comment_denominator):
_output.append(_item)
return _output | e5dd6e0c34a1d91586e12e5c39a3a5413746f731 | 3,659,244 |
def guess_number(name):
"""User defined function which performs the all the operations and prints the result"""
guess_limit = 0
magic_number = randint(1, 20)
while guess_limit < 6: # perform the multiple guess operations and print output
user_guess = get_input("Take a guess: ")
if 0 < user_guess <= 20: # condition that allows the numbers only if in between 1 to 20
guess_limit += 1
if user_guess == magic_number:
print(f"Good job, {name}! You guessed my number in {guess_limit} guesses!")
break
elif user_guess < magic_number:
print("Your Guess is too low")
elif user_guess > magic_number:
print("Your Guess is too high")
else:
print("Try again, Your number must have be in the range of 1 to 20!!")
else:
print(f"The number I was thinking of was {magic_number}")
return 0 | 14c81f8adc18f59c29aa37ecec91808b275524e2 | 3,659,245 |
def writerformat(extension):
"""Returns the writer class associated with the given file extension."""
return writer_map[extension] | a2f981a993ba4be25304c0f41b0e6b51bef68d68 | 3,659,246 |
def index_like(index):
"""
Does index look like a default range index?
"""
return not (isinstance(index, pd.RangeIndex) and
index._start == 0 and
index._stop == len(index) and
index._step == 1 and index.name is None) | 91a8e626547121768ee7708e5c7cdcf8265c3991 | 3,659,247 |
def zplsc_c_absorbtion(t, p, s, freq):
"""
Description:
Calculate Absorption coeff using Temperature, Pressure and Salinity and transducer frequency.
This Code was from the ASL MatLab code LoadAZFP.m
Implemented by:
2017-06-23: Rene Gelinas. Initial code.
    :param t: temperature
    :param p: pressure
    :param s: salinity
    :param freq: Frequency in kHz
:return: sea_abs
"""
# Calculate relaxation frequencies
t_k = t + 273.0
f1 = 1320.0*t_k * np.exp(-1700/t_k)
f2 = 1.55e7*t_k * np.exp(-3052/t_k)
# Coefficients for absorption equations
k = 1 + p/10.0
a = 8.95e-8 * (1 + t*(2.29e-2 - 5.08e-4*t))
b = (s/35.0)*4.88e-7*(1+0.0134*t)*(1-0.00103*k + 3.7e-7*(k*k))
c = 4.86e-13*(1+t*((-0.042)+t*(8.53e-4-t*6.23e-6)))*(1+k*(-3.84e-4+k*7.57e-8))
freqk = freq*1000
sea_abs = (a*f1*(freqk**2))/((f1*f1)+(freqk**2))+(b*f2*(freqk**2))/((f2*f2)+(freqk**2))+c*(freqk**2)
return sea_abs | af5a7d1ea0ad4fbfacfd1b7142eaf0a31899cb4c | 3,659,248 |
def estimate_quintic_poly(x, y):
"""Estimate degree 5 polynomial coefficients.
"""
return estimate_poly(x, y, deg=5) | be389d9f09208da14d0b5c9d48d3c6d2e6a86e8d | 3,659,249 |
def add(A, B):
"""
Return the sum of Mats A and B.
>>> A1 = Mat([[1,2,3],[1,2,3]])
>>> A2 = Mat([[1,1,1],[1,1,1]])
>>> B = Mat([[2,3,4],[2,3,4]])
>>> A1 + A2 == B
True
>>> A2 + A1 == B
True
>>> A1 == Mat([[1,2,3],[1,2,3]])
True
>>> zero = Mat([[0,0,0],[0,0,0]])
>>> B + zero == B
True
"""
assert A.size == B.size
    return Mat([[a + b for a, b in zip(Arow, Brow)] for Arow, Brow in zip(A.store, B.store)]) | 5b0054397a76a20194b3a34435074fc901a34f6b | 3,659,250 |
def hindu_lunar_event(l_month, tithi, tee, g_year):
"""Return the list of fixed dates of occurrences of Hindu lunar tithi
prior to sundial time, tee, in Hindu lunar month, l_month,
in Gregorian year, g_year."""
l_year = hindu_lunar_year(
hindu_lunar_from_fixed(gregorian_new_year(g_year)))
date1 = hindu_tithi_occur(l_month, tithi, tee, l_year)
date2 = hindu_tithi_occur(l_month, tithi, tee, l_year + 1)
return list_range([date1, date2],
gregorian_year_range(g_year)) | aca03e1a77ff6906d31a64ab50355642f848f9d9 | 3,659,251 |
async def statuslist(ctx, *, statuses: str):
"""Manually make a changing status with each entry being in the list."""
bot.x = 0
statuses = statuses.replace("\n", bot.split)
status_list = statuses.split(bot.split)
if len(status_list) <= 1:
return await bot.send_embed(ctx, f"You cannot have a list with only {len(status_list)} entry.", negative=True)
bot.statuses = status_list
bot.autostatus = True
await bot.send_embed(ctx, "Changed statuslist.") | 99c43ea464759356977bc35ffcd941655763d783 | 3,659,252 |
def kebab(string):
"""kebab-case"""
return "-".join(string.split()) | 24bc29e066508f6f916013fa056ff54408dcd46d | 3,659,253 |
def getUserID(person):
"""
Gets Humhub User ID using name information
:param person: Name of the person to get the Humhub User ID for
:type person: str.
"""
# search for person string in humhub db
# switch case for only one name (propably lastname) or
# two separate strings (firstname + lastname)
firstname = ''
lastname = ''
if len(person.split()) == 1:
# only lastname
lastname = person
else:
firstname = person.split()[0]
lastname = person.split()[1]
global offlinemode
if offlinemode:
return 8
# search in humhub db
cnx = establishDBConnection(dbconfig)
cursor = cnx.cursor()
    # use parameterized queries so the name strings are quoted correctly
    # (%s placeholders assume a MySQL-style driver)
    if firstname == '':
        query = "SELECT user_id FROM profile WHERE lastname = %s"
        params = (lastname,)
    else:
        query = "SELECT user_id FROM profile WHERE firstname = %s AND lastname = %s"
        params = (firstname, lastname)
    cursor.execute(query, params)
for user_id in cursor:
userid = user_id
cnx.close()
return userid | 31d40b6dd0aec8f6e8481aeaa3252d71c6935c39 | 3,659,254 |
def b58decode(v, length):
""" decode v into a string of len bytes
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result | 4757e451106691de3d8805e9f7bdaeb24bd52816 | 3,659,256 |
def get_submission_by_id(request, submission_id):
"""
Returns a list of test results assigned to the submission with the given id
"""
submission = get_object_or_404(Submission, pk=submission_id)
data = submission.tests.all()
serializer = TestResultSerializer(data, many=True)
return Response(serializer.data, status.HTTP_200_OK) | 4504b46a03056cb289bb0b53dc01d58f0c5c986c | 3,659,257 |
import pkg_resources
def get_resource_path(resource_name):
"""Get the resource path.
Args:
resource_name (str): The resource name relative to the project root
directory.
Returns:
str: The true resource path on the system.
"""
package = pkg_resources.Requirement.parse(PACKAGE_NAME)
return pkg_resources.resource_filename(package, resource_name) | 0f95e5f26edc9f351323a93ddc75df920e65375d | 3,659,258 |
def do_cluster(items, mergefun, distfun, distlim):
"""Pairwise nearest merging clusterer.
items -- list of dicts
mergefun -- merge two items
distfun -- distance function
distlim -- stop merging when distance above this limit
"""
def heapitem(d0, dests):
"""Find nearest neighbor for d0 as sortable [distance, nearest, d0]"""
dists = (
Sort0List([distfun(d0, d1), d1, d0])
for d1 in dests if d1 is not d0
)
return min(dists)
heap = [Sort0List([None, None, d]) for d in items]
d0 = d1 = merged = None
while len(heap) > 1:
for item in heap:
# rescan nearest where nearest was merged away, or not yet set
if item[1] in (None, d0, d1):
item[:] = heapitem(item[2], (x[2] for x in heap))
continue
# update others where merged now nearest
if item[2] is not merged:
distance = distfun(item[2], merged)
if item[0] > distance:
item[0:2] = distance, merged
# arrange heap, pop out one end of shortest edge
heapify(heap)
distance, d1, d0 = item = heappop(heap)
# if shortest edge is long enough, unpop and stop
if distance is None or distance > distlim:
heappush(heap, item) # unspill the milk
break
# replace other end with merged destination
merged = mergefun(d0, d1)
for i in range(len(heap)):
if heap[i][2] is d1:
heap[i] = Sort0List([None, None, merged])
break
return [x[2] for x in heap] | afcd32c390c5d9b57eb070d3f923b4abd6f9ac6b | 3,659,259 |
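A toy usage, assuming `Sort0List`, `heapify`, `heappop` and `heappush` are available in the module as the function expects; it clusters 1-D points that lie within 1.5 of each other:
```python
items = [{'x': 0.0, 'n': 1}, {'x': 0.4, 'n': 1}, {'x': 5.0, 'n': 1}, {'x': 5.2, 'n': 1}]

def mergefun(a, b):
    # weighted mean of the two cluster centres
    n = a['n'] + b['n']
    return {'x': (a['x'] * a['n'] + b['x'] * b['n']) / n, 'n': n}

def distfun(a, b):
    return abs(a['x'] - b['x'])

clusters = do_cluster(items, mergefun, distfun, distlim=1.5)
print(clusters)  # two clusters, centred near x=0.2 and x=5.1
```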
import csv
def read_csv(input_file, quotechar='"'):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f,quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines | 3b789904ae612b9b211a7dac5c49289659c415c5 | 3,659,260 |
def rotate_coordinates(local3d, angles):
"""
Rotate xyz coordinates from given view_angles.
local3d: numpy array. Unit LOCAL xyz vectors
angles: tuple of length 3. Rotation angles around each GLOBAL axis.
"""
cx, cy, cz = np.cos(angles)
sx, sy, sz = np.sin(angles)
mat33_x = np.array([
[1, 0, 0],
[0, cx, sx],
[0, -sx, cx]
], dtype='float')
mat33_y = np.array([
[cy, 0, sy],
[0, 1, 0],
[-sy, 0, cy]
], dtype='float')
mat33_z = np.array([
[cz, sz, 0],
[-sz, cz, 0],
[0, 0, 1]
], dtype='float')
local3d = local3d @ mat33_x @ mat33_y @ mat33_z
return local3d | 3243cc9d82dd08384995f62709d3fabc7b896dce | 3,659,261 |
import torch
def quantize_enumerate(x_real, min, max):
"""
Randomly quantize in a way that preserves probability mass.
We use a piecewise polynomial spline of order 3.
"""
assert min < max
lb = x_real.detach().floor()
# This cubic spline interpolates over the nearest four integers, ensuring
# piecewise quadratic gradients.
s = x_real - lb
ss = s * s
t = 1 - s
tt = t * t
probs = torch.stack([
t * tt,
4 + ss * (3 * s - 6),
4 + tt * (3 * t - 6),
s * ss,
], dim=-1) * (1/6)
logits = safe_log(probs)
q = torch.arange(-1., 3.)
x = lb.unsqueeze(-1) + q
x = torch.max(x, 2 * min - 1 - x)
x = torch.min(x, 2 * max + 1 - x)
return x, logits | d73083d6078c47456aeb64859d8361ad37f7d962 | 3,659,262 |
def counter_format(counter):
"""Pretty print a counter so that it appears as: "2:200,3:100,4:20" """
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items())) | 992993a590eabb2966eb9de26625077f2597718c | 3,659,263 |
def drot(x, y, c, s):
"""
Apply the Givens rotation {(c,s)} to {x} and {y}
"""
# compute
gsl.blas_drot(x.data, y.data, c, s)
# and return
return x, y | 8554586f2069f04db0116dfee7868d5d0527999a | 3,659,264 |
def _update_dict_within_dict(items, config):
""" recursively update dict within dict, if any """
for key, value in items:
if isinstance(value, dict):
config[key] = _update_dict_within_dict(
value.items(), config.get(key, {})
)
else:
config[key] = value
return config | 75b840b8091568b80f713b2ca7725b1a1f917d3a | 3,659,265 |
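A quick demonstration of the recursive merge; note the function takes `dict.items()` (an iterable of key/value pairs), not the dict itself:
```python
config = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 5433}, 'debug': True}

config = _update_dict_within_dict(overrides.items(), config)
print(config)  # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
```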
def masterProductFieldUpdate(objectId: str):
"""
Submit handler for updating & removing field overrides.
:param objectId: The mongodb master product id.
"""
key = request.form.get("field-key")
value = request.form.get("field-value")
# Clean up and trim tags if being set.
if key == MASTER_PRODUCT_FIELD__TAGS:
tags = value.strip().split(",")
if len(tags):
tags = ", ".join([tag.strip() for tag in set(tags) if tag.strip()])
value = tags
if thk.products.overrideProductField(objectId, key, value):
# If the product is active, mark it for upserting.
product = thk.products.getOneProduct(objectId)
if product and THK_ACTIVE in product and product[THK_ACTIVE]:
thk.products.rebuildActiveProduct(objectId)
flash("Field modified successfully.", "success")
else:
flash("Field could not be modified.", "danger")
return redirect(url_for("products.masterProductEdit", objectId=objectId)) | 3d87cf2de42d5ee0ee9d43116c0bff4181f42da0 | 3,659,266 |
def recalc_Th(Pb, age):
"""Calculates the equivalent amount of ThO_2 that would be required to produce the
measured amount of PbO if there was no UO_2 in the monazite.
INPUTS:
Pb: the concentration of Pb in parts per million
age: the age in million years
"""
return (232. / 208.) * Pb / (np.exp(4.95e-11 * (age * 1e6)) - 1) | 79ba3cc8e9db8adba1d31ec6f9fe3588d3531b97 | 3,659,267 |
def relative_periodic_trajectory_wrap(
reference_point: ParameterVector,
trajectory: ArrayOfParameterVectors,
period: float = 2 * np.pi,
) -> ArrayOfParameterVectors:
"""Function that returns a wrapped 'copy' of a parameter trajectory such that
the distance between the final point of the trajectory and the reference point
is minimal inside the specified period.
The rest of the trajectory is being transformed in the same manner.
NOTE:
It only works as intended if the period is larger than the distance
between the consecutive points in the trajectory.
Args:
reference_point: Reference point for periodic wrapping of the trajectory.
trajectory: Trajectory that is wrapped to a copy of itself such that
the distance between the final point in the trajectory
and the reference point is minimal.
period: Periodicity of each parameter in each point of the trajectory.
Defaults to 2*np.pi.
"""
if not np.all(np.linalg.norm(np.diff(trajectory, axis=0), axis=1) < period):
raise ValueError(
"Distances between consecutive points must be smaller than period."
)
wrapped_trajectory = np.copy(trajectory).astype(float)
wrapped_trajectory[-1] = relative_periodic_wrap(
reference_point, trajectory[-1], period=period
)
for ii in range(2, len(wrapped_trajectory) + 1):
wrapped_trajectory[-ii] = relative_periodic_wrap(
wrapped_trajectory[-ii + 1], trajectory[-ii], period=period
)
return wrapped_trajectory | ee41bdb547367186b82324e3e080b758984b7747 | 3,659,268 |
import warnings
def planToSet(world,robot,target,
edgeCheckResolution=1e-2,
extraConstraints=[],
equalityConstraints=[],
equalityTolerance=1e-3,
ignoreCollisions=[],
movingSubset=None,
**planOptions):
"""
Creates a MotionPlan object that can be called to solve a standard motion
planning problem for a robot in a world. The plan starts from the robot's
current configuration and ends in a target set.
Args:
world (WorldModel): the world in which the robot lives, including
obstacles
robot (RobotModel): the moving robot. The plan starts from
robot.getConfig()
target (function or CSpace): a function f(q) returning a bool which is
True if the configuration q is a goal, OR an instance of a CSpace
subclass where sample() generates a sample in the target set and
feasible(x) tests whether a sample is in the target set.
.. note::
The function should accept vectors of the same dimensionality
as the robot, not the moving subset. Similarly, the CSpace
should have the same dimensionality as the robot.
edgeCheckResolution (float, optional): the resolution at which edges in the path are
checked for feasibility
extraConstraints (list, optional): possible extra constraint functions, each
of which needs to return True if satisfied.
.. note::
Don't put cartesian constraints here! Instead place your function in equalityConstraints.
equalityConstraints (list, optional): a list of IKObjectives or equality
constraints f(x)=0 that must be satisfied during the motion. Equality
constraints may return a float or a list of floats. In the latter case, this
is interpreted as a vector function, in which all entries of the vector must be 0.
equalityTolerance (float, optional): a tolerance to which all the equality constraints
must be satisfied.
ignoreCollisions (list): a list of ignored collisions. Each element may be
a body in the world, or a pair (a,b) where a, b are bodies in the world.
movingSubset (optional): if 'auto', 'all', or None (default), all joints
will be allowed to move. If this is a list, then only these joint
indices will be allowed to move.
planOptions (keywords): keyword options that will be sent to the planner. See
the documentation for MotionPlan.setOptions for more details.
Returns:
MotionPlan: a planner instance that can be called to get a
kinematically-feasible plan. (see :meth:`MotionPlan.planMore` )
The underlying configuration space (a RobotCSpace, ClosedLoopRobotCSpace, or
EmbeddedRobotCSpace) can be retrieved using the "space" attribute of the
resulting MotionPlan object.
"""
q0 = robot.getConfig()
subset = []
if movingSubset == 'auto' or movingSubset == 'all' or movingSubset == None:
subset = list(range(len(q0)))
else:
subset = movingSubset
space = makeSpace(world=world,robot=robot,
edgeCheckResolution=edgeCheckResolution,
extraConstraints=extraConstraints,
equalityConstraints=equalityConstraints,
equalityTolerance=equalityTolerance,
ignoreCollisions=ignoreCollisions,
movingSubset=subset)
if hasattr(space,'lift'): #the planning takes place in a space of lower dimension than #links
plan = EmbeddedMotionPlan(space,q0,**planOptions)
else:
plan = MotionPlan(space,**planOptions)
#convert target to a (test,sample) pair if it's a cspace
if isinstance(target,CSpace):
goal = [(lambda x:target.feasible(x)),(lambda : target.sample())]
else:
if not callable(target):
if not isinstance(target,(tuple,list)) or len(target)!=2 or not callable(target[0]) or not callable(target[1]):
raise TypeError("target must be a predicate function or CSpace object")
goal = target
try:
plan.setEndpoints(q0,goal)
except RuntimeError:
#the start configuration is infeasible, print it out
if space.cspace==None: space.setup()
sfailures = space.cspace.feasibilityFailures(plan.space.project(q0))
warnings.warn("Start configuration fails {}".format(sfailures))
raise
return plan | d03ec2c6c1e00388d1271af1e17a94eda0f50122 | 3,659,269 |
def itkimage_to_json(itkimage, manager=None):
"""Serialize a Python itk.Image object.
Attributes of this dictionary are to be passed to the JavaScript itkimage
constructor.
"""
if itkimage is None:
return None
else:
direction = itkimage.GetDirection()
directionMatrix = direction.GetVnlMatrix()
directionList = []
dimension = itkimage.GetImageDimension()
pixelArr = itk.array_view_from_image(itkimage)
compressor = zstd.ZstdCompressor(level=3)
compressed = compressor.compress(pixelArr.data)
pixelArrCompressed = memoryview(compressed)
for col in range(dimension):
for row in range(dimension):
directionList.append(directionMatrix.get(row, col))
componentType, pixelType = _image_to_type(itkimage)
imageType = dict(
dimension=dimension,
componentType=componentType,
pixelType=pixelType,
components=itkimage.GetNumberOfComponentsPerPixel()
)
return dict(
imageType=imageType,
origin=tuple(itkimage.GetOrigin()),
spacing=tuple(itkimage.GetSpacing()),
size=tuple(itkimage.GetBufferedRegion().GetSize()),
direction={'data': directionList,
'rows': dimension,
'columns': dimension},
compressedData=pixelArrCompressed
) | e55f2da9792e4772de4b145375d1eec1e6ee6e06 | 3,659,270 |
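# Minimal, self-contained sketch of the compression step used above (hedged):
# an image's pixel buffer is zstd-compressed for transport and can be restored
# losslessly.  Only numpy and the `zstandard` package are assumed; no itk
# objects are needed here.
import numpy as np
import zstandard as zstd

pixels = np.arange(12, dtype=np.float32).reshape(3, 4)
blob = zstd.ZstdCompressor(level=3).compress(pixels.data)          # same call pattern as above
restored = np.frombuffer(zstd.ZstdDecompressor().decompress(blob),
                         dtype=pixels.dtype).reshape(pixels.shape)
assert np.array_equal(pixels, restored)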
def project(pnt, norm):
"""Projects a point following a norm."""
t = -np.sum(pnt*norm)/np.sum(norm*norm)
ret = pnt+norm*t
return ret/np.linalg.norm(ret) | 865b658862ebc47eccc117f0daebc8afcc99a2ac | 3,659,272 |
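# Quick numeric check of `project` (hedged sketch): the result should be a unit
# vector with no component along `norm`, i.e. the point projected onto the plane
# through the origin with normal `norm`, then renormalized.
import numpy as np

p = np.array([1.0, 2.0, 3.0])
n = np.array([0.0, 0.0, 1.0])
q = project(p, n)
assert abs(np.dot(q, n)) < 1e-12            # orthogonal to the normal
assert abs(np.linalg.norm(q) - 1.0) < 1e-12  # unit length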
def fix_trajectory(traj):
"""Remove duplicate waypoints that are introduced during smoothing.
"""
cspec = openravepy.ConfigurationSpecification()
cspec.AddDeltaTimeGroup()
iwaypoint = 1
num_removed = 0
while iwaypoint < traj.GetNumWaypoints():
waypoint = traj.GetWaypoint(iwaypoint, cspec)
delta_time = cspec.ExtractDeltaTime(waypoint)
if delta_time == 0.0:
traj.Remove(iwaypoint, iwaypoint + 1)
num_removed += 1
else:
iwaypoint += 1
return num_removed | 30e3925c518dd4aff0f38ef7a02aaa9f7ab3680a | 3,659,273 |
def select_report_data(conn):
""" select report data to DB """
cur = conn.cursor()
cur.execute("SELECT * FROM report_analyze")
report = cur.fetchall()
cur.close()
return report | 9d0bf6d4f6758c873bd6643673784239f9bf4557 | 3,659,275 |
import numpy
def func_lorentz_by_h_pv(z, h_pv, flag_z: bool = False, flag_h_pv: bool = False):
"""Gauss function as function of h_pv
"""
inv_h_pv = 1./h_pv
inv_h_pv_sq = numpy.square(inv_h_pv)
z_deg = z * 180./numpy.pi
c_a = 2./numpy.pi
a_l = c_a * inv_h_pv
b_l = 4.*inv_h_pv_sq
z_deg_sq = numpy.square(z_deg)
res = numpy.expand_dims(a_l, axis=-1) /(1+ numpy.expand_dims(b_l, axis=-1) * z_deg_sq)
dder = {}
if flag_z:
dder["z"] = -2.*z_deg*numpy.expand_dims(b_l,axis=-1)*res/(1.+numpy.expand_dims(b_l, axis=-1)*z_deg_sq) * 180./numpy.pi
if flag_h_pv:
dder["h_pv"] = (c_a * (numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq) - \
c_a * numpy.expand_dims(h_pv, axis=-1))/numpy.square(numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq)
return res, dder | 802029e167439471e892fbfbfe4d6fdce8cb1a0e | 3,659,276 |
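# Sanity-check sketch for the Lorentz profile above (hedged): at z = 0 the peak
# height should equal 2/(pi*h_pv), and the area over the angle in degrees should
# be close to 1 -- a unit-area Lorentzian with FWHM h_pv given in degrees.
# The tolerances below are illustrative assumptions.
import numpy
h_pv = 0.5
z = numpy.linspace(-30., 30., 60001) * numpy.pi / 180.     # radians, as the function expects
res, _ = func_lorentz_by_h_pv(z, h_pv)
assert abs(res.max() - 2./(numpy.pi*h_pv)) < 1e-6
area = res.sum() * (60./60000.)                            # Riemann sum over degrees
assert abs(area - 1.) < 1e-2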
def get_profile(aid):
"""
    get the profile image of the author with the given aid
"""
    if 'logged_in' in session and aid == session['logged_id']:
try:
re_aid = request.args.get("aid")
re = aController.getAuthorByAid(re_aid)
if re != None:
return re
return redirect(url_for('/'))
except KeyError:
return redirect(url_for('/'))
return redirect(url_for('/')) | daa759c1493a15d6a2e300a6ab552aae30f59706 | 3,659,278 |
def SystemSettings_GetMetric(*args, **kwargs):
"""SystemSettings_GetMetric(int index, Window win=None) -> int"""
return _misc_.SystemSettings_GetMetric(*args, **kwargs) | d9d6d00e6cf54f8e2ed8a06c616b17d6b2905526 | 3,659,279 |
from pathlib import Path
def all_files(dir, pattern):
"""Recursively finds every file in 'dir' whose name matches 'pattern'."""
    return [f.as_posix() for f in Path(dir).rglob(pattern)]
def get_identity_groups(ctx):
"""Load identity groups definitions."""
return render_template('identity-groups', ctx) | 820eb3ebf8d141f37a93485e4428e1cd79da6a44 | 3,659,281 |
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import AnonymousUser
from ..django_legacy.django2_0.utils.deprecation import CallableFalse, CallableTrue
def fix_behaviour_contrib_auth_user_is_anonymous_is_authenticated_callability(utils):
"""
Make user.is_anonymous and user.is_authenticated behave both as properties and methods,
by preserving their callability like in earlier Django version.
"""
utils.skip_if_app_not_installed("django.contrib.contenttypes") # BEFORE IMPORTS!
@property
def is_anonymous_for_AbstractBaseUser(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return CallableFalse
@property
def is_authenticated_for_AbstractBaseUser(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return CallableTrue
utils.inject_attribute(AbstractBaseUser, "is_anonymous", is_anonymous_for_AbstractBaseUser)
utils.inject_attribute(AbstractBaseUser, "is_authenticated", is_authenticated_for_AbstractBaseUser)
@property
def is_anonymous_for_AnonymousUser(self):
return CallableTrue
@property
def is_authenticated_for_AnonymousUser(self):
return CallableFalse
utils.inject_attribute(AnonymousUser, "is_anonymous", is_anonymous_for_AnonymousUser)
utils.inject_attribute(AnonymousUser, "is_authenticated", is_authenticated_for_AnonymousUser) | b3f94992c0ada29b82e64d40cac190a567db9013 | 3,659,282 |
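# Minimal standalone sketch (hedged) of the mechanism the patch above restores:
# a boolean-like object that can also be *called*, so both `user.is_authenticated`
# and `user.is_authenticated()` keep working.  `CallableBool` here is a local
# illustration, not Django's actual CallableTrue/CallableFalse implementation.
class CallableBool:
    def __init__(self, value):
        self._value = bool(value)
    def __bool__(self):
        return self._value          # property-style truthiness
    def __call__(self):
        return self._value          # old method-style access
    def __eq__(self, other):
        return self._value == other

flag = CallableBool(True)
assert flag            # usable as a property-style boolean
assert flag()          # still callable like the legacy method form
assert flag == True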
def matches(spc, shape_):
"""
Return True if the shape adheres to the spc (spc has optional color/shape
restrictions)
"""
(c, s) = spc
matches_color = c is None or (shape_.color == c)
matches_shape = s is None or (shape_.name == s)
return matches_color and matches_shape | fa9c90ea2be17b0cff7e4e76e63cf2c6a70cc1ec | 3,659,284 |
from typing import Any
def jsonsafe(obj: Any) -> ResponseVal:
"""
Catch the TypeError which results from encoding non-encodable types
This uses the serialize function from my.core.serialize, which handles
serializing most types in HPI
"""
try:
return Response(dumps(obj), status=200, headers={"Content-Type": "application/json"})
except TypeError as encode_err:
return {
"error": "Could not encode response from HPI function as JSON",
"exception": str(encode_err),
}, 400 | 90aaaad3e890eeb09aaa683395a80f80394bba3e | 3,659,285 |
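# Small self-contained illustration (hedged) of the failure mode `jsonsafe`
# guards against: the standard library's json encoder raises TypeError for types
# it cannot serialize, which is what gets turned into the 400 error payload
# above.  Plain `json` is used here instead of HPI's serializer.
import json
from datetime import datetime

try:
    json.dumps({"when": datetime(2021, 1, 1)})
except TypeError as encode_err:
    payload = {"error": "Could not encode response as JSON",
               "exception": str(encode_err)}
assert "error" in payload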
def get_appliances(self) -> list:
"""Get all appliances from Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - appliance
- GET
- /appliance
:return: Returns list of dictionaries of each appliance
:rtype: list
"""
return self._get("/appliance") | 2f1e48869f4494a995efd4adba80a235c4fb1486 | 3,659,286 |
def is_str(element):
"""True if string else False"""
check = isinstance(element, str)
return check | c46b80d109b382de761618c8c9a50d94600af876 | 3,659,287 |
import tempfile
import shutil
def anonymize_and_streamline(old_file, target_folder):
"""
    This function loads the EDFs of a folder and
    1. removes their birthdate and patient name
    2. renames the channels to standardized channel names
    3. saves the files in another folder under a non-identifiable name
    4. verifies that the new files have the same content as the old ones
"""
    # load the two csvs listing the EDFs that we don't process and those where the ECG is upside down
pre_coding_discard = [line[0] for line in misc.read_csv(cfg.edfs_discard) if line[2]=='1']
to_invert = [line[0] for line in misc.read_csv(cfg.edfs_invert)]
# Here we read the list of controls and patients with their age and gender
mappings = misc.read_csv(cfg.controls_csv)
mappings.extend(misc.read_csv(cfg.patients_csv))
mappings = dict([[name, {'gender':gender, 'age':age}] for name, gender, age,*_ in mappings])
# old name is the personalized file without file extension, e.g. thomas_smith(1)
old_name = ospath.splitext(ospath.basename(old_file))[0]
# new name is the codified version without extension e.g '123_45678'
new_name = codify(old_name)
# use a temporary file to write and then move it,
# this avoids half-written files that cannot be read later
tmp_name = tempfile.TemporaryFile(prefix='anonymize').name
if old_name in pre_coding_discard:
print('EDF is marked as corrupt and will be discarded')
return
# this is where the anonymized file will be stored
new_file = ospath.join(target_folder, new_name + '.edf')
if ospath.exists(new_file):
        print ('New file exists already {}'.format(new_file))
else:
# anonymize
print ('Writing {} from {}'.format(new_file, old_name))
assert ospath.isfile(old_file), f'{old_file} does not exist'
signals, signal_headers, header = sleep_utils.read_edf(old_file,
digital=True,
verbose=False)
# remove patient info
header['birthdate'] = ''
header['patientname'] = new_name
header['patientcode'] = new_name
header['gender'] = mappings[old_name]['gender']
header['age'] = mappings[old_name]['age']
# rename channels to a unified notation, e.g. EKG becomes ECG I
for shead in signal_headers:
ch = shead['label']
if ch in ch_mapping:
ch = ch_mapping[ch]
shead['label'] = ch
# Invert the ECG channel if necessary
if old_name in to_invert:
for i,sig in enumerate(signals):
label = signal_headers[i]['label'].lower()
if label == cfg.ecg_channel.lower():
signals[i] = -sig
        # we write to tmp first so that half-written/corrupted files are never left behind
print ('Writing tmp for {}'.format(new_file))
sleep_utils.write_edf(tmp_name, signals, signal_headers, header,
digital=True, correct=True)
# verify that contents for both files match exactly
print ('Verifying tmp for {}'.format(new_file))
        # workaround: dmin/dmax don't match for this file after inverting, so skip the comparison
if not old_name=='B0036':
sleep_utils.compare_edf(old_file, tmp_name, verbose=False)
# now we move the tmp file to its new location.
shutil.move(tmp_name, new_file)
# also copy additional file information ie hypnograms and kubios files
old_dir = ospath.dirname(old_file)
        pattern = old_name.replace('_m', '').replace('_w', '') # remove the gender suffix used for additional NT1 patients
add_files = ospath.list_files(old_dir, patterns=[f'{pattern}*txt', f'{pattern}*dat', f'{pattern}*mat'])
for add_file in add_files:
# e.g. .mat or .npy etc etc
new_add_file = ospath.join(target_folder,
ospath.basename(add_file.replace(pattern, new_name)))
if ospath.exists(new_add_file):continue
# hypnograms will be copied to .hypno
try:
new_add_file = new_add_file.replace('-Schlafprofil', '')
new_add_file = new_add_file.replace('_sl','')
new_add_file = new_add_file.replace('.txt', '.hypno').replace('.dat', '.hypno')
shutil.copy(add_file, new_add_file)
except Exception as e:
print(e)
return old_name, new_name | 2210c72891c3faec73a9d5ce4b83d56ee9adef38 | 3,659,288 |
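# Standalone sketch (hedged) of the "write to a temporary file, then move it"
# pattern used above to avoid leaving half-written output behind if the process
# dies mid-write.  NamedTemporaryFile(delete=False) is used here purely for
# illustration; the function above builds its temp path differently.
import os
import shutil
import tempfile

def atomic_write(data: bytes, target_path: str) -> None:
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(data)                 # writing happens under a throwaway name
        tmp_path = tmp.name
    shutil.move(tmp_path, target_path)  # the target only ever appears fully written

atomic_write(b"example", os.path.join(tempfile.gettempdir(), "anonymize_demo.bin"))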
def deal_text(text: str) -> str:
"""deal the text
Args:
text (str): text need to be deal
Returns:
str: dealed text
"""
text = " "+text
text = text.replace("。","。\n ")
text = text.replace("?","?\n ")
text = text.replace("!","!\n ")
text = text.replace(";",";\n ")
return text | 8f16e7cd2431dfc53503c877f9d4b5429f738323 | 3,659,289 |
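# Usage sketch for deal_text (hedged): each Chinese sentence-ending mark gets a
# newline plus a leading space appended after it, yielding one sentence per line.
sample = "你好。最近怎么样?我很好!"
out = deal_text(sample)
print(out)                      # one sentence per line, each line starting with a space
assert out.count("\n") == 3     # one break per sentence-ending mark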
def _find_timepoints_1D(single_stimulus_code):
"""
Find the indexes where the value of single_stimulus_code turn from zero to non_zero
single_stimulus_code : 1-D array
>>> _find_timepoints_1D([5,5,0,0,4,4,4,0,0,1,0,2,0])
array([ 0, 4, 9, 11])
>>> _find_timepoints_1D([0,0,1,2,3,0,1,0,0])
array([2, 6])
>>> _find_timepoints_1D([0,0,1,2,0,1])
array([2, 5])
>>> _find_timepoints_1D([5,0,0,1,2,5])
array([0, 3])
"""
flag = True # whether have seen 0 so far
timepoints = []
for index, timepoint in enumerate(single_stimulus_code):
if timepoint != 0 and flag:
timepoints.append(index)
flag = False
if timepoint == 0 and not flag:
flag = True
return np.array(timepoints) | b2c3d08f229b03f9b9f5278fea4e25c25274d213 | 3,659,291 |
def stiffness_tric(
components: np.ndarray = None,
components_d: dict = None
) -> np.ndarray:
"""Generate triclinic fourth-order stiffness tensor.
Parameters
----------
components : np.ndarray
21 components of triclinic tensor, see
stiffness_component_dict
components_d : dictionary
dictionary with 21 components
of triclinic tensor, see
stiffness_component_dict
Returns
-------
np.ndarray
Fourth-order triclinic tensor with minor
and major symmetries
"""
out = np.zeros(shape=[3, 3, 3, 3])
if not isinstance(components, type(None)):
components_d = stiffness_component_dict(components)
for k, v in components_d.items():
i = [int(s)-1 for s in k]
out[i[0], i[1], i[2], i[3]] = v
# tt_l
out[i[1], i[0], i[2], i[3]] = v
# tt_r
out[i[0], i[1], i[3], i[2]] = v
out[i[1], i[0], i[3], i[2]] = v # + tt_l
# tt_m
out[i[2], i[3], i[0], i[1]] = v
out[i[3], i[2], i[0], i[1]] = v # + tt_l
out[i[2], i[3], i[1], i[0]] = v # + tt_r
out[i[3], i[2], i[1], i[0]] = v # + tt_l + tt_r
return out | f96a2ffb4e0542f56a4329393b77e9a875dc6cd5 | 3,659,292 |
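# Hedged usage sketch: build a triclinic stiffness tensor from a components
# dictionary (keys are 4-digit index strings, as consumed above) and verify the
# minor and major symmetries that the loop writes out explicitly.  The numeric
# values are arbitrary illustrations, not real material constants.
C = stiffness_tric(components_d={'1111': 200.0, '1122': 80.0, '1323': 5.0})
assert C[0, 0, 1, 1] == C[1, 1, 0, 0] == 80.0                    # major symmetry
assert C[0, 2, 1, 2] == C[2, 0, 1, 2] == C[0, 2, 2, 1] == 5.0    # minor symmetries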
from typing import Optional
from pathlib import Path
def get_dataset(
dataset_name: str,
path: Optional[Path] = None,
regenerate: bool = False,
) -> TrainDatasets:
"""
Get the repository dataset.
Currently only [Retail Dataset](https://archive.ics.uci.edu/ml/datasets/online+retail) is available
Parameters:
dataset_name:
name of the dataset, for instance "retail"
regenerate:
whether to regenerate the dataset even if a local file is present.
If this flag is False and the file is present, the dataset will not
be downloaded again.
path:
where the dataset should be saved
Returns:
dataset obtained by either downloading or reloading from local file.
"""
if path is None:
path = default_dataset_path
dataset_path = materialize_dataset(dataset_name, path, regenerate)
return load_datasets(
metadata=dataset_path,
train=dataset_path / "train",
test=dataset_path / "test",
) | f913f613858c444ddac479d65a169b74a9b4db29 | 3,659,293 |
def get_info_safe(obj, attr, default=None):
"""safely retrieve @attr from @obj"""
try:
oval = obj.__getattribute__(attr)
    except Exception:
logthis("Attribute does not exist, using default", prefix=attr, suffix=default, loglevel=LL.WARNING)
oval = default
return oval | 24b4bace8a8cef16d7cddc44238a24dd636f6ca8 | 3,659,294 |
def mkviewcolbg(view=None, header=u'', colno=None, cb=None,
width=None, halign=None, calign=None,
expand=False, editcb=None, maxwidth=None):
"""Return a text view column."""
i = gtk.CellRendererText()
if cb is not None:
i.set_property(u'editable', True)
i.connect(u'edited', cb, colno)
if calign is not None:
i.set_property(u'xalign', calign)
j = gtk.TreeViewColumn(header, i, background=colno)
if halign is not None:
j.set_alignment(halign)
if expand:
if width is not None:
j.set_min_width(width)
j.set_expand(True)
else:
if width is not None:
j.set_min_width(width)
if maxwidth is not None:
j.set_max_width(maxwidth)
view.append_column(j)
if editcb is not None:
i.connect(u'editing-started', editcb)
return i | 7b49154d9d26cc93f5e42116967634eddc06a06e | 3,659,295 |
def list2str(lst, indent=0, brackets=True, quotes=True):
"""
Generate a Python syntax list string with an indention
:param lst: list
:param indent: indention as integer
:param brackets: surround the list expression by brackets as boolean
:param quotes: surround each item with quotes
:return: string
"""
if quotes:
lst_str = str(lst)
if not brackets:
lst_str = lst_str[1:-1]
else:
lst_str = ', '.join(lst)
if brackets:
lst_str = '[' + lst_str + ']'
lb = ',\n' + indent*' '
return lst_str.replace(', ', lb) | ef441632bf59714d3d44ede5e78835625b41f047 | 3,659,296 |
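# Worked example (hedged) of list2str's line-breaking behaviour: each ", "
# separator becomes a comma, a newline, and `indent` spaces.
out = list2str(['alpha', 'beta', 'gamma'], indent=4)
assert out == "['alpha',\n    'beta',\n    'gamma']"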
def _roi_pool_shape(op):
"""Shape function for the RoiPool op.
"""
dims_data = op.inputs[0].get_shape().as_list()
channels = dims_data[3]
dims_rois = op.inputs[1].get_shape().as_list()
num_rois = dims_rois[0]
pooled_height = op.get_attr('pooled_height')
pooled_width = op.get_attr('pooled_width')
output_shape = tf.TensorShape([num_rois, pooled_height, pooled_width, channels])
return [output_shape, output_shape] | 9c84aa0054dacacefcdf2fd9066538239668ee66 | 3,659,298 |
from typing import Optional
def get_users(*, limit: int, order_by: str = "id", offset: Optional[str] = None) -> APIResponse:
"""Get users"""
appbuilder = current_app.appbuilder
session = appbuilder.get_session
total_entries = session.query(func.count(User.id)).scalar()
to_replace = {"user_id": "id"}
allowed_filter_attrs = [
"user_id",
'id',
"first_name",
"last_name",
"user_name",
"email",
"is_active",
"role",
]
query = session.query(User)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
users = query.offset(offset).limit(limit).all()
return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries)) | 5ef71bfcc79314f0e9481dfa78a8e079dce14339 | 3,659,299 |
import torch
from typing import Optional
def ppo_clip_policy_loss(
logps: torch.Tensor,
logps_old: torch.Tensor,
advs: torch.Tensor,
clipratio: Optional[float] = 0.2
) -> torch.Tensor:
"""
Loss function for a PPO-clip policy.
See paper for full loss function math: https://arxiv.org/abs/1707.06347
Args:
- logps (torch.Tensor): Action log-probabilities under the current policy.
- logps_old (torch.Tensor): Action log-probabilities under the old (pre-update) policy.
- advs (torch.Tensor): Advantage estimates for the actions taken.
- clipratio (float): Clipping parameter for PPO-clip loss. In general, is fine with being left as default.
Returns:
- ppo_loss (torch.Tensor): Loss term for PPO agent.
- kl (torch.Tensor): KL-divergence estimate between new and old policies.
"""
policy_ratio = torch.exp(logps - logps_old)
clipped_adv = torch.clamp(policy_ratio, 1 - clipratio, 1 + clipratio) * advs
ppo_loss = -(torch.min(policy_ratio * advs, clipped_adv)).mean()
kl = (logps_old - logps).mean().item()
return ppo_loss, kl | 203c4072e1c04db9cceb9fa58f70b9af512ffb1c | 3,659,301 |
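# Minimal usage sketch (hedged): random tensors stand in for a real rollout,
# just to show the expected shapes and the (loss, kl) return pair of the
# PPO-clip loss above.
import torch

batch = 32
logps = torch.randn(batch)
logps_old = logps + 0.01 * torch.randn(batch)   # pretend the policy moved slightly
advs = torch.randn(batch)
loss, kl = ppo_clip_policy_loss(logps, logps_old, advs)
assert loss.dim() == 0          # scalar loss suitable for loss.backward()
assert isinstance(kl, float)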
from timeseries import timeseries, loadDBstation
from datetime import datetime
def tide_pred_correc(modfile,lon,lat,time,dbfile,ID,z=None,conlist=None):
"""
Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
Applies an amplitude and phase correction based on a time series
"""
print('Calculating tidal correction factors from time series...')
# Load using the timeseries module
t0 = datetime.strftime(time[0],'%Y%m%d.%H%M%S')
t1 = datetime.strftime(time[-1],'%Y%m%d.%H%M%S')
dt = time[1]-time[0]
print(t0, t1, dt.total_seconds())
timeinfo = (t0,t1,dt.total_seconds())
TS,meta = loadDBstation(dbfile,ID,'waterlevel',timeinfo=timeinfo,filttype='low',cutoff=2*3600,output_meta=True)
lonpt=meta['longitude']
latpt=meta['latitude']
print(lonpt,latpt)
# Extract the OTIS tide prediction
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lonpt,latpt)
h_amp = np.abs(h_re+1j*h_im)[:,0]
h_phs = np.angle(h_re+1j*h_im)[:,0]
# Harmonic analysis of observation time series
amp, phs, frq, frqnames, htide = TS.tidefit(frqnames=conlist)
TS_harm = timeseries(time,htide)
residual = TS.y - htide
# Calculate the amp and phase corrections
dphs = phs - h_phs + np.pi
damp = amp/h_amp
# Extract the data along the specified points
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
h_amp = np.abs(h_re+1j*h_im)
h_phs = np.angle(h_re+1j*h_im)
u_amp = np.abs(u_re+1j*u_im)
u_phs = np.angle(u_re+1j*u_im)
v_amp = np.abs(v_re+1j*v_im)
v_phs = np.angle(v_re+1j*v_im)
# Initialise the output arrays
sz = lon.shape
nx = np.prod(sz)
nt = time.shape[0]
h=np.zeros((nt,nx))
u=np.zeros((nt,nx))
v=np.zeros((nt,nx))
# Rebuild the time series
#tsec=TS_harm.tsec - TS_harm.tsec[0]
tsec = othertime.SecondsSince(time,basetime=time[0])
print(tsec[0])
for nn,om in enumerate(omega):
for ii in range(0,nx):
h[:,ii] += damp[nn]*h_amp[nn,ii] * np.cos(om*tsec - (h_phs[nn,ii] + dphs[nn]))
u[:,ii] += damp[nn]*u_amp[nn,ii] * np.cos(om*tsec - (u_phs[nn,ii] + dphs[nn]))
v[:,ii] += damp[nn]*v_amp[nn,ii] * np.cos(om*tsec - (v_phs[nn,ii] + dphs[nn]))
szo = (nt,)+sz
return h.reshape(szo), u.reshape(szo), v.reshape(szo), residual | 883ac85787a700a785a0b0a08f521aaf6ad821d1 | 3,659,303 |
def generate_new_filename(this_key):
"""Generates filename for processed data from information in this_key."""
[_, _, source_id, experiment_id, _, _] = this_key.split('.')
this_fname = THIS_VARIABLE_ID+'_'+experiment_id+'_'+source_id
return this_fname | 2e9edb4730257e8fc4c68bbcbb32cce133041bb8 | 3,659,304 |
import cx_Oracle
def get_connection(user, pwd):
    """ Get a connection to Oracle. """
try:
connection = cx_Oracle.connect(user + '/' + pwd + '@' +
config.FISCO_CONNECTION_STRING)
connection.autocommit = False
print('Connection Opened')
return connection
except Exception as e:
print('Exception: ' + str(e)) | ba7ca784f0778fb06843e3c68af63e6348406735 | 3,659,305 |
from pyspark.sql import functions as F
from pyspark.sql import types as T
def _generate_select_expression_for_extended_string_unix_timestamp_ms_to_timestamp(source_column, name):
"""
More robust conversion from StringType to TimestampType. It is assumed that the
timezone is already set to UTC in spark / java to avoid implicit timezone conversions.
Is able to additionally handle (compared to implicit Spark conversion):
* Unix timestamps in milliseconds
* Preceding whitespace
* Trailing whitespace
    * Preceding and trailing whitespace
Hint
----
Please have a look at the tests to get a better feeling how it behaves under
tests/unit/transformer/test_mapper_custom_data_types.py::TestExtendedStringConversions and
tests/data/test_fixtures/mapper_custom_data_types_fixtures.py
Example
-------
>>> from spooq.transformer import Mapper
>>>
>>> input_df.head(3)
[Row(input_string="2020-08-12T12:43:14+0000"),
Row(input_string="1597069446000"),
Row(input_string="2020-08-12")]
>>> mapping = [("output_value", "input_string", "extended_string_to_timestamp")]
>>> output_df = Mapper(mapping).transform(input_df)
>>> output_df.head(3)
[Row(input_string=datetime.datetime(2020, 8, 12, 12, 43, 14)),
Row(input_string=datetime.datetime(2020, 8, 10, 14, 24, 6)),
Row(input_string=datetime.datetime(2020, 8, 12, 0, 0, 0))]
"""
return (
F.when(
F.trim(source_column).cast(T.LongType()).isNotNull(), (F.trim(source_column) / 1000).cast(T.TimestampType())
)
.otherwise(F.trim(source_column).cast(T.TimestampType()))
.alias(name)
) | 22a1220c260406c82ede127b87fa23356d2f5192 | 3,659,306 |
import xmlrpclib
from getpass import getpass
def get_webf_session():
"""
Return an instance of a Webfaction server and a session for authentication
to make further API calls.
"""
server = xmlrpclib.ServerProxy("https://api.webfaction.com/")
print("Logging in to Webfaction as %s." % env.user)
if env.password is None:
env.password = getpass(
"Enter Webfaction password for user %s: " % env.user)
session, account = server.login(env.user, env.password)
print("Succesfully logged in as %s." % env.user)
return server, session, account | e6ebe8ad51cdf4a33fcfa70e5f1983ede6f66d31 | 3,659,307 |