content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def endorsement_services():
"""Return endorsement service list
Loads all defined service modules unless settings specifies otherwise
"""
global ENDORSEMENT_SERVICES
if ENDORSEMENT_SERVICES is None:
ENDORSEMENT_SERVICES = _load_endorsement_services()
return ENDORSEMENT_SERVICES | 543b6c86587a0da58a3e9b8d1756a6d763e60d6a | 1,200 |
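This first snippet depends on a module-level cache and a loader helper that are not part of the row. A minimal sketch of that surrounding context, with the module paths invented purely for illustration, might look like this:

from importlib import import_module

# Module-level cache, filled on the first call to endorsement_services().
ENDORSEMENT_SERVICES = None

# Hypothetical list of service module paths; the real project would read
# this from its settings instead.
ENDORSEMENT_SERVICE_MODULES = [
    "endorsements.services.github",
    "endorsements.services.orcid",
]

def _load_endorsement_services():
    """Import every configured service module and return them as a list."""
    return [import_module(path) for path in ENDORSEMENT_SERVICE_MODULES]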
def select(arrays, index):
"""
Index each array in a tuple of arrays.
If the arrays tuple contains a ``None``, the entire tuple will be returned
as is.
Parameters
----------
arrays : tuple of arrays
index : array
An array of indices to select from arrays.
Returns
-------
indexed_arrays : tuple of arrays
Examples
--------
>>> import numpy as np
>>> select((np.arange(5), np.arange(-3, 2, 1)), [1, 3])
(array([1, 3]), array([-2, 0]))
>>> select((None, None, None, None), [1, 2])
(None, None, None, None)
"""
if arrays is None or any(i is None for i in arrays):
return arrays
return tuple(i.ravel()[index] for i in arrays) | 70109fbda58055d9712295dff261a95d99caac03 | 1,201 |
def waypoint(waypoint_id):
"""view a book page"""
wp = Waypoint.query.filter_by(id=waypoint_id).first()
options = Option.query.filter_by(sourceWaypoint_id=waypoint_id)
if wp is None:
abort(404)
return render_template('books/waypoint.html', book=wp.book_of, waypoint=wp, options=options) | 520883bdcb29f3e273f7836c4db128c859d9347f | 1,202 |
def encode_big_endian_16(i):
"""Take an int and return big-endian bytes"""
return encode_big_endian_32(i)[-2:] | c26557a6ac30f54746a8a5a1676cec22e3b3b197 | 1,203 |
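Slicing the last two bytes of a 32-bit big-endian encoding gives the 16-bit big-endian encoding whenever the value fits in 16 bits. A small sketch, with a struct-based stand-in for the `encode_big_endian_32` helper that the row does not show:

import struct

def encode_big_endian_32(i):
    """Hypothetical stand-in for the helper the snippet calls."""
    return struct.pack(">I", i)

# The last two bytes of the 32-bit encoding equal a direct 16-bit pack
# for any value in range(0, 2**16).
assert encode_big_endian_32(1000)[-2:] == struct.pack(">H", 1000) == b"\x03\xe8"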
from typing import List
import requests
from bs4 import BeautifulSoup
import re
def get_comments_from_fawm_page(
url: str,
username: str,
password: str,
) -> List[Response]:
"""Extract comments from a given FAWM page."""
response = requests.get(url, auth=(username, password))
response.encoding = "UTF-8"
html = response.text
soup = BeautifulSoup(html, "html.parser")
responses = []
# there are non-comments with the class "comment-item", so we need to narrow down
for el in soup.find_all("li", {"class": "comment-item", "id": re.compile(r"c\d+")}):
responses.append(get_response_from_li(url, el))
return responses | 68ec542bf909fc543c97836209c2803cd6e0f119 | 1,204 |
def send_message(service, user_id, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
try:
message = (service.users().messages().send(userId=user_id, body=message)
.execute())
        print('Message Id: %s' % message['id'])
        return message
    except errors.HttpError as error:
        print('An error occurred: %s' % error) | 6bbb3935e596d7d19669f5a0094f58542dd764d3 | 1,205 |
def get_supported_solvers():
"""
Returns a list of solvers supported on this machine.
:return: a list of SolverInterface sub-classes :list[SolverInterface]:
"""
return [sv for sv in builtin_solvers if sv.supported()] | b8fb9e9d780158ab0f45565c05e42fe47ae0d9f2 | 1,206 |
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except (AttributeError, TypeError):
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
    if hint is NotImplemented or \
       not isinstance(hint, int) or \
       hint < 0:
return None
return hint | 226ede41ec49fef4b43df92f425eb7f5135041ea | 1,207 |
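`_length_hint` falls back to the `__length_hint__` protocol (PEP 424) when `len()` is unavailable. A short usage sketch against the function as adjusted above:

class Cursor:
    """Toy iterable that only knows roughly how many rows it will yield."""
    def __init__(self, approx_rows):
        self._approx_rows = approx_rows
    def __iter__(self):
        return iter(range(self._approx_rows))
    def __length_hint__(self):
        return self._approx_rows

print(_length_hint([1, 2, 3]))    # 3    -> len() works directly
print(_length_hint(Cursor(100)))  # 100  -> falls back to __length_hint__
print(_length_hint(object()))     # None -> no hint available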
def chimeric_data():
"""Example containing spanning + junction reads from single fusion."""
return _build_chimeric_data(
[('1', 300, 1, 'T2onc', 420, 1, 2, '100M2208p38M62S', '62M38S', 'R1'),
('1', 300, 1, 'T2onc', 420, 1, 1, '100M2208p52M48S', '48M52S', 'R2'),
('1', 301, 1, 'T2onc', 420, 1, 1, '100M2208p52M48S', '48M52S', 'R3'),
('1', 300, 1, 'T2onc', 421, 1, 1, '100M2208p52M48S', '48M52S', 'R4'),
('1', 280, 1, 'T2onc', 435, 1, -1, '100M', '97M3S', 'S1'),
('1', 270, 1, 'T2onc', 445, 1, -1, '100M', '98M2S', 'S2'),
('1', 275, 1, 'T2onc', 435, 1, -1, '100M', '98M2S', 'S3')]) | 79277d820c0d3e28708d9ead49a55cbe4f51c4e3 | 1,208 |
def _get_merge_for_alias_key(database, key):
"""Return the Alias record of the merged player.
    Allow for value.merge on the record with the given key being any value.
    Return the record if value.merge is None, True, or False.
    Otherwise assume value.merge is an integer and use it to retrieve and
    return a record.
    Return None if get_alias() returns None.
"""
r = resultsrecord.get_alias(database, key)
if r is None:
return
elif r.value.merge is None:
return r
elif r.value.merge is True:
return r
elif r.value.merge is False:
return r
r = resultsrecord.get_alias(database, r.value.merge)
if r is None:
return
return r | 2384e9db49af07512c86f58b5c9eb964f9e9b1b2 | 1,209 |
import plistlib
import sys
import os
def saveusers(argv):
"""Save stdin to users.plist."""
try:
plist = plistlib.readPlist(sys.stdin)
    except Exception:
        print("Malformed users.plist", file=sys.stderr)
return 2
os.unlink(users_path)
plistlib.writePlist(plist, users_path)
return 0 | d2498478cc7827c6514652f214e9c2f3027f968f | 1,210 |
def get_bucket(self):
"""
Documentation:
---
Description:
Use bucket name to return a single S3 bucket object.
---
Returns:
bucket : S3 bucket
S3 bucket object
"""
    buckets = self.get_buckets()
    # check that there is a bucket with that name
    assert self.bucket_name in self.get_bucket_names(), "\nNo S3 bucket with that name.\n"
    # filter buckets by bucket_name
    bucket = buckets[self.bucket_name]
return bucket | 0d8ed3c8557e57fb8094524bc4cb4dcae09fe384 | 1,211 |
def euclidean_distance(x, y, weight=None):
"""Computes the Euclidean distance between two time series.
If the time series do not have the same length, an interpolation is performed.
Parameters
----------
x : nd-array
Time series x.
y : nd-array
Time series y.
weight: nd-array (Default: None)
query weight values.
Returns
-------
float
Euclidean distance value.
"""
p = 2
if len(x) != len(y):
x, y = interpolation(x, y)
if weight is None:
ed = np.linalg.norm(x - y, p)
else:
if len(np.shape(x)) > 1:
distance = _lnorm_multidimensional(x, y, weight, p=p)
else:
distance = _lnorm_unidimensional(x, y, weight, p=p)
ed = np.sum(distance)
return ed | 03a1cb557d7d295a6ebd89b0bd1dab937206c8e0 | 1,212 |
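The `_lnorm_multidimensional` and `_lnorm_unidimensional` helpers are not included in the row; judging from the call sites they compute weighted distance terms that sum to a weighted p-norm of the form

$$ d(x, y) = \Big( \sum_i w_i \, \lvert x_i - y_i \rvert^{p} \Big)^{1/p}, \qquad p = 2 . $$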
def path(artifactory_server, artifactory_auth):
"""ArtifactoryPath with defined server URL and authentication"""
def f(uri):
return artifactory.ArtifactoryPath(
artifactory_server + uri, auth=artifactory_auth
)
return f | 0eabd46b50812ce219affae5ce0d70ee66c7adc5 | 1,213 |
def get_outmost_polygon_boundary(img):
"""
Given a mask image with the mask describes the overlapping region of
two images, get the outmost contour of this region.
"""
mask = get_mask(img)
mask = cv2.dilate(mask, np.ones((2, 2), np.uint8), iterations=2)
cnts, hierarchy = cv2.findContours(
mask,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
# get the contour with largest aera
C = sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True)[0]
# polygon approximation
polygon = cv2.approxPolyDP(C, 0.009 * cv2.arcLength(C, True), True)
return polygon | 73896a69809259f3bf395895097d1fb81e05706e | 1,214 |
def check_ddp_wrapped(model: nn.Module) -> bool:
    """
    Checks whether model is wrapped with DataParallel/DistributedDataParallel.
    """
    parallel_wrappers = nn.DataParallel, nn.parallel.DistributedDataParallel
    # Check whether Apex is installed and, if it is,
    # add Apex's DistributedDataParallel to the tuple of checked types.
    # The import must live inside the try block so that a missing Apex
    # install is actually caught by the except ImportError below.
    try:
        from apex.parallel import DistributedDataParallel as apex_DDP
        parallel_wrappers = parallel_wrappers + (apex_DDP,)
    except ImportError:
        pass
    return isinstance(model, parallel_wrappers) | e14b0b1c09c088b310574e4a772c7dc3bec83ddf | 1,215 |
def adminRecords(request):
"""
管理租赁记录
:param request:
:return: html page
"""
token = request.COOKIES.get('admintoken')
if token is None:
return redirect('/adminLogin/')
result = MysqlConnector.get_one('YachtClub', 'select adminname from admincookies where token=%s', token)
if result is None:
return redirect('/adminLogin/')
return render(request, 'adminRecords.html') | 75a4d1da4e7556de46a455c1304cc80a5660c9ce | 1,216 |
def _make_fold(draw):
"""
Helper strategy for `test_line_fold` case.
The shape of the content will be the same every time:
a
b
c
But the chars and size of indent, plus trailing whitespace on each line
and number of line breaks will all be fuzzed.
"""
return (
draw(make_interspace(symbol_a, 0)),
draw(make_interspace(symbol_b, 1)),
draw(make_interspace(symbol_c, 1)),
) | 1488cafd51b7000ac0fe111b445fcc706876da00 | 1,217 |
import requests
def get_user_jwt() -> str:
"""
Returns:
str: The JWT token of the user
"""
login_data = check_login()
if not login_data:
token = requests.get(
'https://formee-auth.hackersreboot.tech/visitor').json()['token']
return token
if login_data:
token = requests.get('https://formee-auth.hackersreboot.tech/', json={
'username': login_data['username'], 'password': login_data['password']}).json()['token']
return token | 1e52921ba88dfefcf320895f98420a73af3f86ee | 1,218 |
def site_pressure(dset):
"""Get atmospheric pressure from local site measurements
If local atmospheric pressure measurements on a site are not available an alternative model given in configuration
file is used to determine atmospheric pressure.
TODO:
So far only gridded VMF1 model is used, if local pressure data are not available. Which alternative is used,
should be decided via the configuration file. How to check after an alternative model in configuration file?
model_list = config.tech[MODEL].meteorological_data.list???
Args:
dset (Dataset): A Dataset containing model data.
Returns:
numpy.ndarray: Atmospheric pressure for each observation in [hPa]
"""
pressure = np.zeros(dset.num_obs)
i_given = np.zeros(dset.num_obs, dtype=bool)
if "pressure" + (dset.default_field_suffix or "") in dset.fields:
i_given[np.logical_not(np.isnan(dset.pressure))] = True
pressure[i_given] = dset.pressure[i_given]
i_missing = np.logical_not(i_given)
if i_missing.any():
pressure[i_missing] = vmf1_gridded_pressure(dset)[i_missing]
return pressure | 9305afd5eacd265d774d1607e0d7a5bf530b3ce0 | 1,219 |
def add_gradient_penalty(critic, C_input_gp, C_input_fake):
"""Helper Function: Add gradient penalty to enforce Lipschitz continuity
    Interpolates = Real + alpha * ( Fake - Real )
Parameters
----------
critic : tf.Sequential
Critic neural network
C_input_gp : np.matrix
Critic input for gradient penalty. Mean values of all similar samples
provided by the Sampler.
C_input_fake : tf.Tensor
Critic input Generator(X)
Returns
-------
    tf.Tensor (dtype=tf.float64)
Gradient Penalty
"""
alpha = tf.random.uniform(
shape=[1, int(C_input_fake.shape[1])], minval=0.0, maxval=1.0, dtype=tf.float64
)
interpolates = C_input_gp + alpha * (C_input_fake - C_input_gp)
disc_interpolates = critic(interpolates)
gradients = tf.gradients(disc_interpolates, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients)))
return tf.reduce_mean((slopes - 1) ** 2) | e015cc7d5e168293c2dd741c95b38fc8aac4fbc8 | 1,220 |
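For reference, the scalar returned is the usual WGAN gradient-penalty term, evaluated at a point interpolated between the two critic inputs (here the interpolation weight alpha is drawn once per feature rather than once per sample):

$$ \mathrm{GP} = \big( \lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1 \big)^2, \qquad \hat{x} = x_{gp} + \alpha \, ( x_{fake} - x_{gp} ), \quad \alpha \sim U(0, 1) . $$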
from datetime import datetime
import pytz
def parse_airomon_datetime(airomon_dt: str) -> datetime:
"""Parse string used by airomon and also make timezone aware."""
aileen_tz = pytz.timezone(settings.TIME_ZONE)
try:
dt: datetime = datetime.strptime(airomon_dt, "%Y-%m-%d %H:%M:%S")
dt = dt.astimezone(aileen_tz)
except ValueError:
print(
"%s Warning: could not parse datetime %s, using 1-1-1970 for this one!"
% (settings.TERM_LBL, airomon_dt)
)
dt = datetime(1970, 1, 1, 1, 1, 1, tzinfo=aileen_tz)
return dt | fd91e09ebef4f8af55686d38ef3e763ec546a844 | 1,221 |
from numpy import array, sqrt
def i2nm(i):
"""
Return the n and m orders of the i'th zernike polynomial
========= == == == == == == == == == == == == == == == ===
i 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ...
n-order 0 1 1 2 2 2 3 3 3 3 4 4 4 4 4 ...
m-order 0 -1 1 -2 0 2 -3 -1 1 3 -4 -2 0 2 4 ...
========= == == == == == == == == == == == == == == == ===
"""
ia = array(i)
n = (1 + (sqrt(8 * (ia) + 1) - 3) / 2).astype(int)
ni = n * (n + 1) / 2
m = -n + 2 * (i - ni)
return n, m | 6da858f4cb58a10c480641ce736398d66baf0a7a | 1,222 |
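With the numpy import fixed as above, a quick spot check against the table in the docstring:

from numpy import arange

n, m = i2nm(arange(6))
# n-orders: [0 1 1 2 2 2]
# m-orders: [ 0. -1.  1. -2.  0.  2.]   (floats, because n * (n + 1) / 2 uses true division)
print(n, m)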
from typing import Dict
from typing import Any
def update_ftov_msgs(
ftov_msgs: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update ftov_msgs.
Args:
ftov_msgs: A flat jnp array containing ftov_msgs.
updates: A dictionary containing updates for ftov_msgs
fg_state: Factor graph state
Returns:
A flat jnp array containing updated ftov_msgs.
Raises: ValueError if:
(1) provided ftov_msgs shape does not match the expected ftov_msgs shape.
(2) provided name is not valid for ftov_msgs updates.
"""
for names in updates:
data = updates[names]
if names in fg_state.variable_group.names:
variable = fg_state.variable_group[names]
if data.shape != (variable.num_states,):
raise ValueError(
f"Given belief shape {data.shape} does not match expected "
f"shape {(variable.num_states,)} for variable {names}."
)
var_states_for_edges = np.concatenate(
[
wiring_by_type.var_states_for_edges
for wiring_by_type in fg_state.wiring.values()
]
)
starts = np.nonzero(
var_states_for_edges == fg_state.vars_to_starts[variable]
)[0]
for start in starts:
ftov_msgs = ftov_msgs.at[start : start + variable.num_states].set(
data / starts.shape[0]
)
else:
raise ValueError(
"Invalid names for setting messages. "
"Supported names include a tuple of length 2 with factor "
"and variable names for directly setting factor to variable "
"messages, or a valid variable name for spreading expected "
"beliefs at a variable"
)
return ftov_msgs | f54150e5f310905e37820e225e8d29ab6d9e9717 | 1,223 |
from typing import Optional
def normalize_features(
current: np.ndarray,
previous: Optional[np.ndarray],
normalize_samples: int,
method: str = NORM_METHODS.MEAN.value,
clip: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
"""Normalize features with respect to the past number of normalize_samples.
Parameters
----------
current : numpy array
current features to normalize.
previous : numpy array or None
previous features, not normalized. Used for normalization of current features.
normalize_samples : int
number of past samples considered for normalization
method : str | default is 'mean'
data is normalized via subtraction of the 'mean' or 'median' and
subsequent division by the 'mean' or 'median'. For z-scoring enter
'zscore'.
clip : int | float, optional
value at which to clip on the lower and upper end after normalization.
Useful for artifact rejection and handling of outliers.
Returns
-------
current : numpy array
normalized current features
previous : numpy array
previous features, not normalized.
Raises
------
ValueError
returned if method is not 'mean', 'median' or 'zscore'
"""
if previous is None:
return np.zeros_like(current), current
previous = np.vstack((previous, current))
previous = _transform_previous(
previous=previous, normalize_samples=normalize_samples
)
current, previous = _normalize_and_clip(
current=current,
previous=previous,
method=method,
clip=clip,
description="feature",
)
return current, previous | 8b18f12272eb92ae60631b0c4dcdb138a5596d44 | 1,224 |
def anim(filename, rows: int, cols: int ,
frame_duration: float = 0.1, loop=True) -> Animation:
"""Create Animation object from image of regularly arranged subimages.
+filename+ Name of file in resource directory of image of subimages
regularly arranged over +rows+ rows and +cols+ columns.
    +frame_duration+ Seconds each frame of animation should be displayed.
    +loop+ Whether the animation should repeat.
    """
img = pyglet.resource.image(filename)
image_grid = pyglet.image.ImageGrid(img, rows, cols)
    animation = image_grid.get_animation(frame_duration, loop)
centre_animation(animation)
return animation | 2ced01a961d05e6968c14023a935623bc2011069 | 1,225 |
def filter_factory(global_conf, **local_conf):
"""Standard filter factory to use the middleware with paste.deploy"""
register_swift_info('vertigo')
conf = global_conf.copy()
conf.update(local_conf)
vertigo_conf = dict()
vertigo_conf['devices'] = conf.get('devices', '/srv/node')
vertigo_conf['execution_server'] = conf.get('execution_server')
vertigo_conf['mc_timeout'] = conf.get('mc_timeout', 5)
vertigo_conf['mc_pipe'] = conf.get('mc_pipe', 'vertigo_pipe')
# vertigo_conf['api_pipe'] = conf.get('mc_pipe', 'api_pipe')
vertigo_conf['metadata_visibility'] = conf.get('metadata_visibility', True)
vertigo_conf['mc_dir'] = conf.get('mc_dir', '/home/docker_device/vertigo/scopes')
vertigo_conf['cache_dir'] = conf.get('cache_dir', '/home/docker_device/cache/scopes')
vertigo_conf['mc_container'] = conf.get('mc_container', 'microcontroller')
vertigo_conf['mc_dependency'] = conf.get('mc_dependency', 'dependency')
''' Load storlet parameters '''
configParser = RawConfigParser()
configParser.read(conf.get('__file__'))
storlet_parameters = configParser.items('filter:storlet_handler')
for key, val in storlet_parameters:
vertigo_conf[key] = val
""" Load Storlets Gateway configuration """
configParser = RawConfigParser()
configParser.read(vertigo_conf['storlet_gateway_conf'])
additional_items = configParser.items("DEFAULT")
for key, val in additional_items:
vertigo_conf[key] = val
""" Load Storlets Gateway class """
module_name = vertigo_conf.get('storlet_gateway_module', 'stub')
gateway_class = load_gateway(module_name)
vertigo_conf['storlets_gateway_module'] = gateway_class
"""
Register Lua script to retrieve policies in a single redis call
"""
vertigo_conf['redis_host'] = conf.get('redis_host', 'controller')
vertigo_conf['redis_port'] = int(conf.get('redis_port', 6379))
vertigo_conf['redis_db'] = int(conf.get('redis_db', 0))
if vertigo_conf['execution_server'] == 'proxy':
r = redis.StrictRedis(vertigo_conf['redis_host'],
vertigo_conf['redis_port'],
vertigo_conf['redis_db'])
lua = """
local t = {}
if redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])
elseif redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])
end
return t"""
lua_sha = r.script_load(lua)
vertigo_conf['LUA_get_mc_sha'] = lua_sha
def swift_vertigo(app):
return VertigoHandlerMiddleware(app, global_conf, vertigo_conf)
return swift_vertigo | ca9c72f2237cfb2054ffc2b38038c75c96679ade | 1,226 |
import statistics
def get_review_score_fields(call, proposals):
"""Return a dictionary of the score banner fields in the reviews.
Compute the score means and stdevs. If there are more than two score
fields, then also compute the mean of the means and the stdev of the means.
This is done over all finalized reviews for each proposal.
Store the values in the proposal document.
"""
fields = dict([(f['identifier'], f)
for f in call['review']
if f.get('banner') and f['type'] == constants.SCORE])
for proposal in proposals:
reviews = utils.get_docs_view('reviews', 'proposal',
proposal['identifier'])
# Only include finalized reviews in the calculation.
reviews = [r for r in reviews if r.get('finalized')]
scores = dict([(id, list()) for id in fields])
for review in reviews:
for id in fields:
value = review['values'].get(id)
if value is not None: scores[id].append(float(value))
proposal['scores'] = dict()
for id in fields:
proposal['scores'][id] = d = dict()
d['n'] = len(scores[id])
try:
d['mean'] = round(statistics.mean(scores[id]), 1)
except statistics.StatisticsError:
d['mean'] = None
try:
d['stdev'] = round(statistics.stdev(scores[id]), 1)
except statistics.StatisticsError:
d['stdev'] = None
if len(fields) >= 2:
mean_scores = [d['mean'] for d in proposal['scores'].values()
if d['mean'] is not None]
try:
mean_means = round(statistics.mean(mean_scores), 1)
except statistics.StatisticsError:
mean_means = None
proposal['scores']['__mean__'] = mean_means
try:
stdev_means = round(statistics.stdev(mean_scores), 1)
except statistics.StatisticsError:
stdev_means = None
            proposal['scores']['__stdev__'] = stdev_means
return fields | bda5899a105942a456872d0bacadaf124832cd65 | 1,227 |
def tokenize(text):
"""
Tokenize and normalize
"""
tokens = nltk.word_tokenize(text)
lemmatizer = nltk.WordNetLemmatizer()
clean_tokens = [lemmatizer.lemmatize(w).lower().strip() for w in tokens]
return clean_tokens | 2485181433208ee871e881312400806539d5bc73 | 1,228 |
def _makeSSDF(row, minEvents):
"""
Function to change form of TRDF for subpace creation
"""
index = range(len(row.Clust))
columns = [x for x in row.index if x != 'Clust']
DF = pd.DataFrame(index=index, columns=columns)
DF['Name'] = ['SS%d' % x for x in range(len(DF))] # name subspaces
# Initialize columns for future use
DF['Events'] = object
DF['AlignedTD'] = object
DF['SVD'] = object
DF['UsedSVDKeys'] = object
DF['FracEnergy'] = object
DF['SVDdefined'] = False
DF['SampleTrims'] = [{} for x in range(len(DF))]
    DF['Threshold'] = float
DF['SigDimRep'] = object
DF['FAS'] = object
DF['NumBasis'] = int
DF['Offsets'] = object
DF['Stats'] = object
DF['MPtd'] = object
DF['MPfd'] = object
DF['Channels'] = object
DF['Station'] = row.Station
DF = DF.astype(object)
for ind, row2 in DF.iterrows():
evelist = row.Clust[ind]
evelist.sort()
DF['Events'][ind] = evelist
DF['numEvents'][ind] = len(evelist)
DF['MPtd'][ind] = _trimDict(row, 'MPtd', evelist)
DF['MPfd'][ind] = _trimDict(row, 'MPfd', evelist)
DF['Stats'][ind] = _trimDict(row, 'Stats', evelist)
DF['Channels'][ind] = _trimDict(row, 'Channels', evelist)
# only keep subspaces that meet min req, dont renumber
DF = DF[[len(x) >= minEvents for x in DF.Events]]
# DF.reset_index(drop=True, inplace=True)
return DF | 77fb59b0e385d51d06fac1ff64ba12331d514d1d | 1,229 |
def concatenate_constraints(original_set, additional_set):
"""
Method for concatenating sets of linear constraints.
original_set and additional_set are both tuples of
for (C, b, n_eq). Output is a concatenated tuple of
same form.
All equality constraints are always kept on top.
"""
C_org, b_org, n_org = original_set
C_add, b_add, n_add = additional_set
if n_add > 0:
C_out = np.insert(C_org, n_org, C_add[:n_add, :], axis=0)
C_out = np.concatenate((C_out, C_add[n_add:, :]))
b_out = np.insert(b_org, n_org, b_add[:n_add])
b_out = np.concatenate((b_out, b_add[n_add:]))
else:
C_out = np.concatenate((C_org, C_add))
b_out = np.concatenate((b_org, b_add))
n_out = n_org + n_add
return C_out, b_out, n_out | 6b0cc1c75d00ae7b3737638c45a21bc8c609ddb1 | 1,230 |
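A small worked example (values invented for illustration) showing how the equality rows of the second set are spliced in right after the equality rows of the first, while all inequality rows end up at the bottom:

import numpy as np

# First set: one equality row followed by one inequality row.
C_org = np.array([[1.0, 0.0], [0.0, 1.0]])
b_org = np.array([1.0, 2.0])
# Second set: one equality row followed by one inequality row.
C_add = np.array([[2.0, 2.0], [3.0, 3.0]])
b_add = np.array([4.0, 5.0])

C_out, b_out, n_out = concatenate_constraints((C_org, b_org, 1), (C_add, b_add, 1))
# Row order is now: eq(org), eq(add), ineq(org), ineq(add)
print(C_out)  # [[1. 0.] [2. 2.] [0. 1.] [3. 3.]]
print(b_out)  # [1. 4. 2. 5.]
print(n_out)  # 2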
import signal
def _isDefaultHandler():
"""
Determine whether the I{SIGCHLD} handler is the default or not.
"""
return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL | 10b814bba12c04cbc6fec08c7783581876f56b6b | 1,231 |
import scipy.signal
def downsampling(conversion_rate,data,fs):
"""
    Perform downsampling.
    Takes the conversion rate, the data, and the sampling frequency as input.
    Returns the downsampled data and the new sampling frequency.
    """
    # decide how many samples to drop between kept samples
    decimationSampleNum = conversion_rate-1
    # prepare the FIR low-pass filter
    nyqF = (fs/conversion_rate)/2.0  # Nyquist frequency after conversion
    cF = (fs/conversion_rate/2.0-500.)/nyqF  # cutoff frequency (set slightly below the Nyquist frequency of the converted signal)
    taps = 511  # number of filter taps (must be odd)
    b = scipy.signal.firwin(taps, cF)  # build the low-pass filter
    # filtering
    data = scipy.signal.lfilter(b,1,data)
    # decimation
downData = []
for i in range(0,len(data),decimationSampleNum+1):
downData.append(data[i])
return (downData,fs/conversion_rate) | add6768d32cc7675eaecf1e37c5d901f1244702a | 1,232 |
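A minimal usage sketch for the downsampling helper, using an invented 48 kHz test tone and the scipy.signal import fixed above:

import numpy as np

fs = 48000
t = np.arange(fs) / fs
tone = np.sin(2 * np.pi * 100 * t)          # 1 s of a 100 Hz sine
down, new_fs = downsampling(4, tone, fs)    # keep every 4th sample
print(new_fs)     # 12000.0
print(len(down))  # 12000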
from typing import Union
def get_client_cache_key(
request_or_attempt: Union[HttpRequest, AccessBase], credentials: dict = None
) -> str:
"""
Build cache key name from request or AccessAttempt object.
:param request_or_attempt: HttpRequest or AccessAttempt object
:param credentials: credentials containing user information
:return cache_key: Hash key that is usable for Django cache backends
"""
if isinstance(request_or_attempt, AccessBase):
username = request_or_attempt.username
ip_address = request_or_attempt.ip_address
user_agent = request_or_attempt.user_agent
else:
username = get_client_username(request_or_attempt, credentials)
ip_address = get_client_ip_address(request_or_attempt)
user_agent = get_client_user_agent(request_or_attempt)
filter_kwargs_list = get_client_parameters(username, ip_address, user_agent)
return make_cache_key_list(filter_kwargs_list) | 8d9a128b326a8ab7c320f73f49a313f30d6cd268 | 1,233 |
def loadMaterials(matFile):
"""
Loads materials into Tom's code from external file of all applicable materials.
These are returned as a dictionary.
"""
mats = {}
    name, no, ne, lto, lte, mtype = np.loadtxt(matFile, dtype=str, unpack=True)
    no = np.array(list(map(float, no)))
    ne = np.array(list(map(float, ne)))
    lto = 1.0e-4 * np.array(list(map(float, lto)))
    lte = 1.0e-4 * np.array(list(map(float, lte)))
for (i,n) in enumerate(name):
mats[n] = tm.material(no[i], ne[i], lto[i], lte[i], n, mtype[i])
return mats | 650b04a27741777e5e344696e123d4fd669a5b28 | 1,234 |
def prepend_with_baseurl(files, base_url):
"""prepend url to beginning of each file
Parameters
------
files (list): list of files
base_url (str): base url
Returns
------
list: a list of files with base url pre-pended
"""
return [base_url + file for file in files] | 4c29b3e9230239c1ff8856c707253608ce2503cd | 1,235 |
def streamplot(
x,
y,
u,
v,
p=None,
density=1,
color="#1f77b4",
line_width=None,
alpha=1,
arrow_size=7,
min_length=0.1,
start_points=None,
max_length=4.0,
integration_direction="both",
arrow_level="underlay",
**kwargs,
):
"""Draws streamlines of a vector field.
Parameters
----------
x, y : 1d arrays
an evenly spaced grid.
u, v : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
p : bokeh.plotting.Figure instance, default None
Figure to populate with glyphs. If None, create a new figure.
density : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
is divided into a 30x30 grid---density linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
color : str or 2d array, default '#1f77b4' (Bokeh default color)
Streamline color. When given an array with the same shape as
velocities, color values are converted to colors using cmap.
line_width : numeric or 2d array, default None
vary linewidth when given a 2d array with the same shape as velocities. If None, scale linewidth with speed.
arrow_size : float
Factor scale arrow size.
min_length : float
Minimum length of streamline in axes coordinates.
start_points: Nx2 array
Coordinates of starting points for the streamlines.
In data coordinates, the same as the ``x`` and ``y`` arrays.
max_length : float
Maximum length of streamline in axes coordinates.
integration_direction : ['forward', 'backward', 'both']
Integrate the streamline in forward, backward or both directions.
arrow_level : str
Either 'underlay' or 'overlay'.
kwargs :
All other kwargs are passed to bokeh.plotting.figure() when
generating the figure.
Returns
-------
bokeh.plotting.Figure instance populated with streamplot.
Notes
-----
.. Adapted from matplotlib.streamplot.streamplot.py.
"""
if p is None:
p = _baseplot(p, **kwargs)
# Ensure plot fits stream lines
p.x_range = bokeh.models.Range1d(x[0], x[-1])
p.y_range = bokeh.models.Range1d(y[0], y[-1])
if line_width is None:
# Compute speed
speed = np.sqrt(u ** 2 + v ** 2)
# Make linewidths proportional to speed, with min width 0.5 and max 3
line_width = 0.5 + 2.5 * speed / speed.max()
xs, ys, line_widths, arrowtails, arrowheads = _streamlines(
x,
y,
u,
v,
density=density,
line_width=line_width,
min_length=min_length,
start_points=start_points,
max_length=max_length,
integration_direction=integration_direction,
)
def _draw_arrows():
for tail, head in zip(arrowtails, arrowheads):
p.add_layout(
bokeh.models.Arrow(
line_alpha=0,
                    end=bokeh.models.NormalHead(fill_color=color, line_alpha=0, size=arrow_size),
x_start=tail[0],
y_start=tail[1],
x_end=head[0],
y_end=head[1],
)
)
if arrow_level == "underlay":
_draw_arrows()
p.multi_line(xs, ys, color=color, line_width=line_widths, line_alpha=alpha)
else:
p.multi_line(xs, ys, color=color, line_width=line_widths, line_alpha=alpha)
_draw_arrows()
return p | 01a35e266e14ac7228734ddb9d673e2f789d3e08 | 1,236 |
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[start:stop]
if not include_right_boundary:
right_index = result.index.get_slice_bound(stop, 'left', 'loc')
result = result.iloc[:right_index]
return result | 1cc3c2b507ed18d18659fb097765ab450972c05a | 1,237 |
def compare_system_and_attributes_faulty_systems(self):
"""compare systems and associated attributes"""
# compare - systems / attributes
self.assertTrue(System.objects.filter(system_name='system_csv_31_001').exists())
self.assertTrue(System.objects.filter(system_name='system_csv_31_003').exists())
self.assertTrue(System.objects.filter(system_name='system_csv_31_006').exists())
# compare - systems / attributes
self.assertEqual(
System.objects.get(system_name='system_csv_31_001').analysisstatus,
Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_003').analysisstatus,
Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_006').analysisstatus,
Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_001').systemstatus,
Systemstatus.objects.get(systemstatus_name='systemstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_003').systemstatus,
Systemstatus.objects.get(systemstatus_name='systemstatus_1'),
)
self.assertEqual(
System.objects.get(system_name='system_csv_31_006').systemstatus,
Systemstatus.objects.get(systemstatus_name='systemstatus_1'),
)
# return to test function
return self | 459b853eba3a2705450ac9a33fe14844940bf4c8 | 1,238 |
def get_regions(contig,enzymes):
"""return loci with start and end locations"""
out_sites = []
enz_1 = [enz for enz in Restriction.AllEnzymes if "%s"%enz == enzymes[0]][0]
enz_2 = [enz for enz in Restriction.AllEnzymes if "%s"%enz == enzymes[1]][0]
enz_1_sites = enz_1.search(contig.seq)
enz_2_sites = enz_2.search(contig.seq)
combined_sites = sorted(enz_1_sites + enz_2_sites)
for i in range(len(combined_sites)):
site_A = combined_sites[i]
try:
site_B = combined_sites[i+1]
except IndexError:
break
if site_B - site_A < 30:
continue
if site_A in enz_1_sites and site_B in enz_2_sites:
out_sites.append((site_A + 1, site_B - len(enz_2.site)))
elif site_A in enz_2_sites and site_B in enz_1_sites:
out_sites.append((site_A + 1, site_B - len(enz_1.site)))
return out_sites | b34cab04e0b790b01418555c74c4d810b7184e47 | 1,239 |
def getHighContrast(j17, j18, d17, d18):
"""
contrast enhancement through stacking
"""
summer = j17 + j18
summer = summer / np.amax(summer)
winter = d17 + d18
winter = winter / np.amax(winter)
diff = winter * summer
return diff | cd462aac5c0568f84d64f3020718ec601063044f | 1,240 |
def get_bounding_box(dataframe, dataIdentifier):
"""Returns the rectangle in a format (min_lat, max_lat, min_lon, max_lon)
which bounds all the points of the ´dataframe´.
Parameters
----------
dataframe : pandas.DataFrame
the dataframe with the data
dataIdentifier : DataIdentifier
the identifier of the dataframe to be used
"""
b_box = (getattr(dataframe, dataIdentifier.latitude).min(),
getattr(dataframe, dataIdentifier.latitude).max(),
getattr(dataframe, dataIdentifier.longitude).min(),
getattr(dataframe, dataIdentifier.longitude).max())
return b_box | 6989118af8db36cc38fd670f5cd7506859d2150e | 1,241 |
def get_file_download_response(dbfile):
"""
Create the HttpResponse for serving a file.
The file is not read our output - instead, by setting `X-Accel-Redirect`-
header, the web server (nginx) directly serves the file.
"""
mimetype = dbfile.mimeType
response = HttpResponse(content_type=mimetype)
response["Content-Disposition"] = "inline; filename={0}".format(
to_safe_name(dbfile.name)
)
response['X-Accel-Redirect'] = "/{0}".format(dbfile.path)
return response | 93f7e57daaec6a11e5241682ba976e8d68a91acf | 1,242 |
import time
def keyWait():
"""Waits until the user presses a key.
Then returns a L{KeyDown} event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
@rtype: L{KeyDown}
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
return KeyDown('F4', '', True, False, True, False, False)
time.sleep(.001) | 60ee6ad29c215585aef03237a23f15581deb8f5e | 1,243 |
from datetime import datetime
def create_comentarios_instancia(id_instancia):
"""
@retorna un ok en caso de que se halla ejecutado la operacion
@except status 500 en caso de presentar algun error
"""
if request.method == 'POST':
try:
values = json.loads( request.data.decode('8859') )
mensaje = values['com_mensaje']
autor = values['com_usuario']
fecha = datetime.today()
comentario = comentarios_instancia_curso(instancias_curso_id = id_instancia , mensaje = mensaje , autor = autor, fecha = fecha)
session.add(comentario)
session.commit()
        except Exception as e:
session.rollback()
return "Operacion No se pudo llevar a cabo", 500
return "ok"
else:
return "Operacion No se pudo llevar a cabo", 500 | 58a49f4c76976bf0a13f07f6e6de73f358f34e4a | 1,244 |
import argparse
def callEvalGen(args: argparse.Namespace):
"""
Method for evaluation of the keywords generation task on the Inspec dataset.
:param args: User arguments.
:type args: argparse.Namespace
"""
return evalOn(args, "uncontr") | 50af90b6ec0bbbe745db0530715b27c780e74a48 | 1,245 |
async def osfrog(msg, mobj):
"""
Patch 7.02: help string was removed from Captain's Mode
"""
osfrogs = [
"Added Monkey King to the game",
"Reduced Lone Druid's respawn talent -50s to -40s",
]
return await client.send_message(mobj.channel, choice(osfrogs)) | f1b5907cad42d7e6d6021e447ab8cd6dd91429e5 | 1,246 |
def _add_normalizing_vector_point(mesh, minpt, maxpt):
"""
This function allows you to visualize all meshes in their size relative to each other
It is a quick simple hack: by adding 2 vector points at the same x coordinates at the
extreme left and extreme right of the largest .stl mesh, all the meshes are displayed
with the same scale.
input: [mesh], minpoint coordinates, maxpoint coordinates
output: [mesh] with 2 added coordinate points
"""
newmesh = Mesh(np.zeros(mesh.vectors.shape[0]+2, dtype=Mesh.dtype))
# newmesh.vectors = np.vstack([mesh.vectors,
# np.array([ [[0,maxpt,0], [0,maxpt,0], [0,maxpt,0]],
# [[0,minpt,0], [0,minpt,0], [0,minpt,0]] ], float) ])
newmesh.vectors = np.vstack([mesh.vectors,
np.array([ [[0,0,maxpt], [0,0,maxpt], [0,0,maxpt]],
[[0,0,minpt], [0,0,minpt], [0,0,minpt]] ], float) ])
return newmesh | a60e1f0dd4bc6c60e40096cb4412b47a5a3d139a | 1,247 |
import numpy
def radii_ratio(collection):
"""
The Flaherty & Crumplin (1992) index, OS_3 in Altman (1998).
The ratio of the radius of the equi-areal circle to the radius of the MBC
"""
ga = _cast(collection)
r_eac = numpy.sqrt(pygeos.area(ga) / numpy.pi)
r_mbc = pygeos.minimum_bounding_radius(ga)
return r_eac / r_mbc | 16008df64f999b615f855e92f727638813434e98 | 1,248 |
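In formula form, with $A$ the shape's area and $r_{MBC}$ the radius of its minimum bounding circle, the index computed above is:

$$ OS_3 = \frac{r_{EAC}}{r_{MBC}} = \frac{\sqrt{A / \pi}}{r_{MBC}} . $$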
import datetime
import jwt
def create_jwt(project_id, private_key_file, algorithm):
"""Create a JWT (https://jwt.io) to establish an MQTT connection."""
token = {
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
'aud': project_id
}
with open(private_key_file, 'r') as f:
private_key = f.read()
print('Creating JWT using {} from private key file {}'.format(algorithm, private_key_file))
return jwt.encode(token, private_key, algorithm=algorithm) | e365cc67a3587a64b38276ce95e8e0c389e54314 | 1,249 |
def games(engine1, engine2, number_of_games):
"""Let engine1 and engine2 play several games against each other.
    Each engine begins every second game."""
engine1_wins = 0
engine2_wins = 0
draws = 0
for n in range(number_of_games):
if n % 2:
result = game(engine1, engine2, True)
else:
result = game(engine1, engine2, False)
if result == "engine1":
engine1_wins += 1
elif result == "engine2":
engine2_wins += 1
else:
draws += 1
return ("engine1 wins: " + str(engine1_wins) +
" engine2 wins: " + str(engine2_wins) + " draws: " + str(draws)) | f520a08214d1ad063f747b01582b2dbfc94d5d9e | 1,250 |
def tissue2line(data, line=None):
"""tissue2line
Project tissue probability maps to the line by calculating the probability of each tissue type in each voxel of the 16x720 beam and then average these to get a 1x720 line. Discrete tissues are assigned by means of the highest probability of a particular tissue type.
Parameters
----------
data: list,numpy.ndarray,str
for tissue data: list of three numpy array/nifti images/strings describing the probability of white matter/gray matter and CSF
line: str,nibabel.Nifti1Image,numpy.ndarray
used for the direction of the line and should have the same dimensions as `data`. Generally this is the output from create_line_from_slice
Returns
----------
numpy.ndarray
(1,720) array of your `data` in the line
"""
# load in reference line data
if isinstance(line, str):
ref = nb.load(line).get_fdata()
elif isinstance(line, nb.Nifti1Image):
ref = line.get_fdata()
elif isinstance(line, np.ndarray):
ref = line
else:
raise ValueError("Unknown input type for line; should be a string, nifti-image, or numpy array")
if isinstance(data, list):
# we have receive a list, assuming tissue probability maps.
if len(data) > 3:
raise ValueError(f'Data contains {len(data)} items, this should be three: 1) WM prob, 2) GM prob, 3) CSF prob')
if isinstance(data[0], str):
input = [nb.load(i).get_fdata() for i in data]
elif isinstance(data[0], nb.Nifti1Image):
input = [i.get_fdata() for i in data]
elif isinstance(data[0], np.ndarray):
input = data
# remove existing 4th dimension
        input = [np.squeeze(i, axis=3) if len(i.shape) == 4 else i for i in input]
for i in input:
if i.shape != ref.shape:
raise ValueError(f"Dimensions of line [{ref.shape}] do not match dimension of input seg [{i.shape}]")
# put wm/gm/csf in three channels of a numpy array
prob_stack = np.dstack([input[0],input[1],input[2]])
prob_stack_avg = np.average(prob_stack, axis=1)
# normalize averages between 0-1
scaler = MinMaxScaler()
scaler.fit(prob_stack_avg)
avg_norm = scaler.transform(prob_stack_avg)
output = []
lut = {'wm':2,'gm':1,'csf':0}
# avg_norm has 3 columns; 1st = WM, 2nd = GM, 3rd = CSF
for i,r in enumerate(avg_norm):
max_val = np.amax(r)
# check tissue type only if non-zero value. If all probabilities are 0 is should be set to zero regardless
if max_val == 0:
output.append(lut['csf'])
else:
# make list of each row for nicer indexing
idx = list(r).index(max_val)
if idx == 0:
# type = 'wm' = '1' in nighres segmentation
output.append(lut['wm'])
elif idx == 1:
# type = 'gm' = '2' in nighres segmentation
output.append(lut['gm'])
elif idx == 2:
# type = 'csf' = '0' in nighres segmentation
output.append(lut['csf'])
output = np.array(output)[:,np.newaxis]
return output | 3966f789e1093e11e4b31deda0f7d43f753007b0 | 1,251 |
def get_version(pyngrok_config=None):
"""
Get a tuple with the ``ngrok`` and ``pyngrok`` versions.
:param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,
overriding :func:`~pyngrok.conf.get_default()`.
:type pyngrok_config: PyngrokConfig, optional
:return: A tuple of ``(ngrok_version, pyngrok_version)``.
:rtype: tuple
"""
if pyngrok_config is None:
pyngrok_config = conf.get_default()
ngrok_version = process.capture_run_process(pyngrok_config.ngrok_path, ["--version"]).split("version ")[1]
return ngrok_version, __version__ | 256216926a6c91a8000f8b823202cde576af3a67 | 1,252 |
def stat_cleaner(stat: str) -> int:
"""Cleans and converts single stat.
Used for the tweets, followers, following, and likes count sections.
Args:
stat: Stat to be cleaned.
Returns:
A stat with commas removed and converted to int.
"""
return int(stat.replace(",", "")) | cb6b6035ab21871ca5c00d5d39d9efe87e0acc89 | 1,253 |
import os
import json
def load_image_ids(img_root, split_dir):
"""images in the same directory are in the same split"""
pathXid = []
img_root = os.path.join(img_root, split_dir)
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
if split_dir == 'val2014':
print("Place the features of minival in the front of val2014 tsv.")
# Put the features of 5000 minival images in front.
minival_img_ids = set(json.load(open('data/mscoco_imgfeat/coco_minival_img_ids.json')))
a, b = [], []
for item in pathXid:
img_id = item[1]
if img_id in minival_img_ids:
a.append(item)
else:
b.append(item)
assert len(a) == 5000
assert len(a) + len(b) == len(pathXid)
pathXid = a + b
assert len(pathXid) == 40504
return pathXid | 485675489805a0e3f4ac4dcab2eca8a40992c044 | 1,254 |
def do(ARGV):
"""Allow to check whether the exception handlers are all in place.
"""
if len(ARGV) != 3: return False
elif ARGV[1] != "<<TEST:Exceptions/function>>" \
and ARGV[1] != "<<TEST:Exceptions/on-import>>": return False
if len(ARGV) < 3: return False
exception = ARGV[2]
if exception == "KeyboardInterrupt": raise KeyboardInterrupt()
elif exception == "AssertionError": raise AssertionError()
elif exception == "Exception": raise Exception()
# If we did not raise an exception here, we didn't do anything
print("No exception was triggered.")
return False | 56b83d119f74a00f1b557c370d75fb9ff633d691 | 1,255 |
def get_available_language_packs():
"""Get list of registered language packs.
:return list:
"""
ensure_autodiscover()
return [val for (key, val) in registry.registry.items()] | faf3c95ff808c1e970e49c56feb5ad1f61623053 | 1,256 |
import ctypes
def topo_star(jd_tt, delta_t, star, position, accuracy=0):
"""
Computes the topocentric place of a star at 'date', given its
catalog mean place, proper motion, parallax, and radial velocity.
Parameters
----------
jd_tt : float
TT Julian date for topocentric place.
delta_t : float
Difference TT-UT1 at 'date', in seconds of time.
star : CatEntry
Instance of CatEntry type object containing catalog data for
the object in the ICRS.
position : OnSurface
Instance of OnSurface type object specifying the position of
the observer.
accuracy : {0, 1}, optional
Code specifying the relative accuracy of the output
position.
= 0 ... full accuracy (default)
= 1 ... reduced accuracy
Returns
-------
(ra, dec) : tuple of floats
Topocentric (right ascension in hours, declination in
degrees), referred to true equator and equinox of date
'jd_tt'.
References
----------
.. [R1] Bangert, J. et. al. (2011), 'User's Guide to NOVAS
Version C3.1', C62-C63.
.. [R2] Explanatory Supplement to the Astronomical Almanac
(1992), Chapter 3.
"""
if jd_tt < 0.0:
raise ValueError(_neg_err.format(name='jd_tt'))
if accuracy not in [0, 1]:
raise ValueError(_option_err.format(name='accuracy', allowed=[0, 1]))
_topo_star = novaslib.topo_star
_topo_star.argtypes = (ctypes.c_double, ctypes.c_double,
ctypes.POINTER(CatEntry), ctypes.POINTER(OnSurface),
ctypes.c_short, ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double))
_topo_star.restype = ctypes.c_short
_topo_star.errcheck = _check_c_errors
_topo_star.c_errors = {
1: (ValueError, "from C function 'topo_star': Invalid value of 'where' in ctypes.Structure 'location'"),
11: (ValueError, "from C function 'make_object': invalid value of 'type'"),
12: (ValueError, "from C function 'make_object': 'number' out of range"),
13: (InitializationError, "from C function 'make_object': Initialization of 'cel_obj' failed (object name)."),
14: (InitializationError, "from C function 'make_object': Initialization of 'cel_obj' failed (catalog name)."),
15: (ValueError, "from C function 'make_object': 'name' is out of string bounds."),
21: (ValueError, "from C function 'place': invalid value of 'coord_sys'"),
22: (ValueError, "from C function 'place': invalid value of 'accuracy'"),
23: (ValueError, "from C function 'place': Earth is the observed object, and the observer is either at the geocenter or on the Earth's surface (not permitted)")
}
ra = ctypes.c_double()
dec = ctypes.c_double()
_topo_star(jd_tt, delta_t, ctypes.byref(star), ctypes.byref(position),
accuracy, ctypes.byref(ra), ctypes.byref(dec))
return (ra.value, dec.value) | fba937116b5f63b450fb028cc68a26e0e10305ae | 1,257 |
def py_multiplicative_inverse(a, n):
"""Multiplicative inverse of a modulo n (in Python).
Implements extended Euclidean algorithm.
Args:
a: int-like np.ndarray.
n: int.
Returns:
Multiplicative inverse as an int32 np.ndarray with same shape as a.
"""
batched_a = np.asarray(a, dtype=np.int32)
n = np.asarray(n, dtype=np.int32)
batched_inverse = []
for a in np.nditer(batched_a):
inverse = 0
new_inverse = 1
remainder = n
new_remainder = a
while new_remainder != 0:
quotient = remainder // new_remainder
(inverse, new_inverse) = (new_inverse, inverse - quotient * new_inverse)
(remainder, new_remainder) = (new_remainder,
remainder - quotient * new_remainder)
if remainder > 1:
raise ValueError(
'Inverse for {} modulo {} does not exist.'.format(a, n))
if inverse < 0:
inverse += n
batched_inverse.append(inverse)
return np.asarray(batched_inverse, dtype=np.int32).reshape(batched_a.shape) | 87f4e21f9f8b5a9f10dbf4ec80128a37c1fa912c | 1,258 |
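A quick usage check, using the same `np` alias the snippet already relies on; 3 * 5 = 15 ≡ 1 (mod 7), so 3 and 5 are each other's inverses modulo 7:

print(py_multiplicative_inverse(np.array([3, 5]), 7))  # [5 3]
# Non-invertible inputs (gcd(a, n) != 1), e.g. a=4, n=8, raise ValueError.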
import logging
import sys
def _get_simconffile(args):
"""
Get experiment config file name from command line
"""
logger = logging.getLogger('fms')
try:
simconffile = args[1]
except IndexError:
logger.critical("Missing simulation config file name.")
sys.exit(2)
return simconffile | c71e0c58fa5929051f66836ad8fc45c361f94791 | 1,259 |
def resample_nearest_neighbour(input_tif, extents, new_res, output_file):
"""
Nearest neighbor resampling and cropping of an image.
:param str input_tif: input geotiff file path
:param list extents: new extents for cropping
:param float new_res: new resolution for resampling
:param str output_file: output geotiff file path
:return: dst: resampled image
:rtype: ndarray
"""
dst, resampled_proj, src, _ = _crop_resample_setup(extents, input_tif,
new_res, output_file)
# Do the work
gdal.ReprojectImage(src, dst, '', resampled_proj,
gdalconst.GRA_NearestNeighbour)
return dst.ReadAsArray() | 107bcb72aff9060d024ff00d86b164cf41078630 | 1,260 |
def harvester_api_info(request, name):
"""
    This function returns the pretty-rendered
    API help text of a harvester.
"""
harvester = get_object_or_404(Harvester, name=name)
api = InitHarvester(harvester).get_harvester_api()
response = api.api_infotext()
content = response.data[harvester.name].replace('\n', '<br>')
return HttpResponse(content, content_type='text/plain') | 6b02168d7c77414c57ca74104ff93dae1e698e30 | 1,261 |
import sqlite3
def init_db():
"""Open SQLite database, create facebook table, return connection."""
db = sqlite3.connect('facebook.sql')
cur = db.cursor()
cur.execute(SQL_CREATE)
db.commit()
cur.execute(SQL_CHECK)
parse = list(cur.fetchall())[0][0] == 0
return db, cur, parse | 61d8cc968c66aaddfc55ef27ee02dec13c4b28f2 | 1,262 |
import subprocess
def prime_gen():
"""Returns prime based on 172 bit range. Results is 44 char"""
x = subprocess.run(
['openssl', 'prime', '-generate', '-bits', '172', '-hex'],
stdout=subprocess.PIPE)
return x.stdout[:-1] | 24c38f0c183367bac9e1a2e04f11d0d58dd503ab | 1,263 |
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None | bf6bc2f7b0a7bb9eaa23a0c28686bfe16a8e3ced | 1,264 |
def module_for_category( category ):
"""Return the OpenGL.GL.x module for the given category name"""
if category.startswith( 'VERSION_' ):
name = 'OpenGL.GL'
else:
owner,name = category.split( '_',1)
if owner.startswith( '3' ):
owner = owner[1:]
name = 'OpenGL.GL.%s.%s'%( owner,name )
return __import__( name, {}, {}, name.split( '.' )) | 0e88467a1dd7f5b132d46a9bdc99765c274f69f3 | 1,265 |
import os
def check_file_location(file_path, function, file_ext='', exists=False):
"""Function to check whether a file exists and has the correct file extension"""
folder, file, ext = '', '', ''
if file_path == '':
exit_prompt('Error: Could not parse path to {} file'.format(function))
try:
file, ext = os.path.splitext(os.path.basename(file_path))
folder = os.path.dirname(file_path)
except:
exit_prompt('Error: Could not parse path to {} file'.format(function))
if file_ext != '' and ext != file_ext:
exit_prompt('Error: The {} file should have the extension {}'.format(function, file_ext))
if exists and not os.path.isfile(os.path.join(folder, file + ext)):
exit_prompt('Error: The specified {} file cannot be found'.format(function))
return folder, file, ext | 5bdca274d5fe916c73be97b40c0ffcbdebeaacfe | 1,266 |
def timestamp() -> str:
"""generate formatted timestamp for the invocation moment"""
return dt.now().strftime("%d-%m-%Y %H:%M:%S") | 4f5e3de7f8d0027a210055850c4fa2b4764a39b2 | 1,267 |
def sde(trains, events=None, start=0 * pq.ms, stop=None,
kernel_size=100 * pq.ms, optimize_steps=0,
minimum_kernel=10 * pq.ms, maximum_kernel=500 * pq.ms,
kernel=None, time_unit=pq.ms, progress=None):
""" Create a spike density estimation plot.
The spike density estimations give an estimate of the instantaneous
rate. Optionally finds optimal kernel size for given data.
:param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists.
:param dict events: A dictionary (with the same indices as ``trains``)
of Event objects or lists of Event objects. In case of lists,
the first event in the list will be used for alignment. The events
will be at time 0 on the plot. If None, spike trains are used
unmodified.
:param start: The desired time for the start of the first bin. It
will be recalculated if there are spike trains which start later
than this time. This parameter can be negative (which could be
useful when aligning on events).
:type start: Quantity scalar
:param stop: The desired time for the end of the last bin. It will
be recalculated if there are spike trains which end earlier
than this time.
:type stop: Quantity scalar
:param kernel_size: A uniform kernel size for all spike trains.
Only used if optimization of kernel sizes is not used (i.e.
``optimize_steps`` is 0).
:type kernel_size: Quantity scalar
:param int optimize_steps: The number of different kernel sizes tried
between ``minimum_kernel`` and ``maximum_kernel``.
If 0, ``kernel_size`` will be used.
:param minimum_kernel: The minimum kernel size to try in optimization.
:type minimum_kernel: Quantity scalar
:param maximum_kernel: The maximum kernel size to try in optimization.
:type maximum_kernel: Quantity scalar
:param kernel: The kernel function or instance to use, should accept
two parameters: A ndarray of distances and a kernel size.
The total area under the kernel function should be 1.
Automatic optimization assumes a Gaussian kernel and will
likely not produce optimal results for different kernels.
Default: Gaussian kernel
:type kernel: func or :class:`spykeutils.signal_processing.Kernel`
:param Quantity time_unit: Unit of X-Axis.
:param progress: Set this parameter to report progress.
:type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
"""
if not progress:
progress = ProgressIndicator()
start.units = time_unit
if stop:
stop.units = time_unit
kernel_size.units = time_unit
minimum_kernel.units = time_unit
maximum_kernel.units = time_unit
if kernel is None:
kernel = signal_processing.GaussianKernel(100 * pq.ms)
# Align spike trains
for u in trains:
if events:
trains[u] = rate_estimation.aligned_spike_trains(
trains[u], events)
# Calculate spike density estimation
if optimize_steps:
steps = sp.logspace(sp.log10(minimum_kernel),
sp.log10(maximum_kernel),
optimize_steps) * time_unit
sde, kernel_size, eval_points = \
rate_estimation.spike_density_estimation(
trains, start, stop,
optimize_steps=steps, kernel=kernel,
progress=progress)
else:
sde, kernel_size, eval_points = \
rate_estimation.spike_density_estimation(
trains, start, stop,
kernel_size=kernel_size, kernel=kernel,
progress=progress)
progress.done()
if not sde:
raise SpykeException('No spike trains for SDE!')
# Plot
win_title = 'Kernel Density Estimation'
win = PlotDialog(toolbar=True, wintitle=win_title)
pW = BaseCurveWidget(win)
plot = pW.plot
plot.set_antialiasing(True)
for u in trains:
if u and u.name:
name = u.name
else:
name = 'Unknown'
curve = make.curve(
eval_points, sde[u],
title='%s, Kernel width %.2f %s' %
(name, kernel_size[u], time_unit.dimensionality.string),
color=helper.get_object_color(u))
plot.add_item(curve)
plot.set_axis_title(BasePlot.X_BOTTOM, 'Time')
plot.set_axis_unit(BasePlot.X_BOTTOM, eval_points.dimensionality.string)
plot.set_axis_title(BasePlot.Y_LEFT, 'Rate')
plot.set_axis_unit(BasePlot.Y_LEFT, 'Hz')
l = make.legend()
plot.add_item(l)
win.add_plot_widget(pW, 0)
win.add_custom_curve_tools()
win.add_legend_option([l], True)
win.show()
return win | 0b045ec676a9c31f4e0f89361d5ff8c13a238624 | 1,268 |
def content(obj):
"""Strip HTML tags for list display."""
return strip_tags(obj.content.replace('</', ' </')) | 413eed5f6b9ede0f31ede6a029e111a2910cc805 | 1,269 |
def flux(Q, N, ne, Ap, Am):
"""
calculates the flux between two boundary sides of
connected elements for element i
"""
# for every element we have 2 faces to other elements (left and right)
out = np.zeros((ne, N + 1, 2))
# Calculate Fluxes inside domain
for i in range(1, ne - 1):
out[i, 0, :] = Ap @ (-Q[i - 1, N, :]) + Am @ (-Q[i, 0, :])
out[i, N, :] = Ap @ (Q[i, N, :]) + Am @ (Q[i + 1, 0, :])
# Boundaries
# Left
    out[0, 0, :] = Ap @ np.array([0, 0]) + Am @ (-Q[0, 0, :])
out[0, N, :] = Ap @ (Q[0, N, :]) + Am @ (Q[1, 0, :])
# Right
out[ne - 1, 0, :] = Ap @ (-Q[ne - 2, N, :]) + Am @ (-Q[ne - 1, 0, :])
out[ne - 1, N, :] = Ap @ (Q[ne - 1, N, :]) + Am @ np.array([0, 0])
return out | decc1b84cd0f23ac7f437d2c47e76cf6ed961a28 | 1,270 |
import shutil
def cp_dir(src_dir, dest_dir):
"""Function: cp_dir
Description: Copies a directory from source to destination.
Arguments:
(input) src_dir -> Source directory.
(input) dest_dir -> Destination directory.
(output) status -> True|False - True if copy was successful.
(output) err_msg -> Error message from copytree exception or None.
"""
status = True
err_msg = None
try:
shutil.copytree(src_dir, dest_dir)
# Directory permission error.
except shutil.Error as err:
err_msg = "Directory not copied. Perms Error Message: %s" % (err)
status = False
# Directory does not exist.
except OSError as err:
err_msg = "Directory not copied. Exist Error Message: %s" % (err)
status = False
return status, err_msg | 13f82a485fb46e102780c2462f0ab092f0d62df1 | 1,271 |
import torch
import torch.nn.functional as F
def listnet_loss(y_i, z_i):
"""
y_i: (n_i, 1)
z_i: (n_i, 1)
"""
P_y_i = F.softmax(y_i, dim=0)
P_z_i = F.softmax(z_i, dim=0)
    return - torch.sum(P_y_i * torch.log(P_z_i)) | c2b7dd9800ed591af392b17993c70b443f99524c | 1,272 |
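With the fix above (using `P_y_i` rather than the raw scores), the returned value is the ListNet top-one cross entropy between the two score lists:

$$ L(y, z) = -\sum_i P_y(i) \, \log P_z(i), \qquad P_y(i) = \frac{e^{y_i}}{\sum_j e^{y_j}} . $$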
def get_volume_parameters(volumes):
"""Create pipeline parameters for volumes to be mounted on pipeline steps.
Args:
volumes: a volume spec
Returns (dict): volume pipeline parameters
"""
volume_parameters = dict()
for v in volumes:
if v['type'] == 'pv':
# FIXME: How should we handle existing PVs?
continue
if v['type'] == 'pvc':
mount_point = v['mount_point'].replace('/', '_').strip('_')
par_name = "vol_{}".format(mount_point)
volume_parameters[par_name] = ('str', v['name'])
elif v['type'] == 'new_pvc':
rok_url = v['annotations'].get("rok/origin")
if rok_url is not None:
par_name = "rok_{}_url".format(v['name'].replace('-', '_'))
volume_parameters[par_name] = ('str', rok_url)
else:
raise ValueError("Unknown volume type: {}".format(v['type']))
return volume_parameters | dbc76312732666c0aeacb4f6cf4338cf7a308097 | 1,273 |
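# Usage sketch for get_volume_parameters(); the volume specs below are invented
# for illustration and only contain the keys the function actually reads.
volumes = [
    {"type": "pvc", "name": "data-claim", "mount_point": "/data/input"},
    {"type": "new_pvc", "name": "scratch-vol",
     "annotations": {"rok/origin": "http://rok.example/snapshot/123"}},
]
print(get_volume_parameters(volumes))
# {'vol_data_input': ('str', 'data-claim'),
#  'rok_scratch_vol_url': ('str', 'http://rok.example/snapshot/123')}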
import numpy as np

def normalize(data, **kw):
    """Calculates the normalization of the given array. The normalized
    array is returned as a new array.
Args:
data The data to be normalized
Kwargs:
upper_bound The upper bound of the normalization. It has the value
of 1 by default.
lower_bound The lower bound to be used for normalization. It has the
value of 0 by default
dtype The type of the returned ndarray. If the dtype given is an
integer type the returned array values will be truncated after
normalized.
Returns:
        An instance of np.array with normalized values
"""
upper_bound = 1
lower_bound = 0
dtype = np.float64
if 'upper_bound' in kw:
upper_bound = kw['upper_bound']
if 'lower_bound' in kw:
lower_bound = kw['lower_bound']
if 'dtype' in kw:
dtype = kw['dtype']
check_ndarray(data)
newdata = data - data.min()
newdata = newdata / newdata.max()
newdata = newdata * (upper_bound - lower_bound)
newdata += lower_bound
return newdata.astype(dtype) | 2f6f1a28a5bac4eee221923465a022c79ec185af | 1,274 |
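# Usage sketch for normalize(); assumes the external check_ndarray() helper used
# above is available alongside it.
import numpy as np

data = np.array([2.0, 4.0, 6.0, 10.0])
print(normalize(data, lower_bound=-1, upper_bound=1))
# [-1.  -0.5  0.   1. ]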
import numpy as np

def relative_sse(cp_tensor, X, sum_squared_X=None):
"""Compute the relative sum of squared error for a given cp_tensor.
Parameters
----------
cp_tensor : CPTensor or tuple
TensorLy-style CPTensor object or tuple with weights as first
argument and a tuple of components as second argument
X : ndarray
Tensor approximated by ``cp_tensor``
sum_squared_X: float (optional)
If ``sum(X**2)`` is already computed, you can optionally provide it
using this argument to avoid unnecessary recalculation.
Returns
-------
float
The relative sum of squared error, ``sum((X_hat - X)**2)/sum(X**2)``,
where ``X_hat`` is the dense tensor represented by ``cp_tensor``
Examples
--------
Below, we create a random CP tensor and a random tensor and compute
the sum of squared error for these two tensors.
>>> import tensorly as tl
>>> from tensorly.random import random_cp
>>> from component_vis.model_evaluation import relative_sse
>>> rng = tl.check_random_state(0)
>>> cp = random_cp((4, 5, 6), 3, random_state=rng)
>>> X = rng.random_sample((4, 5, 6))
>>> relative_sse(cp, X)
0.4817407254961442
"""
# TODO: tests for relative_sse
    if sum_squared_X is None:
        sum_squared_X = np.sum(X ** 2)
    return sse(cp_tensor, X) / sum_squared_X | 7effab67cf97452956d3c1c4503c8480fa923a15 | 1,275
import numpy as np

def cc_across_time(tfx, tfy, cc_func, cc_args=()):
"""Cross correlations across time.
Args:
tfx : time-frequency domain signal 1
tfy : time-frequency domain signal 2
cc_func : cross correlation function.
cc_args : list of extra arguments of cc_func.
Returns:
cc_atime : cross correlation at different time.
Note:
If tfx and tfy are not of the same length, the result will be
truncated to the shorter one.
"""
return np.array([cc_func(x, y, *cc_args) for x, y in zip(tfx, tfy)]) | c22670b2f722884b048758dbc20df3bc58cd9b0f | 1,276 |
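# Usage sketch for cc_across_time() with a plain Pearson correlation per time frame.
import numpy as np

rng = np.random.default_rng(1)
tfx = rng.standard_normal((10, 64))           # 10 time frames, 64 frequency bins
tfy = rng.standard_normal((10, 64))
cc_atime = cc_across_time(tfx, tfy, lambda x, y: np.corrcoef(x, y)[0, 1])
print(cc_atime.shape)                         # (10,)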
import chardet
def predict_encoding(file_path, n_lines=20):
"""Predict a file's encoding using chardet"""
# Open the file as binary data
with open(file_path, "rb") as f:
# Join binary lines for specified number of lines
rawdata = b"".join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)["encoding"] | 1ccef9982846fe0c88124b9e583cf68be070e63a | 1,277 |
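# Usage sketch for predict_encoding(): write a throwaway UTF-8 file and inspect it.
import tempfile

with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8",
                                 suffix=".txt", delete=False) as tmp:
    tmp.write("héllo wörld\n" * 5)
print(predict_encoding(tmp.name))             # e.g. 'utf-8'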
def redirect_handler(url, client_id, client_secret, redirect_uri, scope):
"""
Convenience redirect handler.
Provide the redirect url (containing auth code)
along with client credentials.
Returns a spotify access token.
"""
auth = ExtendedOAuth(
client_id, client_secret, redirect_uri, scope=scope)
code = auth.parse_response_code(url)
token = auth.get_access_token(code)
return token | c682af3d7da51afdcba9a46aa4b44dd983d3fe40 | 1,278 |
def convert_coordinate(coordinate):
"""
:param coordinate: str - a string map coordinate
    :return: tuple - the string coordinate separated into its individual components.
"""
coord = (coordinate[0], coordinate[1])
return coord | a3852f5b4e4faac066c8f71e945ed7f46fbf2509 | 1,279 |
import os
import re

import geopandas
import pandas as pd
import rasterstats
from geopandas.tools import sjoin
def tree_falls(geo_index, shps, CHMs,savedir="."):
"""Find predictions that don't match accross years, where there is significant height drop among years
geo_index: NEON geoindex to process
shps: List of shapefiles to search
CHMs: List of canopy height models to search
"""
#Find matching shapefiles
matched_shps = [x for x in shps if geo_index in x]
#Load shapefiles
shapefiles = {}
for shp in matched_shps:
#Load data and give it site and year and tile labels
df = geopandas.read_file(shp)
geo_index = re.search("(\d+_\d+)_image",shp).group(1)
df["shp_path"] = shp
df["geo_index"] = geo_index
df["Year"] = re.search("(\d+)_(\w+)_\d_\d+_\d+_image.shp",shp).group(1)
df["Site"] = re.search("(\d+)_(\w+)_\d_\d+_\d+_image.shp",shp).group(2)
shapefiles[df["Year"].unique()[0]] = df
#Difference in counts
mean_difference_among_years = difference_in_count(shapefiles)
#Join to find predictions that don't match
joined_boxes = sjoin(shapefiles["2018"],shapefiles["2019"])
no_matches = shapefiles["2018"][~(shapefiles["2018"].index.isin(joined_boxes.index))]
#For each tree that does not match, check the 2019 height
CHM = lookup_CHM_path(shapefiles["2018"]["shp_path"].unique()[0], CHMs)
if not os.path.exists(CHM):
raise IOError("{} does not exist".format(CHM))
draped_2019 = rasterstats.zonal_stats(no_matches, CHM, stats="mean")
no_matches["2019_height"] = [x["mean"] for x in draped_2019]
#Keep predictions whose mean height dropped by more than 50%
no_matches["height_frac"] = (no_matches["2019_height"] - no_matches["height"]) / no_matches["height"]
fall_df = no_matches[no_matches["height_frac"] < -0.5]
#Keep predictions whose original height was greater than 5m
#fall_df = fall_df[fall_df.height > 5]
#Write tree fall shapefile
fname = os.path.basename(shapefiles["2019"]["shp_path"].unique()[0])
fname = os.path.splitext(fname)[0]
fname = "{}/{}_treefall.shp".format(savedir,fname)
fall_df.to_file(fname)
    # Get predictions whose height did not drop by more than 50%, indicating poor matching
non_fall_df = no_matches[~(no_matches["height_frac"] < -0.5)]
#Keep predictions whose original height was greater than 5m
#fall_df = fall_df[fall_df.height > 5]
#Write tree fall shapefile
fname = os.path.basename(shapefiles["2019"]["shp_path"].unique()[0])
fname = os.path.splitext(fname)[0]
fname = "{}/{}_incorrect_treefall.shp".format(savedir,fname)
non_fall_df.to_file(fname)
    #Stability metrics
#Proportion not matched compared to the earliest year
p_without_match = non_fall_df.shape[0]/shapefiles["2018"].shape[0]
metrics = pd.DataFrame({"Mean_Count_Difference":mean_difference_among_years,"p_without_match":p_without_match})
fname = os.path.basename(shapefiles["2019"]["shp_path"].unique()[0])
fname = os.path.splitext(fname)[0]
fname = "{}/{}_metrics.csv".format(savedir,fname)
metrics.to_csv(fname)
return fname | 448fc3b7b86c1d0075a98a12ce9626c7d309569d | 1,280 |
import os
def split_multi_fasta_into_fasta(fasta_file, output_directory):
"""
Splits a single multi-FASTA-format file into individual FASTA-format files, each containing only one FASTA record.
PARAMETERS
fasta_file (str): the file location of the FASTA file
output_directory (str): the output directory to place all of the individual FASTA files
RETURNS
file_list (list(str)): a list of the locations of the written FASTA files in descending order of sequence length
POST
The output directory will contain a number of FASTA files equal to the number of FASTA records in the
multi-FASTA-format file provided to this function.
"""
count = 0
file_list = []
if not os.path.exists(fasta_file):
raise FileNotFoundError("File not found: " + fasta_file)
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
    output = None
    with open(fasta_file) as file:
        for line in file:
            # FASTA record header
            if line.startswith(">"):
                if output is not None:
                    output.close()  # close the previous record's file before starting a new one
                output_file = os.path.join(output_directory, str(count) + ".fasta")
                output = open(output_file, "w")
                count += 1
                information = [output_file, 0]  # [filename, number of sequence characters]
                file_list.append(information)
                output.write(line)
            # FASTA record sequence
            else:
                output.write(line)
                file_list[len(file_list) - 1][1] += len(line)  # last item in list, increment sequence characters
    if output is not None:
        output.close()
file_list.sort(key=lambda filename: filename[1], reverse=True)
# Keep only filenames (not number of characters)
for i in range(len(file_list)):
file_list[i] = file_list[i][0]
return file_list | 5ef83b1e5d2b651f44a98af291dad14c1c6c436c | 1,281 |
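# Usage sketch for split_multi_fasta_into_fasta(): build a tiny two-record FASTA
# file in a temporary directory and split it.
import os
import tempfile

workdir = tempfile.mkdtemp()
fasta_path = os.path.join(workdir, "example.fasta")
with open(fasta_path, "w") as handle:
    handle.write(">seq1\nACGTACGTACGT\n>seq2\nACGT\n")

out_files = split_multi_fasta_into_fasta(fasta_path, os.path.join(workdir, "split"))
print(out_files)   # longest record first, e.g. ['.../split/0.fasta', '.../split/1.fasta']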
from typing import List

from spacy.tokens import Doc, Span

def get_noun_phrases(doc: Doc) -> List[Span]:
"""Compile a list of noun phrases in sense2vec's format (without
determiners). Separated out to make it easier to customize, e.g. for
languages that don't implement a noun_chunks iterator out-of-the-box, or
use different label schemes.
doc (Doc): The Doc to get noun phrases from.
RETURNS (list): The noun phrases as a list of Span objects.
"""
trim_labels = ("advmod", "amod", "compound")
spans = []
if doc.is_parsed:
for np in doc.noun_chunks:
while len(np) > 1 and np[0].dep_ not in trim_labels:
np = np[1:]
spans.append(np)
return spans | 38d78164147b012437f7c8b8d4c7fe13eb574515 | 1,282 |
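# Usage sketch for get_noun_phrases(); assumes spaCy v2.x (where Doc.is_parsed exists)
# and that the en_core_web_sm model is installed.
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumps over the lazy dog.")
print([span.text for span in get_noun_phrases(doc)])
# e.g. ['quick brown fox', 'lazy dog'] -- leading determiners are trimmed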
import json
from urllib.request import urlopen

def load_file_from_url(url):
"""Load the data from url."""
url_path = get_absolute_url_path(url, PATH)
response = urlopen(url_path)
contents = json.loads(response.read())
return parse_file_contents(contents, url_path.endswith(".mrsys")) | 7eaa3d666c9e1fbdd9bad57047dd1b98712bd22b | 1,283 |
from numpy import fmin

def speedPunisherMin(v, vmin):
    """
    Quadratic penalty for speeds below a minimum.
    :param v: speed value (scalar or array)
    :param vmin: minimum allowed speed
    :return: (v - vmin) ** 2 where v < vmin, otherwise 0
    """
    x = fmin(v - vmin, 0)
    return x ** 2 | 9e6e929226ea20d70d26f6748f938981885914c7 | 1,284
import os

import boto3

def initialize_ecr_client():
"""
Initializes the ECR CLient. If running in Lambda mode, only the AWS REGION environment variable is needed.
If not running in Lambda mode, the AWS credentials are also needed.
"""
if(os.environ.get('MODE') == 'lambda'):
ecrClient = boto3.client('ecr', region_name = os.environ.get('AWS_REGION'))
else:
ecrClient = boto3.client('ecr',
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY'),
region_name = os.environ.get('AWS_REGION')
)
return ecrClient | 021b90743c915b8efa81a9b07dac95f565ddcbb7 | 1,285 |
import numpy as np
from scipy.spatial import Voronoi

def hexagonal_packing_cross_section(nseeds, Areq, insu, out_insu):
""" Make a hexagonal packing and scale the result to be Areq cross section
Parameter insu must be a percentage of the strand radius.
out_insu is the insulation thickness around the wire as meters
Returns:
(wire diameter, strand diameter, strand center points)
"""
seeds = np.linspace(-0.5, 0.5,nseeds)
dx = seeds[1]-seeds[0]
xs, ys = np.meshgrid(seeds, seeds)
if (nseeds-1) % 4 == 0:
ys[:,1::2] = ys[:,1::2] + 0.5*dx;
else:
ys[:,0::2] = ys[:,0::2] + 0.5*dx;
ys = ys*2/np.sqrt(3);
points = np.stack([xs.reshape(-1), ys.reshape(-1)], axis=1)
vor = Voronoi(points)
hexs = [v for v in vor.regions if len(v) == 6]
all_cells = vor.vertices[hexs, :]
max_dists = np.max(np.linalg.norm(all_cells, axis=2), axis=1)
cells = all_cells[max_dists < 0.5, :]
strand_cps = np.mean(cells, axis=1)
# if strand bundle is not symmetric, it will be off center so...
# move it back to center
strand_cps = strand_cps - np.mean(strand_cps, axis=0)
# quite a silly way to calculate the strand diameter.but it indeed is
# the minimum of the distances from the first cell center to all the rest
# minus the insulation thickness
strand_diam = np.min(np.linalg.norm(strand_cps[1:]-strand_cps[0], axis=1))*(1-insu)
nstrands = len(strand_cps)
Acu = nstrands*(strand_diam/2)**2*np.pi
scale = np.sqrt(Areq/Acu)
strand_cps_scaled = scale*strand_cps
strand_diam_scaled = scale*strand_diam
wire_diameter = (np.max(np.linalg.norm(strand_cps_scaled, axis=1), axis=0)*2
+ strand_diam_scaled*(1+insu)/(1-insu)
+ out_insu)
return wire_diameter, strand_diam_scaled, strand_cps_scaled | 759cc26a9606ac327851d9b1e691052123029d66 | 1,286 |
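# Usage sketch for hexagonal_packing_cross_section(); the numbers are illustrative:
# a 1 mm^2 required copper area, 2% strand insulation, 0.1 mm outer insulation.
wire_d, strand_d, centers = hexagonal_packing_cross_section(
    nseeds=21, Areq=1e-6, insu=0.02, out_insu=1e-4)
print("wire diameter [mm]:", wire_d * 1e3)
print("strand diameter [mm]:", strand_d * 1e3)
print("number of strands:", len(centers))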
def bk():
"""
Returns an RGB object representing a black pixel.
This function is created to make smile() more legible.
"""
return introcs.RGB(0,0,0) | 0343367302c601fce9057a8191b666a098eaec81 | 1,287 |
def autoEpochToTime(epoch):
"""
Converts a long offset from Epoch value to a DBDateTime. This method uses expected date ranges to
infer whether the passed value is in milliseconds, microseconds, or nanoseconds. Thresholds used are
TimeConstants.MICROTIME_THRESHOLD divided by 1000 for milliseconds, as-is for microseconds, and
multiplied by 1000 for nanoseconds. The value is tested to see if its ABS exceeds the threshold. E.g. a value
whose ABS is greater than 1000 * TimeConstants.MICROTIME_THRESHOLD will be treated as nanoseconds.
:param epoch: (long) - The long Epoch offset value to convert.
:return: (io.deephaven.db.tables.utils.DBDateTime) null, if the input is equal to QueryConstants.NULL_LONG, otherwise a DBDateTime based
on the inferred conversion.
"""
return _java_type_.autoEpochToTime(epoch) | 1f2ae0397044c19413544a359a1d966a4f223128 | 1,288 |
def compile_recursive_descent(file_lines, *args, **kwargs):
"""Given a file and its lines, recursively compile until no ksx statements remain"""
visited_files = kwargs.get('visited_files', set())
# calculate a hash of the file_lines and check if we have already compiled
# this one
file_hash = hash_file_contents(file_lines)
if len(visited_files) > RECURSION_DESCENT_LIMIT:
msg = (
"Compiler appears to be in a circular reference loop, "
"this is currently non-recoverable and is a known issue.\n\n"
"See: https://github.com/LeonardMH/kos-scripts/issues/7 \n\n"
"In the meantime check your library for files which import a "
"file, where that file imports the original (A->B->A).\n\n"
"You might also attempt using the 'from x import y' syntax which "
"has slightly narrower scope."
)
raise CircularImportError(msg)
if file_hash in visited_files:
# we have already compiled this file, no need to do so again
return ""
else:
# we will now compile the file, mark that it has been visited
visited_files.add(file_hash)
# compile and split back out to individual lines
file_oneline = compile_single_file_lines(file_lines, *args, **kwargs)
file_lines = file_oneline.split('\n')
# if there are no more ksx directives in the lines compiled we are done,
# return the stringified compile result
if not file_has_ksx_directive(file_lines):
return file_oneline
# if there are still more ksx directives in the lines compiled so far, run
# again
kwargs['visited_files'] = visited_files
return compile_recursive_descent(file_lines, *args, **kwargs).rstrip() + '\n' | 9e5306c2d2cc6696883ac3ec37114c13340fe1f5 | 1,289 |
import numpy as np

def majority_voting(masks, voting='hard', weights=None, threshold=0.5):
"""Soft Voting/Majority Rule mask merging; Signature based upon the Scikit-learn VotingClassifier (https://github.com/scikit-learn/scikit-learn/blob/2beed55847ee70d363bdbfe14ee4401438fba057/sklearn/ensemble/_voting.py#L141)
Parameters
----------
masks : segmentations masks to merge, ndarray
Expected shape is num_of_masks * 1 * h * w
Accepts masks in range 0-1 (i.e apply sigmoid before passing to this function)
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
threshold : for separating between the positive and negative class, default=0.5
Applied first in case of hard voting and applied last in case of soft voting
"""
assert len(masks.shape) == 4
if voting not in ('soft', 'hard'):
raise ValueError(f"Voting must be 'soft' or 'hard'; got (voting= {voting})")
for m in masks:
assert (m >= 0.).all() and (m <= 1.).all()
if voting == 'hard':
masks = (masks >= threshold).astype(np.float32)
if weights is None:
weights = np.array([1] * masks.shape[0])
else:
weights = np.array(weights)
# Broadcasting starts with the trailing (i.e. rightmost) dimensions and works its way left, therefore we move the "mask" dimension to the right
masks= np.transpose(masks, (1, 2, 3, 0))
masks = masks * weights
masks= np.transpose(masks, (3, 0, 1, 2))
masks = masks.sum(axis=0)
if voting == 'soft':
masks = (masks >= (threshold * weights.sum())).astype(np.float32)
elif voting == 'hard': # Same as doing a majority vote
masks = (masks > (0.5 * weights.sum())).astype(np.float32)
assert len(masks.shape) == 3
return masks.astype(np.float32) | 882e98bc3a0c817c225f740042eb43b3bc4734fa | 1,290 |
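# Usage sketch for majority_voting(): merge three random probability masks by soft voting.
import numpy as np

rng = np.random.default_rng(0)
masks = rng.random((3, 1, 4, 4))              # 3 models, 1 channel, 4x4 masks in [0, 1)
merged = majority_voting(masks, voting='soft', weights=[2, 1, 1])
print(merged.shape, merged.dtype)             # (1, 4, 4) float32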
import numpy as _np
import matplotlib.pyplot as _plt
from matplotlib import animation as _animation
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection

def animate(zdata,
xdata,
ydata,
conversionFactorArray,
timedata,
BoxSize,
timeSteps=100,
filename="particle"):
"""
Animates the particle's motion given the z, x and y signal (in Volts)
and the conversion factor (to convert between V and nm).
Parameters
----------
zdata : ndarray
Array containing the z signal in volts with time.
xdata : ndarray
Array containing the x signal in volts with time.
ydata : ndarray
Array containing the y signal in volts with time.
conversionFactorArray : ndarray
Array of 3 values of conversion factors for z, x and y (in units of Volts/Metre)
timedata : ndarray
Array containing the time data in seconds.
BoxSize : float
The size of the box in which to animate the particle - in nm
timeSteps : int, optional
Number of time steps to animate
filename : string, optional
filename to create the mp4 under (<filename>.mp4)
"""
timePerFrame = 0.203
print("This will take ~ {} minutes".format(timePerFrame * timeSteps / 60))
convZ = conversionFactorArray[0] * 1e-9
convX = conversionFactorArray[1] * 1e-9
convY = conversionFactorArray[2] * 1e-9
ZBoxStart = -BoxSize # 1/conv*(_np.mean(zdata)-0.06)
ZBoxEnd = BoxSize # 1/conv*(_np.mean(zdata)+0.06)
XBoxStart = -BoxSize # 1/conv*(_np.mean(xdata)-0.06)
XBoxEnd = BoxSize # 1/conv*(_np.mean(xdata)+0.06)
YBoxStart = -BoxSize # 1/conv*(_np.mean(ydata)-0.06)
YBoxEnd = BoxSize # 1/conv*(_np.mean(ydata)+0.06)
FrameInterval = 1 # how many timesteps = 1 frame in animation
a = 20
b = 0.6 * a
myFPS = 7
myBitrate = 1000000
fig = _plt.figure(figsize=(a, b))
ax = fig.add_subplot(111, projection='3d')
ax.set_title("{} us".format(timedata[0] * 1000000))
ax.set_xlabel('X (nm)')
ax.set_xlim([XBoxStart, XBoxEnd])
ax.set_ylabel('Y (nm)')
ax.set_ylim([YBoxStart, YBoxEnd])
ax.set_zlabel('Z (nm)')
ax.set_zlim([ZBoxStart, ZBoxEnd])
ax.view_init(20, -30)
# ax.view_init(0, 0)
def setup_plot():
XArray = 1 / convX * xdata[0]
YArray = 1 / convY * ydata[0]
ZArray = 1 / convZ * zdata[0]
scatter = ax.scatter(XArray, YArray, ZArray)
return scatter,
def animate(i):
# print "\r {}".format(i),
print("Frame: {}".format(i), end="\r")
ax.clear()
ax.view_init(20, -30)
ax.set_title("{} us".format(int(timedata[i] * 1000000)))
ax.set_xlabel('X (nm)')
ax.set_xlim([XBoxStart, XBoxEnd])
ax.set_ylabel('Y (nm)')
ax.set_ylim([YBoxStart, YBoxEnd])
ax.set_zlabel('Z (nm)')
ax.set_zlim([ZBoxStart, ZBoxEnd])
XArray = 1 / convX * xdata[i]
YArray = 1 / convY * ydata[i]
ZArray = 1 / convZ * zdata[i]
scatter = ax.scatter(XArray, YArray, ZArray)
ax.scatter([XArray], [0], [-ZBoxEnd], c='k', alpha=0.9)
ax.scatter([-XBoxEnd], [YArray], [0], c='k', alpha=0.9)
ax.scatter([0], [YBoxEnd], [ZArray], c='k', alpha=0.9)
Xx, Yx, Zx, Xy, Yy, Zy, Xz, Yz, Zz = [], [], [], [], [], [], [], [], []
for j in range(0, 30):
Xlast = 1 / convX * xdata[i - j]
Ylast = 1 / convY * ydata[i - j]
Zlast = 1 / convZ * zdata[i - j]
Alpha = 0.5 - 0.05 * j
if Alpha > 0:
ax.scatter([Xlast], [0 + j * 10], [-ZBoxEnd],
c='grey',
alpha=Alpha)
ax.scatter([-XBoxEnd], [Ylast], [0 - j * 10],
c='grey',
alpha=Alpha)
ax.scatter([0 - j * 2], [YBoxEnd], [Zlast],
c='grey',
alpha=Alpha)
Xx.append(Xlast)
Yx.append(0 + j * 10)
Zx.append(-ZBoxEnd)
Xy.append(-XBoxEnd)
Yy.append(Ylast)
Zy.append(0 - j * 10)
Xz.append(0 - j * 2)
Yz.append(YBoxEnd)
Zz.append(Zlast)
if j < 15:
XCur = 1 / convX * xdata[i - j + 1]
YCur = 1 / convY * ydata[i - j + 1]
ZCur = 1 / convZ * zdata[i - j + 1]
ax.plot([Xlast, XCur], [Ylast, YCur], [Zlast, ZCur], alpha=0.4)
ax.plot_wireframe(Xx, Yx, Zx, color='grey')
ax.plot_wireframe(Xy, Yy, Zy, color='grey')
ax.plot_wireframe(Xz, Yz, Zz, color='grey')
return scatter,
anim = _animation.FuncAnimation(fig,
animate,
int(timeSteps / FrameInterval),
init_func=setup_plot,
blit=True)
_plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
mywriter = _animation.FFMpegWriter(fps=myFPS, bitrate=myBitrate)
# , fps = myFPS, bitrate = myBitrate)
anim.save('{}.mp4'.format(filename), writer=mywriter)
return None | aa0f08481f7efc39dae725a0c5f7fbc377586261 | 1,291 |
import re
def name_of_decompressed(filename):
""" Given a filename check if it is in compressed type (any of
['.Z', '.gz', '.tar.gz', '.zip']; if indeed it is compressed return the
name of the uncompressed file, else return the input filename.
"""
dct = {
'.Z': re.compile('.Z$'),
'.tar.gz': re.compile('.tar.gz$'),
'.gz': re.compile('.gz$'),
'.zip': re.compile('.zip$')
}
ctype = find_os_compression_type(filename)
if ctype is None:
return filename
try:
return re.sub(dct[ctype], '', filename)
except:
raise RuntimeError('[ERROR] decompress:name_of_decompressed Failed!') | ee0c49edca853fbf1da8caccbba68c9cde391f6b | 1,292 |
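# Usage sketch for name_of_decompressed(); assumes the external
# find_os_compression_type() helper recognises the '.gz' suffix.
print(name_of_decompressed("igs21000.sp3.gz"))   # -> 'igs21000.sp3'
print(name_of_decompressed("igs21000.sp3"))      # unchanged: not a compressed name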
import random
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1 | 2e8a5e2d3c8fd6770e78a6ad30afc52f63c43073 | 1,293 |
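# Usage sketch for sample_distribution(): draw many samples and check the frequencies.
from collections import Counter

dist = [0.1, 0.6, 0.3]
counts = Counter(sample_distribution(dist) for _ in range(10_000))
print({i: counts[i] / 10_000 for i in range(len(dist))})   # roughly {0: 0.1, 1: 0.6, 2: 0.3}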
def benchmark(func):
"""Decorator to mark a benchmark."""
BENCHMARKS[func.__name__] = func
return func | 0edadb46c446ed5603434d14ab7a40cdf76651b5 | 1,294 |
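# Usage sketch for the benchmark decorator; BENCHMARKS is assumed to be the
# module-level registry the decorator writes into.
BENCHMARKS = {}

@benchmark
def sort_small_list():
    return sorted([3, 1, 2])

print(list(BENCHMARKS))   # ['sort_small_list']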
import numpy as np

def do_positive_DFT(data_in, tmax):
    """
    Do Discrete Fourier transformation and take POSITIVE frequency component part.
    Args:
        data_in (array): input data.
        tmax (int): number of samples (length of data_in).
Returns:
data_s (array): output array with POSITIVE frequency component part.
data_w (array): the Discrete Fourier Transform sample frequencies POSITIVE frequency component part.
"""
data_s = np.fft.fft(data_in)
data_w = np.fft.fftfreq(tmax)
# only take the positive frequency components
return data_w[0:tmax//2], data_s[0:tmax//2] | c3bab6b9595cf77869f65eacf6acf6d7f990ca10 | 1,295 |
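# Usage sketch for do_positive_DFT(): spectrum of a pure tone sampled at unit rate.
import numpy as np

tmax = 256
t = np.arange(tmax)
signal = np.sin(2 * np.pi * 0.1 * t)          # tone at 0.1 cycles/sample
freqs, spectrum = do_positive_DFT(signal, tmax)
print(freqs[np.argmax(np.abs(spectrum))])     # ~0.1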
def service(base_app, location):
"""Service fixture."""
return base_app.extensions["invenio-records-lom"].records_service | 52ad7f4624e7d0af153f0fcaaccfb56effddb86d | 1,296 |
def check_file_content(path, expected_content):
"""Check file has expected content.
:param str path: Path to file.
:param str expected_content: Expected file content.
"""
    with open(path) as infile:
        return expected_content == infile.read() | 77bdfae956ce86f2422ed242c4afcaab19cab384 | 1,298
import sys
def get_arguments(deluge=False):
"""Retrieves CLI arguments from the 'addmedia' script and uses
get_parser() to validate them.
Returns the full file path to the config file in use and a dict of
validated arguments from the MHParser object.
"""
# Check for deluge
if deluge:
return get_deluge_arguments()
# Get parser
parser = get_parser()
# If no args, show help
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
# Get validated args from parser
new_args = parser.parse_args().__dict__
# Remove config to return separately
config = new_args.pop('config')
return config, new_args | ade6a32749eab72da06609e8e12401ec806b7afe | 1,298 |
import multiprocessing as mp
from datetime import datetime

from sqlalchemy import select
def verify_apikey(payload,
raiseonfail=False,
override_authdb_path=None,
override_permissions_json=None,
config=None):
"""Checks if an API key is valid.
This version does not require a session.
Parameters
----------
payload : dict
This dict contains a single key:
- apikey_dict: the decrypted and verified API key info dict from the
frontend.
- user_id: the user ID of the person wanting to verify this key.
- user_role: the user role of the person wanting to verify this key.
raiseonfail : bool
If True, will raise an Exception if something goes wrong.
override_authdb_path : str or None
If given as a str, is the alternative path to the auth DB.
override_permissions_json : str or None
If given as a str, is the alternative path to the permissions JSON to
use. This is used to check if the user_id is allowed to actually verify
("read") an API key.
config : SimpleNamespace object or None
An object containing systemwide config variables as attributes. This is
useful when the wrapping function needs to pass in some settings
directly from environment variables.
Returns
-------
dict
The dict returned is of the form::
{'success': True if API key is OK and False otherwise,
'messages': list of str messages if any}
"""
for key in ('reqid', 'pii_salt'):
if key not in payload:
LOGGER.error(
"Missing %s in payload dict. Can't process this request." % key
)
return {
'success': False,
'failure_reason': (
"invalid request: missing '%s' in request" % key
),
'apikey': None,
'expires': None,
'messages': ["Invalid API key request."],
}
for key in ('apikey_dict', 'user_id', 'user_role'):
if key not in payload:
LOGGER.error(
'[%s] Invalid API key request, missing %s.' %
(payload['reqid'], key)
)
return {
'success': False,
'failure_reason': (
"invalid request: missing '%s' in request" % key
),
'messages': ["Some required keys are missing from payload."]
}
apikey_dict = payload['apikey_dict']
user_id = payload['user_id']
user_role = payload['user_role']
# check if the user is allowed to read the presented API key
apikey_verify_allowed = check_user_access(
{'user_id': user_id,
'user_role': user_role,
'action': 'view',
'target_name': 'apikey',
'target_owner': apikey_dict['uid'],
'target_visibility': 'private',
'target_sharedwith': None,
'reqid': payload['reqid'],
'pii_salt': payload['pii_salt']},
raiseonfail=raiseonfail,
override_permissions_json=override_permissions_json,
override_authdb_path=override_authdb_path
)
if not apikey_verify_allowed['success']:
LOGGER.error(
"[%s] Invalid API key verification request. "
"from user_id: %s, role: %s. The API key presented is "
"not readable by this user." %
(payload['reqid'],
pii_hash(user_id, payload['pii_salt']),
pii_hash(user_role, payload['pii_salt']))
)
return {
'success': False,
'failure_reason': (
"originating user is not allowed to operate on this API key"
),
'messages': ["API key verification failed. "
"You are not allowed to operate on this API key."]
}
# this checks if the database connection is live
currproc = mp.current_process()
engine = getattr(currproc, 'authdb_engine', None)
if override_authdb_path:
currproc.auth_db_path = override_authdb_path
if not engine:
currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
authdb.get_auth_db(
currproc.auth_db_path,
echo=raiseonfail
)
)
apikeys = currproc.authdb_meta.tables['apikeys_nosession']
# the apikey sent to us must match the stored apikey's properties:
# - token
# - userid
# - expired must be in the future
# - issued must be in the past
# - not_valid_before must be in the past
dt_utcnow = datetime.utcnow()
sel = select([
apikeys.c.apikey,
apikeys.c.expires,
]).select_from(apikeys).where(
apikeys.c.apikey == apikey_dict['tkn']
).where(
apikeys.c.user_id == apikey_dict['uid']
).where(
apikeys.c.user_role == apikey_dict['rol']
).where(
apikeys.c.expires > dt_utcnow
).where(
apikeys.c.issued < dt_utcnow
).where(
apikeys.c.not_valid_before < dt_utcnow
)
result = currproc.authdb_conn.execute(sel)
row = result.fetchone()
result.close()
if row is not None and len(row) != 0:
LOGGER.info(
"[%s] No-session API key verified successfully. "
"user_id: %s, role: '%s', audience: '%s', subject: '%s', "
"apiversion: %s, expires on: %s" %
(payload['reqid'],
pii_hash(apikey_dict['uid'],
payload['pii_salt']),
apikey_dict['rol'],
apikey_dict['aud'],
apikey_dict['sub'],
apikey_dict['ver'],
apikey_dict['exp'])
)
return {
'success': True,
'messages': [(
"No-session API key verified successfully. Expires: %s." %
row['expires'].isoformat()
)]
}
else:
LOGGER.error(
"[%s] No-session API key verification failed. Failed key "
"user_id: %s, role: '%s', audience: '%s', subject: '%s', "
"apiversion: %s, expires on: %s" %
(payload['reqid'],
pii_hash(apikey_dict['uid'],
payload['pii_salt']),
apikey_dict['rol'],
apikey_dict['aud'],
apikey_dict['sub'],
apikey_dict['ver'],
apikey_dict['exp'])
)
return {
'success': False,
'failure_reason': (
"key validation failed, "
"provided key does not match stored key or has expired"
),
'messages': [(
"API key could not be verified."
)]
} | f1f5d9f65b2c9b8b9175ea4729042d9bb040a0e7 | 1,299 |