content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def loyalty():
"""Пересчитать индекс лояльности"""
articles = Article.objects.all()
if articles.count() == 0:
logger.info('No articles to recalculate yet. Exiting...')
return False
logger.info('Starting loyalty index recalculation')
logger.info(f'Number of articles: {articles.count()}')
texts = [item.text for item in articles]
dt = DefineText(texts)
themes, _ = dt.article_theme()
sentiments, _ = dt.article_sentiment()
for article, theme, sentiment in zip(articles, themes, sentiments):
article.theme = bool(theme)
article.sentiment = sentiment
article.save() | 3db3214e1d6d2f2f3d54f9c1d01807ed9558ef6b | 1,800 |
import json
def user_info(request):
"""Returns a JSON object containing the logged-in student's information."""
student = request.user.student
return HttpResponse(json.dumps({
'academic_id': student.academic_id,
'current_semester': int(student.current_semester),
'name': student.name,
'username': request.user.username}), content_type="application/json") | 41c1bcc8f69d97f76acbe7f15c4bc5cbc2ea6b60 | 1,801 |
from functools import partial
import numpy as np
def extract_brain_activation(brainimg, mask, roilabels, method='mean'):
"""
Extract brain activation from ROI.
Parameters
----------
brainimg : array
A 4D brain image array whose first dimension indexes pictures and whose remaining 3 dimensions form the brain volume.
mask : array
A 3D brain image array with the same shape as the last 3 dimensions of brainimg.
roilabels : list, array
ROI labels
method : str
Method used to aggregate activation within each ROI; 'mean' by default.
Returns
-------
roisignals : list
Extracted brain activation.
Each element in the list is the activation extracted for one of the roilabels.
Because different labels may contain different numbers of activation voxels,
the output cannot be stored as a single numpy array and is returned as a list instead.
"""
if method == 'mean':
calc_way = partial(np.mean, axis=1)
elif method == 'std':
calc_way = partial(np.std, axis=1)
elif method == 'max':
calc_way = partial(np.max, axis=1)
elif method == 'voxel':
calc_way = np.array
else:
raise Exception("This method is not supported yet; please contact the authors to implement it.")
assert brainimg.shape[1:] == mask.shape, "brainimg and mask are mismatched."
roisignals = []
for i, lbl in enumerate(roilabels):
roisignals.append(calc_way(brainimg[:, mask==lbl]))
return roisignals | fac20ea1c99696aab84137964dbbfdfa7bd66612 | 1,802 |
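A toy usage sketch of the ROI extraction above; the volume, mask labels and shapes are made up purely for illustration.
import numpy as np
brainimg = np.arange(16, dtype=float).reshape(2, 2, 2, 2)  # 2 "pictures", each a 2x2x2 volume
mask = np.array([[[1, 1], [2, 2]], [[2, 2], [0, 0]]])      # 3D mask with ROI labels 1 and 2
signals = extract_brain_activation(brainimg, mask, roilabels=[1, 2], method='mean')
print([s.shape for s in signals])  # [(2,), (2,)] -- one mean value per picture for each ROI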
import numpy as np
def logit(x):
"""
Elementwise logit (inverse logistic sigmoid).
:param x: numpy array
:return: numpy array
"""
return np.log(x / (1.0 - x)) | 4ce2474a9eb97208613268d3005959a4a162dbe0 | 1,803 |
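A quick sanity check for the function above: logit is the inverse of the logistic sigmoid, so applying it to sigmoid(x) should recover x (numpy imported as np, as the snippet requires).
x = np.array([-2.0, 0.0, 3.0])
sigmoid = 1.0 / (1.0 + np.exp(-x))             # forward logistic transform
np.testing.assert_allclose(logit(sigmoid), x)  # round trip holds elementwise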
import base58
import binascii
import hashlib
def _base58_decode(address: str) -> bool:
"""
SEE https://en.bitcoin.it/wiki/Base58Check_encoding
"""
try:
decoded_address = base58.b58decode(address).hex()
result, checksum = decoded_address[:-8], decoded_address[-8:]
except ValueError:
return False
else:
for _ in range(1, 3):
result = hashlib.sha256(binascii.unhexlify(result)).hexdigest()
return checksum == result[:8] | e0610e882b64511743376ce7a0370e7600436411 | 1,804 |
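A hedged usage sketch of the Base58Check validator above; it relies on the third-party base58 package and uses the well-known Bitcoin genesis address purely as sample input.
print(_base58_decode("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"))  # a well-formed address should yield True
print(_base58_decode("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNb"))  # last character altered: checksum mismatch, False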
def get_average_matrix(shape, matrices):
""" Take the average matrix by a list of matrices of same shape """
return _ImageConvolution().get_average_matrix(shape, matrices) | dfdd4995751bb2894a7bada961d863bb800e79a5 | 1,805 |
def pph_custom_pivot(n, t0):
"""
The algorithm receives a list n of coordinate pairs (a, b) and returns a list s containing only the
coordinates that together maximize a ratio of the form r = ((a0 + a1 + ... + an) / (b0 + b1 + ... + bn)).
The worst case is O(n^2), reached when the ratio of every element is always smaller than the ratio
of the pivot. To find the pivot element, the algorithm computes:
pivot = [a0 + (a1 + a2 + ... + an)] / [b0 + (b1 + b2 + ... + bn)]
Args:
n (list[Pair]): List of coordinates of type Pair.
t0 (Pair): Initial reference a0 and b0 for the algorithm.
Returns:
s (list[Pair]): List of coordinates that maximize the ratio r.
# 0- Declare the HiperbolicSet object
s = HiperbolicSet(t0.a, t0.b)
k = n
# 1- Compute a pivot using r = ((a0 + a1 + ... + an) / (b0 + b1 + ... + bn)) in O(n)
pivot = custom_pivot(k, None, t0.a, t0.b)
# 2- Run the recursion steps that compute the customized PPH
res = pph_steps(k, pivot, pivot.a, pivot.b)
# 6- Add the pairs that maximize the ratio to the set - O(n)
s.add_all(res)
return s | 7853aeac0f4acae6270e82b5c1568f6d0858b779 | 1,806 |
def music(hot_music_url, **kwargs):
"""
get hot music result
:return: HotMusic object
"""
result = fetch(hot_music_url, **kwargs)
# process json data
datetime = parse_datetime(result.get('active_time'))
# video_list = result.get('music_list', [])
musics = []
music_list = result.get('music_list', [])
for item in music_list:
music = data_to_music(item.get('music_info', {}))
music.hot_count = item.get('hot_value')
musics.append(music)
# construct HotMusic object and return
return HotMusic(datetime=datetime, data=musics) | cf49e0648bb84ff9aa033bf49f732260770b47f5 | 1,807 |
def parse_from_docstring(docstring, spec='operation'):
"""Returns path spec from docstring"""
# preprocess lines
lines = docstring.splitlines(True)
parser = _ParseFSM(FSM_MAP, lines, spec)
parser.run()
return parser.spec | 37026d6e0fd0edf476d59cdd33ac7ec2d04eb38d | 1,808 |
def collection_headings(commodities) -> CommodityCollection:
"""Returns a special collection of headings to test header and chapter
parenting rules."""
keys = ["9900_80_0", "9905_10_0", "9905_80_0", "9910_10_0", "9910_80_0"]
return create_collection(commodities, keys) | 545419cd79fbd86d0a16aad78996977ea1ff4605 | 1,809 |
import getpass
def get_ssh_user():
"""Returns ssh username for connecting to cluster workers."""
return getpass.getuser() | 166048aa258bd0b2c926d03478e8492a405b0f7e | 1,810 |
def tryf(body, *handlers, elsef=None, finallyf=None):
"""``try``/``except``/``finally`` as a function.
This allows lambdas to handle exceptions.
``body`` is a thunk (0-argument function) that represents
the body of the ``try`` block.
``handlers`` is ``(excspec, handler), ...``, where
``excspec`` is either an exception type, or a tuple of exception types.
``handler`` is a 0-argument or 1-argument function. If it takes an
argument, it gets the exception instance.
Handlers are tried in the order specified.
``elsef`` is a thunk that represents the ``else`` block.
``finallyf`` is a thunk that represents the ``finally`` block.
Upon normal completion, the return value of ``tryf`` is
the return value of ``elsef`` if that was specified, otherwise
the return value of ``body``.
If an exception was caught by one of the handlers, the return
value of ``tryf`` is the return value of the exception handler
that ran.
If you need to share variables between ``body`` and ``finallyf``
(which is likely, given what a ``finally`` block is intended
to do), consider wrapping the ``tryf`` in a ``let`` and storing
your variables there. If you want them to leak out of the ``tryf``,
you can also just create an ``env`` at an appropriate point,
and store them there.
"""
def accepts_arg(f):
try:
if arity_includes(f, 1):
return True
except UnknownArity: # pragma: no cover
return True # just assume it
return False
def isexceptiontype(exc):
try:
if issubclass(exc, BaseException):
return True
except TypeError: # "issubclass() arg 1 must be a class"
pass
return False
# validate handlers
for excspec, handler in handlers:
if isinstance(excspec, tuple): # tuple of exception types
if not all(isexceptiontype(t) for t in excspec):
raise TypeError(f"All elements of a tuple excspec must be exception types, got {excspec}")
elif not isexceptiontype(excspec): # single exception type
raise TypeError(f"excspec must be an exception type or tuple of exception types, got {excspec}")
# run
try:
ret = body()
except BaseException as exception:
# Even if a class is raised, as in `raise StopIteration`, the `raise` statement
# converts it into an instance by instantiating with no args. So we need no
# special handling for the "class raised" case.
# https://docs.python.org/3/reference/simple_stmts.html#the-raise-statement
# https://stackoverflow.com/questions/19768515/is-there-a-difference-between-raising-exception-class-and-exception-instance/19768732
exctype = type(exception)
for excspec, handler in handlers:
if isinstance(excspec, tuple): # tuple of exception types
# this is safe, exctype is always a class at this point.
if any(issubclass(exctype, t) for t in excspec):
if accepts_arg(handler):
return handler(exception)
else:
return handler()
else: # single exception type
if issubclass(exctype, excspec):
if accepts_arg(handler):
return handler(exception)
else:
return handler()
else:
if elsef is not None:
return elsef()
return ret
finally:
if finallyf is not None:
finallyf() | bde4282c4422272717e48a546430d2b93e9d0529 | 1,811 |
def obtain_sheet_music(score, most_frequent_dur):
"""
Returns unformated sheet music from score
"""
result = ""
octaves = [3 for i in range(12)]
accidentals = [False for i in range(7)]
for event in score:
for note_indx in range(len(event[0])):
data = notenum2string(event[0][note_indx], accidentals, octaves)
result += data[0]
accidentals = data[1]
octaves = data[2]
if note_indx != len(event[0])-1:
result += '-'
if event[1] != most_frequent_dur: # Quarters are default
result += '/'
result += dur2mod(event[1], most_frequent_dur)
result += ','
return result | 4c216f2cca0d2054af355bc097c22ff2b7662969 | 1,812 |
def adjacency_matrix(edges):
"""
Convert a directed graph to an adjacency matrix.
Note: The distance from a node to itself is 0 and distance from a node to
an unconnected node is defined to be infinite.
Parameters
----------
edges : list of tuples
list of dependencies between nodes in the graph
[(source node, destination node, weight), ...]
Returns
-------
out : tuple
(names, adjacency matrix)
names - list of unique nodes in the graph
adjacency matrix represented as list of lists
"""
# determine the set of unique nodes
names = set()
for src, dest, _ in edges:
# add source and destination nodes
names.add(src)
names.add(dest)
# convert set of names to sorted list
names = sorted(names)
# determine initial adjacency matrix with infinity weights
matrix = [[float('Inf')] * len(names) for _ in names]
for src, dest, weight in edges:
# update weight in adjacency matrix
matrix[names.index(src)][names.index(dest)] = weight
for src in names:
matrix[names.index(src)][names.index(src)] = 0
# return list of names and adjacency matrix
return names, matrix | b8743a6fa549b39d5cb24ae1f276e911b954ee5a | 1,813 |
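A small worked example of the conversion above: two weighted edges give a 3x3 matrix with zeros on the diagonal and infinity for unconnected pairs.
edges = [("a", "b", 3), ("b", "c", 1)]
names, matrix = adjacency_matrix(edges)
print(names)   # ['a', 'b', 'c']
print(matrix)  # [[0, 3, inf], [inf, 0, 1], [inf, inf, 0]]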
def estimate_Cn(P=1013, T=273.15, Ct=1e-4):
"""Use Weng et al to estimate Cn from meteorological data.
Parameters
----------
P : `float`
atmospheric pressure in hPa
T : `float`
temperature in Kelvin
Ct : `float`
atmospheric structure constant of temperature, typically 10^-5 - 10^-2 near the surface
Returns
-------
`float`
Cn
"""
return (79 * P / (T ** 2)) * Ct ** 2 * 1e-12 | b74dd0c91197c24f880521a06d6bcd205d749448 | 1,814 |
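For the defaults in the signature above, the formula works out to roughly 1.07e-20:
# (79 * 1013 / 273.15**2) * (1e-4)**2 * 1e-12 ≈ 1.07e-20
print(estimate_Cn())  # ~1.07e-20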
import ctypes
def sg_get_scsi_status_str(scsi_status):
""" Fetch scsi status string. """
buff = _get_buffer(128)
libsgutils2.sg_get_scsi_status_str(scsi_status, 128, ctypes.byref(buff))
return buff.value.decode('utf-8') | 2bdf7feb455ccbab659961ddbba04a9fa1daeb85 | 1,815 |
import math
from torchvision import utils
def numpy_grid(x, pad=0, nrow=None, uint8=True):
""" thin wrap to make_grid to return frames ready to save to file
args
pad (int [0]) same as utils.make_grid(padding)
nrow (int [None]) # defaults to horizontally biased rectangle closest to square
uint8 (bool [True]) convert to img in range 0-255 uint8
"""
x = x.clone().detach().cpu()
nrow = nrow or int(math.sqrt(x.shape[0]))
x = ((utils.make_grid(x, nrow=nrow, padding=pad).permute(1,2,0) - x.min())/(x.max()-x.min())).numpy()
if uint8:
x = (x*255).astype("uint8")
return x | e83452bb2387d79ca307840487bb4bdd24efed87 | 1,816 |
import functools
def if_active(f):
"""decorator for callback methods so that they are only called when active"""
@functools.wraps(f)
def inner(self, loop, *args, **kwargs):
if self.active:
return f(self, loop, *args, **kwargs)
return inner | 83b4eabaafa9602ad0547f87aeae99a63872152a | 1,817 |
def obs_all_node_target_pairs_one_hot(agent_id: int, factory: Factory) -> np.ndarray:
"""One-hot encoding (of length nodes) of the target location for each node. Size of nodes**2"""
num_nodes = len(factory.nodes)
node_pair_target = np.zeros(num_nodes ** 2)
for n in range(num_nodes):
core_target_index = []
if factory.nodes[n].table != None and factory.nodes[n].table.has_core():
core_target_index = [
factory.nodes.index(factory.nodes[n].table.core.current_target)
]
node_pair_target[n * num_nodes : (n + 1) * num_nodes] = np.asarray(
one_hot_encode(num_nodes, core_target_index)
)
else:
node_pair_target[n * num_nodes : (n + 1) * num_nodes] = np.zeros(num_nodes)
return node_pair_target | aed5fa19baf28c798f1e064b878b148867d19053 | 1,818 |
from typing import Callable
from typing import List
import math
def repeat_each_position(shape: GuitarShape, length: int = None, repeats: int = 2, order: Callable = asc) -> List[List[FretPosition]]:
"""
Play each fret in the sequence two or more times
"""
if length is not None:
div_length = math.ceil(length / repeats)
else:
div_length = length
pattern = order(shape, length=div_length)
new_positions = []
for positions in pattern:
new_positions.extend([positions] * repeats)
if length is not None and len(new_positions) != length:
new_positions = adjust_length(new_positions, length)
return new_positions | 9783e218134839410e02d4bc5210804d6a945d6d | 1,819 |
import csv
import gzip
from io import StringIO
import pandas
def gz_csv_read(file_path, use_pandas=False):
"""Read a gzipped csv file.
"""
with gzip.open(file_path, 'rt') as infile:
if use_pandas:
data = pandas.read_csv(StringIO(infile.read()))
else:
reader = csv.reader(StringIO(infile.read()))
data = [row for row in reader]
return data | 725132f37454b66b6262236966c96d4b48a81049 | 1,820 |
import torch.nn as nn
def init_block(in_channels, out_channels, stride, activation=nn.PReLU):
"""Builds the first block of the MobileFaceNet"""
return nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
nn.BatchNorm2d(out_channels),
make_activation(activation)
) | 966ccb1ca1cb7e134db3ac40bb4daf54950743b1 | 1,821 |
def address_working(address, value=None):
"""
Find, insert or delete from database task address
:param address: website address example: https://www.youtube.com/
:param value: True: add , False: remove, default: find
:return:
"""
global db
if value is True:
db.tasks.insert_one({'Address': address})
return True
if value is False:
db.tasks.delete_many({'Address': address})
return False
x = list(db.tasks.find({'Address': address}))
if len(x) == 0:
return False
else:
return True | 5879fb6f3d4756aceb8424a5fc22fd232841802c | 1,822 |
def merge_default_values(resource_list, default_values):
"""
Generate a new list where each item of original resource_list will be merged with the default_values.
Args:
resource_list: list with items to be merged
default_values: properties to be merged with each item list. If the item already contains some property
the original value will be maintained.
Returns:
list: list containing each item merged with default_values
"""
def merge_item(resource):
return merge_resources(default_values, resource)
return lmap(merge_item, resource_list) | c2e98260a34762d17185eacdfc2fb4be1b3a45f3 | 1,823 |
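An illustrative sketch of the merge behaviour described above, using plain dicts; merge_resources and lmap are helpers from the surrounding codebase, so this hypothetical snippet only mirrors the documented semantics.
default_values = {"type": "volume", "shareable": False}
resource_list = [{"name": "vol1"}, {"name": "vol2", "shareable": True}]
merged = [{**default_values, **resource} for resource in resource_list]  # item values win over defaults
print(merged)
# [{'type': 'volume', 'shareable': False, 'name': 'vol1'}, {'type': 'volume', 'shareable': True, 'name': 'vol2'}]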
from datetime import datetime
import pytz
def finish_scheduling(request, schedule_item=None, payload=None):
"""
Finalize the creation of a scheduled action. All required data is passed
through the payload.
:param request: Request object received
:param schedule_item: ScheduledAction item being processed. If None,
it has to be extracted from the information in the payload.
:param payload: Dictionary with all the required data coming from
previous requests.
:return:
"""
# Get the payload from the session if not given
if payload is None:
payload = request.session.get(session_dictionary_name)
# If there is no payload, something went wrong.
if payload is None:
# Something is wrong with this execution. Return to action table.
messages.error(request,
_('Incorrect action scheduling invocation.'))
return redirect('action:index')
# Get the scheduled item if needed
if not schedule_item:
s_item_id = payload.get('schedule_id')
if not s_item_id:
messages.error(request, _('Incorrect parameters in action scheduling'))
return redirect('action:index')
# Get the item being processed
schedule_item = ScheduledAction.objects.get(pk=s_item_id)
# Check for exclude values and store them if needed
exclude_values = payload.get('exclude_values')
if exclude_values:
schedule_item.exclude_values = exclude_values
schedule_item.status = ScheduledAction.STATUS_PENDING
schedule_item.save()
# Create the payload to record the event in the log
log_payload = {
'action': schedule_item.action.name,
'action_id': schedule_item.action.id,
'execute': schedule_item.execute.isoformat(),
}
if schedule_item.action.action_type == Action.PERSONALIZED_TEXT:
log_payload.update({
'email_column': schedule_item.item_column.name,
'subject': schedule_item.payload.get('subject'),
'cc_email': schedule_item.payload.get('cc_email', []),
'bcc_email': schedule_item.payload.get('bcc_email', []),
'send_confirmation': schedule_item.payload.get('send_confirmation',
False),
'track_read': schedule_item.payload.get('track_read', False)
})
log_type = Log.SCHEDULE_EMAIL_EDIT
elif schedule_item.action.action_type == Action.PERSONALIZED_JSON:
ivalue = None
if schedule_item.item_column:
ivalue = schedule_item.item_column.name
log_payload.update({
'item_column': ivalue,
'token': schedule_item.payload.get('subject')
})
log_type = Log.SCHEDULE_JSON_EDIT
else:
log_type = None
# Create the log
Log.objects.register(request.user,
log_type,
schedule_item.action.workflow,
log_payload)
# Notify the user. Show the time left until execution and a link to
# view the scheduled events with possibility of editing/deleting.
# Successful processing.
now = datetime.now(pytz.timezone(settings.TIME_ZONE))
tdelta = schedule_item.execute - now
# Reset object to carry action info throughout dialogs
request.session[session_dictionary_name] = {}
request.session.save()
# Create the timedelta string
delta_string = ''
if tdelta.days != 0:
delta_string += ugettext('{0} days').format(tdelta.days)
hours = tdelta.seconds // 3600
if hours != 0:
delta_string += ugettext(', {0} hours').format(hours)
minutes = (tdelta.seconds % 3600) // 60
if minutes != 0:
delta_string += ugettext(', {0} minutes').format(minutes)
# Successful processing.
return render(request,
'scheduler/schedule_done.html',
{'tdelta': delta_string,
's_item': schedule_item}) | fa0fb4648ef9d750eca9f19ea435fd57ab433ad8 | 1,824 |
import json
def analyze(results_file, base_path):
"""
Parse and print the results from gosec audit.
"""
# Load gosec json Results File
with open(results_file) as f:
issues = json.load(f)['Issues']
if not issues:
print("Security Check: No Issues Detected!")
return ([], [], [])
else:
high_risk = list()
medium_risk = list()
low_risk = list()
# Sort Issues
for issue in issues:
if issue['severity'] == 'HIGH':
high_risk.append(issue)
elif issue['severity'] == 'MEDIUM':
medium_risk.append(issue)
elif issue['severity'] == 'LOW':
low_risk.append(issue)
# Print Summary
print()
print('Security Issue Summary:')
print(' Found ' + str(len(high_risk)) + ' High Risk Issues')
print(' Found ' + str(len(medium_risk)) + ' Medium Risk Issues')
print(' Found ' + str(len(low_risk)) + ' Low Risk Issues')
# Print Issues In Order of Importance
if high_risk:
header = ('= High Security Risk Issues =')
print_category(header, high_risk, base_path)
if medium_risk:
header = ('= Medium Security Risk Issues =')
print_category(header, medium_risk, base_path)
if low_risk:
header = ('= Low Security Risk Issues =')
print_category(header, low_risk, base_path)
return (high_risk, medium_risk, low_risk) | a016f4ba389305103c9bbab1db94706053237e5a | 1,825 |
import numpy as np
def _peaks(image,nr,minvar=0):
"""Divide image into nr quadrants and return peak value positions."""
n = np.ceil(np.sqrt(nr))
quadrants = _rects(image.shape,n,n)
peaks = []
for q in quadrants:
q_image = image[q.as_slice()]
q_argmax = q_image.argmax()
q_maxpos = np.unravel_index(q_argmax,q.shape)
if q_image.flat[q_argmax] > minvar:
peaks.append(np.array(q_maxpos) + q.origin)
return peaks | f7ecc3e5fafd55c38a85b4e3a05a04b25cbd97cf | 1,826 |
from pprint import pformat
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, e) | 1ce07b2f8f3e5e15c5f5cae8f7642ab97201a559 | 1,827 |
def connection_type_validator(type):
"""
Property: ConnectionInput.ConnectionType
"""
valid_types = [
"CUSTOM",
"JDBC",
"KAFKA",
"MARKETPLACE",
"MONGODB",
"NETWORK",
"SFTP",
]
if type not in valid_types:
raise ValueError("% is not a valid value for ConnectionType" % type)
return type | cc2ed6096097c719b505356e69a5bb5cdc109495 | 1,828 |
import matplotlib.pyplot as plt
def plot_time_series(meter_data, temperature_data, **kwargs):
""" Plot meter and temperature data in dual-axes time series.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
**kwargs
Arbitrary keyword arguments to pass to
:any:`plt.subplots <matplotlib.pyplot.subplots>`
Returns
-------
axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
Tuple of ``(ax_meter_data, ax_temperature_data)``.
"""
    # TODO(philngo): include image in docs.
    figure = kwargs.pop('figure', None)
    default_kwargs = {"figsize": (16, 4)}
    default_kwargs.update(kwargs)
    if not figure:
        try:
            import matplotlib.pyplot as plt
        except ImportError:  # pragma: no cover
            raise ImportError("matplotlib is required for plotting.")
        fig, ax1 = plt.subplots(**default_kwargs)
    else:
        fig = figure
        ax1 = figure.subplots()
ax1.plot(
meter_data.index,
meter_data.value,
color="C0",
label="Energy Use",
drawstyle="steps-post",
)
ax1.set_ylabel("Energy Use")
ax2 = ax1.twinx()
ax2.plot(
temperature_data.index,
temperature_data,
color="C1",
label="Temperature",
alpha=0.8,
)
ax2.set_ylabel("Temperature")
fig.legend()
return ax1, ax2 | 9e77f5e997d86ccb6932b3f192f979b3630d9458 | 1,829 |
import calendar
import time
def render_pretty_time(jd):
"""Convert jd into a pretty string representation"""
year, month, day, hour_frac = sweph.revjul(jd)
_, hours, minutes, seconds = days_frac_to_dhms(hour_frac/24)
time_ = calendar.timegm((year,month,day,hours,minutes,seconds,0,0,0))
return time.strftime('%e %b %Y %H:%M UTC', time.gmtime(time_)) | 07c63429ae7881fbdec867e8bebab7578bfaacdd | 1,830 |
import json
from flask import Response
def jsonify(obj):
"""Dump an object to JSON and create a Response object from the dump.
Unlike Flask's native implementation, this works on lists.
"""
dump = json.dumps(obj)
return Response(dump, mimetype='application/json') | 72e1fb425507d5905ef96de05a146805f5aa4175 | 1,831 |
def section(stree):
"""
Create sections in a :class:`ScheduleTree`. A section is a sub-tree with
the following properties: ::
* The root is a node of type :class:`NodeSection`;
* The immediate children of the root are nodes of type :class:`NodeIteration`
and have same parent.
* The :class:`Dimension` of the immediate children are either: ::
* identical, OR
* different, but all of type :class:`SubDimension`;
* The :class:`Dimension` of the immediate children cannot be a
:class:`TimeDimension`.
"""
class Section(object):
def __init__(self, node):
self.parent = node.parent
self.dim = node.dim
self.nodes = [node]
def is_compatible(self, node):
return (self.parent == node.parent
and (self.dim == node.dim or node.dim.is_Sub))
# Search candidate sections
sections = []
for i in range(stree.height):
# Find all sections at depth `i`
section = None
for n in findall(stree, filter_=lambda n: n.depth == i):
if any(p in flatten(s.nodes for s in sections) for p in n.ancestors):
# Already within a section
continue
elif not n.is_Iteration or n.dim.is_Time:
section = None
elif section is None or not section.is_compatible(n):
section = Section(n)
sections.append(section)
else:
section.nodes.append(n)
# Transform the schedule tree by adding in sections
for i in sections:
node = NodeSection()
processed = []
for n in list(i.parent.children):
if n in i.nodes:
n.parent = node
if node not in processed:
processed.append(node)
else:
processed.append(n)
i.parent.children = processed
return stree | edd6682d1ff2a637049a801d548181d35e07961a | 1,832 |
def was_csv_updated() -> bool:
""" This function compares the last modified time on the csv file to the
actions folder to check which was last modified.
1. check if csv or files have more actions.
2. if same number of actions, assume the update was made in the csv
"""
csv_actions = get_cas_from_csv()
file_actions = get_cas_from_files()
return (
True
if len(csv_actions) >= len(file_actions)
else False
) | 7cf78696fa59e8abbe968916191600a265c96305 | 1,833 |
import math
def MakeBands(dR, numberOfBands, nearestInteger):
"""
Divide a range into bands
:param dR: [min, max] the range that is to be covered by the bands.
:param numberOfBands: the number of bands, a positive integer.
:param nearestInteger: if True then [floor(min), ceil(max)] is used.
:return: A List consisting of [min, midpoint, max] for each band.
"""
bands = list()
if (dR[1] < dR[0]) or (numberOfBands <= 0):
return bands
x = list(dR)
if nearestInteger:
x[0] = math.floor(x[0])
x[1] = math.ceil(x[1])
dx = (x[1] - x[0]) / float(numberOfBands)
b = [x[0], x[0] + dx / 2.0, x[0] + dx]
i = 0
while i < numberOfBands:
bands.append(b)
b = [b[0] + dx, b[1] + dx, b[2] + dx]
i += 1
return bands | 104720371d1f83bf2ee2c8fddbf05401ec034560 | 1,834 |
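A worked example of the band construction above: dividing the range [1.0, 4.0] into three bands of width 1.0.
print(MakeBands([1.0, 4.0], 3, False))
# [[1.0, 1.5, 2.0], [2.0, 2.5, 3.0], [3.0, 3.5, 4.0]]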
import math
def euler719(n=10**12):
"""Solution for problem 719."""
return sum(i*i
for i in range(2, 1 + int(math.sqrt(n)))
if can_be_split_in_sum(i*i, i)) | 3f814ed837ad58f73f901a81af34ac31b520b372 | 1,835 |
def inner(a, b):
"""
Inner product of two tensors.
Ordinary inner product of vectors for 1-D tensors (without complex
conjugation), in higher dimensions a sum product over the last
axes.
Note:
Numpy argument out is not supported.
On GPU, the supported dtypes are np.float16, and np.float32.
On CPU, the supported dtypes are np.float16, np.float32, and
np.float64.
Args:
a (Tensor): input tensor. If a and b are nonscalar, their last
dimensions must match.
b (Tensor): input tensor. If a and b are nonscalar, their last
dimensions must match.
Returns:
Tensor or scalar, out.shape = a.shape[:-1] + b.shape[:-1].
Raises:
ValueError: if x1.shape[-1] != x2.shape[-1].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> a = np.ones((5, 3))
>>> b = np.ones((2, 7, 3))
>>> output = np.inner(a, b)
>>> print(output)
[[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]]
"""
if F.rank(a) == 0 or F.rank(b) == 0:
a = _expand(a, 1)
b = _expand(b, 1)
if F.rank(a) < F.rank(b):
a, b = b, a
return F.tensor_mul(a, b)
_ = _check_shape_aligned(F.shape(a), F.shape(b))
aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1])
aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1])
a_aligned = F.reshape(a, aligned_shape_a)
b_aligned = F.reshape(b, aligned_shape_b)
res = _matmul_T(a_aligned, b_aligned)
res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
return res | b09ee6e22fd6c9bd7c1fd758fa62b38ad8fae1ab | 1,836 |
import colorama
def stern_warning(warn_msg: str) -> str:
"""Wraps warn_msg so that it prints in red."""
return _reg(colorama.Fore.RED, warn_msg) | 639f0f6aaf3ce1f6ad46ed0f5d852be3457337fb | 1,837 |
def alt2temp_ratio(H, alt_units=default_alt_units):
"""
Return the temperature ratio (temperature / standard temperature for
sea level). The altitude is specified in feet ('ft'), metres ('m'),
statute miles, ('sm') or nautical miles ('nm').
If the units are not specified, the units in default_units.py are used.
Examples:
Calculate the temperature ratio at 8,000 (default altitude units)
>>> alt2temp_ratio(8000)
0.94499531494013533
Calculate the temperature ratio at 8,000 m.
>>> alt2temp_ratio(8000, alt_units = 'm')
0.81953843484296374
"""
# function tested in tests/test_std_atm.py
return alt2temp(H, alt_units, temp_units='K') / T0 | c46ca3d63169676ccda223f475927a902a82a15e | 1,838 |
import pybase64
def encode_message(key, message):
""" Encodes the message (string) using the key (string) and
pybase64.urlsafe_b64encode functionality """
keycoded = []
if not key:
key = chr(0)
# iterating through the message
for i in range(len(message)):
# assigning a key_character based on the given key
key_character = key[i % len(key)]
# each char of the message has the key_char added (in ascii values)
# and is converted back to normal, and appended to the keycoded values
keycoded.append(
chr((ord(message[i]) + ord(key_character)) % 256)
)
encoded = pybase64.urlsafe_b64encode(
"".join(keycoded).encode() # convert to bytes object (builtin)
).decode() # back to text
return encoded | ea3a5403878dc58f1faa586c9851863a670c8cd0 | 1,839 |
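A possible inverse of the encoder above, written here only as a hypothetical sketch: it undoes the URL-safe base64 step with the standard library and then subtracts the per-character key shift.
import base64
def decode_message(key, encoded):
    """Hypothetical counterpart to encode_message: base64-decode, then reverse the key shift."""
    if not key:
        key = chr(0)
    keycoded = base64.urlsafe_b64decode(encoded.encode()).decode()
    return "".join(
        chr((ord(keycoded[i]) - ord(key[i % len(key)])) % 256)
        for i in range(len(keycoded))
    )
# Round trip: decode_message("secret", encode_message("secret", "hello")) == "hello"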
import argparse
import os
def cfg():
"""Configuration of argument parser."""
parser = argparse.ArgumentParser(
description="Crawl SolarEdge and stores results in InfluxDB",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# TODO: error message when missing env variable
parser.add_argument('--gardena-email', required=False, default=os.getenv('GARDENA_EMAIL'), help="Gardena email") # noqa
parser.add_argument('--gardena-password', required=False, default=os.getenv('GARDENA_PASSWORD'), help="Gardena password") # noqa
parser.add_argument('--gardena-application-id', required=False, default=os.getenv('GARDENA_APPLICATION_ID'), help="Gardena application id") # noqa
parser.add_argument('--influxdb-host', required=False, default=os.getenv('INFLUXDB_HOST'), help="influx db host") # noqa
parser.add_argument('--influxdb-port', required=False, default=os.getenv('INFLUXDB_PORT'), help="influx db port") # noqa
parser.add_argument('--influxdb-user', required=False, default=os.getenv('INFLUXDB_USER'), help="influx db user") # noqa
parser.add_argument('--influxdb-pass', required=False, default=os.getenv('INFLUXDB_PASS'), help="influx db password") # noqa
parser.add_argument('--influxdb-db', required=False, default=os.getenv('INFLUXDB_DB'), help="influx db database") # noqa
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args() | 1aba01ccc8d0b99227d541d56e866fabd62a3941 | 1,840 |
from IPython.core.debugger import Pdb
from IPython.Debugger import Pdb
from IPython.Shell import IPShell
import warnings
def get_debugger():
"""
Returns a debugger instance
"""
try:
pdb = Pdb()
except ImportError:
try:
IPShell(argv=[""])
pdb = Pdb()
except ImportError:
warnings.warn(
'pdb was selected as a debugger. If you want to use ipython as a debugger you have to "pip install radish-bdd[ipython-debugger]"'
)
return pdb | bfc1600260b62dee8b89145635044ea6f0754064 | 1,841 |
def add_sibling(data, node_path, new_key, new_data, _i=0):
"""
Traversal-safe method to add a siblings data node.
:param data: The data object you're traversing.
:param node_path: List of path segments pointing to the node you're creating a
sibling of. Same as node_path of traverse()
:param new_key: The sibling key to create.
:param new_data: The new data to be stored at the key.
"""
if _i < len(node_path) - 1:
return add_sibling(data[node_path[_i]], node_path, new_key, new_data, _i + 1)
else:
data[new_key] = new_data | 4bc11315eab686659edc9f7eb8479508d3ca37fb | 1,842 |
def draw_pnl(ax, df):
"""
Draw p&l line on the chart.
"""
ax.clear()
ax.set_title('Performance')
index = df.index.unique()
dt = index.get_level_values(level=0)
pnl = index.get_level_values(level=4)
ax.plot(
dt, pnl, '-',
color='green',
linewidth=1.0,
label='Performance'
)
def perc(val):
return '{:.2f}'.format(val)
ax.format_ydata = perc
set_legend(ax)
format_ax(ax) | 6210c1861943bf61a7df8dbe2124f8f0f5e77e89 | 1,843 |
def maxRstat(Z, R, i):
"""
Return the maximum statistic for each non-singleton cluster and its
children.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
See Also
--------
linkage : for a description of what a linkage matrix is.
inconsistent : for the creation of a inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> R = inconsistent(Z)
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.74535599, 1.08655358, 3. , 1.15470054],
[1.91202266, 1.37522872, 3. , 1.15470054],
[3.25 , 0.25 , 3. , 0. ]])
`scipy.cluster.hierarchy.maxRstat` can be used to compute
the maximum value of each column of ``R``, for each non-singleton
cluster and its children:
>>> maxRstat(Z, R, 0)
array([1. , 1. , 1. , 1. , 1.05901699,
1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
3.25 ])
>>> maxRstat(Z, R, 1)
array([0. , 0. , 0. , 0. , 0.08346263,
0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
1.37522872])
>>> maxRstat(Z, R, 3)
array([0. , 0. , 0. , 0. , 0.70710678,
0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
1.15470054])
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR | d63c6370e9d896a2e315012fa92fa650d1acaee8 | 1,844 |
import re
def strip_characters(text):
"""Strip characters in text."""
t = re.sub(r'\(|\)|:|,|;|\.|’|”|“|\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t | 763ddc837ef9be19aa067e362c312ebd88632ed7 | 1,845 |
import os
def get_trained_model(datapath, dataset, image_size, nb_labels):
"""Recover model weights stored on the file system, and assign them into
the `model` structure
Parameters
----------
datapath : str
Path of the data on the file system
dataset : str
Name of the dataset
image_size : int
Image size, in pixels (height=width)
nb_labels : int
Number of output labels
Returns
-------
keras.models.Model
Convolutional neural network
"""
K.clear_session()
net = SemanticSegmentationNetwork(
network_name="semseg_postprocessing",
image_size=image_size,
nb_labels=nb_labels,
dropout=1.0,
architecture="unet",
)
model = Model(net.X, net.Y)
output_folder = utils.prepare_output_folder(
datapath, dataset, "semseg"
)
checkpoint_filename = "best-model-" + str(image_size) + "-full" + ".h5"
checkpoint_full_path = os.path.join(output_folder, checkpoint_filename)
if os.path.isfile(checkpoint_full_path):
model.load_weights(checkpoint_full_path)
logger.info(
"Model weights have been recovered from %s" % checkpoint_full_path
)
else:
logger.info(
(
"No available trained model for this image size"
" with optimized hyperparameters. The "
"inference will be done on an untrained model"
)
)
return model | 66d685553989a6e03df0b6db4891b5b423a6f937 | 1,846 |
def make_std_secgroup(name, desc="standard security group"):
"""
Returns a standarized resource group with rules for ping and ssh access.
The returned resource can be further configured with additional rules by the
caller.
The name parameter is used to form the name of the ResourceGroup, and also
provides the name of the SecGroup that is created in the ResourceGroup.
"""
return ResourceGroup("%s_std_secgroup" % name,
group=SecGroup(name, desc),
ping_rule=SecGroupRule("ping_rule",
ctxt.comp.container.group,
ip_protocol="icmp",
from_port=-1, to_port=-1),
ssh_rule=SecGroupRule("ssh_rule",
ctxt.comp.container.group,
ip_protocol="tcp",
from_port=22, to_port=22),
) | b53a65bdc04871c0d7ca56574c1852906b2d9351 | 1,847 |
def parse_plot_args(*args, **options):
"""Parse the args the same way plt.plot does."""
x = None
y = None
style = None
if len(args) == 1:
y = args[0]
elif len(args) == 2:
if isinstance(args[1], str):
y, style = args
else:
x, y = args
elif len(args) == 3:
x, y, style = args
return x, y, style | 7687ed00785c1ab20fdf2f7bdc969fde3c75840f | 1,848 |
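A few usage examples showing how the argument handling above mirrors plt.plot's calling conventions.
print(parse_plot_args([1, 4, 9]))             # (None, [1, 4, 9], None)
print(parse_plot_args([1, 4, 9], "ro"))       # (None, [1, 4, 9], 'ro')
print(parse_plot_args([0, 1, 2], [1, 4, 9]))  # ([0, 1, 2], [1, 4, 9], None)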
def publications_classification_terms_get(search=None): # noqa: E501
"""List of Classification Terms
List of Classification Terms # noqa: E501
:param search: search term applied
:type search: str
:rtype: ApiOptions
"""
return 'do some magic!' | 6633c91d59a5df7805979bd85a01f8eb1c269946 | 1,849 |
def lu_decompose(tri_diagonal):
"""Decompose a tri-diagonal matrix into LU form.
Parameters
----------
tri_diagonal : TriDiagonal
Represents the matrix to decompose.
"""
# WHR Appendix B: perform LU decomposition
#
# d[0] = hd[0]
# b[i] = hu[i]
#
# Iterative algorithm:
# d[i] = hd[i] - hu[i-1] a[i-1]
# a[i] = hl[i] / d[i]
hd, hu, hl = tri_diagonal
b = hu
# We want to vectorize the calculation of d and a as much as possible,
# instead of using WHR's iterative algorithm directly.
#
# Substitute a[i-1] into the expression for d[i] to get a recurrence
# relation for d:
#
# d[i] = hd[i] - hu[i-1] a[i-1]
# = hd[i] - hu[i-1] * hl[i-1] / d[i-1]
#
# Let c[i] = hu[i-1] * hl[i-1].
# c[0] = 0, which is meaningless but convenient for the helper.
#
# d[i] = hd[i] - c[i] / d[i-1]
c = np.empty_like(hd)
c[0] = 0.0
np.multiply(hu, hl, c[1:])
np.negative(c, c)
d = hd.copy()
solve_lu_d(c, d)
# a[i] = hl[i] / d[i]
a = np.divide(hl, d[:-1])
return TriDiagonalLU(d, b, a) | 423bb853d96b534055bd00b3c768158c86826b1b | 1,850 |
def _card(item):
"""Handle card entries
Returns: title (append " - Card" to the name,
username (Card brand),
password (card number),
url (none),
notes (including all card info)
"""
notes = item.get('notes', "") or ""
# Add card info to the notes
notes = notes + ("\n".join([f"{i}: {j}" for i, j in item.get('card', {}).items()]))
return f"{item['name']} - Card", \
item.get('card', {}).get('brand', '') or "", \
item.get('card', {}).get('number', "") or "", \
"", \
notes | fc7d5e4b960019b05ffe7ca02fd3d1a94d69b303 | 1,851 |
def s3():
"""Boto3 S3 resource."""
return S3().resource | 6402deaafa2ae7d599de1c8e8c67b9e669c06463 | 1,852 |
def SUE(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \
xskew=None,yskew=None,xmin=None,xmax=None,ymin=None,ymax=None, \
Npt=300,xisln=False,yisln=False):
"""
SKEWED UNCERTAINTY ELLIPSES (SUE)
Function to plot uncertainty SUEs (or 1 sigma contour of a bivariate
split-normal distribution). The parameters are the means (xmean,ymean), the
standard deviations (xstdev,ystdev), the skewnesses (xskew,yskew) and the
correlation coefficients (rho). The optional bounds (xmin,xmax,ymin,ymax)
have the effect of truncating the SUEs in case there is a range of
parameter space that is forbidden.
It is important to notice that the xisln/yisln parameters are not related to
the log settings of the axes where we plot the SUE, but are here to
indicate that the moments of the variable to plot correspond to the natural
logarithm (ln) of the variable we want to display. For instance, for
displaying the ellipses of (x,y) where, for x, the moments are those of lnx,
we would write:
SUE(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \
ystdev=stdev_of_y,xskew=skewness_of_lnx,yskew=skewness_of_y, \
rho=correl_coeff_of_lnx_and_y,xisln=True)
"""
# Rotation angle
theta = 1./2 * np.arctan( 2*rho*xstdev*ystdev / (xstdev**2-ystdev**2) )
# Numerically solve for taux and tauy (tau=1.D2 ==> skew=0.99)
taugrid = ramp(N=10000,x0=1.E-2,x1=1.E2,log=True)
Ax = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*xskew*xstdev**3 \
+ (np.sin(theta))**3*yskew*ystdev**3 ) \
/ ( (np.sin(theta))**6 + (np.cos(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) )**1.5
Ay = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*yskew*ystdev**3 \
- (np.sin(theta))**3*xskew*xstdev**3 ) \
/ ( (np.cos(theta))**6 + (np.sin(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) )**1.5
taux = np.exp(np.interp(Ax,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
tauy = np.exp(np.interp(Ay,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
if (not np.isfinite(taux) or taux > 1.E2): taux = 1.E2
if (not np.isfinite(tauy) or tauy > 1.E2): tauy = 1.E2
# Rest of the parameters
lambdax = np.sqrt( ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(taux) )
lambday = np.sqrt( ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(tauy) )
x0 = xmean - np.sqrt(2/np.pi) * ( np.cos(theta)*lambdax*(taux-1) \
- np.sin(theta)*lambday*(tauy-1) )
y0 = ymean - np.sqrt(2/np.pi) * ( np.sin(theta)*lambdax*(taux-1) \
+ np.cos(theta)*lambday*(tauy-1) )
# Draw the SUE
matrot = np.array([ [ np.cos(theta), -np.sin(theta) ], \
[ np.sin(theta), np.cos(theta) ] ])
xell_ax1 = np.zeros(2)
yell_ax1 = np.zeros(2)
xell_ax2 = np.zeros(2)
yell_ax2 = np.zeros(2)
for k in np.arange(4):
if (k == 0):
xell_sub = ramp(N=Npt,x0=-lambdax,x1=0) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 1):
xell_sub = ramp(N=Npt,x0=0,x1=lambdax*taux) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 2):
xell_sub = (ramp(N=Npt,x0=0,x1=lambdax*taux))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 3):
xell_sub = (ramp(N=Npt,x0=-lambdax,x1=0))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
# Add the limit case (half ellipse)
mask = np.logical_and(np.isfinite(yell_sub),np.isfinite(xell_sub))
xell_sub = xell_sub[mask]
yell_sub = yell_sub[mask]
Nsub = np.count_nonzero(mask)
# Rotate the ellipse
for j in np.arange(Nsub):
vecell = np.matmul(matrot, \
np.array([xell_sub[j]-x0,yell_sub[j]-y0]))
xell_sub[j] = vecell[0] + x0
yell_sub[j] = vecell[1] + y0
if (k == 0):
xell = xell_sub
yell = yell_sub
else:
xell = np.concatenate((xell,xell_sub))
yell = np.concatenate((yell,yell_sub))
xplot = np.concatenate((xell,[xell[0]]))
yplot = np.concatenate((yell,[yell[0]]))
# Logs and limits
if (xisln):
xplot = np.exp(xplot)
x0 = np.exp(x0)
if (yisln):
yplot = np.exp(yplot)
y0 = np.exp(y0)
if (xmin != None):
xplot[xplot < xmin] = xmin
if (x0 < xmin): x0 = xmin
if (xmax != None):
xplot[xplot > xmax] = xmax
if (x0 > xmax): x0 = xmax
if (ymin != None):
yplot[yplot < ymin] = ymin
if (y0 < ymin): y0 = ymin
if (ymax != None):
yplot[yplot > ymax] = ymax
if (y0 > ymax): y0 = ymax
return(xplot,yplot,x0,y0) | 8b298a2d2ba04a3f1262205f19b1993a4701e279 | 1,853 |
def create_label(places, size, corners, resolution=0.50, x=(0, 90), y=(-50, 50), z=(-4.5, 5.5), scale=4, min_value=np.array([0., -50., -4.5])):
"""Create training Labels which satisfy the range of experiment"""
x_logical = np.logical_and((places[:, 0] < x[1]), (places[:, 0] >= x[0]))
y_logical = np.logical_and((places[:, 1] < y[1]), (places[:, 1] >= y[0]))
z_logical = np.logical_and((places[:, 2] + size[:, 0]/2. < z[1]), (places[:, 2] + size[:, 0]/2. >= z[0]))
xyz_logical = np.logical_and(x_logical, np.logical_and(y_logical, z_logical))
center = places.copy()
center[:, 2] = center[:, 2] + size[:, 0] / 2. # Move bottom to center
sphere_center = ((center[xyz_logical] - min_value) / (resolution * scale)).astype(np.int32)
train_corners = corners[xyz_logical].copy()
anchor_center = sphere_to_center(sphere_center, resolution=resolution, scale=scale, min_value=min_value) #sphere to center
for index, (corner, center) in enumerate(zip(corners[xyz_logical], anchor_center)):
train_corners[index] = corner - center
return sphere_center, train_corners | 1ae1ed49674fbcee15fb6a8201e69be2c82630f9 | 1,854 |
def usage():
"""Serve the usage page."""
return render_template("meta/access.html") | 909272906678c9980f379342b87c8af6a00ab89c | 1,855 |
def GetCache(name, create=False):
"""Returns the cache given a cache indentfier name.
Args:
name: The cache name to operate on. May be prefixed by "resource://" for
resource cache names or "file://" for persistent file cache names. If
only the prefix is specified then the default cache name for that prefix
is used.
create: Creates the persistent cache if it exists if True.
Raises:
CacheNotFound: If the cache does not exist.
Returns:
The cache object.
"""
types = {
'file': file_cache.Cache,
'resource': resource_cache.ResourceCache,
}
def _OpenCache(cache_class, name):
try:
return cache_class(name, create=create)
except cache_exceptions.Error as e:
raise Error(e)
if name:
for cache_id, cache_class in types.iteritems():
if name.startswith(cache_id + '://'):
name = name[len(cache_id) + 3:]
if not name:
name = None
return _OpenCache(cache_class, name)
return _OpenCache(resource_cache.Cache, name) | b8e1796d772506d4abb9f8261df33b4cf6777934 | 1,856 |
def rf_local_divide_int(tile_col, scalar):
"""Divide a Tile by an integral scalar"""
return _apply_scalar_to_tile('rf_local_divide_int', tile_col, scalar) | 0a8c44cafcc44d323fb931fc1b037759ad907d18 | 1,857 |
from typing import Any
from typing import Dict
def or_(*children: Any) -> Dict[str, Any]:
"""Select devices that match at least one of the given selectors.
>>> or_(tag('sports'), tag('business'))
{'or': [{'tag': 'sports'}, {'tag': 'business'}]}
"""
return {"or": [child for child in children]} | 0bda8654ddc0f5dac80c8eb51b0d6d55b57c9e2a | 1,858 |
def get_width_and_height_from_size(x):
""" Obtains width and height from a int or tuple """
if isinstance(x, int): return x, x
if isinstance(x, list) or isinstance(x, tuple): return x
else: raise TypeError() | 581c9f332613dab5de9b786ce2bac3387ee1bd3b | 1,859 |
def remove_stopwords(lista,stopwords):
"""Function to remove stopwords
Args:
lista ([list]): list of texts
stopwords ([list]): [description]
Returns:
[list]: List of texts without stopwords
"""
lista_out = list()
for idx, text in enumerate(lista):
text = ' '.join([word for word in text.split() if word not in stopwords])
text = text.strip()
lista_out.append(text)
#print("Len original: {} - Len processed stopwords: {}".format(len(lista),len(lista_out)))
return lista_out | edca74bb3a041a65a628fcd3f0c71be5ad4858df | 1,860 |
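A short usage example of the stop-word filter above.
texts = ["this is a small test", "the cat sat on the mat"]
stopwords = ["is", "a", "the", "on"]
print(remove_stopwords(texts, stopwords))  # ['this small test', 'cat sat mat']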
import time
import io
import os
def Skeletonize3D(directory, crop=None, flip='y', dtype=None):
"""Skeletonize TrailMap results.
Parameters
----------
directory : string
Path to directory with segmented data.
crop : dict (optional, default None)
Dictionary with ImageJ-format cropping coordinates ({width:, height:, x:, y:,})
flip : string (optional, default 'y')
Option to flip axis, can be any combination of 'xyz'.
dtype : numpy dtype (optional, default None results in float32 images)
Data type for output image. Set dtype=np.uint16 if you are going to combine with autofluo in Imaris.
"""
#Load Data:
sample = directory.split('/')[-3]
print("Started " + time.ctime())
ims = io.ImageCollection(os.path.join(directory, '*.tif'), load_func=io.imread)
data = ims.concatenate()
#Optionally crop:
if crop:
rawshape=data.shape
data = data[:,crop['y']:crop['y']+crop['height'],crop['x']:crop['x']+crop['width']]
print("Cropped data from " + str(rawshape) + " to " + str(data.shape) + " at " + time.ctime())
cat = np.zeros(shape=(data.shape), dtype='float32') #Create output array
#Loop through thresholds 0.2 -> 0.9, extract signal, scale, and combine
for i in range(2,10,1):
print(str(i) + " started at " + time.ctime())
i=i/10
im = (data>i).astype('float32')
skel = morphology.skeletonize_3d(im).astype('float32')*i
print(str(i) + " completed at " + time.ctime())
cat = cat+skel
#Optionally flip along the x, y, or z axis:
if flip:
if 'y' in flip:
cat = np.flip(cat, axis=1)
if 'x' in flip:
cat = np.flip(cat, axis=2)
if 'z' in flip:
cat = np.flip(cat, axis=0)
if dtype:
cat = cat.astype(dtype) #have not tested that this results in same pixel values as changing image type in ImageJ.
#Save the result image stack:
try:
io.imsave(os.path.join(directory, sample + '_ThresholdedSkeleton3D.tif'), cat, check_contrast=False)
except PermissionError:
print("You do not have write permissions for " + str(directory) + '\n' + "Saving to your home directory instead.")
homedir = os.path.expanduser('~/')
io.imsave(os.path.join(homedir, sample + '_ThresholdedSkeleton3D.tif'), cat, check_contrast=False)
print("Finished " + sample + ' ' + time.ctime())
return cat | 72eaf148cad51df01c9487b12a492a5bd2cd8661 | 1,861 |
def get_users_report(valid_users, ibmcloud_account_users):
"""get_users_report()"""
users_report = []
valid_account_users = []
invalid_account_users = []
# use case 1: find users in account not in valid_users
for account_user in ibmcloud_account_users:
# check if account user is in valid_users
is_valid_user=False
for valid_user in valid_users:
if ( account_user["email"] == valid_user["email"] ):
account_user["name"] = valid_user["name"]
account_user["identities"] = valid_user["identities"]
if "resourceGroups" in valid_user:
account_user["resourceGroups"] = valid_user["resourceGroups"]
account_user["manager"] = valid_user["manager"]
account_user["association"] = valid_user["association"]
is_valid_user=True
if is_valid_user:
valid_account_users.append(account_user)
else:
invalid_account_users.append(account_user)
users_report = {
"valid_account_users" : valid_account_users,
"invalid_account_users" : invalid_account_users
}
return users_report | a96f8835496f82d8b6f8cd4f248ed8a03676795b | 1,862 |
def insert_bn(names):
"""Insert bn layer after each conv.
Args:
names (list): The list of layer names.
Returns:
list: The list of layer names with bn layers.
"""
names_bn = []
for name in names:
names_bn.append(name)
if 'conv' in name:
position = name.replace('conv', '')
names_bn.append(f'bn{position}')
return names_bn | efe1e6a3218fb33f74c17f90a06e2d18d17442e5 | 1,863 |
def convert_format(parameters):
"""Converts dictionary database type format to serial transmission format"""
values = parameters.copy()
for key, (index, format, value) in values.items():
if type(format) == type(db.Int):
values[key] = (index, 'i', value) # signed 32 bit int (arduino long)
elif type(format) == type(db.Int16):
values[key] = (index, 'h', value)
elif type(format) == type(db.Float):
values[key] = (index, 'f', value)
elif type(format) == type(db.String32):
values[key] = (index, 's', value)
elif type(format) == type(db.StringN):
values[key] = (index, 's', value)
elif type(format) == type(db.Time):
values[key] = (index, 'd', value)
return values | fae756d54cbef6ecc7de07d123513b773ccf1433 | 1,864 |
def prepare(_config):
"""
Preparation of the train and validation datasets for the training and initialization of the padertorch trainer,
using the configuration dict.
Args:
_config: Configuration dict of the experiment
Returns:
3-Tuple of the prepared datasets and the trainer.
trainer: padertorch trainer
train_dataset: training_dataset
validate_dataset: dataset for validation
"""
# Extraction needed strings from the config dict
train_dataset_name = _config['train_dataset_name']
validate_dataset_name = _config['validate_dataset_name']
database_json = _config['database_json']
# Initialization of the trainer
trainer = pt.Trainer.from_config(_config["trainer"])
db = JsonDatabase(json_path=database_json)
# Preparation of the datasets
train_dataset = prepare_dataset(db, train_dataset_name,
_config['batch_size'],
prefetch = not _config['debug'])
validate_dataset = prepare_dataset(db, validate_dataset_name,
_config['batch_size'],
prefetch = not _config['debug'])
# Print the representations of the two datasets to the console.
print(repr(train_dataset_name), repr(validate_dataset_name))
return (trainer, train_dataset, validate_dataset) | eab0f93d0682dd8aae8ec8c751b9ff795c9f68e2 | 1,865 |
import warnings
def getproj4(epsg):
"""
Get projection file (.prj) text for given epsg code from
spatialreference.org. See: https://www.epsg-registry.org/
.. deprecated:: 3.2.11
This function will be removed in version 3.3.5. Use
:py:class:`flopy.discretization.structuredgrid.StructuredGrid` instead.
Parameters
----------
epsg : int
epsg code for coordinate system
Returns
-------
prj : str
text for a projection (*.prj) file.
"""
warnings.warn(
"SpatialReference has been deprecated and will be removed in version "
"3.3.5. Use StructuredGrid instead.",
category=DeprecationWarning,
)
return get_spatialreference(epsg, text="proj4") | 80dccf9722f7dd45cca87dcd78775868cfe545ad | 1,866 |
from re import compile
def vpn_tunnel_inside_cidr(cidr):
"""
Property: VpnTunnelOptionsSpecification.TunnelInsideCidr
"""
reserved_cidrs = [
"169.254.0.0/30",
"169.254.1.0/30",
"169.254.2.0/30",
"169.254.3.0/30",
"169.254.4.0/30",
"169.254.5.0/30",
"169.254.169.252/30",
]
cidr_match_re = compile(
r"^169\.254\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)"
r"\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\/30$"
)
if cidr in reserved_cidrs:
raise ValueError(
'The following CIDR blocks are reserved and cannot be used: "%s"'
% (", ".join(reserved_cidrs))
)
elif not cidr_match_re.match(cidr):
raise ValueError(
"%s is not a valid CIDR."
" A size /30 CIDR block from the 169.254.0.0/16 must be specified." % cidr
)
return cidr | 01807a4db2fc80cf8253b0e000e412b0dce1a528 | 1,867 |
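A usage sketch of the validator above: a non-reserved /30 inside 169.254.0.0/16 is returned unchanged, while a reserved block raises.
print(vpn_tunnel_inside_cidr("169.254.6.0/30"))   # '169.254.6.0/30'
try:
    vpn_tunnel_inside_cidr("169.254.1.0/30")      # reserved tunnel block
except ValueError as err:
    print(err)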
def choose_media_type(accept, resource_types):
"""choose_media_type(accept, resource_types) -> resource type
select a media type for the response
accept is the Accept header from the request. If there is no Accept header, '*/*' is assumed. If the Accept header cannot be parsed, HTTP400BadRequest is raised.
resource_types is an ordered list of available resource types, with the most desirable type first.
To find a match, the types in the Accept header are ordered by q value (descending), and each is compared with the available resource types in order. The first matching media type is returned.
If not match is found, HTTP406NotAcceptable is raised.
"""
# This function is exposed in the script dpf_choose_media_type,
# so if changes are made here, that script's documentation
# should be updated to reflect them.
# list of (type, subtype, q)
accept_types = []
for part in accept.split(','):
part = part.strip()
if ';' not in part:
mt = part
q = 1.0
else:
(mt, q) = part.split(';', 1)
mt = mt.strip()
q = q.strip()
if not q.startswith('q='):
raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n')
try:
q = float(q[2:])
except ValueError:
raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n')
if '/' not in mt:
raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n')
(type, subtype) = mt.split('/', 1)
accept_types.append((type, subtype, q))
accept_types.sort(cmp_accept_type)
accept_types.reverse()
for (type, subtype, q) in accept_types:
for available_type in resource_types:
(a_type, a_subtype) = available_type.split('/', 1)
if type != '*' and type != a_type:
continue
if subtype != '*' and subtype != a_subtype:
continue
return available_type
raise HTTP406NotAcceptable() | 876ad2ace8af69f5c6dc83d91d598220935987d5 | 1,868 |
import math
def get_border_removal_size(image: Image, border_removal_percentage: float = .04, patch_width: int = 8):
"""
This function computes the border removal size. The patch size matters because the output shape of the
image will always be an even multiple of the patch size, which allows the later computations to tile
the image evenly.
:param image: input image used to get the dimensions
:param border_removal_percentage: how much of the border to remove
:param patch_width: the width of the patches in pixels.
:return: how many pixels to remove around the border
"""
w, h = image.size
return int(math.ceil(w * border_removal_percentage / patch_width)) * patch_width | f0f236b1d2a13058042269e0e85f52f37fb47b5e | 1,869 |
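# Worked example, assuming the Image annotation above refers to PIL.Image:
# a 512 px wide image at 4% gives 20.48 px, rounded up to 3 patches of 8 px = 24 px.
from PIL import Image
img = Image.new("RGB", (512, 512))
print(get_border_removal_size(img))   # 24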
def get_natural_num(msg):
"""
Get a valid natural number from the user!
:param msg: message asking for a natural number
    :return: the natural number entered by the user, as an int.
"""
valid_enter = False
while not valid_enter:
given_number = input(msg).strip()
if given_number.isdigit():
num = int(given_number)
valid_enter = True
return num | 77bed94bf6d3e5ceb56d58eaf37e3e687e3c94ba | 1,870 |
from typing import Optional
def _decode_panoptic_or_depth_map(map_path: str) -> Optional[str]:
"""Decodes the panoptic or depth map from encoded image file.
Args:
map_path: Path to the panoptic or depth map image file.
Returns:
Panoptic or depth map as an encoded int32 numpy array bytes or None if not
existing.
"""
if not tf.io.gfile.exists(map_path):
return None
with tf.io.gfile.GFile(map_path, 'rb') as f:
decoded_map = np.array(Image.open(f)).astype(np.int32)
if FLAGS.panoptic_divisor > 0 and map_path.endswith(_LABEL_SUFFIX):
semantic_map = decoded_map[:, :, 0]
instance_map = (
decoded_map[:, :, 1] * _ENCODED_INSTANCE_LABEL_DIVISOR +
decoded_map[:, :, 2])
decoded_map = semantic_map * FLAGS.panoptic_divisor + instance_map
return decoded_map.tobytes() | 1f3b81827ba911614d8979b187b8cde7f10078fe | 1,871 |
def splitstr(s, l=25):
    """ Split a string into whitespace-delimited chunks of length < l,
        keeping ~5 characters of headroom for an "(i/n)" page marker.
    """
    words = s.split()
    out = []
    counter = 5
    tmp_out = ''
    for word in words:
        if counter + len(word) > l and tmp_out:
            out.append(tmp_out)
            tmp_out = ''
            counter = 5
        tmp_out += word + ' '
        counter = len(tmp_out) + 5
    if tmp_out:
        out.append(tmp_out)
return out | 0d84d7bbf420d1f97993be459764c37fed50f8b3 | 1,872 |
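# Usage sketch for splitstr above: words are packed into chunks shorter than l,
# with ~5 characters of headroom left for a "(i/n)" page marker.
print(splitstr("the quick brown fox jumps over the lazy dog", 20))
# ['the quick brown ', 'fox jumps over ', 'the lazy dog ']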
import re
def SplitRequirementSpecifier(requirement_specifier):
"""Splits the package name from the other components of a requirement spec.
Only supports PEP 508 `name_req` requirement specifiers. Does not support
requirement specifiers containing environment markers.
Args:
requirement_specifier: str, a PEP 508 requirement specifier that does not
contain an environment marker.
Returns:
(string, string), a 2-tuple of the extracted package name and the tail of
the requirement specifier which could contain extras and/or a version
specifier.
Raises:
Error: No package name was found in the requirement spec.
"""
package = requirement_specifier.strip()
tail_start_regex = r'(\[|\(|==|>=|!=|<=|<|>|~=|===)'
tail_match = re.search(tail_start_regex, requirement_specifier)
tail = ''
if tail_match:
package = requirement_specifier[:tail_match.start()].strip()
tail = requirement_specifier[tail_match.start():].strip()
if not package:
        raise Error("Missing package name in requirement specifier: '{}'".format(
            requirement_specifier))
return package, tail | d71eee50c162756ac7aae0bd120323d50d3ab255 | 1,873 |
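# Usage sketch for SplitRequirementSpecifier above: the split point is the first
# extras bracket, parenthesis, or version operator.
print(SplitRequirementSpecifier("numpy>=1.19.2"))             # ('numpy', '>=1.19.2')
print(SplitRequirementSpecifier("requests[security]==2.0"))   # ('requests', '[security]==2.0')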
def arctanh(var):
"""
Wrapper function for atanh
"""
return atanh(var) | 955d09821d78703c99fc2e51f70ca0fc47b0c943 | 1,874 |
def predict_sentiment(txt: str, direc: str = 'models/sentiment/saved_models/model50') -> float:
"""
predicts sentiment of string
only use for testing not good for large data because
model is loaded each time
input is a txt string
optional directory change for using different models
returns a value from -1 to 1
Aproaching -1 being a negative sentiment
Aproaching 1 being a positive sentiment
"""
vals = spacy.load(direc)(txt).cats
return vals["pos"] if vals["pos"]>vals["neg"] else -1*vals["neg"] | d7ff2d361792032eb097e0b0e9818da6ce3af1e5 | 1,875 |
import numpy
def logfbank(signal,
samplerate=16000,
winlen=0.025,
winstep=0.01,
nfilt=26,
nfft=512,
lowfreq=0,
highfreq=None,
preemph=0.97,
winfunc=lambda x: numpy.ones((x,))):
"""Compute log Mel-filterbank energy features from an audio signal.
:param signal: the audio signal from which to compute features. Should be an N*1 array
:param samplerate: the samplerate of the signal we are working with.
:param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
:param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
:param nfilt: the number of filters in the filterbank, default 26.
:param nfft: the FFT size. Default is 512.
:param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
:param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
:param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
:param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming
:returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
"""
feat, energy = fbank(signal, samplerate, winlen, winstep, nfilt, nfft,
lowfreq, highfreq, preemph, winfunc)
return numpy.log(feat) | 670d5faf73fcca6da249d9b5c9fb6965eafab855 | 1,876 |
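# Usage sketch, assuming the companion fbank() helper used by logfbank above
# (python_speech_features style) is available: one second of 16 kHz audio yields
# one 26-dimensional log filterbank vector per 10 ms frame step.
import numpy
sig = numpy.random.randn(16000)
feats = logfbank(sig, samplerate=16000, nfilt=26)
print(feats.shape)   # (num_frames, 26)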
def get_rmsd( pose, second_pose, overhang = 0):
"""
Get RMSD assuming they are both the same length!
"""
#id_map = get_mask_for_alignment(pose, second_pose, cdr, overhang)
#rms = rms_at_corresponding_atoms_no_super(pose, second_pose, id_map)
start = 1 + overhang
end = pose.total_residue() - overhang
l = Loop(start, end)
loops = Loops()
loops.push_back(l)
rms = loop_rmsd(pose, second_pose, loops, False, True)
return rms | 80af3478fe946243cba5e7f3e45c61a8ea9af1d1 | 1,877 |
def inverse(a: int, n: int):
"""
    Compute the multiplicative inverse of a modulo n, where a and n must be coprime.
a * x = 1 (mod n)
:param a: (int)
:param n: (int)
:return: (int) x
"""
assert greatest_common_divisor(a, n) == 1
return greatest_common_divisor_with_coefficient(a, n)[1] % n | 1012c4c69b81dccd2ecfd0c5cddf7a7bd9b2c1f8 | 1,878 |
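# inverse() above depends on gcd helpers defined elsewhere; the result can be
# cross-checked with Python's built-in modular inverse (Python >= 3.8):
print(pow(3, -1, 7))   # 5, because 3 * 5 = 15 ≡ 1 (mod 7)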
def create_app():
"""Create the Flask application."""
return app | 7ff5c1e66ab48a5f262beb7abfa21e28680605c9 | 1,879 |
def generate_prime_candidate(length):
""" Genera un integer impar aleatorimanete
param size: tamanio del numero deseado
return:integer
"""
p = big_int(length)
p |= (1 << length - 1) | 1
return p | bdae69644156191a5388b23d7cf1853b8b0273b6 | 1,880 |
def PathPrefix(vm):
"""Determines the prefix for a sysbench command based on the operating system.
Args:
vm: VM on which the sysbench command will be executed.
Returns:
A string representing the sysbench command prefix.
"""
if vm.OS_TYPE == os_types.RHEL:
return INSTALL_DIR
else:
return '/usr/' | e0ade1847bce77f3b4efd9f801b36b219917f0b8 | 1,881 |
from typing import Union
import asyncio
def get_next_valid_seq_number(
address: str, client: SyncClient, ledger_index: Union[str, int] = "current"
) -> int:
"""
Query the ledger for the next available sequence number for an account.
Args:
address: the account to query.
client: the network client used to make network calls.
ledger_index: The ledger index to use for the request. Must be an integer
ledger value or "current" (the current working version), "closed" (for the
closed-and-proposed version), or "validated" (the most recent version
validated by consensus). The default is "current".
Returns:
The next valid sequence number for the address.
"""
return asyncio.run(main.get_next_valid_seq_number(address, client, ledger_index)) | cb2a502aabc474ab79ea14bd2305dda5bfe8b479 | 1,882 |
import re
def apps_list(api_filter, partial_name, **kwargs):
"""List all defined applications. If you give an optional command line
argument, the apps are filtered by name using this string."""
params = {}
if api_filter:
params = {"filter": api_filter}
rv = okta_manager.call_okta("/apps", REST.get, params=params)
# now filter by name, if given
if partial_name:
matcher = re.compile(partial_name)
rv = list(filter(lambda x: matcher.search(x["name"]), rv))
return rv | bafb2f1eb65e735b40613e302fcbc85507c25cb8 | 1,883 |
def jacquez(s_coords, t_coords, k, permutations=99):
"""
Jacquez k nearest neighbors test for spatio-temporal interaction.
:cite:`Jacquez:1996`
Parameters
----------
s_coords : array
(n, 2), spatial coordinates.
t_coords : array
(n, 1), temporal coordinates.
k : int
the number of nearest neighbors to be searched.
permutations : int, optional
the number of permutations used to establish pseudo-
significance (the default is 99).
Returns
-------
jacquez_result : dictionary
contains the statistic (stat) for the test and the
associated p-value (pvalue).
stat : float
value of the Jacquez k nearest neighbors test for the
dataset.
pvalue : float
p-value associated with the statistic (normally
distributed with k-1 df).
Examples
--------
>>> import numpy as np
>>> import libpysal as lps
>>> from pointpats import SpaceTimeEvents, jacquez
Read in the example data and create an instance of SpaceTimeEvents.
>>> path = lps.examples.get_path("burkitt.shp")
>>> events = SpaceTimeEvents(path,'T')
The Jacquez test counts the number of events that are k nearest
neighbors in both time and space. The following runs the Jacquez test
on the example data and reports the resulting statistic. In this case,
there are 13 instances where events are nearest neighbors in both space
and time.
# turning off as kdtree changes from scipy < 0.12 return 13
>>> np.random.seed(100)
>>> result = jacquez(events.space, events.t ,k=3,permutations=99)
>>> print(result['stat'])
13
The significance of this can be assessed by calling the p-
value from the results dictionary, as shown below. Again, no
space-time interaction is observed.
>>> result['pvalue'] < 0.01
False
"""
time = t_coords
space = s_coords
n = len(time)
# calculate the nearest neighbors in space and time separately
knnt = lps.weights.KNN.from_array(time, k)
knns = lps.weights.KNN.from_array(space, k)
nnt = knnt.neighbors
nns = knns.neighbors
knn_sum = 0
# determine which events are nearest neighbors in both space and time
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
knn_sum += count
stat = knn_sum
# return the results (if no inference)
if not permutations:
return stat
# loop for generating a random distribution to assess significance
dist = []
for p in range(permutations):
j = 0
trand = np.random.permutation(time)
knnt = lps.weights.KNN.from_array(trand, k)
nnt = knnt.neighbors
for i in range(n):
t_neighbors = nnt[i]
s_neighbors = nns[i]
check = set(t_neighbors)
inter = check.intersection(s_neighbors)
count = len(inter)
j += count
dist.append(j)
# establish the pseudo significance of the observed statistic
distribution = np.array(dist)
greater = np.ma.masked_greater_equal(distribution, stat)
count = np.ma.count_masked(greater)
pvalue = (count + 1.0) / (permutations + 1.0)
# report the results
jacquez_result = {'stat': stat, 'pvalue': pvalue}
return jacquez_result | dc71d74cc0e0159e1164d659ca3f07f3b9a61dd6 | 1,884 |
import torch as t
def array2tensor(array, device='auto'):
"""Convert ndarray to tensor on ['cpu', 'gpu', 'auto']
"""
assert device in ['cpu', 'gpu', 'auto'], "Invalid device"
if device != 'auto':
return t.tensor(array).float().to(t.device(device))
if device == 'auto':
return t.tensor(array).float().to(t.device('cuda' if t.cuda.is_available() else 'cpu')) | 53826ad5b19a4e030bc3e98857c9b3285094370f | 1,885 |
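# Usage sketch for array2tensor above, assuming torch and numpy are installed:
import numpy as np
x = np.arange(6).reshape(2, 3)
cpu_tensor = array2tensor(x, device='cpu')   # always on CPU
auto_tensor = array2tensor(x)                # CUDA if available, else CPU
print(cpu_tensor.dtype, cpu_tensor.shape)    # torch.float32 torch.Size([2, 3])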
def to_json(graph):
"""Convert this graph to a Node-Link JSON object.
:param BELGraph graph: A BEL graph
:return: A Node-Link JSON object representing the given graph
:rtype: dict
"""
graph_json_dict = node_link_data(graph)
# Convert annotation list definitions (which are sets) to canonicalized/sorted lists
graph_json_dict['graph'][GRAPH_ANNOTATION_LIST] = {
keyword: list(sorted(values))
for keyword, values in graph_json_dict['graph'][GRAPH_ANNOTATION_LIST].items()
}
# Convert set to list
graph_json_dict['graph'][GRAPH_UNCACHED_NAMESPACES] = list(graph_json_dict['graph'][GRAPH_UNCACHED_NAMESPACES])
return graph_json_dict | 325053a0838bbf1ab70a4fb61e17f93f27c80dab | 1,886 |
def export(df: pd.DataFrame):
"""
From generated pandas dataframe to xml configuration
:param df: computed pandas dataframe
:return:
"""
return df | 445e91a419746afef8062dcc1e6691572ba9390d | 1,887 |
def has_same_attributes(link1, link2):
"""
Return True if the two links have the same attributes for our purposes,
ie it is OK to merge them together into one link
Parameters:
link1 - Link object
link2 - Link object
Return value:
True iff link1 and link2 have compatible attributes
"""
return (link1.linktype == link2.linktype and
abs(link1.B - link2.B) < EPS and
abs(link1.power - link2.power) < EPS and
abs(link1.capacity - link2.capacity) < EPS) | e80f62d01ef18e547a2e7718ac2bb1ca3001b84f | 1,888 |
def test_signals_creation(test_df, signal_algorithm):
"""Checks signal algorithms can create a signal in a Pandas dataframe."""
test_df_copy = test_df.copy()
original_columns = test_df.columns
# We check if the test series has the columns needed for the rule to calculate.
required_columns = Api.required_inputs_for_algorithm(signal_algorithm)
all_present = True
for ii_requirement in required_columns:
if ii_requirement not in original_columns:
all_present = False
# If columns are missing, we anticipate a KeyError will trigger.
if not all_present:
with pytest.raises(KeyError):
Api.calculate_signal(test_df_copy, signal_algorithm)
return True
# Otherwise we expect to parse successfully.
df_with_signal = Api.calculate_signal(test_df_copy, signal_algorithm)
if not isinstance(df_with_signal, pd.DataFrame):
print(df_with_signal)
print("Type was: ", type(df_with_signal))
raise TypeError("Bad output format.")
# Signal algorithms should be adding new columns with float, int or NaN data.
new_columns = False
for ii_column_name in df_with_signal.columns:
if ii_column_name not in original_columns:
new_columns = True
for ii_value in df_with_signal[ii_column_name]:
if not isinstance(ii_value, (float, int)):
                    assert ii_value == "NaN"
# At least one new column should have been added. Otherwise output is overriding input columns.
if not new_columns:
raise AssertionError(
"No new columns were created by the signal function: ",
df_with_signal.columns,
" versus original of ",
original_columns,
) | 5a4515092d778090a77ce5933ad2e79b4d62df36 | 1,889 |
def get_prev_day(d):
"""
Returns the date of the previous day.
"""
curr = date(*map(int, d.split('-')))
prev = curr - timedelta(days=1)
return str(prev) | 9195c0be4fc25a68b0bb94e953bafd407c5931a3 | 1,890 |
import types
def copy_function(old_func, updated_module):
"""Copies a function, updating it's globals to point to updated_module."""
new_func = types.FunctionType(old_func.__code__, updated_module.__dict__,
name=old_func.__name__,
argdefs=old_func.__defaults__,
closure=old_func.__closure__)
new_func.__dict__.update(old_func.__dict__)
new_func.__module__ = updated_module.__name__
return new_func | e09022f734faa1774a3ac592c0e12b0b007ae8e3 | 1,891 |
import random
def get_random_color():
"""
    Return a random Bootstrap color class name.
    :return: Bootstrap color string
"""
color_str = [
'primary',
'secondary',
'success',
'danger',
'warning',
'info',
'dark',
]
return random.choice(color_str) | 898814996aa5ada8f4000244887af382b8b9e1bc | 1,892 |
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
                        Wi -- Weight matrix of the update (input) gate, numpy array of shape (n_a, n_a + n_x)
                        bi -- Bias of the update (input) gate, numpy array of shape (n_a, 1)
                        Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
                        bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
                        Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
                        bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilda),
c stands for the memory value
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"]
bf = parameters["bf"]
Wi = parameters["Wi"]
bi = parameters["bi"]
Wc = parameters["Wc"]
bc = parameters["bc"]
Wo = parameters["Wo"]
bo = parameters["bo"]
Wy = parameters["Wy"]
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
# Concatenate a_prev and xt (≈3 lines)
    # concat stacks a_prev on top of xt, giving shape (n_a + n_x, m)
concat = np.zeros((n_x + n_a, m))
concat[: n_a, :] = a_prev
concat[n_a:, :] = xt
# Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)
ft = sigmoid(np.dot(Wf, concat) + bf)
it = sigmoid(np.dot(Wi, concat) + bi)
cct = np.tanh(np.dot(Wc, concat) + bc)
c_next = ft * c_prev + it * cct
ot = sigmoid(np.dot(Wo, concat) + bo)
a_next = ot * np.tanh(c_next)
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy, a_next) + by)
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache | 1457b7bb53f0658d7c24744c3a06d4cdcdd4096e | 1,893 |
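# Shape check for lstm_cell_forward above, assuming the sigmoid() and softmax()
# helpers it calls are defined alongside it; random parameters are enough to
# verify that the output shapes line up.
import numpy as np
np.random.seed(1)
n_x, n_a, n_y, m = 3, 5, 2, 10
xt = np.random.randn(n_x, m)
a_prev = np.random.randn(n_a, m)
c_prev = np.random.randn(n_a, m)
parameters = {
    "Wf": np.random.randn(n_a, n_a + n_x), "bf": np.random.randn(n_a, 1),
    "Wi": np.random.randn(n_a, n_a + n_x), "bi": np.random.randn(n_a, 1),
    "Wc": np.random.randn(n_a, n_a + n_x), "bc": np.random.randn(n_a, 1),
    "Wo": np.random.randn(n_a, n_a + n_x), "bo": np.random.randn(n_a, 1),
    "Wy": np.random.randn(n_y, n_a), "by": np.random.randn(n_y, 1),
}
a_next, c_next, yt_pred, _ = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print(a_next.shape, c_next.shape, yt_pred.shape)   # (5, 10) (5, 10) (2, 10)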
from subprocess import PIPE, STDOUT, Popen
def logged_run(cmd, buffer):
"""Return exit code."""
pid = Popen(cmd, stdout=PIPE, stderr=STDOUT)
pid.wait()
buffer.write(pid.stdout.read())
return pid.returncode | 99a1aa4f8997ef7665a8e994b3df3d4ffe8a844b | 1,894 |
def _decode_and_format_b64_string(b64encoded_string, item_prefix=None,
current_depth=1, current_index=1):
"""Decode string and return displayable content plus list of decoded artifacts."""
# Check if we recognize this as a known file type
(_, f_type) = _is_known_b64_prefix(b64encoded_string)
_debug_print_trace('Found type: ', f_type)
output_files = _decode_b64_binary(b64encoded_string, f_type)
if not output_files:
return b64encoded_string, None
if len(output_files) == 1:
# get the first (only) item
out_name, out_record = list(output_files.items())[0]
_debug_print_trace('_decode_b64_binary returned a single record')
_debug_print_trace('record:', out_record)
# Build display string
# If a string, include the decoded item in the output
if out_record.encoding_type in ['utf-8', 'utf-16']:
display_string = f'<decoded type=\'string\' name=\'{out_name}\' ' +\
f'index=\'{item_prefix}{current_index}\' ' +\
f'depth=\'{current_depth}\'>' +\
f'{out_record.decoded_string}</decoded>'
return display_string, [out_record]
else:
# if a binary just record its presence
display_string = f'<decoded value=\'binary\' name=\'{out_name}\' ' +\
f'type=\'{out_record.file_type}\' ' +\
f'index=\'{item_prefix}{current_index}\' ' +\
f'depth=\'{current_depth}\'/>'
return display_string, [out_record]
else:
# Build header display string
display_header = f'<decoded value=\'multiple binary\' type=\'multiple\' ' +\
f' index=\'{item_prefix}{current_index}\'>'
child_display_strings = []
child_index = 1
child_depth = current_depth + 1
_debug_print_trace('_decode_b64_binary returned multiple records')
# Build child display strings
for child_name, child_rec in output_files.items():
_debug_print_trace('Child_decode: ', child_rec)
child_index_string = f'{item_prefix}{current_index}.{child_index}'
if child_rec.encoding_type in ['utf-8', 'utf-16']:
# If a string, include the decoded item in the output
child_display_string = f'<decoded type=\'string\' name=\'{child_name}\' ' +\
f'index=\'{child_index_string}\' ' +\
f'depth=\'{child_depth}\'>' +\
f'{child_rec.decoded_string}</decoded>'
else:
# if a binary just record its presence
child_display_string = f'<decoded type=\'{child_rec.file_type}\' ' +\
f'name=\'{child_name}\' ' +\
f'index=\'{child_index_string}\' ' +\
f'depth=\'{child_depth}\'/>'
child_display_strings.append(child_display_string)
child_index += 1
display_string = display_header + ''.join(child_display_strings) + '</decoded>'
return display_string, output_files.values() | ebf85b6b06319c6b5d8d35b5d419d3ff3b60204a | 1,895 |
import os
import shutil
import time
def buildDMG():
"""
Create DMG containing the rootDir
"""
outdir = os.path.join(WORKDIR, 'diskimage')
if os.path.exists(outdir):
shutil.rmtree(outdir)
imagepath = os.path.join(outdir,
'python-%s-macosx'%(getFullVersion(),))
if INCLUDE_TIMESTAMP:
imagepath = imagepath + '%04d-%02d-%02d'%(time.localtime()[:3])
imagepath = imagepath + '.dmg'
os.mkdir(outdir)
runCommand("hdiutil create -volname 'Univeral MacPython %s' -srcfolder %s %s"%(
getFullVersion(),
shellQuote(os.path.join(WORKDIR, 'installer')),
shellQuote(imagepath)))
return imagepath | 8acb9f52219ddc6000bb6cb38d52c04161c17fa3 | 1,896 |
import json
def create_iam_role(iam_client):
"""Create an IAM role for the Redshift cluster to have read only access to
S3.
Arguments:
iam_client (boto3.client) - IAM client
Returns:
role_arn (str) - ARN for the IAM Role
"""
# Create the role if it doesn't already exist.
try:
print('Creating IAM Role...')
redshift_role = iam_client.create_role(
Path="/",
RoleName=IAM_ROLE_NAME,
Description="Allows Redshift clusters to call AWS services",
AssumeRolePolicyDocument=json.dumps(
{
'Statement': [
{
'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}
}
],
'Version': '2012-10-17'
}
)
)
except Exception as e:
print(e)
# Attach the policy.
try:
iam_client.attach_role_policy(
RoleName=IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadonlyAccess"
)
except Exception as e:
print(e)
# Return the Role ARN.
role_arn = iam_client.get_role(RoleName=IAM_ROLE_NAME)['Role']['Arn']
print('Role ARN: %s' % role_arn)
return role_arn | 949026bae3edc1dacc5057427ecf3e21490bd9b8 | 1,897 |
from numpy import array, hstack, zeros
def cone_face_to_span(F):
"""
Compute the span matrix F^S of the face matrix F,
that is, a matrix such that
{F x <= 0} if and only if {x = F^S z, z >= 0}.
"""
b, A = zeros((F.shape[0], 1)), -F
# H-representation: A x + b >= 0
F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE)
F_cdd.rep_type = RepType.INEQUALITY
P = Polyhedron(F_cdd)
V = array(P.get_generators())
    for i in range(V.shape[0]):
if V[i, 0] != 0: # 1 = vertex, 0 = ray
raise NotConeFace(F)
return V[:, 1:].T | ae928e179085116fa8ac48fd01841458bdcd38ec | 1,898 |
def neighborhood(index, npoints, maxdist=1):
"""
    Returns the neighbourhood of the current index,
    i.e. all points of the grid separated by up to
    *maxdist* from the current point.
@type index: int
@type npoints: int
@type maxdist int
@rtype: list of int
"""
return [index + i for i in range(-maxdist, maxdist + 1)
if i != 0 and 0 <= index + i <= npoints - 1] | 98166d810daa6b99862a4c9f6d1629fdfa571bd0 | 1,899 |
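# Usage sketch for neighborhood above: neighbours are clipped at the grid
# boundary and the index itself is excluded.
print(neighborhood(0, 10, maxdist=2))   # [1, 2]
print(neighborhood(5, 10, maxdist=1))   # [4, 6]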