content | sha1 | id
---|---|---
import numpy as np
import scipy.sparse as sp
def LoadSparse(inputfile, verbose=False):
"""Loads a sparse matrix stored as npz file to its dense represent."""
npzfile = np.load(inputfile)
mat = sp.csr_matrix((npzfile['data'], npzfile['indices'],
npzfile['indptr']),
shape=tuple(list(npzfile['shape'])))
if verbose:
print('Loaded sparse matrix from %s of shape %s' % (inputfile,
str(mat.shape)))
return mat.todense() | 80dfeb5c48ab2c3905b78ba37226eb98fde5de45 | 1,400 |
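# Usage sketch for LoadSparse above (a minimal round-trip; 'demo_sparse.npz' is
# just an illustrative file name, and the npz keys must match the ones the
# loader expects: data/indices/indptr/shape).
import numpy as np
import scipy.sparse as sp
demo = sp.csr_matrix(np.eye(3))
np.savez('demo_sparse.npz', data=demo.data, indices=demo.indices,
indptr=demo.indptr, shape=np.array(demo.shape))
dense = LoadSparse('demo_sparse.npz', verbose=True)  # 3x3 dense identity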
from logging import getLogger
import logging
import os
def _get_sw_loader_logger():
""" Setup a new logger with passed skywalking CLI env vars,
don't import from skywalking, it may not be on sys.path
if user misuses the CLI to run programs out of scope
"""
logger = getLogger('skywalking-loader')
ch = logging.StreamHandler()
formatter = logging.Formatter('%(name)s [%(threadName)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
if os.environ.get('SW_PYTHON_CLI_DEBUG_ENABLED') == 'True': # set from the original CLI runner
logger.setLevel(level=logging.DEBUG)
return logger | a38e03e973bf32b8e2ac91e60e50e775e1dfe2f4 | 1,401 |
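# Hypothetical usage of _get_sw_loader_logger above: enable debug output the
# way the CLI runner would, then log through the loader logger.
import os
os.environ['SW_PYTHON_CLI_DEBUG_ENABLED'] = 'True'
loader_logger = _get_sw_loader_logger()
loader_logger.debug('sitecustomize loader hook active')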
import networkx as nx
def chain_decomposition(G, root=None):
"""Return the chain decomposition of a graph.
The *chain decomposition* of a graph with respect to a depth-first
search tree is a set of cycles or paths derived from the set of
fundamental cycles of the tree in the following manner. Consider
each fundamental cycle with respect to the given tree, represented
as a list of edges beginning with the nontree edge oriented away
from the root of the tree. For each fundamental cycle, if it
overlaps with any previous fundamental cycle, just take the initial
non-overlapping segment, which is a path instead of a cycle. Each
cycle or path is called a *chain*. For more information, see [1]_.
Parameters
----------
G : undirected graph
root : node (optional)
A node in the graph `G`. If specified, only the chain
decomposition for the connected component containing this node
will be returned. This node indicates the root of the depth-first
search tree.
Yields
------
chain : list
A list of edges representing a chain. There is no guarantee on
the orientation of the edges in each chain (for example, if a
chain includes the edge joining nodes 1 and 2, the chain may
include either (1, 2) or (2, 1)).
Raises
------
NodeNotFound
If `root` is not in the graph `G`.
Notes
-----
The worst-case running time of this implementation is linear in the
number of nodes and number of edges [1]_.
References
----------
.. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
and 2-edge-connectivity." *Information Processing Letters*,
113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016>
"""
def _dfs_cycle_forest(G, root=None):
"""Builds a directed graph composed of cycles from the given graph.
`G` is an undirected simple graph. `root` is a node in the graph
from which the depth-first search is started.
This function returns both the depth-first search cycle graph
(as a :class:`~cynetworkx.DiGraph`) and the list of nodes in
depth-first preorder. The depth-first search cycle graph is a
directed graph whose edges are the edges of `G` oriented toward
the root if the edge is a tree edge and away from the root if
the edge is a non-tree edge. If `root` is not specified, this
performs a depth-first search on each connected component of `G`
and returns a directed forest instead.
If `root` is not in the graph, this raises :exc:`KeyError`.
"""
# Create a directed graph from the depth-first search tree with
# root node `root` in which tree edges are directed toward the
# root and nontree edges are directed away from the root. For
# each node with an incident nontree edge, this creates a
# directed cycle starting with the nontree edge and returning to
# that node.
#
# The `parent` node attribute stores the parent of each node in
# the DFS tree. The `nontree` edge attribute indicates whether
# the edge is a tree edge or a nontree edge.
#
# We also store the order of the nodes found in the depth-first
# search in the `nodes` list.
H = nx.DiGraph()
nodes = []
for u, v, d in nx.dfs_labeled_edges(G, source=root):
if d == 'forward':
# `dfs_labeled_edges()` yields (root, root, 'forward')
# if it is beginning the search on a new connected
# component.
if u == v:
H.add_node(v, parent=None)
nodes.append(v)
else:
H.add_node(v, parent=u)
H.add_edge(v, u, nontree=False)
nodes.append(v)
# `dfs_labeled_edges` considers nontree edges in both
# orientations, so we need to skip the edge if its other
# orientation has already been added.
elif d == 'nontree' and v not in H[u]:
H.add_edge(v, u, nontree=True)
else:
# Do nothing on 'reverse' edges; we only care about
# forward and nontree edges.
pass
return H, nodes
def _build_chain(G, u, v, visited):
"""Generate the chain starting from the given nontree edge.
`G` is a DFS cycle graph as constructed by
:func:`_dfs_cycle_forest`. The edge (`u`, `v`) is a nontree edge
that begins a chain. `visited` is a set representing the nodes
in `G` that have already been visited.
This function yields the edges in an initial segment of the
fundamental cycle of `G` starting with the nontree edge (`u`,
`v`) that includes all the edges up until the first node that
appears in `visited`. The tree edges are given by the 'parent'
node attribute. The `visited` set is updated to add each node in
an edge yielded by this function.
"""
while v not in visited:
yield u, v
visited.add(v)
u, v = v, G.nodes[v]['parent']
yield u, v
# Create a directed version of H that has the DFS edges directed
# toward the root and the nontree edges directed away from the root
# (in each connected component).
H, nodes = _dfs_cycle_forest(G, root)
# Visit the nodes again in DFS order. For each node, and for each
# nontree edge leaving that node, compute the fundamental cycle for
# that nontree edge starting with that edge. If the fundamental
# cycle overlaps with any visited nodes, just take the prefix of the
# cycle up to the point of visited nodes.
#
# We repeat this process for each connected component (implicitly,
# since `nodes` already has a list of the nodes grouped by connected
# component).
visited = set()
for u in nodes:
visited.add(u)
# For each nontree edge going out of node u...
edges = ((u, v) for u, v, d in H.out_edges(u, data='nontree') if d)
for u, v in edges:
# Create the cycle or cycle prefix starting with the
# nontree edge.
chain = list(_build_chain(H, u, v, visited))
yield chain | 603fffdd2530bbaf296bb628ec59ce0d7a9a8de2 | 1,402 |
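# Small usage sketch for chain_decomposition above, assuming networkx is
# importable as nx: two triangles sharing node 3 decompose into two chains
# (edge orientation within each chain is not guaranteed).
import networkx as nx
G_demo = nx.Graph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 5), (5, 3)])
for chain in chain_decomposition(G_demo):
    print(chain)  # e.g. [(1, 2), (2, 3), (3, 1)] then [(3, 4), (4, 5), (5, 3)]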
def dup_lcm(f, g, K):
"""Computes polynomial LCM of `f` and `g` in `K[x]`. """
if K.has_Field or not K.is_Exact:
return dup_ff_lcm(f, g, K)
else:
return dup_rr_lcm(f, g, K) | f5b6a7f3d0aa7155bfffd03b3bcb3d01e716855c | 1,403 |
from urllib.parse import unquote
import json
def add_tag_translation(request, tag_id, lang, text):
"""Adds a translation to the given Tag."""
tag = get_object_or_404(Tag, id=tag_id)
text = unquote(text)
data = {}
langs = tag.site.get_languages(lang)
if len(langs) == 0:
data['error'] = 'No languages defined'
else:
TagTranslation.objects.create(tag=tag, language=langs[0], text=text)
return HttpResponse(json.dumps(data), content_type='application/json') | 2fe49be5bee9b6b13104ed1465ea6c993db9eb51 | 1,404 |
import numpy as np
def simpson(x, with_replacement=False):
"""Compute the Simpson index directly from counts (or from frequencies, if with_replacement=True).
Parameters
----------
x : array_like
Vector of per-category counts (or frequencies).
with_replacement : bool
If True, use the with-replacement (frequency-based) form.
(Default value = False)
Returns
-------
float
The Simpson index.
"""
total = np.sum(x)
if with_replacement:
return np.sum([(y / total) * (y / total) for y in x])
else:
return np.sum([(y / total) * ((y - 1) / (total - 1)) for y in x]) | 282de607ee722d95db830ca7185a2d3519dcb78f | 1,405 |
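# Worked example for simpson above with raw counts [5, 3, 2] (total N = 10):
# without replacement: sum(n_i*(n_i-1)) / (N*(N-1)) = 28/90 ~= 0.311,
# with replacement:    sum((n_i/N)^2) = 0.25 + 0.09 + 0.04 = 0.38.
counts = np.array([5, 3, 2])
print(simpson(counts))                         # ~0.3111
print(simpson(counts, with_replacement=True))  # 0.38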
async def setup_automation(hass, device_id, trigger_type):
"""Set up an automation trigger for testing triggering."""
return await async_setup_component(
hass,
AUTOMATION_DOMAIN,
{
AUTOMATION_DOMAIN: [
{
"trigger": {
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device_id,
CONF_TYPE: trigger_type,
},
"action": {
"service": "test.automation",
"data": DATA_MESSAGE,
},
},
]
},
) | e06d8c38ccfb76b5c89d002407d400bf0135749b | 1,406 |
def generate_html_tutor_constraints(sai):
"""
Given an SAI, this finds a set of constraints for the SAI, so it doesn't
fire in nonsensical situations.
"""
constraints = set()
args = get_vars(sai)
# selection constraints, you can only select something that has an
# empty string value.
if len(args) == 0:
return frozenset()
# get action
action = sai[2]
if action == "ButtonPressed":
# Constrain the selection to be of type button
# constraints.add(('type', selection, 'MAIN::button'))
selection = args[0]
constraints.add(('id', selection, 'done'))
else:
# print("SAI", sai)
# print("ARGS", args)
selection = args[0]
constraints.add(('contentEditable', selection, True))
# constraints.add(('value', selection, '?selection-value'))
# constraints.add((is_empty_string, '?selection-value'))
# value constraints, don't select empty values
for i, arg in enumerate(args[1:]):
constraints.add(('value', arg, '?foa%ival' % (i+1)))
constraints.add((is_not_empty_string, '?foa%ival' % (i+1)))
# constraints.add(('type', a, 'MAIN::cell'))
return frozenset(constraints) | d6201e80574f766ae57609707bad8f617d908682 | 1,407 |
import os
import json
import shutil
def run_test(request):
"""
Run the test cases.
:param request:
:return:
"""
kwargs = {
"failfast": False,
}
runner = HttpRunner(**kwargs)
# Path to the test case directory
test_case_dir_path = os.path.join(os.getcwd(), "suite")
test_case_dir_path = os.path.join(test_case_dir_path, get_time_stamp())
if request.is_ajax():
kwargs = json.loads(request.body.decode('utf-8'))
test_id = kwargs.pop('id')
base_url = kwargs.pop('env_name')
test_type = kwargs.pop('type')
run_test_by_type(test_id, base_url, test_case_dir_path, test_type)
report_name = kwargs.get('report_name', None)
main_hrun.delay(test_case_dir_path, report_name)
return HttpResponse('Test cases are running; check the report later (named with a timestamp by default)')
else:
test_id = request.POST.get('id')
base_url = request.POST.get('env_name')
test_type = request.POST.get('type', 'test')
run_test_by_type(test_id, base_url, test_case_dir_path, test_type)
runner.run(test_case_dir_path)
shutil.rmtree(test_case_dir_path)
runner.summary = timestamp_to_datetime(runner.summary, data_type=False)
return render_to_response('report_template.html', runner.summary) | ea2770fd19fe36b53b50543c3894656738bb6909 | 1,408 |
import os
import logging
from shutil import copyfile
def set_up_CB(inp_d):
"""
Setting up directories and altered files
In this function we create all the needed directories and transfer
the files that were changed in order to get the program to work in KBase
into the PaperBLAST directory.
inp_d (input dict) must contain the following keys:
pb_home
genome_protein_filepath
genome_nucleotide_filepath
genome_dir
"""
pb_home = inp_d['pb_home']
tmp_dir = os.path.join(pb_home, "tmp")
fbrowse_data_dir = os.path.join(pb_home,"fbrowse_data")
data_dir = os.path.join(pb_home, "data")
private_dir = os.path.join(pb_home,"private")
genome_dir = os.path.join(pb_home, inp_d['genome_dir'])
blast_dir = os.path.join(pb_home, "bin/blast")
alt_file_dir = '/kb/module/lib/curated_blast/altered_files'
# Creating Directories in PaperBLAST directory
for d in [tmp_dir, fbrowse_data_dir, data_dir, private_dir, genome_dir,
blast_dir]:
if not os.path.exists(d):
os.mkdir(d)
else:
logging.warning("Directory {} already exists".format(d))
# Copying Files Over to PaperBLAST directory
pb_bin = os.path.join(pb_home, "bin")
for base_file in ["clear_dir.py","fastx_findorfs.py", "main.py", "usearch"]:
copyfile(os.path.join(alt_file_dir, base_file ),os.path.join(pb_bin, base_file))
os.chmod(os.path.join(pb_bin, base_file), 0o111)
# Copying the main CGI file
copyfile(os.path.join(alt_file_dir, 'dbg_genomeSearch.cgi' ),
os.path.join(pb_home,"cgi/dbg_genomeSearch.cgi"))
os.chmod(os.path.join(pb_home,"cgi/dbg_genomeSearch.cgi"), 0o111)
# Changing file mode
for fn in ["bl2seq", "blast/fastacmd", "blast/blastall", "blast/formatdb"]:
os.chmod(os.path.join(pb_bin, fn), 0o111)
#Copying Altered PaperBLAST files to appropriate directories within PaperBLAST
logging.debug("Altered files Dir: ")
new_files = os.listdir(alt_file_dir)
logging.debug(new_files)
#Removing current FetchAssembly (from github) and replacing with newer version
os.unlink(os.path.join(pb_home, "lib/FetchAssembly.pm"))
copyfile(os.path.join(alt_file_dir, 'FetchAssembly.pm'), os.path.join(pb_home, "lib/FetchAssembly.pm"))
os.chmod(os.path.join(pb_home, "lib/FetchAssembly.pm"), 0o111)
#We copy the genome files to their location within PaperBLAST
genome_p_location_pb = os.path.join(genome_dir,"faa")
genome_n_location_pb = os.path.join(genome_dir, "fna")
copyfile(inp_d['genome_protein_filepath'], genome_p_location_pb)
copyfile(inp_d['genome_nucleotide_filepath'], genome_n_location_pb)
#CODE
#We copy the reference data in the Docker data directory
data_dir = "/data"
pb_data_dir = os.path.join(pb_home, "data")
for f in os.listdir(data_dir):
# We only copy files and not directories
if os.path.isfile(os.path.join(data_dir,f)):
copyfile(os.path.join(data_dir, f),os.path.join(pb_data_dir,f))
logging.info("Succesfully completed creation of dirs and transfer of files")
return None | dbec8c64afabe170c5d5173d11c48bf27e0f8b21 | 1,409 |
def info(email):
"""Information about a specific email."""
with db_session() as db:
user = db.query(User).filter(User.email == email).first()
if user:
return [user.email, user.api_key, user.grabs]
else:
return None | d9624f33f18dd507c2fceb4e7f917d9cd695dea9 | 1,410 |
import networkx
def TetrahedralGraph():
"""
Returns a tetrahedral graph (with 4 nodes).
A tetrahedron is a 4-sided triangular pyramid. The tetrahedral
graph corresponds to the connectivity of the vertices of the
tetrahedron. This graph is equivalent to a wheel graph with 4 nodes
and also a complete graph on four nodes. (See examples below).
PLOTTING: The tetrahedral graph should be viewed in 3 dimensions.
We chose to use the default spring-layout algorithm here, so that
multiple iterations might yield a different point of reference for
the user. We hope to add rotatable, 3-dimensional viewing in the
future. In such a case, a string argument will be added to select
the flat spring-layout over a future implementation.
EXAMPLES: Construct and show a Tetrahedral graph
::
sage: g = graphs.TetrahedralGraph()
sage: g.show() # long time
The following example requires networkx::
sage: import networkx as NX
Compare this Tetrahedral, Wheel(4), Complete(4), and the
Tetrahedral plotted with the spring-layout algorithm below in a
Sage graphics array::
sage: tetra_pos = graphs.TetrahedralGraph()
sage: tetra_spring = Graph(NX.tetrahedral_graph())
sage: wheel = graphs.WheelGraph(4)
sage: complete = graphs.CompleteGraph(4)
sage: g = [tetra_pos, tetra_spring, wheel, complete]
sage: j = []
sage: for i in range(2):
....: n = []
....: for m in range(2):
....: n.append(g[i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
"""
G = networkx.tetrahedral_graph()
return Graph(G, name="Tetrahedron", pos =
{ 0 : (0, 0),
1 : (0, 1),
2 : (cos(3.5*pi/3), sin(3.5*pi/3)),
3 : (cos(5.5*pi/3), sin(5.5*pi/3))}
) | b39014244ae2750b2e118d2b7cfe4b7d7cd55997 | 1,411 |
import logging
import tensorflow as tf
def build_resnet_v1(input_shape, depth, num_classes, pfac, use_frn=False,
use_internal_bias=True):
"""Builds ResNet v1.
Args:
input_shape: tf.Tensor.
depth: ResNet depth.
num_classes: Number of output classes.
pfac: priorfactory.PriorFactory class.
use_frn: if True, then use Filter Response Normalization (FRN) instead of
batchnorm.
use_internal_bias: if True, use biases in all Conv layers.
If False, only use a bias in the final Dense layer.
Returns:
tf.keras.Model.
"""
def resnet_layer(inputs,
filters,
kernel_size=3,
strides=1,
activation=None,
pfac=None,
use_frn=False,
use_bias=True):
"""2D Convolution-Batch Normalization-Activation stack builder.
Args:
inputs: tf.Tensor.
filters: Number of filters for Conv2D.
kernel_size: Kernel dimensions for Conv2D.
strides: Stride dimensions for Conv2D.
activation: tf.keras.activations.Activation.
pfac: prior.PriorFactory object.
use_frn: if True, use Filter Response Normalization (FRN) layer
use_bias: if True, use biases in Conv layers.
Returns:
tf.Tensor.
"""
x = inputs
logging.info('Applying conv layer.')
x = pfac(tf.keras.layers.Conv2D(
filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
use_bias=use_bias))(x)
if use_frn:
x = pfac(frn.FRN())(x)
else:
x = tf.keras.layers.BatchNormalization()(x)
if activation is not None:
x = tf.keras.layers.Activation(activation)(x)
return x
# Main network code
num_res_blocks = (depth - 2) // 6
filters = 16
if (depth - 2) % 6 != 0:
raise ValueError('depth must be 6n+2 (e.g. 20, 32, 44).')
logging.info('Starting ResNet build.')
inputs = tf.keras.layers.Input(shape=input_shape)
x = resnet_layer(inputs,
filters=filters,
activation='relu',
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
for stack in range(3):
for res_block in range(num_res_blocks):
logging.info('Starting ResNet stack #%d block #%d.', stack, res_block)
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(x,
filters=filters,
strides=strides,
activation='relu',
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
y = resnet_layer(y,
filters=filters,
activation=None,
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match changed dims
x = resnet_layer(x,
filters=filters,
kernel_size=1,
strides=strides,
activation=None,
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
x = tf.keras.layers.add([x, y])
if use_frn:
x = pfac(frn.TLU())(x)
else:
x = tf.keras.layers.Activation('relu')(x)
filters *= 2
# v1 does not use BN after last shortcut connection-ReLU
x = tf.keras.layers.AveragePooling2D(pool_size=8)(x)
x = tf.keras.layers.Flatten()(x)
x = pfac(tf.keras.layers.Dense(
num_classes,
kernel_initializer='he_normal'))(x)
logging.info('ResNet successfully built.')
return tf.keras.models.Model(inputs=inputs, outputs=x) | f72a48fcd0df9c7b3b2ea0c5c5db9d3dffad55b6 | 1,412 |
import numpy as np
import cvxpy as cvx
def lr_recover_l1(invecs, intensities, nonneg=True, **kwargs):
"""Computes the low-rank matrix reconstruction using l1-minimisation
.. math::
\min_Z \sum_i \vert \langle a_i| Z | a_i \rangle - y_i \vert \\
\mathrm{s.t.} Z \ge 0
where :math:`a_i` are the input vectors and :math:`y_i` are the measured
intensities.
For the arguments not listed see :func:`recover`
:param bool nonneg: Enforce the constraint Z >= 0 (default True)
:param kwargs: Additional arguments passed to `cvx.Problem.solve`
:returns: array of shape (dim, dim); Low-rank matrix approximation for
given measurements
"""
dim = invecs.shape[1]
# we have to manually convert the convex program to real form since cvxpy
# does not support complex programs
z, mat_cons = _semidef_complex_as_real(dim) if nonneg else \
_hermitian_as_real(dim)
invecs_real = np.concatenate((invecs.real, invecs.imag), axis=1)
obj = cvx.Minimize(sum(cvx.abs(cvx.quad_form(a, z) - y)
for a, y in zip(invecs_real, intensities)))
prob = cvx.Problem(obj, mat_cons)
prob.solve(**kwargs)
if prob.status not in ['optimal', 'optimal_inaccurate']:
raise RuntimeError("Optimization did not converge: " + prob.status)
return z.value[:dim, :dim] + 1.j * z.value[dim:, :dim] | 6551f8e3ff146831d51d338c00c5f2e8b3d7acef | 1,413 |
def fetch_function_names() -> str:
"""Returns a list of cloud function names"""
functions = fetch_functions_in_json()
logs.debug(f"Fetched {len(functions)} cloud functions")
return "Temp holder until I figure out how to get a function's name" | 65c63a0b19d0ebd0a3e45e8a30d620e5cbc984f2 | 1,414 |
import pickle
def start_view_data(trans_id):
"""
This method is used to execute a query using an asynchronous connection.
Args:
trans_id: unique transaction id
"""
limit = -1
# Check the transaction and connection status
status, error_msg, conn, trans_obj, session_obj = \
check_transaction_status(trans_id)
if error_msg == ERROR_MSG_TRANS_ID_NOT_FOUND:
return make_json_response(success=0, errormsg=error_msg,
info='DATAGRID_TRANSACTION_REQUIRED',
status=404)
# get the default connection as current connection which is attached to
# trans id holds the cursor which has query result so we cannot use that
# connection to execute another query otherwise we'll lose query result.
try:
manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
trans_obj.sid)
default_conn = manager.connection(did=trans_obj.did)
except (ConnectionLost, SSHTunnelConnectionLost) as e:
raise
except Exception as e:
current_app.logger.error(e)
return internal_server_error(errormsg=str(e))
# Connect to the Server if not connected.
if not default_conn.connected():
status, msg = default_conn.connect()
if not status:
return make_json_response(
data={'status': status, 'result': "{}".format(msg)}
)
if status and conn is not None and \
trans_obj is not None and session_obj is not None:
# set fetched row count to 0 as we are executing query again.
trans_obj.update_fetched_row_cnt(0)
# Fetch the sql and primary_keys from the object
sql = trans_obj.get_sql(default_conn)
pk_names, primary_keys = trans_obj.get_primary_keys(default_conn)
session_obj['command_obj'] = pickle.dumps(trans_obj, -1)
has_oids = False
if trans_obj.object_type == 'table':
# Fetch OIDs status
has_oids = trans_obj.has_oids(default_conn)
# Fetch the applied filter.
filter_applied = trans_obj.is_filter_applied()
# Fetch the limit for the SQL query
limit = trans_obj.get_limit()
can_edit = trans_obj.can_edit()
can_filter = trans_obj.can_filter()
# Store the primary keys to the session object
session_obj['primary_keys'] = primary_keys
# Store the OIDs status into session object
session_obj['has_oids'] = has_oids
update_session_grid_transaction(trans_id, session_obj)
# Execute sql asynchronously
status, result = conn.execute_async(sql)
else:
status = False
result = error_msg
filter_applied = False
can_edit = False
can_filter = False
sql = None
return make_json_response(
data={
'status': status, 'result': result,
'filter_applied': filter_applied,
'limit': limit, 'can_edit': can_edit,
'can_filter': can_filter, 'sql': sql,
'info_notifier_timeout': blueprint.info_notifier_timeout.get()
}
) | 0c4c5797578abc0623805d269043c6fb3c0512a9 | 1,415 |
def fft_resize(images, resize=False, new_size=None):
"""Function for applying DFT and resizing.
This function takes in an array of images, applies the 2-d fourier transform
and resizes them according to new_size, keeping the frequencies that overlap
between the two sizes.
Args:
images: a numpy array with shape
[batch_size, height, width, num_channels]
resize: boolean, whether or not to resize
new_size: a tuple (size, size), with height and width the same
Returns:
im_fft_downsampled: a numpy array with shape
[batch_size, (new) height, (new) width, num_channels]
"""
assert len(images.shape) == 4, ("expecting images to be"
"[batch_size, height, width, num_channels]")
im_complex = images.astype("complex64")
im_fft = np.fft.fft2(im_complex, axes=(1, 2))
# resizing images
if resize:
# get fourier frequencies to threshold
assert (im_fft.shape[1] == im_fft.shape[2]), ("Need images to have same"
"height and width")
# downsample by threshold
width = im_fft.shape[2]
new_width = new_size[0]
freqs = np.fft.fftfreq(width, d=1.0 / width)
idxs = np.flatnonzero((freqs >= -new_width / 2.0) & (freqs <
new_width / 2.0))
im_fft_downsampled = im_fft[:, :, idxs, :][:, idxs, :, :]
else:
im_fft_downsampled = im_fft
return im_fft_downsampled | 64846e58fa8ad422b4668062b09458110feed7f5 | 1,416 |
import os
import shutil
from zipfile import ZipFile
def install_build(zipfile_name, ignore_if_exists=False):
"""
Install server build on local drive. 'zipfile_name' is the name of the
zip file in 'BSD_TEMP_FOLDER'.
The function returns the folder name of the battleserver build image.
If 'ignore_if_exists' is True, then the function returns immediately if the
build is already installed on local drive.
Details:
The build is installed into a subfolder in BSD_BATTLESERVER_FOLDER using
the same name as the zip file (sans the .zip ending). The contents of the
zip file are first extracted to a temporary folder, then that folder is
renamed to the final name. This is to ensure an atomic publishing of the
build. If the target folder already exists, it will be removed first.
"""
head, tail = os.path.split(zipfile_name)
image_name, ext = os.path.splitext(tail)
# The final destination of the build
dest_folder = os.path.join(config.BSD_BATTLESERVER_FOLDER, image_name)
dest_folder = os.path.abspath(dest_folder)
if ignore_if_exists and os.path.exists(dest_folder):
return image_name
zipfile_path = os.path.join(config.BSD_TEMP_FOLDER, zipfile_name)
zipfile_path = os.path.abspath(zipfile_path)
if not os.path.exists(zipfile_path):
raise RuntimeError("Zipfile '{}' not found!".format(zipfile_path))
with ZipFile(zipfile_path) as zipfile:
update_state(
state='PROGRESS',
meta={'file': tail, 'step': 'unzipping'},
)
# Extract to a staging folder
staging_folder = dest_folder + ".temp"
try:
logger.info("Unzipping %s to %s", zipfile_path, staging_folder)
zipfile.extractall(staging_folder)
# Publish the build
update_state(
state='PROGRESS',
meta={'file': tail, 'step': 'publishing'},
)
if os.path.exists(dest_folder):
logger.info("Removing previous install at %s", dest_folder)
shutil.rmtree(dest_folder, ignore_errors=False)
logger.info("Publishing %s to %s", staging_folder, dest_folder)
os.rename(staging_folder, dest_folder)
finally:
# Remove staging folder, if needed.
if os.path.exists(staging_folder):
logger.debug("Removing staging folder %s", staging_folder)
shutil.rmtree(staging_folder)
return image_name | d0877034489c6b2aa0c961cf020b32f4d1a4ca5c | 1,417 |
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Sequential(nn.ReplicationPad2d(1), nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=0, groups=groups, bias=False, dilation=dilation)) | 4b23699e4766f341262499608680f5f1f2b6cd26 | 1,418 |
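# Quick shape check for conv3x3 above (assumes torch is installed): the
# replication padding keeps the spatial size at stride 1 and halves it at stride 2.
import torch
x_demo = torch.randn(1, 3, 32, 32)
print(conv3x3(3, 16, stride=1)(x_demo).shape)  # torch.Size([1, 16, 32, 32])
print(conv3x3(3, 16, stride=2)(x_demo).shape)  # torch.Size([1, 16, 16, 16])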
def get_inbound_layers_without_params(layer):
"""Return inbound layers.
Parameters
----------
layer: Keras.layers
A Keras layer.
Returns
-------
: list[Keras.layers]
List of inbound layers.
"""
return [layer for layer in get_inbound_layers(layer)
if not has_weights(layer)] | 9671b9277cb690dc54ffbcb35a4e22672b81748d | 1,419 |
def orders_matchresults(symbol, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
"""
:param symbol:
:param types: optional; one of {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param start_date:
:param end_date:
:param _from:
:param direct: optional; one of {prev, next}
:param size:
:return:
"""
params = {'symbol': symbol}
if types:
params['types'] = types
if start_date:
params['start-date'] = start_date
if end_date:
params['end-date'] = end_date
if _from:
params['from'] = _from
if direct:
params['direct'] = direct
if size:
params['size'] = size
url = '/v1/order/matchresults'
return api_key_get(params, url) | 61121dc12bed75236622e45fb6c96b2595008e64 | 1,420 |
from keras import backend as K
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision | 3979236388aecaa32452958bf344632a1c781181 | 1,421 |
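# Sanity check for the precision metric above, assuming a TensorFlow-backed
# Keras install: 2 predicted positives, 1 of them correct -> precision ~= 0.5.
import numpy as np
y_true_demo = K.constant(np.array([[1., 0., 0., 1.]]))
y_pred_demo = K.constant(np.array([[1., 1., 0., 0.]]))
print(K.eval(precision(y_true_demo, y_pred_demo)))  # ~0.5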
def _is_grpc_unary_method(attr):
"""Check if attribute is a grpc method that returns unary."""
return isinstance(attr, (grpc.UnaryUnaryMultiCallable, grpc.StreamUnaryMultiCallable)) | cb20221038ea2754d259fca22941ba91944aceb8 | 1,422 |
def gridcorner(
D,
xyz,
labels=None,
projection="max_slice",
max_n_ticks=4,
factor=2,
whspace=0.05,
showDvals=True,
lines=None,
label_offset=0.4,
**kwargs
):
"""Generate a grid corner plot
Parameters
----------
D: array_like
N-dimensional data to plot, `D.shape` should be `(n1, n2,..., nN)`,
where `N`, is the number of grid points along dimension `i`.
xyz: list
List of 1-dimensional arrays of coordinates. `xyz[i]` should have
length `N` (see help for `D`).
labels: list
N+1 length list of labels; the first N correspond to the coordinates
labels, the final label is for the dependent (D) variable.
projection: str or func
If a string, one of `{"log_mean", "max_slice"}` to use inbuilt functions
to calculate either the logged mean or maximum slice projection. Else
a function to use for projection, must take an `axis` argument. Default
is `gridcorner.max_slice()`, to project out a slice along the
maximum.
max_n_ticks: int
Number of ticks for x and y axis of the `pcolormesh` plots.
factor: float
Controls the size of one window.
showDvals: bool
If true (default) show the D values on the right-hand-side of the
1D plots and add a label.
lines: array_like
N-dimensional list of values to delineate.
Returns
-------
fig, axes:
The figure and NxN set of axes
"""
ndim = D.ndim
fig, axes = _get_fig_and_axes(ndim, factor, whspace)
if type(projection) == str:
if projection in ["log_mean"]:
projection = log_mean
elif projection in ["max_slice"]:
projection = max_slice
else:
raise ValueError("Projection {} not understood".format(projection))
for i in range(ndim):
projection_1D(
axes[i, i],
xyz[i],
D,
i,
projection=projection,
showDvals=showDvals,
lines=lines,
**kwargs
)
for j in range(ndim):
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
ax.get_shared_x_axes().join(axes[ndim - 1, j], ax)
if i < ndim - 1:
ax.set_xticklabels([])
if j < i:
ax.get_shared_y_axes().join(axes[i, i - 1], ax)
if j > 0:
ax.set_yticklabels([])
if j == i:
continue
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="upper"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="upper"))
ax, pax = projection_2D(
ax,
xyz[i],
xyz[j],
D,
i,
j,
lines=lines,
projection=projection,
**kwargs
)
if labels:
for i in range(ndim):
axes[-1, i].set_xlabel(labels[i])
if i > 0:
axes[i, 0].set_ylabel(labels[i])
if showDvals:
axes[i, i].set_ylabel(labels[-1])
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
return fig, axes | 37c929a5b323deb1968ba6504113141bfc8ee830 | 1,423 |
from ._standard_montage_utils import _str_names, _str
def read_dig_hpts(fname, unit='mm'):
"""Read historical .hpts mne-c files.
Parameters
----------
fname : str
The filepath of .hpts file.
unit : 'm' | 'cm' | 'mm'
Unit of the positions. Defaults to 'mm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
The hpts format digitizer data file may contain comment lines starting
with the pound sign (#) and data lines of the form::
<*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
where:
``<*category*>``
defines the type of points. Allowed categories are: ``hpi``,
``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to
head-position indicator coil locations, cardinal landmarks, EEG
electrode locations, and additional head surface points,
respectively.
``<*identifier*>``
identifies the point. The identifiers are usually sequential
numbers. For cardinal landmarks, 1 = left auricular point,
2 = nasion, and 3 = right auricular point. For EEG electrodes,
identifier = 0 signifies the reference electrode.
``<*x/mm*> , <*y/mm*> , <*z/mm*>``
Location of the point, usually in the head coordinate system
in millimeters. If your points are in [m] then unit parameter can
be changed.
For example::
cardinal 2 -5.6729 -12.3873 -30.3671
cardinal 1 -37.6782 -10.4957 91.5228
cardinal 3 -131.3127 9.3976 -22.2363
hpi 1 -30.4493 -11.8450 83.3601
hpi 2 -122.5353 9.2232 -28.6828
hpi 3 -6.8518 -47.0697 -37.0829
hpi 4 7.3744 -50.6297 -12.1376
hpi 5 -33.4264 -43.7352 -57.7756
eeg FP1 3.8676 -77.0439 -13.0212
eeg FP2 -31.9297 -70.6852 -57.4881
eeg F7 -6.1042 -68.2969 45.4939
...
"""
_scale = _check_unit_and_get_scaling(unit)
out = np.genfromtxt(fname, comments='#',
dtype=(_str, _str, 'f8', 'f8', 'f8'))
kind, label = _str_names(out['f0']), _str_names(out['f1'])
kind = [k.lower() for k in kind]
xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T
xyz *= _scale
del _scale
fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}
fid = {fid_idx_to_label[label[ii]]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}
ch_pos = {label[ii]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}
hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'hpi'])
hpi.shape = (-1, 3) # in case it's empty
hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'extra'])
hsp.shape = (-1, 3) # in case it's empty
return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) | e5159900f5eb6e846f4028a237884fb0f85323f3 | 1,424 |
def GetTaskAttr( fname, attrName, defaultVal = None ):
"""Return the specified attribute of a task, or the specified default value if the task does not have this attribute."""
for line in SlurpFile( fname ).rstrip('\n').split('\n'):
arg, val = line.split('\t')
if arg == attrName: return coerceVal( val )
return defaultVal | ab4e8ca8286e94895afe78353b98283b7ab7e890 | 1,425 |
def print_board(white, black):
"""Produce GnuGO like output to verify board position.
Args:
white (np.array): array with 1's for white
black (np.array): array with 1's for black
Returns:
str: gnugo like output (without legend)
"""
s = ''
for x in range(19):
for y in range(19):
if white[x][y] == 1:
s += '0 '
elif black[x][y] == 1:
s += 'X '
else:
s += '. '
s += '\n'
return s | 5d6143ffd1964cbe41d7d9e0a9c9b7696e7d5008 | 1,426 |
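# Minimal demo for print_board above: one white stone at (0, 0) shown as '0'
# and one black stone at (3, 3) shown as 'X' on otherwise empty boards.
import numpy as np
white_demo = np.zeros((19, 19), dtype=int)
black_demo = np.zeros((19, 19), dtype=int)
white_demo[0][0] = 1
black_demo[3][3] = 1
print(print_board(white_demo, black_demo))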
from typing import List
def get_repositories_containing_graph(name: str) -> List[str]:
"""Returns the repositories containing a graph with the given graph name.
Parameters
----------------------------
name: str,
The name of the graph to retrieve.
Returns
----------------------------
List of repository names.
"""
return [
repository
for repository in get_available_repositories()
if name in get_available_graphs_from_repository(repository)
] | 16d2135e1d68fe699fa68b7786f7363dd06e5ab5 | 1,427 |
def build_query(dct):
"""Build SQL with '?' and value tuples from clause dictionary"""
if dct:
str_clauses = ''
tpl_values = ()
bln_start = True
#print dct
for str_field, dct_op_val in dct.items():
if (str_field is not None):
if (bln_start):
str_open = ' ('
bln_start = False
else:
str_open = ' and ('
str_clauses = ''.join([str_clauses, str_open, str_field, ' ', \
dct_op_val['logic'], ' ?)'])
var_val = dct_op_val['value']
if (str(var_val).lower() == 'null'):
var_val = None
tpl_values = tpl_values + (var_val, )
else: # simple 1 or 0 (ALL records or NO records) ...
# trumps all other clauses, so lets exit the loop
str_clauses = ' ?'
tpl_values = (dct_op_val['value'],)
break
return (tpl_values, str_clauses)
else:
return ((), " 1") | ac49014c8e629d2fdc12472f2b8b345cbee8ce18 | 1,428 |
def load_secrets(fn=".env", prefix="DJANGO_ENV_", **kwargs):
"""Load a list of configuration variables.
Return a dictionary of configuration variables, as loaded from a
configuration file or the environment. Values passed in as
``args`` or as the value in ``kwargs`` will be used as the
configuration variable's default value if one is not found in the
configuration file or environment.
Parameters
----------
fn : string, default=".env"
Configuration filename, defaults to ``.env``. May be in TOML,
JSON, YAML, or BespON formats. Formats will be attempted in this
order.
prefix : string, default="DJANGO_ENV_"
Prefix for environment variables. This prefix will be
prepended to all variable names before searching for them in
the environment.
kwargs : dict, optional
Dictionary with configuration variables as keys and default
values as values.
Returns
-------
dict
A dictionary of configuration variables and their values.
"""
return merge(kwargs, load_file(fn), load_environment(prefix)) | 6f18ba641e4c23383e47c7422efaaa990af4cc6a | 1,429 |
import itertools
def dependency_chain(pkgname):
"""Return an ordered list of dependencies for a package"""
depends = recurse_depends(pkgname)
return set(list(depends.keys()) + list(itertools.chain.from_iterable(depends.values()))) | 208226f2d771f4fa278a0295997fc53df55caa8f | 1,430 |
from typing import Callable
import click
def with_input(func: Callable) -> Callable:
"""
Attaches a "source" argument to the command.
"""
return click.argument(
"source", type=click.Path(exists=True), required=True
)(func) | 3117f183ac4e4d459a718b59fc9a3ba00b36e291 | 1,431 |
def check_loop_validity(inst_list):
""" Given a list of instructions, check whether they can form a valid loop.
This means, checking for anything that could create an infinite loop.
We are also disallowing double loops right now"""
for i, c in enumerate(inst_list):
if c in [5, 6, 16, 25]:
return False, i
return True, -1 | a58923e014947d1406165a831a57b73fcb9ab226 | 1,432 |
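# Tiny illustration for check_loop_validity above: opcode 16 at index 2
# invalidates the loop, while a clean instruction list passes.
print(check_loop_validity([1, 2, 16, 3]))  # (False, 2)
print(check_loop_validity([1, 2, 3]))      # (True, -1)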
def target_channel_id_name_list(
conversations_list: list=None, including_archived: bool=False):
"""extract targeted channels id list from conversations_list response.
Returns:
id_list, name_list
"""
id_list = []
name_list = []
for ch in conversations_list:
if including_archived is False:
if ch['is_archived'] is True:
continue
id_list.append(ch['id'])
name_list.append(ch['name'])
return id_list, name_list | be2ec76242367a170deac2e577ec90c435046ef9 | 1,433 |
def NETWORKDAYS(*args) -> Function:
"""
Returns the number of net working days between two provided dates.
Learn more: https://support.google.com/docs/answer/3092979
"""
return Function("NETWORKDAYS", args) | f93f34ef173a6f3f552062f33b599988ea63cb8a | 1,434 |
def calc_high_outlier(values) -> float:
"""Calculates the high outlier from a pandas Series"""
q1, q3 = [values.quantile(x, 'midpoint') for x in (0.25, 0.75)]
return q3 + 1.5 * (q3 - q1) | 8ee929aec1cb4af9a90d04893f8f94444d00ad22 | 1,435 |
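# Example for calc_high_outlier above: with midpoint interpolation Q1 = 3.5 and
# Q3 = 7.5, so the high-outlier fence is 7.5 + 1.5 * 4 = 13.5.
import pandas as pd
values_demo = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 100])
print(calc_high_outlier(values_demo))  # 13.5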
def get_sql_delete_by_ids(table: str, ids_length: int):
"""
Build the SQL string for deleting rows by id.
:param table: table name
:param ids_length: number of id placeholders to generate
:return: the DELETE statement with %s placeholders
"""
# Validate the parameters
if not table:
raise ParamError(f"Invalid table parameter: table={table}")
if not ids_length or not isinstance(ids_length, int):
raise ParamError(f"Invalid ids_length parameter: ids_length={ids_length}")
# Prepare the id placeholders
ids = ["%s" for _ in range(ids_length)]
ids_str = ", ".join(ids)
# Build the SQL
s = f"delete from {table} where id in ({ids_str});"
return s | f9980c92f5fa1064a99823655be4ea8aed619db3 | 1,436 |
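# Example of the statement generated by get_sql_delete_by_ids above for three
# ids ('users' is just an illustrative table name); the actual id values are
# passed separately to the driver as parameters.
print(get_sql_delete_by_ids("users", 3))
# delete from users where id in (%s, %s, %s);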
def Float(request):
"""
A simple form with a single float field
"""
schema = schemaish.Structure()
schema.add('myFloatField', schemaish.Float())
form = formish.Form(schema, 'form')
return form | 18ddcaa697dca96ea321a1aba51d4d2fb0fed47c | 1,437 |
import numpy as np
from scipy import linalg
def pca(X):
"""
Returns the eigenvectors U, the eigenvalues (on diagonal) in S.
Args:
X: array(# of training examples, n)
Returns:
U: array(n, n)
S: array(n, n)
"""
# Get some useful values
m, n, _, _ = X.shape
# Init U and S.
U = np.zeros(n)
S = np.zeros(n)
# When computing the covariance matrix, we have
# to divide by m (the number of examples).
sigma = (1. / m) * np.dot(X.T, X)
# Compute the eigenvectors and eigenvalues
# of the covariance matrix.
U, S, V = linalg.svd(sigma)
S = linalg.diagsvd(S, len(S), len(S))
return U, S | cf6304615b3d75b730235f238822c347342a4cbd | 1,438 |
import imp
def read_py_version(script_name, search_path):
"""Read the version of a script from a python file"""
file, pathname, desc = imp.find_module(script_name, [search_path])
try:
new_module = imp.load_module(script_name, file, pathname, desc)
if hasattr(new_module.SCRIPT, "version"):
return new_module.SCRIPT.version
except:
pass
return None | b69d9ff9f6f718c418fac1b0cd77355d8d4ffd1d | 1,439 |
def post_check_variable(team_id, source_id, check_id):
"""
.. :quickref: POST; Lorem ipsum."""
if not TeamPermission.is_manager_or_editor(team_id):
abort(403)
payload = get_payload()
payload.update({"team_id": team_id, "source_id": source_id, "check_id": check_id})
variable = VariableController.create(payload)
return jsonify(format_variable(variable)), 200 | 5c187a5cfec19409c155068c6c8212217adb3632 | 1,440 |
import re
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
you know exactly which line first didn't compare correctl...
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + [''] * (-diff)
elif diff > 0:
b = b + [''] * diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
if not expr.search(bline):
result.append("%sc%s" % (i + 1, i + 1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i + 1
return result | 802dd3287502c3d3fe85242ba51043e4b5769cd5 | 1,441 |
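# Small sketch for diff_re above: the second expected line is a regex that the
# actual output fails to match, so a one-line diff chunk is reported.
expected_demo = ['hello', r'value: \d+']
actual_demo = ['hello', 'value: abc']
for line in diff_re(expected_demo, actual_demo):
    print(line)
# 2c2
# < 'value: \\d+'
# ---
# > 'value: abc'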
import pickle
def storeAgent(sess, agentObj):
"""
INPUT : session object, agent object
OUTPUT : the stored agent's ID (or 0 on failure)
DESCRIPTION : Updates the agent object in that session
"""
currAgents = getCurrGen(sess)
lock(sess)
try:
if(sess.mode == 'SAFE'):
tpfp = open(GA_UTIL_DIR+"/utilFiles/tmp"+str(agentObj.sessID)+"/dnaPool/dna"
+str(agentObj.agentID)+".dna", "wb")
pickle.dump(agentObj, tpfp)
tpfp.close()
currAgents.add(agentObj.agentID)
else:
sess.agentBasket[agentObj.agentID] = agentObj
currAgents.add(agentObj.agentID)
except Exception:
print("error in store agent, couldnt wb")
return(0)
setCurrGen( sess, currAgents)
unlock(sess)
return(agentObj.agentID) | 57b74d722141f008332a8fa129018f0b72fcc26d | 1,442 |
def info(device):
"""
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
"""
out = __salt__["cmd.run_all"]("xfs_info {}".format(device))
if out.get("stderr"):
raise CommandExecutionError(out["stderr"].replace("xfs_info:", "").strip())
return _parse_xfs_info(out["stdout"]) | 96b1f91c921f607c0e348b8dd4355699fc12c5f0 | 1,443 |
from typing import Union
from typing import Dict
from typing import Tuple
from typing import Any
def serialize_framework_build_config(dict_: Union[Dict[str, str], str]) -> Tuple[Any, ...]:
"""Serialize a dict to a hashable tuple.
Parameters
----------
dict_: Dict[str, str]
Returns
-------
hashable_tuple: Tuple[Any, ...]
A hashable tuple.
"""
if isinstance(dict_, dict):
return tuple(sorted(list(dict_.items())))
return (dict_,) | 365b413ff21bf4fb7f5d153dbe74801ee125108f | 1,444 |
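# Both call forms of serialize_framework_build_config above yield hashable
# tuples that can be used as dictionary keys.
print(serialize_framework_build_config({"b": "2", "a": "1"}))  # (('a', '1'), ('b', '2'))
print(serialize_framework_build_config("release"))             # ('release',)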
import pandas as pd
def _check_columns(data: pd.DataFrame,
features: list) -> pd.DataFrame:
"""
Given a dataframe and a list of expected features, print missing columns and return new dataframe
with only valid features
Parameters
-----------
data: Pandas.DataFrame
DataFrame for checking
features: list
list of features (column names)
Returns
---------
Pandas.DataFrame
new 'valid' DataFrame
"""
valid_features = [f for f in features if f in data.columns]
if len(valid_features) < len(features):
print(f'The following features are missing from the training data and will be excluded from the '
f'model {list(set(features) - set(valid_features))}')
return data[valid_features] | ad0c0eb17b7afeaad7505d69f77336820607d77b | 1,445 |
def get_confidence(imgfilename):
"""
1003_c60.jpg -> c6
"""
if not imgfilename:
return ''
return 'c' + imgfilename.split('/')[-1][0:1] | 7c98f2abd2119b41d7e2501823985a894da5a1a1 | 1,446 |
def get_connection(hostname: str,
port: int,
username: str,
password: str):
"""
DBへのコネクションを取得します。
Returns:
Connection: コネクション
"""
return pymysql.connect(
host=hostname,
port=port,
user=username,
password=password,
cursorclass=cursors.DictCursor
) | c2036f9b5ea2e69e6d0cd94fdcf0aa55e69d5d6f | 1,447 |
def get_alleles_existing_alleleinterpretation(
session, allele_filter, user=None, page=None, per_page=None
):
"""
Returns allele_ids that has connected AlleleInterpretations,
given allele_filter from argument.
Supports pagination.
"""
# Apply filter using Allele table as base
allele_ids = session.query(allele.Allele.id).filter(allele_filter)
# Now get the ones that are actually connected to AlleleInterpretation
# (distinct allele_ids sorted by date_last_update)
alleleinterpretation_allele_ids = (
session.query(workflow.AlleleInterpretation.allele_id)
.filter(workflow.AlleleInterpretation.allele_id.in_(allele_ids))
.group_by(workflow.AlleleInterpretation.allele_id)
.order_by(func.max(workflow.AlleleInterpretation.date_last_update).desc())
)
count = alleleinterpretation_allele_ids.count()
if page and per_page:
start = (page - 1) * per_page
end = page * per_page
alleleinterpretation_allele_ids = alleleinterpretation_allele_ids.slice(start, end)
alleleinterpretation_allele_ids = alleleinterpretation_allele_ids.all()
return alleleinterpretation_allele_ids, count | d7b42ac327f284d5905c5dc5b6893cbf0c18714e | 1,448 |
import logging
def _get_session(db_uri, use_batch_mode=True, echo=False):
"""Helper to get an SQLAlchemy DB session"""
# `use_batch_mode` is experimental currently, but needed for `executemany`
#engine = create_engine(db_uri, use_batch_mode=use_batch_mode, echo=echo)
engine = create_engine(db_uri, echo=echo)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
try:
connection = session.connection()
logging.info('Successfully connected to database.')
except:
raise RuntimeError(f'Couldn\'t connect to db: {db_uri}')
return session | d9b3455b601c86face0683ac0d8f3d8763180093 | 1,449 |
def has_extension(experiment: Experiment, name: str) -> bool:
"""
Check if an extension is declared in this experiment.
"""
return get_extension(experiment, name) is not None | 6bf7630634be8802364e1a2fa38e58df523f82d9 | 1,450 |
def machine_is_valid(cloud_machine, accounts):
"""
As the criteria for "what makes a glance image an atmosphere ProviderMachine" changes, we can use this function to hook out to external plugins, etc.
Filters out:
- ChromoSnapShot, eri-, eki-
- Private images not shared with atmosphere accounts
- Domain-specific image catalog(?)
"""
provider = accounts.core_provider
# If the name of the machine indicates that it is a Ramdisk, Kernel, or Chromogenic Snapshot, skip it.
if any(cloud_machine.name.startswith(prefix) for prefix in ['eri-','eki-', 'ChromoSnapShot']):
celery_logger.info("Skipping cloud machine %s" % cloud_machine)
return False
# If the metadata 'skip_atmosphere' is found, do not add the machine.
if cloud_machine.get('skip_atmosphere', False):
celery_logger.info("Skipping cloud machine %s - Includes 'skip_atmosphere' metadata" % cloud_machine)
return False
# If the metadata indicates that the image-type is snapshot -- skip it.
if cloud_machine.get('image_type', 'image') == 'snapshot':
celery_logger.info("Skipping cloud machine %s - Image type indicates a snapshot" % cloud_machine)
return False
owner_project = _get_owner(accounts, cloud_machine)
# If the image is private, ensure that an owner can be found inside the system.
if cloud_machine.get('visibility', '') == 'private':
shared_with_projects = accounts.shared_images_for(cloud_machine.id)
shared_with_projects.append(owner_project)
project_names = [p.name for p in shared_with_projects if p] # TODO: better error handling here
identity_matches = provider.identity_set.filter(
credential__key='ex_project_name', credential__value__in=project_names).count() > 0
if not identity_matches:
celery_logger.info("Skipping private machine %s - The owner does not exist in Atmosphere" % cloud_machine)
return False
if accounts.provider_creds.get('ex_force_auth_version', '2.0_password') != '3.x_password':
return True
# NOTE: Potentially if we wanted to do 'domain-restrictions' *inside* of atmosphere,
# we could do that (based on the domain of the image owner) here.
domain_id = owner_project.domain_id
config_domain = accounts.get_config('user', 'domain', 'default')
owner_domain = accounts.openstack_sdk.identity.get_domain(domain_id)
account_domain = accounts.openstack_sdk.identity.get_domain(config_domain)
if owner_domain.id != account_domain.id: # and if FLAG FOR DOMAIN-SPECIFIC ATMOSPHERE
celery_logger.info("Skipping private machine %s - The owner belongs to a different domain (%s)" % (cloud_machine, owner_domain))
return False
return True | 7fe9d33f3c53a1159892427ad87d67e0c96af185 | 1,451 |
def min_max_median(lst):
""" a function that takes a simple list of numbers lst as a parameter and returns a list with the min, max, and the median of lst. """
s = sorted(lst)
n = len(s)
return [ s[0], s[-1], s[n//2] if n % 2 == 1 else (s[n//2 - 1] + s[n//2]) / 2] | 59b1ceef5796d77cc039a42593ddb3d1d2244bd7 | 1,452 |
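# Quick check for min_max_median above: even-length lists average the two
# middle values for the median.
print(min_max_median([7, 1, 5, 3]))  # [1, 7, 4.0]
print(min_max_median([9, 2, 4]))     # [2, 9, 4]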
def _extract_decimal_with_text_az(tokens, short_scale, ordinals):
"""
Extract decimal numbers from a string.
This function handles text such as '2 nöqtə 5'.
Notes:
While this is a helper for extractnumber_az, it also depends on
extractnumber_az, to parse out the components of the decimal.
This does not currently handle things like:
number dot number number number
Args:
tokens [Token]: The text to parse.
short_scale boolean:
ordinals boolean:
Returns:
(float, [Token])
The value found and relevant tokens.
(None, None) if no decimal value is found.
"""
for c in _DECIMAL_MARKER_AZ:
partitions = partition_list(tokens, lambda t: t.word == c)
if len(partitions) == 3:
numbers1 = \
_extract_numbers_with_text_az(partitions[0], short_scale,
ordinals, fractional_numbers=False)
numbers2 = \
_extract_numbers_with_text_az(partitions[2], short_scale,
ordinals, fractional_numbers=False)
if not numbers1 or not numbers2:
return None, None
number = numbers1[-1]
decimal = numbers2[0]
# TODO handle number dot number number number
if "." not in str(decimal.text):
return number.value + float('0.' + str(decimal.value)), \
number.tokens + partitions[1] + decimal.tokens
return None, None | 678bfed714a302437d139ea4fb87de2ef295b61d | 1,453 |
import copy
def assign_read_kmers_to_contigs_new(kmer_ii, ambiguous_kmer_counts, unambiguous_contig_counts, contig_abundances):
"""
Assign ambiguous read k-mers based on contig averages counts.
"""
contig_counts = copy.deepcopy(unambiguous_contig_counts)
contig_location_tuples = []
total_abundance = 0
# Cycle through all ambiguous k-mers and assign them.
for kmer in ambiguous_kmer_counts.keys():
# and randomly assign the count to one of the items.
contig_location_tuples = kmer_ii[kmer]
#print 'Kmer:\t' + kmer
#print 'Count:\t' + str(ambiguous_kmer_counts[kmer])
#print 'Contig_locations:'
#pprint.pprint(contig_location_tuples)
# and randomly assign the count to one of the items.
#contigs_containing_kmer = accumulate(kmer_ii[kmer])
#print kmer +'\t',
contigs_containing_kmer = list(accumulate(contig_location_tuples))
#print contigs_containing_kmer
# Calculate total abundance
for contig in contigs_containing_kmer:
total_abundance += contig_abundances[contig[0]]
# Assign fractional counts based on total abundances.
for contig in contigs_containing_kmer:
#total_abundance += contig_abundances[contig[0]]
#print 'Assigning\t' + str(contig_abundances[contig[0]] * ambiguous_kmer_counts[kmer] / total_abundance) + '\tto\t' + contig[0]
contig_counts[contig[0]] += (contig_abundances[contig[0]] * ambiguous_kmer_counts[kmer] / total_abundance)
total_abundance = 0
#for i in xrange(0, ambiguous_kmer_counts[kmer]):
# contig = random.choice(contig_location_tuples)[0]
# #print "Selecting contig:\t" + contig
# contig_counts[contig] += 1
return contig_counts | a2d7a133183b7d020f461989065c132fb87bf336 | 1,454 |
def cremi_scores(seg, gt, border_threshold=None, return_all=True):
"""
Compute the cremi scores (Average of adapted rand error, vi-split, vi-merge)
Parameters
----------
seg: np.ndarray - the candidate segmentation
gt: np.ndarray - the groundtruth
border_threshold: value by which the border is eroded (default: None = no erosion)
Returns
-------
cremi-score: average of rand error, vi-split, vi-merge
vi-split: variation of information, split score
vi-merge: variation of information, merge score
adapted rand: adapted rand error
"""
assert seg.shape == gt.shape, "%s, %s" % (str(seg.shape), str(gt.shape))
# compute border threshold if specified
if border_threshold is not None:
xy_resolution = 4.
gt_ = create_border_mask(gt, border_threshold / xy_resolution, np.uint64(-1))
# add 1 to map back to 0 as lowest label
gt_ += 1
else:
gt_ = gt
## Try except because sometimes both have nothing in them.
try:
vi_s, vi_m = voi(seg, gt_)
are = adapted_rand(seg, gt_)
cs = (vi_s + vi_m + are) / 3
except:
cs = np.nan
vi_s = np.nan
vi_m = np.nan
are = np.nan
if return_all:
return {'cremi-score': cs, 'vi-split': vi_s, 'vi-merge': vi_m, 'adapted_rand': are}
else:
return cs | 9781eeb38885fe5efc3e052a15e418e39acdcc3c | 1,455 |
def sign_transaction(transaction_dict, private_key) -> SignedTransaction:
"""
Sign a (non-staking) transaction dictionary with the specified private key
Parameters
----------
transaction_dict: :obj:`dict` with the following keys
nonce: :obj:`int` Transaction nonce
gasPrice: :obj:`int` Transaction gas price in Atto
gas: :obj:`int` Gas limit in Atto
to: :obj:`str` Destination address
value: :obj:`int` Amount to be transferred in Atto
data: :obj:`str` Transaction data, used for smart contracts
from: :obj:`str` From address, optional (if passed, must match the
public key address generated from private_key)
chainId: :obj:`int` One of util.chainIds.keys(), optional
If you want to replay your transaction across networks, do not pass it
shardID: :obj:`int` Originating shard ID, optional (needed for cx shard transaction)
toShardID: :obj:`int` Destination shard ID, optional (needed for cx shard transaction)
r: :obj:`int` First 32 bytes of the signature, optional
s: :obj:`int` Next 32 bytes of the signature, optional
v: :obj:`int` Recovery value, optional
private_key: :obj:`str` The private key
Returns
-------
A SignedTransaction object, which is a named tuple
rawTransaction: :obj:`str` Hex bytes of the raw transaction
hash: :obj:`str` Hex bytes of the transaction hash
r: :obj:`int` First 32 bytes of the signature
s: :obj:`int` Next 32 bytes of the signature
v: :obj:`int` Recovery value
Raises
------
TypeError, if the from address specified is not the same
one as derived from the the private key
AssertionError, if the fields for the transaction are missing,
or if the chainId supplied is not a string,
or if the chainId is not a key in util.py
API Reference
-------------
https://readthedocs.org/projects/eth-account/downloads/pdf/stable/
"""
account, sanitized_transaction = sanitize_transaction(transaction_dict, private_key)
if 'to' in sanitized_transaction and sanitized_transaction[ 'to' ] is not None:
sanitized_transaction[ 'to' ] = convert_one_to_hex( sanitized_transaction[ 'to' ] )
filled_transaction = pipe( # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/transactions.py#L39
sanitized_transaction,
dict,
partial(merge, TRANSACTION_DEFAULTS),
chain_id_to_v,
apply_formatters_to_dict(HARMONY_FORMATTERS)
)
unsigned_transaction = serialize_transaction(filled_transaction)
transaction_hash = unsigned_transaction.hash()
if isinstance(unsigned_transaction, (UnsignedEthereumTxData, UnsignedHarmonyTxData)):
chain_id = None # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/signing.py#L26
else:
chain_id = unsigned_transaction.v
(v, r, s) = sign_transaction_hash(
account._key_obj, transaction_hash, chain_id)
encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))
signed_transaction_hash = keccak(encoded_transaction)
return SignedTransaction(
rawTransaction=HexBytes(encoded_transaction),
hash=HexBytes(signed_transaction_hash),
r=r,
s=s,
v=v,
) | 735de56f1a2b9557cc09b9e589586eb92196936c | 1,456 |
def integralHesapla(denklem):
"""
Computes the integral of a polynomial using the trapezoidal rule.
:param denklem: the polynomial whose integral will be computed.
"""
a,b=5,len(anaVeriler)
deltax = 0.1
integral = 0
n = int((b - a) / deltax)
for i in range(n):
integral += deltax * (denklem.subs({x:a}) + denklem.subs({x:a+deltax})) / 2
a += deltax
return integral | 01bb7ebc5b678dc255e311c03c76415b0ac6f2db | 1,457 |
def fmt(text,*args,**kwargs):
"""
String formatting made easy
text - pattern
Examples
fmt("The is one = %ld", 1)
fmt("The is text = %s", 1.3)
fmt("Using keywords: one=%(one)d, two=%(two)d", two=2, one=1)
"""
return _fmt(text,args,kwargs) | a03a367d116bcde83bd0ff41ca8eb181af4c8aed | 1,458 |
def value_to_class(v):
"""
Return the label of the pixel patch, by comparing the ratio of foreground
to FOREGROUND_THRESHOLD
Input:
patch (numpy.ndarray): patch of a groundtruth image
size:(PATCH_SIZE, PATCH_SIZE)
Output:
the label of the patch:
1: foreground
0: background
"""
df = np.sum(v)
if df > FOREGROUND_THRESHOLD:
return 1
else:
return 0 | fe50615d7ed3567bb3e7de8987ce07f5736a0a5c | 1,459 |
import os
import glob
def namelist_path(output_dir):
"""Given directory containing TC results, return path to `*.in` file."""
    file_paths = [y for x in os.walk(output_dir)
                  for y in glob.glob(os.path.join(x[0], '*.in'))]  # glob already returns full paths under output_dir
if len(file_paths) > 1:
raise Exception("Multiple *.in files found in directory.")
return file_paths[0] | e50b09fc48a9a3c0792bf5a735c8c42b8d9ed80b | 1,460 |
def mc(cfg):
""" Return the MC (multi-corpus) AAI model, trained on the dysarthric and cross corpora.
3 BLSTM layers, and two single linear regression output layers to obtain articulatory trajectories corresponding to each corpus.
Parameters
----------
cfg: main.Configuration
user configuration file
Returns
-------
Model
"""
mdninput_Lstm = keras.Input(shape = (None, cfg.mfcc_dim))
lstm_1 = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'],
return_sequences=True))(mdninput_Lstm)
lstm_2a = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'],
return_sequences=True))(lstm_1)
lstm_2 = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'],
return_sequences=True))(lstm_2a)
output_1 = TimeDistributed(Dense(cfg.ema_dim,
activation='linear'))(lstm_2)
output_2 = TimeDistributed(Dense(cfg.ema_dim,
activation='linear'))(lstm_2)
model = keras.models.Model(mdninput_Lstm, [output_1, output_2])
return model | e93ca764185b9a2bd60929e9cf50574432bcf97f | 1,461 |
from typing import Callable
import tensorflow as tf
def gradient_dxyz(fxyz: tf.Tensor, fn: Callable) -> tf.Tensor:
"""
Function to calculate gradients on x,y,z-axis of a tensor using central finite difference.
It calculates the gradient along x, y, z separately then stack them together
:param fxyz: shape = (..., 3)
:param fn: function to call
:return: shape = (..., 3)
"""
return tf.stack([fn(fxyz[..., i]) for i in [0, 1, 2]], axis=4) | cc6e4660a2bfff22d04a2d05e3ff6b1dd7e5846a | 1,462 |
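# Hedged sketch of a central-difference callable that could be passed as `fn`.
# This is an assumption, not the original project's helper; it trims one voxel from
# each spatial border, which the caller is assumed to accept.
def central_diff(f: tf.Tensor) -> tf.Tensor:
    # f: shape (batch, dim1, dim2, dim3); central difference along dim1
    return (f[:, 2:, 1:-1, 1:-1] - f[:, :-2, 1:-1, 1:-1]) / 2.0
# dfd1 = gradient_dxyz(ddf, central_diff)   # ddf: shape (batch, d1, d2, d3, 3)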
import multiprocessing
def get_chunk_range():
"""
Get the range of partitions to try.
"""
n_chunks = multiprocessing.cpu_count()
if n_chunks > 128:
raise NotImplementedError('Currently we consider the num. procs in machine to '
'be < 128')
chunk_range = [n_chunks]
while n_chunks < 128:
n_chunks *= 2
chunk_range += [n_chunks]
return chunk_range | 4de86cbeba03550d5bcd7bae68c054495136a398 | 1,463 |
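# Worked example: on an 8-core machine the doubling loop above returns
# get_chunk_range() == [8, 16, 32, 64, 128].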
def probabilityEval(Mub,Mbu,PSD,ID_basal,Mub_notBleached=None,Mbu_notBleached=None,ID_notBleached=None):
"""Returns the updated PSD Matrix and the corresponding number of receptors that got bound and unbound. To types, "basal" and "not bleached" can be considered, which is necessary when simulation FRAP.
Parameters
----------
Mub : array_like
Matrix containing binding probabilities for the type "basal".
Mbu : array_like
Matrix containing unbinding probabilities for the type "basal".
Mub_notBleached : array_like, optional
By default None. Matrix containing binding probabilities for the type "not bleached".
Mbu_notBleached : array_like, optional
By default None. Matrix containing unbinding probabilities for the type "not bleached".
PSD : array_like
Matrix representing the PSD grid and its bound receptors.
ID_basal : float
Receptor ID of the basal pool.
ID_notBleached: float
Receptor ID of the not bleached pool.
Returns
-------
    out: array_like, float, float, float, float
        The updated PSD matrix and the numbers of receptors that got bound and unbound for the two types "basal" and "not bleached".
Examples
--------
Import libraries:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import ampartrafficking.stochastic_model as sm
Set parameters:
>>> U=10
>>> U_notBleached=10
>>> kUB=0.005
>>> kBU=1
>>> N=10
>>> ID_basal=1
>>> ID_notBleached=2
>>> dt=0.5
Create and populate grid and calculate nearest neighbour matrix:
>>> PSD=np.zeros((N,N))
>>> while np.sum(PSD)<20*ID_basal:
>>> i=np.random.randint(0,N)
>>> j=np.random.randint(0,N)
>>> if PSD[i,j]==0:
>>> PSD[i,j]=ID_basal
>>>
>>> while np.sum(PSD)<20*ID_basal+20*ID_notBleached:
>>> i=np.random.randint(0,N)
>>> j=np.random.randint(0,N)
>>> if PSD[i,j]==0:
>>> PSD[i,j]=ID_notBleached
>>>
>>> NN=sm.nearestNeighbours(PSD)
Plot PSD:
>>> plt.figure()
>>> plt.imshow(PSD)
>>> plt.colorbar()
Calculate probability Matrices and update the PSD Matrix:
>>> Mbu=sm.kBUcoop(kBU, NN, PSD, ID_basal)*dt
>>> Mub=sm.kUBcoop(kUB*U, NN, PSD)*dt
>>> Mbu_notBleached=sm.kBUcoop(kBU, NN, PSD, ID_notBleached)*dt
>>> Mub_notBleached=sm.kUBcoop(kUB*U_notBleached, NN, PSD)*dt
>>>
>>> PSD,dBoff,dBon,dBoff_notBleached,dBon_notBleached=sm.probabilityEval(Mub,Mbu,PSD,ID_basal,Mub_notBleached,Mbu_notBleached,ID_notBleached)
Plot PSD:
>>> plt.figure()
>>> plt.imshow(PSD)
>>> plt.colorbar()
Output: (left: before, right: after)
.. image:: images/example1_probabilityEval.png
:width: 45%
.. image:: images/example2_probabilityEval.png
:width: 45%
"""
n=np.shape(PSD)[0]
m=np.shape(PSD)[1]
R=np.random.rand(n,m)
Mask_ub=R<Mub
Mask_bu=R<Mbu
if Mub_notBleached is not None:
R=np.random.rand(n,m)
Mask_ub_notBleached=R<Mub_notBleached
Mask_bu_notBleached=R<Mbu_notBleached
if Mub_notBleached is not None:
R2=np.random.rand(n,m)
ii_basal=np.where((Mask_ub==True)&(Mask_ub_notBleached==True)&(R2<0.5))
ii_notBleached=np.where((Mask_ub==True)&(Mask_ub_notBleached==True)&(R2>=0.5))
Mask_ub[ii_basal]=False
Mask_ub_notBleached[ii_notBleached]=False
dBoff=np.sum(Mask_bu)
dBon=np.sum(Mask_ub)
if Mub_notBleached is not None:
dBoff_notBleached=np.sum(Mask_bu_notBleached)
dBon_notBleached=np.sum(Mask_ub_notBleached)
PSD[Mask_ub]=ID_basal
PSD[Mask_bu]=0
if Mub_notBleached is not None:
PSD[Mask_ub_notBleached]=ID_notBleached
PSD[Mask_bu_notBleached]=0
if Mub_notBleached is not None:
return PSD,dBoff, dBon, dBoff_notBleached, dBon_notBleached
else:
return PSD,dBoff, dBon | de4cfbefb2040570397e304afc3ee1c72674bb03 | 1,464 |
def train_on_data_once(
model_path,
cv_folds=0,
frames_path=None,
annotations_path=None,
species=None,
fold=0,
fraction=None,
perform_evaluation=True,
debug=0,
):
"""Performs training for the segmentation moduel of SIPEC (SIPEC:SegNet).
Parameters
----------
model_path : str
Path to model, can be either where a new model should be stored or a path to an existing model to be retrained.
cv_folds : int
Number of cross_validation folds, use 0 for a normal train/test split.
frames_path : str
Path to the frames used for training.
annotations_path : str
Path to the annotations used for training.
species : str
Species to perform segmentation on (can be any species, but "mouse" or "primate" have more specialised parameters). If your species is neither "mouse" nor "primate", use "default".
fold : int
If cv_folds > 1, fold is the number of fold to be tested on.
fraction : float
Factor by which to decimate the training data points.
perform_evaluation : bool
Perform subsequent evaluation of the model
debug : bool
Debug verbosity.
Returns
-------
model
SIPEC:SegNet model
mean_ap
Mean average precision score achieved by this model
"""
dataset_train, dataset_val = get_segmentation_data(
frames_path=frames_path,
annotations_path=annotations_path,
name=species,
cv_folds=cv_folds,
fold=fold,
fraction=fraction,
)
# initiate mouse model
model = SegModel(species)
# initiate training
model.init_training(model_path=model_path, init_with="coco")
model.init_augmentation()
# start training
print("training on #NUM images : ", str(len(dataset_train.image_ids)))
model.train(dataset_train, dataset_val)
# evaluate model
if perform_evaluation:
model = SegModel(species)
model_path = model.set_inference(model_path=model_path)
mean_ap = model.evaluate(dataset_val)
# if species == "primate" or species == "mouse":
# debug = 1
if debug:
helper = model_path.split("mask_rcnn_primate_0")
epochs = [
"010",
"020",
"030",
]
print(helper)
print(helper[0] + "mask_rcnn_primate_0" + "001" + ".h5")
for epoch in epochs:
model = SegModel("primate")
model.set_inference(
model_path=helper[0] + "mask_rcnn_primate_0" + epoch + ".h5"
)
mean_ap = model.evaluate(dataset_val)
print(epoch)
print(mean_ap)
return model, mean_ap | 39326dba42baec02118eed7c4d91f7e834b6a4b0 | 1,465 |
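# Hedged usage sketch: all paths and the species value below are placeholders,
# not taken from the source.
# model, mean_ap = train_on_data_once(
#     model_path="./models/segnet",
#     frames_path="./data/frames.npy",
#     annotations_path="./data/annotations.json",
#     species="mouse",
#     cv_folds=0,
#     perform_evaluation=True,
# )
# print("validation mAP:", mean_ap)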
def train(data_base_path, output_dir, label_vocab_path, hparams_set_name,
train_fold, eval_fold):
"""Constructs trains, and evaluates a model on the given input data.
Args:
data_base_path: str. Directory path containing tfrecords named like "train",
"dev" and "test"
output_dir: str. Path to save checkpoints.
label_vocab_path: str. Path to tsv file containing columns
_VOCAB_ITEM_COLUMN_NAME and _VOCAB_INDEX_COLUMN_NAME. See
testdata/label_vocab.tsv for an example.
hparams_set_name: name of a function in the hparams module which returns a
tf.contrib.training.HParams object.
train_fold: fold to use for training data (one of
protein_dataset.DATA_FOLD_VALUES)
    eval_fold: fold to use for evaluation data (one of
protein_dataset.DATA_FOLD_VALUES)
Returns:
A tuple of the evaluation metrics, and the exported objects from Estimator.
"""
hparams = get_hparams(hparams_set_name)
label_vocab = parse_label_vocab(label_vocab_path)
(estimator, train_spec, eval_spec) = _make_estimator_and_inputs(
hparams=hparams,
label_vocab=label_vocab,
data_base_path=data_base_path,
output_dir=output_dir,
train_fold=train_fold,
eval_fold=eval_fold)
return tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec) | 7df17924a7be5ec07009658a7aaf25e79f8f4663 | 1,466 |
def get_seven_seg_light_pattern(img, seven_seg_pts, base=[0, 0]):
"""入力画像imgに対してseg_ptsで指定した座標群"""
ptn = 0x00
    # TODO: Precomputing all of these offset coordinates up front could speed this up further.
    seven_seg_pts_based = [np.array(seven_seg_pts[i]) + np.array(base) for i in range(8)]
for i in range(8):
if (is_seg_light(img, seven_seg_pts_based[i])):
bit = 1
else:
bit = 0
ptn |= (bit << (7 - i))
return ptn | 59a67658f96763e87e634f56954ace150de4c8c0 | 1,467 |
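# Hedged decoding sketch for the returned pattern, assuming seven_seg_pts is ordered
# (a, b, c, d, e, f, g, dp) so that segment `a` lands in the most significant bit;
# the real ordering depends on how the coordinate list was built.
SEVEN_SEG_DIGITS = {
    0b11111100: 0, 0b01100000: 1, 0b11011010: 2, 0b11110010: 3,
    0b01100110: 4, 0b10110110: 5, 0b10111110: 6, 0b11100000: 7,
    0b11111110: 8, 0b11110110: 9,
}
# digit = SEVEN_SEG_DIGITS.get(ptn & 0xFE)   # mask off the decimal-point bit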
def _enzyme_path_to_sequence(path, graph, enzymes_sites):
"""Converts a path of successive enzymes into a sequence."""
return "".join(
[enzymes_sites[path[0]]]
+ [graph[(n1, n2)]["diff"] for n1, n2 in zip(path, path[1:])]
) | a3de9de5dc37df641e36d09d07b49c402fa17fd1 | 1,468 |
def profile_to_section(profile_name):
"""Converts a profile name to a section header to be used in the config."""
if any(c in _WHITESPACE for c in profile_name):
profile_name = shlex_quote(profile_name)
return 'profile %s' % profile_name | c9c50556409c4840c7f530e8645b52b60b3f8fa7 | 1,469 |
def BytesFromFile(filename: str) -> ByteList:
"""Read the EDID from binary blob form into list form.
Args:
filename: The name of the binary blob.
Returns:
The list of bytes that make up the EDID.
"""
with open(filename, "rb") as f:
chunk = f.read()
return [int(x) for x in bytes(chunk)] | bfb899c2d1114f43ccbe4b317496111372e4bf2c | 1,470 |
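# Illustrative sanity check (the file name is a placeholder): every EDID base block
# starts with the fixed 8-byte header 00 FF FF FF FF FF FF 00.
# edid = BytesFromFile("monitor_edid.bin")
# assert edid[:8] == [0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00]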
def array_to_patches(arr, patch_shape=(3,3,3), extraction_step=1, normalization=False):
    # Make use of sklearn's extract_patches helper:
    # https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/feature_extraction/image.py
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content.
Parameters
----------
arr : 3darray
3-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
patches = extract_patches(arr, patch_shape, extraction_step)
patches = patches.reshape(-1, patch_shape[0],patch_shape[1],patch_shape[2])
# patches = patches.reshape(patches.shape[0], -1)
if normalization==True:
patches -= np.mean(patches, axis=0)
patches /= np.std(patches, axis=0)
    print('%d patches have been extracted' % patches.shape[0])
return patches | 2c8e3660c0a9d67794e3d0e869ee87ea690bedc2 | 1,471 |
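# Hedged usage sketch on a toy volume: with a 16**3 array, patch_shape=(3, 3, 3) and
# extraction_step=2 there are ((16 - 3) // 2 + 1) ** 3 = 343 patches.
# vol = np.random.rand(16, 16, 16).astype(np.float32)
# patches = array_to_patches(vol, patch_shape=(3, 3, 3), extraction_step=2)
# assert patches.shape == (343, 3, 3, 3)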
from typing import Optional
from typing import Tuple
def directory_select(message: str, default: Optional[str] = None, cli_flag: Optional[str] = None,
force_interactive: bool = False) -> Tuple[int, str]:
"""Display a directory selection screen.
:param str message: prompt to give the user
:param default: default value to return (if one exists)
:param str cli_flag: option used to set this value with the CLI
:param bool force_interactive: True if it's safe to prompt the user
because it won't cause any workflow regressions
:returns: tuple of the form (`code`, `string`) where
`code` - display exit code
`string` - input entered by the user
"""
return obj.get_display().directory_select(message, default=default, cli_flag=cli_flag,
force_interactive=force_interactive) | 093a29a03c00c21b5612f78ac4548b0c6db6974c | 1,472 |
def trade_from_kraken(kraken_trade):
"""Turn a kraken trade returned from kraken trade history to our common trade
history format"""
currency_pair = kraken_to_world_pair(kraken_trade['pair'])
quote_currency = get_pair_position(currency_pair, 'second')
return Trade(
# Kraken timestamps have floating point ...
timestamp=convert_to_int(kraken_trade['time'], accept_only_exact=False),
pair=currency_pair,
type=kraken_trade['type'],
rate=FVal(kraken_trade['price']),
cost=FVal(kraken_trade['cost']),
cost_currency=quote_currency,
fee=FVal(kraken_trade['fee']),
fee_currency=quote_currency,
amount=FVal(kraken_trade['vol']),
location='kraken'
) | 6b18a1d396605450f1af6fc1cfb2231852c964a9 | 1,473 |
def _create_instancer_mesh(positions: np.ndarray, name="mesh_points", *, bpy):
"""Create mesh with where each point is a pseudo face
(three vertices at the same position.
"""
assert positions.ndim == 2
assert positions.shape[1] == 3
if name in bpy.data.meshes:
raise RuntimeError("Mesh '{}' already exists.".format(name))
mesh = bpy.data.meshes.new(name=name)
num_vertices = len(positions)
mesh.vertices.add(num_vertices * 3)
mesh.vertices.foreach_set("co", np.repeat(positions, 3, axis=0).reshape((-1)))
mesh.loops.add(num_vertices * 3)
mesh.loops.foreach_set("vertex_index", np.arange(0, 3 * num_vertices))
loop_start = np.arange(0, 3 * num_vertices, 3, np.int32)
loop_total = np.full(fill_value=3, shape=(num_vertices,), dtype=np.int32)
num_loops = loop_start.shape[0]
mesh.polygons.add(num_loops)
mesh.polygons.foreach_set("loop_start", loop_start)
mesh.polygons.foreach_set("loop_total", loop_total)
mesh.update()
mesh.validate()
logger.info("Created instancer mesh with {} vertices.".format(len(positions)))
return mesh | d60cd53cd6b00e0c17df8ee33be38f48aafebe8e | 1,474 |
def bound_to_nitorch(bound, as_type='str'):
"""Convert boundary type to niTorch's convention.
Parameters
----------
bound : [list of] str or bound_like
Boundary condition in any convention
as_type : {'str', 'enum', 'int'}, default='str'
Return BoundType or int rather than str
Returns
-------
bound : [list of] str or BoundType
Boundary condition in NITorch's convention
"""
intype = type(bound)
if not isinstance(bound, (list, tuple)):
bound = [bound]
obound = []
for b in bound:
b = b.lower() if isinstance(b, str) else b
if b in ('replicate', 'repeat', 'border', 'nearest', BoundType.replicate):
obound.append('replicate')
elif b in ('zero', 'zeros', 'constant', BoundType.zero):
obound.append('zero')
elif b in ('dct2', 'reflect', 'reflection', 'neumann', BoundType.dct2):
obound.append('dct2')
elif b in ('dct1', 'mirror', BoundType.dct1):
obound.append('dct1')
elif b in ('dft', 'wrap', 'circular', BoundType.dft):
obound.append('dft')
elif b in ('dst2', 'antireflect', 'dirichlet', BoundType.dst2):
obound.append('dst2')
elif b in ('dst1', 'antimirror', BoundType.dst1):
obound.append('dst1')
else:
raise ValueError(f'Unknown boundary condition {b}')
if as_type in ('enum', 'int', int):
obound = list(map(lambda b: getattr(BoundType, b), obound))
if as_type in ('int', int):
obound = [b.value for b in obound]
if issubclass(intype, (list, tuple)):
obound = intype(obound)
else:
obound = obound[0]
return obound | 9767dbc3693fd105fed9d89b15340d2ba4d1c5dd | 1,475 |
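# Worked examples of the aliasing above:
# bound_to_nitorch(['reflect', 'zeros', 'wrap']) == ['dct2', 'zero', 'dft']
# bound_to_nitorch('mirror', as_type='enum')     is BoundType.dct1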
import tempfile
import numpy
import os
def distance_transform_edt(
base_region_raster_path_band, target_distance_raster_path,
sampling_distance=(1., 1.), working_dir=None,
raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
"""Calculate the euclidean distance transform on base raster.
Calculates the euclidean distance transform on the base raster in units of
pixels multiplied by an optional scalar constant. The implementation is
based off the algorithm described in: Meijster, Arnold, Jos BTM Roerdink,
and Wim H. Hesselink. "A general algorithm for computing distance
transforms in linear time." Mathematical Morphology and its applications
to image and signal processing. Springer, Boston, MA, 2002. 331-340.
The base mask raster represents the area to distance transform from as
any pixel that is not 0 or nodata. It is computationally convenient to
calculate the distance transform on the entire raster irrespective of
nodata placement and thus produces a raster that will have distance
transform values even in pixels that are nodata in the base.
Args:
base_region_raster_path_band (tuple): a tuple including file path to a
raster and the band index to define the base region pixels. Any
pixel that is not 0 and nodata are considered to be part of the
region.
target_distance_raster_path (string): path to the target raster that
is the exact euclidean distance transform from any pixel in the
base raster that is not nodata and not 0. The units are in
``(pixel distance * sampling_distance)``.
sampling_distance (tuple/list): an optional parameter used to scale
the pixel distances when calculating the distance transform.
Defaults to (1.0, 1.0). First element indicates the distance
traveled in the x direction when changing a column index, and the
second element in y when changing a row index. Both values must
be > 0.
working_dir (string): If not None, indicates where temporary files
should be created during this run.
raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver
name string as the first element and a GDAL creation options
tuple/list as the second. Defaults to a GTiff driver tuple
defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.
Return:
None
"""
working_raster_paths = {}
for raster_prefix in ['region_mask_raster', 'g_raster']:
with tempfile.NamedTemporaryFile(
prefix=raster_prefix, suffix='.tif', delete=False,
dir=working_dir) as tmp_file:
working_raster_paths[raster_prefix] = tmp_file.name
nodata = (get_raster_info(base_region_raster_path_band[0])['nodata'])[
base_region_raster_path_band[1]-1]
nodata_out = 255
def mask_op(base_array):
"""Convert base_array to 1 if not 0 and nodata, 0 otherwise."""
if nodata is not None:
return ~numpy.isclose(base_array, nodata) & (base_array != 0)
else:
return base_array != 0
if not isinstance(sampling_distance, (tuple, list)):
raise ValueError(
"`sampling_distance` should be a tuple/list, instead it's %s" % (
type(sampling_distance)))
sample_d_x, sample_d_y = sampling_distance
if sample_d_x <= 0. or sample_d_y <= 0.:
raise ValueError(
"Sample distances must be > 0.0, instead got %s",
sampling_distance)
raster_calculator(
[base_region_raster_path_band], mask_op,
working_raster_paths['region_mask_raster'], gdal.GDT_Byte, nodata_out,
calc_raster_stats=False,
raster_driver_creation_tuple=raster_driver_creation_tuple)
geoprocessing_core._distance_transform_edt(
working_raster_paths['region_mask_raster'],
working_raster_paths['g_raster'], sampling_distance[0],
sampling_distance[1], target_distance_raster_path,
raster_driver_creation_tuple)
for path in working_raster_paths.values():
try:
os.remove(path)
except OSError:
LOGGER.warning("couldn't remove file %s", path) | 186a53571e7ae03c83bfe899aef4a40067c0321e | 1,476 |
def Amp(f: jnp.ndarray, theta: jnp.ndarray) -> jnp.ndarray:
"""
Computes the Taylor F2 Frequency domain strain waveform with non-standard
    spin-induced quadrupole moment for object two.
Note that this waveform assumes object 1 is a BH and therefore uses the
chi * M_total relation to find C
Note that this waveform also assumes that object one is the more massive.
Therefore the more massive object is always considered a BH
Returns:
Strain (array):
"""
# (
# th0,
# th3,
# _,
# _,
# _,
# _,
# ) = theta
# M_chirp = (
# 1 / (16 * pi * f[0]) * (125 / (2 * th0 ** 3)) ** (1 / 5) * C ** 3 / G
# ) / MSUN
# eta = (16 * pi ** 5 / 25 * th0 ** 2 / th3 ** 5) ** (1 / 3)
# Mt = M_chirp / eta ** (3 / 5)
# (
# Mt,
# eta,
# _,
# _,
# ) = theta
m1, m2, _, _ = theta
Mt = m1 + m2
eta = m1 * m2 / (m1 + m2) ** 2
distance = 1.0
pre = 3.6686934875530996e-19 # (GN*Msun/c^3)^(5/6)/Hz^(7/6)*c/Mpc/sec
Mchirp = Mt * eta ** 0.6
A0 = (
Mchirp ** (5.0 / 6.0)
/ (f + 1e-100) ** (7.0 / 6.0)
/ distance
/ pi ** (2.0 / 3.0)
* jnp.sqrt(5.0 / 24.0)
)
return pre * A0 | d618b760d8be0fe9e597eb3a2deefec455489349 | 1,477 |
def get_forecastgroup_path(dscript_name):
"""
get forecast group from dispatcher init file
:param dscript_name: filepath of dispatcher init file
:return: string containing the path of the forecast group
"""
# create object to represent init file
try:
df = DispatcherInitFile(dscript_name)
except RuntimeError:
print("Warning: Could not create Dispatcher script object from script using configFile={}." \
.format(dscript_name))
if debug:
raise
return None
# extract from init file
try:
dcf_name = df.elementValue(DispatcherInitFile.ForecastGroupElement)
except RuntimeError:
print("Warning: Could not extract ForecastGroup from Dispatcher config file {}".format(dscript_name))
if debug:
raise
return None
return dcf_name | 867eddf11a494b17366dba93d46eb298df0b5740 | 1,478 |
def spol(f, g):
"""
Compute the S-polynomial of f and g.
INPUT:
- ``f, g`` -- polynomials
OUTPUT: the S-polynomial of f and g
EXAMPLES::
sage: R.<x,y,z> = PolynomialRing(QQ)
sage: from sage.rings.polynomial.toy_buchberger import spol
sage: spol(x^2 - z - 1, z^2 - y - 1)
x^2*y - z^3 + x^2 - z^2
"""
fg_lcm = LCM(LM(f), LM(g))
return fg_lcm//LT(f)*f - fg_lcm//LT(g)*g | fff84c00b85fda2f4ebfc3e8bbf1caa68b206490 | 1,479 |
import functools
def evaluate_baselines(experiment,
seed,
num_pairs,
samples_per_pair,
loop_size=None):
"""Helper function to evaluate the set of baselines."""
gumbel_max_joint_fn = functools.partial(
coupling_util.joint_from_samples,
coupling_util.gumbel_max_sampler,
num_samples=samples_per_pair,
loop_size=loop_size)
return {
"Independent":
evaluate_joint(
lambda p, q, _: coupling_util.independent_coupling(p, q),
experiment, seed, num_pairs),
"ICDF":
evaluate_joint(
lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q),
experiment, seed, num_pairs),
"ICDF (permuted)":
evaluate_joint(
lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q),
experiment, seed, num_pairs),
"Gumbel-max":
evaluate_joint(
gumbel_max_joint_fn,
experiment,
seed,
num_pairs,
joint_correction_num_samples=samples_per_pair),
} | 555ea777ff1f694fd2ed2846f1e8cb1ca01cccd7 | 1,480 |
def generate_template_mask(protein):
"""Generate template mask."""
protein['template_mask'] = np.ones(shape_list(protein['template_domain_names']),
dtype=np.float32)
return protein | f92304249db66b4d7a28336c60c0fd4ce803da0f | 1,481 |
from scipy.interpolate import InterpolatedUnivariateSpline as spline
def minimal_rotation(R, t, iterations=2):
"""Adjust frame so that there is no rotation about z' axis
The output of this function is a frame that rotates the z axis onto the same z' axis as the
input frame, but with minimal rotation about that axis. This is done by pre-composing the input
rotation with a rotation about the z axis through an angle gamma, where
dgamma/dt = 2*(dR/dt * z * R.conjugate()).w
This ensures that the angular velocity has no component along the z' axis.
Note that this condition becomes easier to impose the closer the input rotation is to a
minimally rotating frame, which means that repeated application of this function improves its
accuracy. By default, this function is iterated twice, though a few more iterations may be
called for.
Parameters
==========
R: quaternion array
Time series describing rotation
t: float array
Corresponding times at which R is measured
iterations: int [defaults to 2]
Repeat the minimization to refine the result
"""
if iterations == 0:
return R
R = quaternion.as_float_array(R)
Rdot = np.empty_like(R)
for i in range(4):
Rdot[:, i] = spline(t, R[:, i]).derivative()(t)
R = quaternion.from_float_array(R)
Rdot = quaternion.from_float_array(Rdot)
halfgammadot = quaternion.as_float_array(Rdot * quaternion.z * R.conjugate())[:, 0]
halfgamma = spline(t, halfgammadot).antiderivative()(t)
Rgamma = np.exp(quaternion.z * halfgamma)
return minimal_rotation(R * Rgamma, t, iterations=iterations-1) | a1bd333ec9a01825a5355f47a80e14e37d510fae | 1,482 |
def get_info(api_key: hug.types.text, hug_timer=20):
"""Return 'getinfo' data from the Gridcoin Research client!"""
if (api_key == api_auth_key):
# Valid API Key!
response = request_json("getinfo", None)
if (response == None):
return {'success': False, 'api_key': True}
else:
return {'success': True, 'api_key': True, 'result': response, 'time_taken': hug_timer}
else:
# Invalid API Key!
return {'success': False, 'api_key': False} | ff4eb5df57cf9faa0464040d9f51040742a8f549 | 1,483 |
def get_output_col_names(perils, factors):
"""Column names of the output data frame that contains `perils` and `factors`"""
return (
PCon.RAW_STRUCT['stem']['col_names'] +
[per + PCon.OUTPUT_DEFAULTS['pf_sep'] + fac
for per, fac in pd.MultiIndex.from_product(
[perils, [PCon.RAW_STRUCT['bp_name']] + factors]
)]
) | ce455a87aeba1f7d4f02b7f1b0e25c4d3eafdd0f | 1,484 |
def _get_real_path(workspace_path):
"""Converts the given workspace path into an absolute path.
A tuple of a real path and an error is returned. In this tuple, either
the real path or error is present. The error is present in the returned tuple
either if no workspace dir is given or the generated real path is not under
the working directory.
"""
if not workspace_path:
return (None, 'No path is given')
root_dir = _get_root_dir(trailing_separator=False)
path = _to_real_path(root_dir, workspace_path)
return (path, None) if path.startswith(root_dir) else (None, 'Not authorized') | 75e0d08b288cc947987b96787f368daeef586612 | 1,485 |
import string
def simple_caesar(txt, rot=7):
"""Caesar cipher through ASCII manipulation, lowercase only."""
alphabet = string.ascii_lowercase # pick alphabet
shifted_alphabet = alphabet[rot:] + alphabet[:rot] # shift it
table = str.maketrans(alphabet, shifted_alphabet) # create mapping table
return txt.lower().translate(table) # apply | eb8d86d37d8a8902663ff68e095b3b822225859c | 1,486 |
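# Quick check: simple_caesar("attack") == "haahjr"  (each letter shifted forward by 7).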
def weave(devicePairs):
"""
"""
routers = [x[0] for x in devicePairs if x[0][1] == "router.PNG"]
selected = []
for devicePair in devicePairs:
starterDevice = devicePair[0]
if starterDevice[1] == "router.PNG":
continue
starterPosition = maths.getCenter(tuple(starterDevice[0]))
distances = []
for (endPosition, endDevice) in devicePair[1:]:
distances.append(maths.getDistance(starterPosition, maths.getCenter(endPosition)))
#if starterDevice[1] == "router.PNG":
# distances[distances.index(min(distances))] = np.Infinity
closestIndex = distances.index(min(distances))
closestDevice = devicePair[closestIndex + 1]
selected.append((starterDevice, closestDevice))
return selected | 6e312f2c89007e67efdb23d93c103e3f7583d48a | 1,487 |
def change_image_ani(image: _Surface,
name: _Optional[str] = None,
id_: _Optional[int] = None) -> _TextureAni:
"""
change_image_ani(image, name=None, id_None)
Type: function
Description: returns a TextureAni that simply changes the image of
an AniElement
Args:
'image' (pygame.Surface): the image to change the element to
'name' (str?): the name of the animation, defaults to None
'id_' (int?): the ID of the animation, defaults to None
Return type: TextureAni
"""
return _TextureAni(
name=name,
frames=[image],
time=0,
id_=id_,
reset_on_end=False
) | ff68e741937512d70ad714e54df037940154467f | 1,488 |
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err:
raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
) from err | 06a014f531944eb0f5d428e5f2880a1e91de797c | 1,489 |
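# Quick check: import_string("collections.OrderedDict") returns the OrderedDict class,
# while import_string("collections") raises ImportError ("doesn't look like a module path").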
def read_u16(f):
"""Reads a two byte unsigned value from the file object f.
"""
temp = f.read(2)
if not temp:
raise EOFError("EOF")
return int.from_bytes(temp, byteorder='little', signed=False) | 03478ce0fd4076ca3a0c4ea2f687cca254ba7052 | 1,490 |
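# Quick check with an in-memory stream (little-endian):
# import io
# assert read_u16(io.BytesIO(b"\x34\x12")) == 0x1234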
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num) | 2862a3a2c456fcfe5461f011a28bdf5ec94971a8 | 1,491 |
def process_image(sample, settings, mode, color_jitter, rotate):
""" process_image """
mean = settings.image_mean
std = settings.image_std
crop_size = settings.crop_size
img_path = sample[0]
img = cv2.imread(img_path)
if mode == 'train':
if rotate:
img = rotate_image(img)
if crop_size > 0:
img = random_crop(
img, crop_size, settings, interpolation=settings.interpolation)
if color_jitter:
img = distort_color(img)
if np.random.randint(0, 2) == 1:
img = img[:, ::-1, :]
else:
if crop_size > 0:
target_size = settings.resize_short_size
img = resize_short(
img, target_size, interpolation=settings.interpolation)
img = crop_image(img, target_size=crop_size, center=True)
img = img[:, :, ::-1]
if 'use_aa' in settings and settings.use_aa and mode == 'train':
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = policy(img)
img = np.asarray(img)
img = img.astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
if mode == 'train' or mode == 'val':
return (img, sample[1])
elif mode == 'test':
return (img, ) | 230991f4078c9963731355b276a6e351f7bcbab9 | 1,492 |
def loglinear(F, X, confmeth, conftype=1, alpha=0.75, t_star=0.0): # pylint: disable=C0103
"""
Function to estimate the parameters (gamma0 and gamma1) of the NHPP
loglinear model. There is no regression function for this model.
:param list F: list of failure counts.
:param list X: list of individual failures times.
:param int confmeth: the method for calculating confidence bounds.
:param int conftype: the confidence level type
1 = lower one-sided
2 = upper one-sided
3 = two-sided (default)
:param float alpha: the confidence level.
:param float t_star: the end of the observation period for time terminated,
or Type I, tests. Defaults to 0.0.
:return: [_gamma0_lower, _gamma0_hat, _gamma0_upper],
[_gamma1_lower, _gamma1_hat, _gamma1_upper]
:rtype: tuple of lists
"""
# Define the function that will be set equal to zero and solved for gamma1.
def _gamma1(gamma1, T, r, Ta): # pylint: disable=C0103
"""
Function for estimating the gamma1 value.
:param float gamma1:
:param float T: the sum of individual failure times.
:param int r: the total number of failures observed.
:param float Ta: the latest observed failure time.
:return: _g1; the starting estimate of the gamma1 parameter.
:rtype: float
"""
# Calculate interim values.
_a = r / gamma1
_b = r * Ta * np.exp(gamma1 * Ta)
_c = np.exp(gamma1 * Ta) - 1.0
_g1 = T + _a - (_b / _c)
return _g1
# Initialize variables.
_g0 = [0.0, 0.0, 0.0]
_g1 = [0.0, 0.0, 0.0]
_typeii = False
# Ensure failure times are of type float.
X = [float(x) for x in X]
# Ensure the confidence level is expressed as a decimal, then find the
# standard normal and student-t critical values for constructing
# confidence bounds on the parameters.
if alpha > 1.0:
alpha = alpha / 100.0
# If no observation time was passed, use the maximum failure time and set
# the _typeii variable True to indicate this is a failure truncated
# dataset.
if t_star == 0.0:
t_star = sum(X)
_typeii = True
if not _typeii:
_N = sum(F) - 1
else:
_N = sum(F) - 2
_T = sum(X)
# Calculate the Loglinear parameters.
_g1[1] = fsolve(_gamma1, 0.001, args=(_T, _N, t_star))[0]
_g0[1] = np.log((_N * _g1[1]) / (np.exp(_g1[1] * t_star) - 1.0))
# TODO: Add support for one-sided bounds.
#if confmeth == 1: # Crow bounds.
#elif confmeth == 3: # Fisher matrix bounds.
    print(_g0, _g1)
return(_g0, _g1) | 77a5e8c388b5e865341afce1949aefcfd1b33bbd | 1,493 |
def convert_pressures(a, from_units, to_units):
"""Converts values in numpy array (or a scalar) from one pressure unit to another, in situ if array.
arguments:
a (numpy float array, or float): array of pressure values to undergo unit conversion in situ, or a scalar
from_units (string): the units of the data before conversion
to_units (string): the required units
returns:
a after unit conversion
note:
To see supported units, use: `valid_uoms(quantity='pressure')`
"""
return convert(a, from_units, to_units, quantity = 'pressure', inplace = True) | d25ca383fe0cfaf6e756958ff99aebf2b06e13a9 | 1,494 |
def amovie(stream: Stream, *args, **kwargs) -> FilterableStream:
"""https://ffmpeg.org/ffmpeg-filters.html#amovie"""
return filter(stream, amovie.__name__, *args, **kwargs) | 60daca8722bb42b34231a82dd3c9175108af8f9b | 1,495 |
import numpy
def kutta_condition(A_source, B_vortex):
"""
Builds the Kutta condition array.
Parameters
----------
A_source: 2D Numpy array of floats
Source contribution matrix for the normal velocity.
B_vortex: 2D Numpy array of floats
Vortex contribution matrix for the normal velocity.
Returns
-------
b: 1D Numpy array of floats
The left-hand side of the Kutta-condition equation.
"""
b = numpy.empty(A_source.shape[0]+1, dtype=float)
# matrix of source contribution on tangential velocity
# is the same than
# matrix of vortex contribution on normal velocity
b[:-1] = B_vortex[0, :] + B_vortex[-1, :]
# matrix of vortex contribution on tangential velocity
# is the opposite of
# matrix of source contribution on normal velocity
b[-1] = - numpy.sum(A_source[0, :] + A_source[-1, :])
print(b)
return b | 0009018c39c21f1bc3b98745ea7a475f0a7e6fe7 | 1,496 |
def absorption_sinogram(p, anglelist):
"""Generates the absorption sinogram for absorption by the full
elemental content of the Phantom2d object.
Parameters
----------
p : Phantom2d object
anglelist : list of float
Ordered list of sinogram projection angles in degrees.
Returns
-------
array of float
Sinogram of requested scattering or fluorescence.
This is a 2d x-theta map of dimensionless values.
"""
sinogram = np.empty((p.cols, len(anglelist)))
if config.show_progress:
pbar = ProgressBar(maxval=max(1, len(anglelist)-1), term_width=80).start()
for i, angle in enumerate(anglelist):
if config.show_progress:
pbar.update(i)
increasing_ix = True # Set True to accumulate cmam along increasing y
n_map = irradiance_map(p, angle, n0=1.0, increasing_ix=increasing_ix)
if increasing_ix:
sinogram[:, i] = np.log(n_map[0] / n_map[-1])
else:
sinogram[:, i] = np.log(n_map[-1] / n_map[0])
return sinogram | 37f5048b9207221387c2410e2bb0be20bafc8dcf | 1,497 |
def trace_feature_vector_from_nodes(embeddings, traces, dimension):
"""
Computes average feature vector for each trace
Parameters
-----------------------
embeddings,
Text-based model containing the computed encodings
traces: List,
List of traces treated as sentences by the model
Returns
-----------------------
vectors: List
list of vector encodings for each trace
"""
vectors_average, vectors_max = [], []
for trace in traces:
trace_vector = []
for token in trace:
try:
trace_vector.append(embeddings[token])
except KeyError:
pass
if len(trace_vector) == 0:
trace_vector.append(np.zeros(dimension))
vectors_average.append(np.array(trace_vector).mean(axis=0))
vectors_max.append(np.array(trace_vector).max(axis=0))
return vectors_average, vectors_max | 93efdf6da293bd6af61c1c77e8b19c76c6b71193 | 1,498 |
def jitter_rotate(drawing, sigma=0.2):
"""
Rotate an entire drawing about 0,0 by a random gaussian.
"""
rotation = np.random.randn(1) * sigma
matrix = create_rotation_matrix(rotation)
return [np.dot(stroke, matrix).squeeze() for stroke in drawing] | 058709f6a84e99fbd8899e3e6c4aed09b7c0ad6e | 1,499 |
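# Hedged sketch of the `create_rotation_matrix` helper assumed above (it is not shown
# in this excerpt): a 2-D rotation matrix for strokes of shape (N, 2), given an angle
# in radians, possibly wrapped in a length-1 array as produced by np.random.randn(1).
import numpy as np

def create_rotation_matrix(theta):
    theta = float(np.asarray(theta).ravel()[0])   # accept a scalar or a length-1 array
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])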