content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def ui_form_stations():
"""
This function lists all stations
"""
# get _all_ the stations
stations = station_get(0)
# render stations in HTML template
return render_template("stations.html", result=stations) | 07f53e694e135e0612c871c279e63432eeedf966 | 2,300 |
import json
import requests
def create_digital_object(obj):
"""
Create a digital object for a cilantro object in AtoM.
:param Object obj: The cilantro object
:return: str The generated URI for the digital object
"""
url = f"{atom_uri}/api/digitalobjects"
headers = {'REST-API-Key': atom_api_key,
'Content-Type': 'application/json'}
data = _get_digital_object_data(obj)
json_data = json.dumps(data, indent=4)
log.debug(f"Digital object: {json_data}")
response = requests.post(url, data=json_data, headers=headers)
response.raise_for_status()
return f"{atom_uri}/{response.json()['slug']}" | 21a803e88330b538e9018116107c680ed6565bc4 | 2,301 |
def fitcreds():
"""
returns the ['credentials'] dictionary
:return: dictionary or None
"""
return fitcfg().get('credentials', None) | d8c30b43ae3c91fc7f08d2a47b401416da8b7d4b | 2,302 |
import re
def brace_expand(str):
"""Perform brace expansion, a lá bash."""
match = re.search('{(.+?)(,.*?)?}', str)
if match:
strings = brace_expand(replace_range(str,
match.start(),
match.end(),
match.group(1)))
if match.group(2):
# Re-wrap the remaining alternatives in braces so they are expanded too.
strings.extend(brace_expand(replace_range(str,
match.start(),
match.end(),
'{' + match.group(2)[1:] + '}')))
return strings
else: # No braces were in the string.
return [str] | a4426eb8d1ecfc3ac8d9b9ecff57c8364b372042 | 2,303 |
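A minimal usage sketch (not part of the dataset row above); `replace_range` is not defined in the snippet, so a plausible splice helper is assumed here.

def replace_range(s, start, end, repl):
    # hypothetical helper assumed by brace_expand: splice repl over s[start:end]
    return s[:start] + repl + s[end:]

brace_expand("img_{a,b}.png")   # ['img_a.png', 'img_b.png']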
def create(number):
"""
create() : Add document to Firestore collection with request body.
Ensure you pass a custom ID as part of json body in post request,
e.g. json={'id': '1', 'title': 'Write a blog post'}
"""
try:
id = request.json['id']
todo_ref = user_ref.document(number).collection("todos")
todo_ref.document(id).set(request.json)
all_todos = [doc.to_dict() for doc in todo_ref.stream()]
return jsonify(all_todos), 200
except Exception as e:
return f"An Error Occured: {e}" | 94d703a4b310545d895e05dc9e872fb0b64e990b | 2,304 |
def sort_terms(node, parent_children, hierarchy):
"""Recursively create a list of nodes grouped by category."""
for c in parent_children.get(node, []):
hierarchy.append(c)
sort_terms(c, parent_children, hierarchy)
return hierarchy | 5ae737206f3859c01da6b8e9475db688e53a8d13 | 2,305 |
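An illustrative call with a small parent-to-children mapping; the traversal is depth-first, pre-order.

parent_children = {"root": ["animals", "plants"], "animals": ["cats"]}
sort_terms("root", parent_children, [])   # ['animals', 'cats', 'plants']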
def sumPm(mirror):
"""Returns sum of all mechanical power from active machines"""
sysPm = 0.0
# for each area
for area in mirror.Area:
# reset current sum
area.cv['Pm'] = 0.0
# sum each active machine Pm to area agent
for mach in area.Machines:
if mach.cv['St'] == 1:
area.cv['Pm'] += mach.cv['Pm']
# sum area agent totals to system
sysPm += area.cv['Pm']
return sysPm | 996891a5386f59fb111b5726552537d67e9c419c | 2,306 |
def chunk(rs, n, column=None):
"""Returns a list of rows in chunks
:param rs: a list of rows
:param n:
- int => returns n rows of about the same size
- list of fractions, [0.3, 0.4, 0.3] => returns 3 rows of 30%, 40%, 30%
- list of nums, [100, 500, 1000] => returns 4 rows with break points\
100, 500, 1000, but you must pass the column name\
for the break points like
chunk(rs, [100, 500, 1000], 'col')
:param column: column name for break points
:returns: a list of rows
"""
size = len(rs)
if isinstance(n, int):
start = 0
result = []
for i in range(1, n + 1):
end = int((size * i) / n)
# must yield anyway
result.append(rs[start:end])
start = end
return result
# n is a list of percentiles
elif not column:
# then it is a list of percentiles for each chunk
assert sum(n) <= 1, "Sum of percentiles for chunks must be <= 1.0"
ns = [int(x * size) for x in accumulate(n)]
result = []
for a, b in zip([0] + ns, ns):
result.append(rs[a:b])
return result
# n is a list of break points
else:
rs.sort(key=lambda r: r[column])
start, end = 0, 0
result = []
for bp in n:
while end < size and rs[end][column] < bp:
end += 1
result.append(rs[start:end])
start = end
result.append(rs[end:])
return result | cfaad9bd0b973e3bcf7474f46ad14d01b52924cf | 2,307 |
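Illustrative calls for the three modes (the fraction-based branch above assumes `from itertools import accumulate` is in scope):

from itertools import accumulate  # needed by the fraction-based branch of chunk()

rows = [{'col': v} for v in (50, 120, 300, 700, 900)]
chunk(rows, 2)                    # two slices of roughly equal size
chunk(rows, [0.25, 0.75])         # a 25% / 75% split
chunk(rows, [100, 500], 'col')    # rows below 100, below 500, and the rest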
def get_file_detail(traffic_file):
"""
Args:
traffic_file: a name
Returns:
roadnet_file and flow_file name
"""
phase = None
roadnet_file = None
flow_file = None
for category in TRAFFIC_CATEGORY:
if traffic_file in list(TRAFFIC_CATEGORY[category].keys()):
phase = TRAFFIC_CATEGORY[category][traffic_file][0]
roadnet_file = TRAFFIC_CATEGORY[category][traffic_file][1]
flow_file = TRAFFIC_CATEGORY[category][traffic_file][2]
return phase, roadnet_file, flow_file | 638efcc7254544d0cd0ab305cea12aed5a0e7ba2 | 2,308 |
async def root():
"""
:return: welcoming page returning Made by @woosal1337
"""
try:
return {f"Made by @woosal1337"}
except Exception as e:
return {f"{e} has happened!"} | 3d4e9acf038f60a9d91755eafcfb7e9dcfaa7a71 | 2,309 |
def sequence_accuracy_score(y_true, y_pred):
"""
Return sequence accuracy score. Match is counted only when two sequences
are equal.
"""
total = len(y_true)
if not total:
return 0
matches = sum(1 for yseq_true, yseq_pred in zip(y_true, y_pred)
if yseq_true == yseq_pred)
return matches / total | b1345aaa6fd0161f648a1ca5b15c921c2ed635ad | 2,310 |
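A quick illustrative check:

y_true = [["B", "I", "O"], ["O", "O"]]
y_pred = [["B", "I", "O"], ["B", "O"]]
sequence_accuracy_score(y_true, y_pred)   # 0.5 -- only the first sequence matches exactly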
def load_content(sentence_file):
"""Load input file with sentences to build LSH.
Args:
sentence_file (str): Path to input with txt file with sentences to Build LSH.
Returns:
dict: Dict with strings and version of string in lower case and without comma.
"""
sentences = {}
with open(sentence_file) as content:
for line in content:
line = line.strip()
line_clean = line.replace(",", "")
line_clean = line_clean.lower()
sentences[line_clean] = line
return sentences | 31c3104179e995d59cffbea92caf2d32decc572c | 2,311 |
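A round trip with a temporary file shows the lower-cased, comma-stripped keys:

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("Hello, World\nGood Morning\n")
load_content(f.name)   # {'hello world': 'Hello, World', 'good morning': 'Good Morning'}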
import collections
def _analyse_graph(graph):
""" Analyses a connected graph to find a set of distinct paths and a
topologically ordered sequence.
"""
# Make a copy without self-cycles to modify
g = graph.clean_copy()
g0 = g.copy()
# Start with the diameter of the graph
diameter = g.diameter()
if not diameter:
# The graph has no edges so return sorted list of isolated vertices
return [], sorted(g.isolated, key=graph.sort)
diameter_ac = diameter.make_acyclic()
# Remove diameter from graph and search the rest
g.remove_path(diameter)
paths, sequence = [diameter], list(diameter_ac)
stack = collections.deque()
# Search paths both forwards and backwards
# All diverging branches are searched backwards and vice versa
stack.extend((v, True) for v in reversed(diameter))
stack.extend((v, False) for v in diameter)
while stack:
vertex, forward = stack.pop()
try:
new_paths = g.search_paths(vertex, forward=forward).values()
except KeyError:
continue
if not any(new_paths):
continue
# Add paths to list
longest = max(sorted(new_paths, key=graph.path_sort), key=len)
g.remove_path(longest)
paths.append(longest)
# Merge paths into sequence
longest_ac = longest.make_acyclic()
index = sequence.index(vertex)
if forward:
_merge_forward(g0, sequence, longest_ac, index)
else:
_merge_backward(g0, sequence, longest_ac, index)
# Add new paths to stack for searching
stack.extendleft((v, True) for v in reversed(longest_ac))
stack.extendleft((v, False) for v in longest_ac)
# Maybe another distinct path here - return vertex to queue
stack.append((vertex, forward))
if g.vertices:
# Expect all vertices and edges to be removed from connected graph.
raise ValueError(
f"Vertices {g.vertices!r} still left over from graph {g0!r}"
)
_rearrange_cycles(g0, sequence)
return paths, sequence | 19bcc25117b962626db5386e8cf39625e315cd79 | 2,312 |
def rare_last_digit(first):
"""Given a leading digit, first, return all possible last digits of a rare number"""
if first == 2:
return (2,)
elif first == 4:
return (0,)
elif first == 6:
return (0,5)
elif first == 8:
return (2,3,7,8)
else:
raise ValueError(f"Invalid first digit of rare number: {first}") | 2b15d35a6281d679dce2dedd7c1944d2a93e8756 | 2,313 |
from numpy import dot, diag
from numpy import abs as npabs, max as npmax
def decorrelation_witer(W):
"""
Iterative MDUM decorrelation that avoids matrix inversion.
"""
lim = 1.0
tol = 1.0e-05
W = W/(W**2).sum()
while lim > tol:
W1 = (3.0/2.0)*W - 0.5*dot(dot(W,W.T),W)
lim = npmax(npabs(npabs(diag(dot(W1,W.T))) - 1.0))
W = W1
return W | ddc67d5aee68acb6202f7ebd2f5e99a6bf9e3158 | 2,314 |
import webbrowser
def _open_public_images(username):
"""
:param username: username of a given person
:return: 1 on success, -1 on failure
"""
try:
new_url = "https://www.facebook.com/" + username + "/photos_all"
webbrowser.open_new_tab(new_url)
return 1
except Exception as e:
print(e)
return -1 | bd488bae2182bd2d529734f94fb6fc2b11ca88d0 | 2,315 |
def fermat_number(n: int) -> int:
"""
https://en.wikipedia.org/wiki/Fermat_number
https://oeis.org/A000215
>>> [fermat_number(i) for i in range(5)]
[3, 5, 17, 257, 65537]
"""
return 3 if n == 0 else (2 << ((2 << (n - 1)) - 1)) + 1 | 4427ab7171fd86b8e476241bc94ff098e0683363 | 2,316 |
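The bit shifts compute 2**(2**n), so the closed form can be checked directly:

all(fermat_number(n) == 2 ** (2 ** n) + 1 for n in range(6))   # True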
def get_id_ctx(node):
"""Gets the id and attribute of a node, or returns a default."""
nid = getattr(node, "id", None)
if nid is None:
return (None, None)
return (nid, node.ctx) | cbca8573b4246d0378297e0680ab05286cfc4fce | 2,317 |
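The `id`/`ctx` pair mirrors `ast.Name` nodes; a hypothetical illustration with the standard `ast` module:

import ast

name_node = ast.parse("x = 1").body[0].targets[0]   # ast.Name(id='x', ctx=Store())
get_id_ctx(name_node)                                # ('x', <ast.Store object>)
get_id_ctx(ast.Constant(value=1))                    # (None, None) -- no `id` attribute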
def fitsfile_clumpy(filename,ext=None,header=True,**kwargs):
"""Read a (CLUMPY) fits file.
Parameters:
-----------
filename : str
Path to the CLUMPY .fits file to be read.
ext : {int,str}
The FITS extension to be read. Either as EXTVER, specifying
the HDU by an integer, or as EXTNAME, giving the HDU by name.
For CLUMPY FITS files:
0 or 'imgdata' is the image cube
1 or 'clddata' is the projected map of cloud number per line-of-sight.
header : bool
If True, the HDU header will also be read. Not used currently.
"""
if 'hypercubenames' in kwargs:
ext = kwargs['hypercubenames'][0]
assert (isinstance(ext,(int,str))),\
"'ext' must be either integer or a string, specifying the FITS extension by number or by name, respectively."
dataset, header = pyfits.getdata(filename,ext,header=header) # dataset.shape is (Nwave,Nypix,Nxpix) for 3D, and (Nypix,Nxpix) for 2D.
x = N.arange(float(header['NAXIS1']))
y = N.arange(float(header['NAXIS2']))
# x = range(header['NAXIS1'])
# y = range(header['NAXIS2'])
if dataset.ndim == 2:
axes = None
axnames = ['x','y']
axvals = [x,y]
elif dataset.ndim == 3:
axes = (0,2,1)
wave = N.array([v for k,v in header.items() if k.startswith('LAMB')])
axnames = ['wave','x','y']
axvals = [wave,x,y]
dataset = N.transpose(dataset,axes=axes) # now it's (Nwave,Nxpix,Nypix) for 3D, and (Nxpix,Nypix) for 2D.
datasets = [dataset] # has to be a list for function 'convert'
hypercubenames = kwargs['hypercubenames']
return datasets, axnames, axvals, hypercubenames | 70055d193bba1e766a02f522d31eb9f2327ccc64 | 2,318 |
from typing import Optional
def is_tkg_plus_enabled(config: Optional[dict] = None) -> bool:
"""
Check if TKG plus is enabled by the provider in the config.
:param dict config: configuration provided by the user.
:return: whether TKG+ is enabled or not.
:rtype: bool
"""
if not config:
try:
config = get_server_runtime_config()
except Exception:
return False
service_section = config.get('service', {})
tkg_plus_enabled = service_section.get('enable_tkg_plus', False)
if isinstance(tkg_plus_enabled, bool):
return tkg_plus_enabled
elif isinstance(tkg_plus_enabled, str):
return utils.str_to_bool(tkg_plus_enabled)
return False | 75f1ef48582777ea59d543f33517cf10c2871927 | 2,319 |
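Illustrative calls (string values go through `utils.str_to_bool`, which is outside this snippet):

is_tkg_plus_enabled({'service': {'enable_tkg_plus': True}})   # True
is_tkg_plus_enabled({'service': {}})                          # False (default)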
import math
import numpy as np
from fcmaes.optimizer import Sequence, Da_cpp, Cma_cpp  # assumed source of these optimizer classes
def da_cma(max_evaluations = 50000, da_max_evals = None, cma_max_evals = None,
popsize=31, stop_fitness = -math.inf):
"""Sequence differential evolution -> CMA-ES."""
daEvals = np.random.uniform(0.1, 0.5)
if da_max_evals is None:
da_max_evals = int(daEvals*max_evaluations)
if cma_max_evals is None:
cma_max_evals = int((1.0-daEvals)*max_evaluations)
opt1 = Da_cpp(max_evaluations = da_max_evals, stop_fitness = stop_fitness)
opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
stop_fitness = stop_fitness)
return Sequence([opt1, opt2]) | 378d54d0da1e5ec36529ae3fa94fd40b9a2dbecd | 2,320 |
from typing import Set
def j_hashset(s: Set = None) -> jpy.JType:
"""Creates a Java HashSet from a set."""
if s is None:
return None
r = jpy.get_type("java.util.HashSet")()
for v in s:
r.add(v)
return r | 28ad96de2b973d006e38b3e5b3228b81da31f4b6 | 2,321 |
def get_year(h5, songidx=0):
"""
Get release year from a HDF5 song file, by default the first song in it
"""
return h5.root.musicbrainz.songs.cols.year[songidx] | eabd7cfd63a06448f7ef4c94f39a8d44af0d971e | 2,322 |
def _create_simulation_parametrization():
"""Convert named scenarios to parametrization.
Each named scenario is duplicated with different seeds to capture the uncertainty in
the simulation.
"""
named_scenarios = get_named_scenarios()
scenarios = []
for name, specs in named_scenarios.items():
is_resumed = specs.get("is_resumed", "fall")
save_last_states = specs.get("save_last_states", False)
for seed in range(specs["n_seeds"]):
produces = {
"period_outputs": create_path_to_period_outputs_of_simulation(
name, seed
)
}
if specs.get("save_rapid_test_statistics", False):
rapid_test_statistics_path = create_path_to_raw_rapid_test_statistics(
name, seed
)
produces["rapid_test_statistics"] = rapid_test_statistics_path
# since we use "append" mode to build this we need to delete the
# present file with every run
if rapid_test_statistics_path.exists():
rapid_test_statistics_path.unlink()
else:
rapid_test_statistics_path = None
if save_last_states:
produces["last_states"] = create_path_to_last_states_of_simulation(
name, seed
)
depends_on = get_simulation_dependencies(
debug=FAST_FLAG == "debug",
is_resumed=is_resumed,
)
if is_resumed:
depends_on["initial_states"] = create_path_to_last_states_of_simulation(
f"{is_resumed}_baseline", seed
)
spec_tuple = (
depends_on,
specs["sim_input_scenario"],
specs["params_scenario"],
specs["start_date"],
specs["end_date"],
save_last_states,
produces,
500 + 100_000 * seed,
is_resumed,
rapid_test_statistics_path,
)
scenarios.append(spec_tuple)
signature = (
"depends_on, sim_input_scenario, params_scenario, "
+ "start_date, end_date, save_last_states, produces, seed, "
+ "is_resumed, rapid_test_statistics_path"
)
return signature, scenarios | 281dd9c70c9d60f2ffa8b02fb69341ffd3c2ad19 | 2,323 |
def calc_buffer(P, T, buffer):
"""
Master function to calc any buffer given a name.
Parameters
----------
P: float
Pressure in GPa
T: float or numpy array
Temperature in degrees K
buffer: str
Name of buffer
Returns
-------
float or numpy array
logfO2
"""
if buffer == 'NNO':
return calc_NNO(P, T)
elif buffer == 'QFM':
return calc_QFM(P, T)
elif buffer == 'IW':
return calc_IW(P, T)
elif buffer == 'CrCr2O3':
return calc_CrCr2O3(P, T)
elif buffer == 'SiSiO2':
return calc_SiSiO2(P, T)
elif buffer == 'HM':
return calc_HM(P, T)
elif buffer == 'CoCoO':
return calc_CoCoO(P, T)
elif buffer == 'ReReO':
return calc_ReReO(P, T)
elif buffer == 'Graphite':
return calc_Graphite(P, T)
elif buffer == 'QIF':
return calc_QIF(P, T)
elif buffer == 'MoMoO2':
return calc_MoMoO2(P,T)
elif buffer == 'CaCaO':
return calc_CaCaO(P,T)
elif buffer == 'AlAl2O3':
return calc_AlAl2O3(P,T)
elif buffer == 'KK2O':
return calc_KK2O(P,T)
elif buffer == 'MgMgO':
return calc_MgMgO(P,T)
elif buffer == 'MnMnO':
return calc_MnMnO(P,T)
elif buffer == 'NaNa2O':
return calc_NaNa2O(P,T)
elif buffer == 'TiTiO2':
return calc_TiTiO2(P,T)
else:
raise InputError('Buffer name not recognized') | acb75ec734e0c366d1424bee608ec2b32f4be626 | 2,324 |
def printImproperDihedral(dihedral, alchemical = False):
"""Generate improper dihedral line
Parameters
----------
dihedral : dihedral Object
dihedral Object
Returns
-------
dihedralLine : str
Improper dihedral line data
"""
V2 = dihedral.V2*0.5
V2_B = dihedral.V2_B*0.5
label = 'imptors %7s %5s %5s %5s %8.3f %4.1f %2d\n' % \
(dihedral.atomA.typeA, dihedral.atomB.typeA, dihedral.atomC.typeA, dihedral.atomD.typeA, V2, 180.0, 2)
if alchemical: label = 'imptors %7s %5s %5s %5s %8.3f %4.1f %2d\n' % \
(dihedral.atomA.typeB, dihedral.atomB.typeB, dihedral.atomC.typeB, dihedral.atomD.typeB, V2_B, 180.0, 2)
return label | bcfece212ac6cc0eb476cb96c44e6af910185bc7 | 2,325 |
import math
def inv_erf(z):
"""
Inverse error function.
:param z: function input
:type z: float
:return: result as float
"""
if z <= -1 or z >= 1:
return "None"
if z == 0:
return 0
result = ndtri((z + 1) / 2.0) / math.sqrt(2)
return result | 7ba55c0a0544f65b95c4af93b4ccdfa7d58faf2b | 2,326 |
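`ndtri` is assumed to come from `scipy.special`; a round trip against `math.erf` recovers the input:

import math
from scipy.special import ndtri  # assumed source of the ndtri used above

round(inv_erf(math.erf(0.75)), 6)   # 0.75
inv_erf(1.5)                        # "None" (input outside the open interval (-1, 1))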
from typing import Optional
import requests
def batch_post(
api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the batch API endpoint for events"""
res = post(api_key, host, "/batch/", gzip, timeout, **kwargs)
return _process_response(res, success_message="data uploaded successfully", return_json=False) | 18d039f1bd430cb85a2e8ad18777e5a289aef41f | 2,327 |
from typing import List
import pickle
import pandas as pd
def load_ste_data(task_name: str) -> List[pd.DataFrame]:
"""Loads the STE data corresponding to the given task name.
Args:
task_name (str): The name of the STE data file.
Returns:
List[pd.DataFrame]: The STE data if found, else empty list.
"""
# Variant-aware STE task names
ste_task_variant_names = get_ste_data_names()
# Variant-agnostic STE task names
ste_task_base_names = set(
[task_name.split("_")[0] for task_name in ste_task_variant_names]
)
if task_name in ste_task_variant_names:
# Load variant-aware STE data
ste_file_name = l2l.get_l2root_base_dirs("taskinfo", task_name + ".pickle")
with open(ste_file_name, "rb") as ste_file:
ste_data = pickle.load(ste_file)
return ste_data
elif task_name in ste_task_base_names:
ste_data = []
# Load variant-agnostic STE data
for ste_variant_file in l2l.get_l2root_base_dirs("taskinfo").glob(
task_name + "*.pickle"
):
with open(ste_variant_file, "rb") as ste_file:
ste_data.extend(pickle.load(ste_file))
# Remove variant label from task names
for idx, ste_data_df in enumerate(ste_data):
ste_data[idx]["task_name"] = ste_data_df["task_name"].apply(
lambda x: x.split("_")[0]
)
return ste_data
else:
return [] | 71d24a5b16fdd0ae9b0cc8b30a4891388594c519 | 2,328 |
def bsplclib_CacheD1(*args):
"""
* Perform the evaluation of the cache. The parameter must be normalized between 0 and 1 for the span. The Cache must be valid when calling this routine; the Geom package will ensure that. The result is then multiplied by the weights; this just evaluates the current point. CacheParameter is where the Cache was constructed; SpanLength is used to normalize the polynomial in the cache to avoid bad conditioning effects.
:param U:
:type U: float
:param Degree:
:type Degree: int
:param CacheParameter:
:type CacheParameter: float
:param SpanLenght:
:type SpanLenght: float
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param Weights:
:type Weights: TColStd_Array1OfReal &
:param Point:
:type Point: gp_Pnt
:param Vec:
:type Vec: gp_Vec
:rtype: void
* Perform the evaluation of the BSpline basis and then multiplies by the weights; this just evaluates the current point. The parameter must be normalized between 0 and 1 for the span. The Cache must be valid when calling this routine; the Geom package will ensure that. CacheParameter is where the Cache was constructed; SpanLength is used to normalize the polynomial in the cache to avoid bad conditioning effects.
:param U:
:type U: float
:param Degree:
:type Degree: int
:param CacheParameter:
:type CacheParameter: float
:param SpanLenght:
:type SpanLenght: float
:param Poles:
:type Poles: TColgp_Array1OfPnt2d
:param Weights:
:type Weights: TColStd_Array1OfReal &
:param Point:
:type Point: gp_Pnt2d
:param Vec:
:type Vec: gp_Vec2d
:rtype: void
"""
return _BSplCLib.bsplclib_CacheD1(*args) | 41b3834b17b79c0e738338c12e5078cff6cf87ea | 2,329 |
def velocity_filter(freq, corr_spectrum, interstation_distance, cmin=1.0,
cmax=5.0, p=0.05):
"""
Filters a frequency-domain cross-spectrum so as to remove all signal
corresponding to velocities outside a specified range.
In practice, the procedure (i) inverse-Fourier transforms the cross spectrum
to the time domain; (ii) it zero-pads the resulting time-domain signal at
times corresponding to velocities outside the velocity range by applying a
cosine taper (the same cosine taper is applied at the two ends of the
interval); (iii) a forward-Fourier transform brings back the padded cross
correlation to the frequency domain [e.g., Magrini & Boschi 2021].
Parameters
----------
freq : ndarray of shape (n,)
Frequency vector
corr_spectrum : ndarray of shape (n,)
Complex-valued frequency-domain cross-spectrum
interstation_distance : float (in km)
cmin, cmax : float (in km/s)
Velocity range. Default values are 1 and 5
p : float
Decimal percentage of cosine taper. Default is 0.05 (5%)
Returns
-------
corr : ndarray of shape (n,)
Filtered cross-spectrum
References
----------
Magrini & Boschi 2021, Surface‐Wave Attenuation From Seismic Ambient Noise:
Numerical Validation and Application, JGR
"""
dt = 1 / (2 * freq[-1])
idx_tmin = int((interstation_distance/cmax)/dt * (1-p/2)) # 5percent extra for taper
idx_tmax = int((interstation_distance/cmin)/dt * (1+p/2)) # 5% extra for taper
vel_filt_window = cosine_taper(idx_tmax-idx_tmin, p=p)
tcorr = np.fft.irfft(corr_spectrum)
vel_filt = np.zeros(len(tcorr))
vel_filt[idx_tmin : idx_tmax] = vel_filt_window
vel_filt[-idx_tmax+1 : -idx_tmin+1] = vel_filt_window #+1 is just for symmetry reasons
tcorr *= vel_filt
corr = np.fft.rfft(tcorr)
return corr | 56f460b190b8e3fc4d8936a22d9c677e592d3719 | 2,330 |
import io
from google.cloud import speech
from google.cloud.speech import enums, types  # google-cloud-speech < 2.0 API (assumed)
def transcribe_file(path, language):
"""
Transcribe a PCM_16-encoded audio signal stored in a file using Google's STT API (Google Cloud Speech).
This implementation should be changed to transcribe audio-bytes directly.
:param path: path to audio file holding audio bytes
:param language: language of the text spoken in the audio signal
:return: string holding the transcription generated by Google Cloud Speech
or empty string if no transcription was found
"""
client = speech.SpeechClient()
with io.open(path, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
language_code = LANGUAGE_CODES[language]
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
language_code=language_code)
# Detects speech in the audio file
response = client.recognize(config, audio)
if response and response.results:
return response.results[0].alternatives[0].transcript
return '' | 0dcee8c1987c897b14eb25e58612be5d7b284f1b | 2,331 |
def make_aware(dt):
"""Appends tzinfo and assumes UTC, if datetime object has no tzinfo already."""
return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc) | 4894449548c19fc6aef7cd4c98a01f49043b3013 | 2,332 |
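Illustrative calls, assuming `from datetime import timezone` is in scope (the snippet references `timezone.utc`):

from datetime import datetime, timezone

make_aware(datetime(2021, 1, 1))                        # tzinfo set to UTC
make_aware(datetime(2021, 1, 1, tzinfo=timezone.utc))   # returned unchanged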
import torch
def train(model, tokenizer, train_dataset, batch_size, lr, adam_epsilon,
epochs):
"""
:param model: Bert Model to train
:param tokenizer: Bert Tokenizer to train
:param train_dataset:
:param batch_size: Stick to 1 if not using a high-end GPU
:param lr: Suggested learning rate from paper is 5e-5
:param adam_epsilon: Used for weight decay fixed suggested parameter is
1e-8
:param epochs: Usually a single pass through the entire dataset is
satisfactory
:return: Loss
"""
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=batch_size)
t_total = len(train_dataloader) // batch_size # Total Steps
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if
not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=lr, eps=adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, 0, t_total)
# ToDo Case for fp16
# Start of training loop
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", batch_size)
model.train()
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
train_iterator = trange(int(epochs), desc="Epoch")
for _ in train_iterator:
epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration")
for batch in epoch_iterator:
inputs, labels = mask_tokens(batch, tokenizer)
inputs = inputs.to('cuda') # Don't bother if you don't have a gpu
labels = labels.to('cuda')
outputs = model(inputs, masked_lm_labels=labels)
# model outputs are always tuple in transformers (see doc)
loss = outputs[0]
loss.backward()
tr_loss += loss.item()
# if (step + 1) % 1 == 0: # 1 here is a placeholder for gradient
# accumulation steps
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
return model, tokenizer | 7b2ca610b0e52011b55fc8f8bf66f0129001ea1d | 2,333 |
def fastqcounter(infile):
"""
Returns the number of unique sequences in a fastq file
"""
#check if file is derep'd using DerepCheck()
derep = reptools.DerepCheck(infile)
n=0
if derep:
with open(infile) as fn:
for title,seq,qual in reptools.FASTQparser(fn):
n+=reptools.DerepCount(title)
else:
with open(infile) as fn:
for title,seq,qual in reptools.FASTQparser(fn):
n+=1
return(n) | 49b9704d77c3422bb82968bc23c0fa8b77b012a3 | 2,334 |
def raichuMoves(board,player):
""""Generate All raichu Successors"""
piece = "@" if player == "w" else "$"
possible_boards = []
raichu_locs=[(row_i,col_i) for col_i in range(len(board[0])) for row_i in range(len(board)) if board[row_i][col_i]==piece]
for each_raichu in raichu_locs:
new_boards = raichu_move(board, player, piece, each_raichu[0], each_raichu[1])
if len(new_boards) == 0:
continue
possible_boards.extend(new_boards)
return possible_boards | a015d9f2d9505677e2fecf6df4e2dd4142c67a82 | 2,335 |
def check_supported():
"""返回模块是否可用"""
return True | 6575a81d5ad30a3bb9bb857bbdead2cd2e4ff340 | 2,336 |
def tf_batch_propagate(hamiltonian, hks, signals, dt, batch_size):
"""
Propagate signal in batches
Parameters
----------
hamiltonian: tf.tensor
Drift Hamiltonian
hks: Union[tf.tensor, List[tf.tensor]]
List of control hamiltonians
signals: Union[tf.tensor, List[tf.tensor]]
List of control signals, one per control hamiltonian
dt: float
Length of one time slice
batch_size: int
Number of elements in one batch
Returns
-------
tf.tensor
Propagators for all time slices, concatenated across batches
"""
if signals is not None:
batches = int(tf.math.ceil(signals.shape[0] / batch_size))
batch_array = tf.TensorArray(
signals.dtype, size=batches, dynamic_size=False, infer_shape=False
)
for i in range(batches):
batch_array = batch_array.write(
i, signals[i * batch_size : i * batch_size + batch_size]
)
else:
batches = int(tf.math.ceil(hamiltonian.shape[0] / batch_size))
batch_array = tf.TensorArray(
hamiltonian.dtype, size=batches, dynamic_size=False, infer_shape=False
)
for i in range(batches):
batch_array = batch_array.write(
i, hamiltonian[i * batch_size : i * batch_size + batch_size]
)
dUs_array = tf.TensorArray(tf.complex128, size=batches, infer_shape=False)
for i in range(batches):
x = batch_array.read(i)
if signals is not None:
result = tf_propagation_vectorized(hamiltonian, hks, x, dt)
else:
result = tf_propagation_vectorized(x, None, None, dt)
dUs_array = dUs_array.write(i, result)
return dUs_array.concat() | 76ef9633312fd81f47c04d028940febf6625c787 | 2,337 |
def noise_dither_bayer(img:np.ndarray) -> np.ndarray:
"""Adds colored bayer dithering noise to the image.
Args:
img: Image to be dithered.
Returns:
version of the image with dithering applied.
"""
imgtype = img.dtype
size = img.shape
#Note: these are very slow for large images, must crop first before applying.
# Bayer works more or less. I think it's missing a part of the image, the
# dithering pattern is apparent, but the quantized (color palette) is not there.
# Still enough for models to learn dedithering
bayer_matrix = np.array([[0, 8, 2, 10], [12, 4, 14, 6], [3, 11, 1, 9], [15, 7, 13, 5]]) #/256 #4x4 Bayer matrix
bayer_matrix = bayer_matrix*16
red = img[:,:,2] #/255.
green = img[:,:,1] #/255.
blue = img[:,:,0] #/255.
img_split = np.zeros((img.shape[0], img.shape[1], 3), dtype = imgtype)
for values, color, channel in zip((red, green, blue), ('red', 'green', 'blue'), (2,1,0)):
for i in range(0, values.shape[0]):
for j in range(0, values.shape[1]):
x = np.mod(i, 4)
y = np.mod(j, 4)
if values[i, j] > bayer_matrix[x, y]:
img_split[i,j,channel] = 255 #1
dithered = img_split #*255.
return dithered | 9b752e6ef09c9c3b8a3ff276cbef796c96609535 | 2,338 |
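The per-pixel loops above are slow on large images; below is a vectorized sketch of the same 4x4 Bayer threshold (assumes `numpy as np` and an HxWx3 input; not part of the original):

def noise_dither_bayer_fast(img: np.ndarray) -> np.ndarray:
    """Vectorized equivalent: threshold every channel against a tiled Bayer matrix."""
    bayer = np.array([[0, 8, 2, 10], [12, 4, 14, 6], [3, 11, 1, 9], [15, 7, 13, 5]]) * 16
    h, w = img.shape[:2]
    tiled = np.tile(bayer, (h // 4 + 1, w // 4 + 1))[:h, :w]
    return np.where(img > tiled[..., None], 255, 0).astype(img.dtype)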
def get_match_rank(track, tagged_file):
"""
:param track: track metadata to match against
:type track: TrackMetadata
:param tagged_file: list of candidate tagged files
:return: list of final ranks, one per candidate file
"""
filenames = [filter_filename(os.path.splitext(os.path.basename(filename.path))[0]) for filename in tagged_file]
rank1 = [0]*len(tagged_file)
# Alphabetically closest
lowest = 100000
index = -1
values = [0]*len(tagged_file)
for filename in filenames:
value = levenshtein(track.title, filename)
values[filenames.index(filename)] = value
if value < lowest:
lowest = value
index = filenames.index(filename)
print(index)
closest = get_close_matches(track.title, filenames)
if index != -1:
rank1[index] = 1
rank2 = [0]*len(tagged_file)
closest = min(tagged_file, key=lambda x: abs(track.get_duration_in_seconds() - x.length))
rank2[tagged_file.index(closest)] = 1
final_ranks = [0.5*rank1[i] + 0.5*rank2[i] for i in range(0, len(rank1))]
return final_ranks | 446b0a25f7466d2ce46d51637588ff2e2d49a681 | 2,339 |
import os
import random
def execshell_withpipe_ex(cmd, b_printcmd=True):
"""
Deprecated. Recommend using ShellExec.
"""
strfile = '/tmp/%s.%d.%d' % (
'shell_env.py', int(os.getpid()), random.randint(100000, 999999)
)
os.mknod(strfile)
cmd = cmd + ' 1>' + strfile + ' 2>/dev/null'
os.system(cmd)
if b_printcmd:
print(cmd)
fphandle = open(strfile, 'r')
lines = fphandle.readlines()
fphandle.close()
os.unlink(strfile)
return lines | 36191036aebef1af26a2471735cc8a6f45e13d27 | 2,340 |
def parseData(file_name, delimiter=None, header_size=0, col_types=None, ret_array=False):
""" Parse data form a text file
Arguments:
file_name: [str] Name of the input file.
Keyword arguments:
delimiter: [str] Data delimiter (often a comma of a semicolon). None by default, i.e. space/tab
delimited data
header_size: [int] Number of lines in the header of the file. 0 by defualt.
col_types: [type, or list of types] Define which columns are of which type. E.g. if all colums contain
floating point data, then you can specify:
col_types=float.
On the other hand, if the first colum
contains integer values, and second column contains floating point values, you can specify:
col_types=[int, float]
This argument is None by default, meaning that values will be left as strings.
ret_array: [bool] If True, the function returns a numpy array. If False, it returns a Pyhon list.
Be aware that if col_types are specified, and one of the types is float, the whole array will be
a float array. Furthermore, if some values in the read data are strings, the all values in the
numpy array will be strings are well.
Returns:
data_list: Python list if ret_array is False, numpy array if ret_array is True
"""
with open(file_name) as f:
# Skip header
for i in range(header_size):
next(f)
data_list = []
# Go through every line of the file
for line in f:
line = line.replace('\n', '').replace('\r', '')
# Split the line by the given delimiter
if delimiter is None:
line = line.split()
else:
line = line.split(delimiter)
# Convert the columns to given types
if col_types is not None:
if not isinstance(col_types, list):
col_types = [col_types]*len(line)
if len(line) == len(col_types):
for i, (tp, entry) in enumerate(zip(col_types, line)):
line[i] = tp(entry)
data_list.append(line)
# Convert the data to a numpy array
if ret_array:
data_list = np.array(data_list)
return data_list | 184ba8a3176991c033bf339517650205089b4493 | 2,341 |
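Illustrative usage with a hypothetical comma-separated file that has one header line:

# data.csv:
#   time,value
#   1,2.5
#   2,3.75
parseData("data.csv", delimiter=",", header_size=1, col_types=[int, float])
# -> [[1, 2.5], [2, 3.75]]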
from typing import Dict
from typing import Any
from typing import List
def file_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns file's reputation
"""
files = argToList(args.get('file'))
since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
limit = arg_to_number(args.get('limit'), arg_name='limit')
headers = argToList(args.get('headers'))
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for file in files:
if get_hash_type(file) not in ('sha256', 'sha1', 'md5'): # check file's validity
raise ValueError(f'Hash "{file}" is not of type SHA-256, SHA-1 or MD5')
try:
raw_response = client.file(file, since, until, limit)
except Exception as exception:
# If anything happens, handle like there are no results
err_msg = f'Could not process file: "{file}"\n {str(exception)}'
demisto.debug(err_msg)
raw_response = {}
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
malicious_description = get_malicious_description(score, data, params)
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=malicious_description
)
if not headers:
headers = ['description', 'status', 'share_level', 'added_on', 'review_status', 'id', 'password',
'sample_size', 'sample_size_compressed', 'sample_type', 'victim_count', 'md5', 'sha1',
'sha256', 'sha3_384', 'ssdeep']
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for file hash {file}', data, headers=headers)
data_entry = data[0]
file_indicator = Common.File(
dbot_score=dbot_score,
file_type=data_entry.get('sample_type'),
size=data_entry.get('sample_size'),
md5=data_entry.get('md5'),
sha1=data_entry.get('sha1'),
sha256=data_entry.get('sha256'),
ssdeep=data_entry.get('ssdeep'),
tags=data_entry.get('tags')
)
else: # no data
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about file: {file} \n'
file_indicator = Common.File(
dbot_score=dbot_score
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.File',
outputs_key_field='id',
outputs=data,
indicator=file_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results | 3b1313607efe26c8736562d937dd99130b2e8cd8 | 2,342 |
import subprocess
def run_task(client, cmd, cwd, prerequire=[], shell=False, quiet=False):
""" run cmd, in cwd
cmd should be a list (*args), if shell is False
when wildcards are used, shell should be True, and cmd is just a string
prerequire is a list of futures that must be gathered before the cmd can run
"""
if not quiet:
print(f"starting job {cmd} in {cwd}")
client.gather(prerequire)
return client.submit(subprocess.run, cmd,
stdout=subprocess.PIPE,stderr=subprocess.STDOUT,
shell=shell, check=True, cwd=cwd, key=create_uid()) | 8c2aeeccfd36fcfa6fd72c5215f68a1f8946c032 | 2,343 |
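A usage sketch assuming a dask.distributed `Client` and that `create_uid` (referenced above) lives in the same module as `run_task`; the names here are illustrative only.

from dask.distributed import Client
import uuid

def create_uid():
    # hypothetical stand-in for the module's task-key generator
    return uuid.uuid4().hex

client = Client(processes=False)                  # local, in-process cluster
fut = run_task(client, ["echo", "hello"], cwd="/tmp")
print(fut.result().stdout)                        # b'hello\n'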
import numpy as np
import scipy.sparse
import scipy.constants as cons
def Kane_2D_builder(N,dis,mu,B=0,
params={},crystal='zincblende',
mesh=0,
sparse='yes'):
"""
2D 8-band k.p Hamiltonian builder. It obtains the Hamiltonian for a 3D
wire which is infinite in one direction, described using 8-band k.p theory.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential.
B: float
Magnetic field along the wire's direction.
params: dic or str
Kane/Luttinger parameters of the k.p Hamiltonian. 'InAs', 'InSb',
'GaAs' and 'GaSb' select the default parameters for these materials.
crystal: {'zincblende','wurtzite','minimal'}
Crystal symmetry along the nanowire growth. 'minimal' is a minimal
model in which the intra-valence band coupling are ignored.
mesh: mesh
If the discretization is homogeneous, mesh=0. Otherwise, mesh
provides a mesh with the position of the sites in the mesh.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a sparse (csc) matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
if (params=={} or params=='InAs') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InSb') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 20.4, 8.3, 9.1
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 34.8, 15.5, 16.5
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 6.98, 2.06, 2.93
P, m_eff = 1097.45, 1.0
EF, Ecv, Evv, Ep = 0, -1519, -341, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
Ep=3/(0.063)/(3/np.abs(Ecv)+1/np.abs(Ecv+Evv))
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 13.4, 4.7, 6.0
P, m_eff = 971.3, 1.0
EF, Ecv, Evv, Ep = 0, -812, -760, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InAs') and (crystal=='wurtzite'):
m_eff = 1.0
D1,D2,D3,D4=100.3,102.3,104.1,38.8
A1,A2,A3,A4,A5,A6,A7=-1.5726,-1.6521,-2.6301,0.5126,0.1172,1.3103,-49.04
B1,B2,B3=-2.3925,2.3155,-1.7231
e1,e2=-3.2005,0.6363
P1,P2=838.6,689.87
alpha1,alpha2,alpha3=-1.89,-28.92,-51.17
beta1,beta2=-6.95,-21.71
gamma1,Ec, Ev=53.06,0,-664.9
elif crystal=='minimal' or crystal=='zincblende':
gamma0, gamma1, gamma2, gamma3 = params['gamma0'], params['gamma1'], params['gamma2'], params['gamma3']
P, m_eff = params['P'], params['m_eff']
EF, Ecv, Evv = params['EF'], params['Ecv'], params['Evv']
if crystal=='zincblende':
Ep=(cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
## Make sure that the onsite parameters are arrays:
Nx, Ny = N[0], N[1]
if np.ndim(dis)==0:
dis_x, dis_y = dis, dis
else:
dis_x, dis_y = dis[0], dis[1]
if np.isscalar(mesh):
xi_x, xi_y = np.ones(N), np.ones(N)
elif len(mesh)==2:
xi_x, xi_y = dis_x/mesh[0]*np.ones(N), dis_y/mesh[1]*np.ones(N)
else:
xi_x, xi_y = dis_x/mesh[0], dis_y/mesh[1]
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny))
#Number of bands and sites
m_b = 8 * Nx * Ny
m_s = Nx * Ny
#Obtain the eigenenergies:
tx=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3*(xi_x[1::,:]+xi_x[:-1,:])/2
ty=cons.hbar**2/(2*m_eff*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3*(xi_y[:,1::]+xi_y[:,:-1])/2
txy=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)*(dis_y*1e-9))/cons.e*1e3*np.append(np.zeros((1,Ny)),xi_x[1::,:]+xi_x[:-1,:],axis=0)/2*np.append(np.zeros((Nx,1)),xi_y[:,1::]+xi_y[:,:-1],axis=1)/2
txy=txy[1::,1::]
ax=(xi_x[1::,:]+xi_x[:-1,:])/2/(2*dis_x)
ay=(xi_y[:,1::]+xi_y[:,:-1])/2/(2*dis_y)
e = np.append(2*tx[0,:].reshape(1,Ny),np.append(tx[1::,:]+tx[:-1,:],2*tx[-1,:].reshape(1,Ny),axis=0),axis=0)
em = e - np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
e += np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
ty=np.insert(ty,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
ay=np.insert(ay,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
txy=np.insert(txy,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
e, em, mu, tx, ty = e.flatten(), em.flatten(), mu.flatten(), tx.flatten(), ty.flatten()
ax,ay=ax.flatten(),ay.flatten()
if not(B==0):
x, y = np.zeros(N), np.zeros(N)
if np.isscalar(mesh) and mesh==0:
mesh=np.ones((2,Nx,Ny))*dis[0]
for i in range(Nx):
for j in range(Ny):
x[i,j]=np.sum(mesh[0,0:i+1,j])-(Nx-1)*dis_x/2
y[i,j]=np.sum(mesh[1,i,0:j+1])-(Ny-1)*dis_y/2
for i in range(int((Nx-1)/2)):
x[Nx-i-1,:]=-x[i,:]
x[int((Nx-1)/2),:]=0
x=x/np.abs(x[0,0])*(Nx-1)*dis_x/2
for j in range(int((Ny-1)/2)):
y[:,Ny-j-1]=-y[:,j]
y[:,int((Ny-1)/2)]=0
y=y/np.abs(y[0,0])*(Ny-1)*dis_y/2
fact_B=cons.e/cons.hbar*1e-18
Mx, My = -fact_B*y/2*B, fact_B*x/2*B
Mx_kx, My_ky = (xi_x[1::,:]*Mx[1::,:]+xi_x[:-1,:]*Mx[:-1,:])/2/(2*dis_x), (xi_y[:,1::]*My[:,1::]+xi_y[:,:-1]*My[:,:-1])/2/(2*dis_y)
My_ky=np.insert(My_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mm_kx, Mm_ky = (xi_x[1::,:]*(Mx[1::,:]-1j*My[1::,:])+xi_x[:-1,:]*(Mx[:-1,:]-1j*My[:-1,:]))/2/(2*dis_x), -(xi_y[:,1::]*(Mx[:,1::]+1j*My[:,1::])+xi_y[:,:-1]*(Mx[:,:-1]+1j*My[:,:-1]))/2/(2*dis_y)
Mm_ky=np.insert(Mm_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mx, My = Mx.flatten(), My.flatten()
Mx_kx, My_ky = Mx_kx.flatten(), My_ky.flatten()
Mm_kx, Mm_ky = Mm_kx.flatten(), Mm_ky.flatten()
## Build the Hamiltonian:
if crystal=='zincblende':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
O1=(concatenate(((-1/np.sqrt(3)*(gamma2+2*gamma3))*em,-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(ty*(-1/np.sqrt(3)*(gamma2+2*gamma3))),ty*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3))),1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)))),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
B_s_m=(((Mx**2-My**2-2*1j*Mx*My)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k_m=(concatenate((2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
## row 2:
# (2,4)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+2*m_s),np.append(index[1],O1[1][1]+4*m_s))
# (2,7)
args=np.append(args,-np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+7*m_s))
## row 3:
# (3,5)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+3*m_s),np.append(index[1],O1[1][1]+5*m_s))
# (3,6)
args=np.append(args,-np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+3*m_s),np.append(index[1],O1[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+4*m_s),np.append(index[1],O1[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+6*m_s))
# If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
## row 2:
# (2,7)
args=np.append(args,-np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+7*m_s))
# (2,4)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+2*m_s),np.append(index[1],B_s_m[1][1]+4*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+2*m_s),np.append(index[1],B_k_m[1][1]+4*m_s))
## row 3:
# (3,5)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+3*m_s),np.append(index[1],B_s_m[1][1]+5*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+3*m_s),np.append(index[1],B_k_m[1][1]+5*m_s))
# (3,6)
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+3*m_s),np.append(index[1],B_s_m[1][0]+6*m_s))
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+3*m_s),np.append(index[1],B_k_m[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+4*m_s),np.append(index[1],B_s_m[1][0]+7*m_s))
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+4*m_s),np.append(index[1],B_k_m[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
elif crystal=='wurtzite':
Kc=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
Kp=(concatenate((ay,-ay,-1j*ax,1j*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
Kpc=(concatenate((em,-tx,-tx,ty,ty,-1j*txy[0:-1]/2,1j*txy/2,1j*txy/2,-1j*txy[0:-1]/2)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
### Upper diagonal:
## row 0:
# (0,1)
args=-A5*np.conj(Kpc[0])
index=(Kpc[1][1]+0,Kpc[1][0]+m_s)
# (0,2)
args=np.append(args,1j*(A7-alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+2*m_s))
# (0,4)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+4*m_s))
# (0,6)
args=np.append(args,-(P2-beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+6*m_s))
## row 1:
# (1,2)
args=np.append(args,-1j*(A7+alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+2*m_s))
# (1,3)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+m_s),np.append(index[1],Kp[1][0]+3*m_s))
# (1,5)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+5*m_s))
# (1,6)
args=np.append(args,(P2+beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (1,7)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+7*m_s))
## row 2:
# (2,4)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+2*m_s),np.append(index[1],diagonal(m_s)[1]+4*m_s))
# (2,5)
args=np.append(args,-1j*alpha3*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (2,6)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (2,7)
args=np.append(args, beta2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 3:
# (3,4)
args=np.append(args,-A5*Kpc[0])
index=(np.append(index[0],Kpc[1][0]+3*m_s),np.append(index[1],Kpc[1][1]+4*m_s))
# (3,5)
args=np.append(args,-1j*(A7-alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+5*m_s))
# (3,7)
args=np.append(args,(P2-beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+7*m_s))
## row 4:
# (4,5)
args=np.append(args,1j*(A7+alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (4,6)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+4*m_s),np.append(index[1],diagonal(m_s)[1]+6*m_s))
# (4,7)
args=np.append(args,-(P2+beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,-beta2*Kp[0])
index=(np.append(index[0],Kp[1][0]+5*m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (5,7)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+7*m_s))
## row 6:
# (6,7)
args=np.append(args,-1j*gamma1*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+6*m_s),np.append(index[1],Kp[1][0]+7*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+0),np.append(index[1],Kc[1][1]+0))
# (1,1)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+m_s),np.append(index[1],Kc[1][1]+m_s))
# (2,2)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+2*m_s))
# (3,3)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+3*m_s),np.append(index[1],Kc[1][1]+3*m_s))
# (4,4)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+4*m_s),np.append(index[1],Kc[1][1]+4*m_s))
# (5,5)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+5*m_s))
# (6,6)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+6*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (7,7)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+7*m_s),np.append(index[1],Kc[1][1]+7*m_s))
### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate(((D1+D2+Ev)*np.ones(m_s),(D1-D2+Ev)*np.ones(m_s),(Ev)*np.ones(m_s),
(D1+D2+Ev)*np.ones(m_s),(D1-D2+Ev)*np.ones(m_s),(Ev)*np.ones(m_s),
(Ec)*np.ones(m_s),(Ec)*np.ones(m_s)))
elif crystal=='minimal':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
        ## If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,gamma0*T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,gamma0*T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,gamma0*B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,gamma0*B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,gamma0*B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,gamma0*B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
        ### Build matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
return (H) | 237a706dc3047353b021538a2cf1a75ef6c2768b | 2,344 |
def has_loop(edges, threshold=2):
""" check if a list of edges representing a directed graph contains a loop
args:
edges: list of edge sets representing a directed graph i.e. [(1, 2), (2, 1)]
threshold: min number of nodes contained in loop
returns:
bool
"""
g = nx.DiGraph()
g.add_edges_from(edges)
return any(len(comp) >= threshold for comp in strongly_connected_components(g)) | 5442524bf75fb75cc5639ae4f7412b8935234413 | 2,345 |
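# Illustrative usage sketch (not part of the original snippet). Assumes the
# module-level imports the function relies on (`networkx as nx` and
# `strongly_connected_components`) are available.
print(has_loop([(1, 2), (2, 3), (3, 1)]))        # True: 3-node cycle
print(has_loop([(1, 2), (2, 3)]))                # False: acyclic
print(has_loop([(1, 2), (2, 1)], threshold=3))   # False: loop smaller than threshold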
def connect(host=None, dbname=None, user=None, password=None, minconn=1,
maxconn=4):
"""
Attempts to connect to Postgres.
"""
if not any((host, dbname, user, password)):
host, dbname, user, password = get_db_env()
if not any((host, dbname, user, password)):
raise Exception('No database connection provided or configured.')
return ThreadedConnectionPool(minconn, maxconn, host=host, dbname=dbname,
user=user, password=password) | f8aea382b473023f5ea280be55a5b463b52eba49 | 2,346 |
def animate( data_cube, slit_data=None, slit_cmap="viridis", raster_pos=None, index_start=None, index_stop=None, interval_ms=50, gamma=0.4, figsize=(7,7), cutoff_percentile=99.9, save_path=None ):
"""
Creates an animation from the individual images of a data cube.
This function can be pretty slow and take 1-2 minutes.
    Faster alternatives to matplotlib will be researched in the future.
Parameters
----------
data_cube : iris_data_cube
instance of sji_cube or raster_cube
slit_data : numpy.array
array with shape [n_steps, n_y] that is drawn on the slit for each step
slit_cmap : str
colormap to use for the visualisation of slit_data
raster_pos : int
        If not None, only display images at raster position *raster_pos*
index_start : int
index where to start animation (defaults to None -> will be set to 0)
index_stop : int
index where to stop animation (defaults to None -> will be set to n)
interval_ms : int
number of milliseconds between two frames
gamma : float
        gamma correction for plotting: number between 0 (infinite gamma correction) and 1 (no gamma correction)
figsize : tuple
figure size: (width,height)
cutoff_percentile : float
Often the maximum pixels shine out everything else, even after gamma correction. In order to reduce
this effect, the percentile at which to cut the intensity off can be specified with cutoff_percentile
in a range between 0 and 100.
save_path : str
path to file where animation output will be written to (use .mp4 extension)
Returns
-------
IPython.HTML :
HTML object with the animation
"""
# get number of steps
if raster_pos is None:
n = data_cube.shape[0]
else:
n = data_cube.get_raster_pos_steps( raster_pos )
# set default values for index_start and index_stop
if index_start is None:
index_start=0
if index_stop is None:
index_stop=n
# raise exception if there is a problem with i_start / i_stop
if index_stop > n or index_stop <= index_start:
raise Exception("Please make sure that index_start < index_stop < n_steps")
# release a duration warning
if index_stop-index_start > 100 and ir.config.verbosity_level >= 1:
print( "Creating animation with {} frames (this may take while)".format(index_stop-index_start) )
# initialize plot
fig = plt.figure( figsize=figsize )
image = data_cube.get_image_step( 0, raster_pos ).clip(min=0.01)**gamma
vmax = np.percentile( image, cutoff_percentile )
im = plt.imshow( image, cmap="gist_heat", vmax=vmax, origin='lower', interpolation="none" )
if slit_data is not None:
slit_pos = data_cube.get_slit_pos(0)
line = plt.scatter(
[slit_pos]*image.shape[0], np.arange(image.shape[0]),
c=slit_data[0,:], s=5, cmap=slit_cmap, marker='_',
vmin=np.min(slit_data), vmax=np.max(slit_data)
)
plt.colorbar()
# do nothing in the initialization function
def init():
return im,
# animation function
def animate(i, index_start):
xcenix = data_cube.headers[i+index_start]['XCENIX']
ycenix = data_cube.headers[i+index_start]['YCENIX']
date_obs = data_cube.headers[i+index_start]['DATE_OBS']
im.axes.set_title( "Frame {}: {}\nXCENIX: {:.3f}, YCENIX: {:.3f}".format( i+index_start, date_obs, xcenix, ycenix ) )
im.set_data( data_cube.get_image_step( i+index_start, raster_pos ).clip(min=0.01)**gamma )
if slit_data is not None:
slit_pos = data_cube.get_slit_pos(i)
line_data = np.vstack([[slit_pos]*image.shape[0], np.arange(image.shape[0])]).T
line.set_offsets(line_data)
line.set_array(slit_data[i,:])
return im,
# Call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, lambda i: animate(i, index_start), init_func=init, frames=index_stop-index_start, interval=interval_ms, blit=True)
# Close the plot
plt.close(anim._fig)
# Save animation if requested
if save_path is not None:
anim.save( save_path )
return HTML(anim.to_html5_video()) | af5649258a31280b7cfbca8d14794b0f8e6bd807 | 2,347 |
def tau_profile(ncols,vshifts,vdop,which_line,wave_cut_off=2.0):
"""
Computes a Lyman-alpha Voigt profile for HI or DI given column density,
velocity centroid, and b parameter.
"""
## defining rest wavelength, oscillator strength, and damping parameter
if which_line == 'h1':
lam0s,fs,gammas=1215.67,0.4161,6.26e8
elif which_line == 'd1':
lam0s,fs,gammas=1215.3394,0.4161,6.27e8
elif which_line == 'mg2_h':
lam0s,fs,gammas=2796.3543,6.155E-01,2.625E+08
elif which_line == 'mg2_k':
lam0s,fs,gammas=2803.5315,3.058E-01,2.595E+08
else:
raise ValueError("which_line can only equal 'h1' or 'd1'!")
Ntot=10.**ncols # column density of H I gas
nlam=4000 # number of elements in the wavelength grid
xsections_onesided=np.zeros(nlam) # absorption cross sections as a
                                    # function of wavelength (one side of transition)
u_parameter=np.zeros(nlam) # Voigt "u" parameter
    nu0s=ccgs/(lam0s*1e-8) # rest wavelength of the transition converted to frequency
nuds=nu0s*vdop/c_km # delta nus based off vdop parameter
a_parameter = np.abs(gammas/(4.*np.pi*nuds) ) # Voigt "a" parameter -- damping parameter
xsections_nearlinecenter = np.sqrt(np.pi)*(e**2)*fs*lam0s/(me*ccgs*vdop*1e13) # cross-sections
# near Lyman line center
wave_edge=lam0s - wave_cut_off # define wavelength cut off - this is important for the brightest lines and should be increased appropriately.
wave_symmetrical=np.zeros(2*nlam-1) # huge wavelength array centered around a Lyman transition
wave_onesided = np.zeros(nlam) # similar to wave_symmetrical, but not centered
# around a Lyman transition
lamshifts=lam0s*vshifts/c_km # wavelength shifts from vshifts parameter
## find end point for wave_symmetrical array and create wave_symmetrical array
num_elements = 2*nlam - 1
first_point = wave_edge
mid_point = lam0s
end_point = 2*(mid_point - first_point) + first_point
wave_symmetrical = np.linspace(first_point,end_point,num=num_elements)
wave_onesided = np.linspace(lam0s,wave_edge,num=nlam)
freq_onesided = ccgs / (wave_onesided*1e-8) ## convert "wave_onesided" array to a frequency array
u_parameter = (freq_onesided-nu0s)/nuds ## Voigt "u" parameter -- dimensionless frequency offset
xsections_onesided=xsections_nearlinecenter*voigt.voigt(a_parameter,u_parameter) ## cross-sections
# single sided
## can't do symmetrical
xsections_onesided_flipped = xsections_onesided[::-1]
## making the cross-sections symmetrical
xsections_symmetrical=np.append(xsections_onesided_flipped[0:nlam-1],xsections_onesided)
deltalam=np.max(wave_symmetrical)-np.min(wave_symmetrical)
dellam=wave_symmetrical[1]-wave_symmetrical[0]
nall=np.round(deltalam/dellam)
wave_all=deltalam*(np.arange(nall)/(nall-1))+wave_symmetrical[0]
tau_all = np.interp(wave_all,wave_symmetrical+lamshifts,xsections_symmetrical*Ntot)
return wave_all,tau_all | bc420a23397650ed19623338a7d45068621218e8 | 2,348 |
import logging
import sys
import requests
import os
def main(args):
"""
chandl's entry point.
:param args: Command-line arguments, with the program in position 0.
"""
args = _parse_args(args)
# sort out logging output and level
level = util.log_level_from_vebosity(args.verbosity)
root = logging.getLogger()
root.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
root.addHandler(handler)
if level != logging.DEBUG:
requests.packages.urllib3.disable_warnings()
logger.debug(args)
try:
thread = Thread.from_url(args.url)
except (ValueError, IOError) as e:
_print_error('Error retrieving thread: {0}'.format(e))
return 1
posts = thread.posts
logger.debug('Thread contains %d posts', len(posts))
posts = _remove_unwanted(posts, args)
logger.debug('Will download %d posts', len(posts))
# check whether we still have anything to do
if not posts:
print('All files are either filtered out or excluded')
return 0
# use the first post to validate the --name
try:
post = posts[0]
args.name.format(**post.__dict__)
except KeyError as e:
_print_error('Invalid file name specifier: {0}'.format(e))
return 2
# set an appropriate thread_dir if one was not specified
if not args.thread_dir:
args.thread_dir = util.make_filename(thread.title)
# create --thread-dir
write_dir = os.path.abspath(os.path.join(args.output_dir, args.thread_dir))
if not os.path.isdir(write_dir):
try:
os.mkdir(write_dir, 0o700)
except OSError as e:
_print_error(
'Failed to create the thread directory at {0}: {1}'.format(
write_dir, e))
return 3
# show a relative path is there is a common directory (below root) between
# the `pwd` and the write_dir, otherwise show the absolute path
display_path = write_dir \
if os.path.dirname(os.path.commonprefix([write_dir,
os.getcwd()])) == '/' \
else os.path.relpath(write_dir, os.getcwd())
# download the files
print('Saving \'{0}\' to \'{1}\''.format(thread.title, display_path))
downloader = Downloader(write_dir, args.name, args.parallelism)
print(downloader.download(posts, level >= logging.WARNING))
return 0 | 57bdf9d8a9da0725192b5f9f5e539d35a0fcad47 | 2,349 |
def welcome_page():
""" On-boarding page
"""
g.project.update_on_boarding_state()
if g.project.on_boarding['import']:
return redirect(url_for('data_manager_blueprint.tasks_page'))
return flask.render_template(
'welcome.html',
config=g.project.config,
project=g.project,
on_boarding=g.project.on_boarding
) | 4be4f1447a732a24ecfbee49c3daa7a31a16fec4 | 2,350 |
def brokerUrl(host):
"""We use a different brokerUrl when running the workers than when
running within the flask app. Generate an appropriate URL with that in
mind"""
return '{broker_scheme}://{username}:{password}@{host}:{port}//'.format(
host=host, **CONFIG_JOB_QUEUE) | e55d84f818b17680e196b6c013dd1b3972c30df8 | 2,351 |
def show_lat_lon_gps(
move_data,
kind='scatter',
figsize=(21, 9),
plot_start_and_end=True,
return_fig=True,
save_fig=False,
name='show_gps_points.png',
):
"""
Generate a visualization with points [lat, lon] of dataset.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
kind : String, optional, default 'scatter'.
Represents chart type_.
figsize : tuple, optional, default (21,9).
Represents dimensions of figure.
plot_start_and_end: boolean
Whether to feature the start and end of the trajectory
    return_fig : bool, optional, default True.
        Represents whether or not to return the generated figure.
    save_fig : bool, optional, default False.
        Represents whether or not to save the generated picture.
name : String, optional, default 'show_gps_points.png'.
Represents name of a file.
Returns
-------
matplotlib.pyplot.figure or None
The generated picture.
"""
try:
if LATITUDE in move_data and LONGITUDE in move_data:
fig = move_data.drop_duplicates([LATITUDE, LONGITUDE]).plot(
kind=kind, x=LONGITUDE, y=LATITUDE, figsize=figsize
)
if plot_start_and_end:
plt.plot(
move_data.iloc[0][LONGITUDE],
move_data.iloc[0][LATITUDE],
'yo',
markersize=10,
) # start point
plt.plot(
move_data.iloc[-1][LONGITUDE],
move_data.iloc[-1][LATITUDE],
'yX',
markersize=10,
) # end point
if save_fig:
plt.savefig(name)
if return_fig:
return fig
except Exception as exception:
raise exception | c34f8e868668d01d4214b9a5678440266f8f9a0a | 2,352 |
def filter_seqlets(seqlet_acts, seqlet_intervals, genome_fasta_file, end_distance=100, verbose=True):
""" Filter seqlets by valid chromosome coordinates. """
# read chromosome lengths
chr_lengths = {}
genome_fasta_open = pysam.Fastafile(genome_fasta_file)
for chrom in genome_fasta_open.references:
chr_lengths[chrom] = genome_fasta_open.get_reference_length(chrom)
genome_fasta_open.close()
# check coordinates
filter_mask = np.zeros(len(seqlet_intervals), dtype='bool')
for si, seq_int in enumerate(seqlet_intervals):
left_valid = (seq_int.start > end_distance)
right_valid = (seq_int.end + end_distance < chr_lengths[seq_int.chr])
filter_mask[si] = left_valid and right_valid
if verbose:
print('Removing %d seqlets near chromosome ends.' % (len(seqlet_intervals) - filter_mask.sum()))
# filter
seqlet_acts = seqlet_acts[filter_mask]
seqlet_intervals = [seq_int for si, seq_int in enumerate(seqlet_intervals) if filter_mask[si]]
return seqlet_acts, seqlet_intervals | 6ab32cc2667f0b2e7b9d0dc15f1d3ca7ea2ebe46 | 2,353 |
def load_RIMO(path, comm=None):
"""
Load and broadcast the reduced instrument model,
a.k.a. focal plane database.
"""
# Read database, parse and broadcast
if comm is not None:
comm.Barrier()
timer = Timer()
timer.start()
RIMO = {}
if comm is None or comm.rank == 0:
print("Loading RIMO from {}".format(path), flush=True)
hdulist = pf.open(path, "readonly")
detectors = hdulist[1].data.field("detector").ravel()
phi_uvs = hdulist[1].data.field("phi_uv").ravel()
theta_uvs = hdulist[1].data.field("theta_uv").ravel()
psi_uvs = hdulist[1].data.field("psi_uv").ravel()
psi_pols = hdulist[1].data.field("psi_pol").ravel()
epsilons = hdulist[1].data.field("epsilon").ravel()
fsamples = hdulist[1].data.field("f_samp").ravel()
fknees = hdulist[1].data.field("f_knee").ravel()
alphas = hdulist[1].data.field("alpha").ravel()
nets = hdulist[1].data.field("net").ravel()
fwhms = hdulist[1].data.field("fwhm").ravel()
for i in range(len(detectors)):
phi = (phi_uvs[i]) * degree
theta = theta_uvs[i] * degree
# Make sure we don't double count psi rotation already
# included in phi
psi = (psi_uvs[i] + psi_pols[i]) * degree - phi
quat = np.zeros(4)
# ZYZ conversion from
# http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19770024290.pdf
# Note: The above document has the scalar part of the quaternion at
# first position but quaternionarray module has it at the end, we
# use the quaternionarray convention
# scalar part:
quat[3] = np.cos(0.5 * theta) * np.cos(0.5 * (phi + psi))
# vector part
quat[0] = -np.sin(0.5 * theta) * np.sin(0.5 * (phi - psi))
quat[1] = np.sin(0.5 * theta) * np.cos(0.5 * (phi - psi))
quat[2] = np.cos(0.5 * theta) * np.sin(0.5 * (phi + psi))
# apply the bore sight rotation to the detector quaternion
quat = qa.mult(SPINROT, quat)
RIMO[detectors[i]] = DetectorData(
detectors[i],
phi_uvs[i],
theta_uvs[i],
psi_uvs[i],
psi_pols[i],
epsilons[i],
fsamples[i],
fknees[i],
alphas[i],
nets[i],
fwhms[i],
quat,
)
hdulist.close()
if comm is not None:
RIMO = comm.bcast(RIMO, root=0)
if comm is None or comm.rank == 0:
timer.report_clear("Load and broadcast RIMO")
return RIMO | 67f2cd7f1a345a801ac65dcb54347863c6c7c64a | 2,354 |
def format_top(data):
"""
Format "top" output
:param data: dict
:return: list
"""
result = []
if data:
if 'Titles' in data:
result.append(data['Titles'])
if 'Processes' in data:
for process in data['Processes']:
result.append(process)
result = tabulate(result, headers='firstrow').split('\n')
return result | 6fb5a4a18f8a87ee8bfffd30da1d9829024f987b | 2,355 |
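# Illustrative usage sketch (not part of the original snippet). The input
# mimics the dict shape of a Docker "top" API response; `tabulate` is assumed
# to be imported at module level as the snippet implies.
sample_top = {
    'Titles': ['PID', 'USER', 'COMMAND'],
    'Processes': [['1', 'root', 'nginx'], ['42', 'www-data', 'nginx: worker']],
}
for line in format_top(sample_top):
    print(line)   # one aligned table row per list element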
def process_arguments(parser):
"""This function parses the input arguments."""
args = parser.parse_args()
# Distribute input arguments
request = args.request
if "num_tests" in args:
num_tests = int(args.num_tests)
else:
num_tests = None
# Test validity of input arguments
if request not in ["check", "create"]:
raise AssertionError()
if num_tests not in [i for i in np.arange(1001)]:
raise AssertionError(9)
return request, num_tests | 19bb444ff578cc92bb99685e4405a25b8f12eaba | 2,356 |
def generate_sections(logdata: pd.DataFrame):
"""
Generates a list of SectionDescriptors based on iMotions packets
SlideStart and SlideEnd.
If the first Slide related packet is an End packet, the first
descriptor will include all timestamps up to that packet, else it
will drop the packets before.
The last descriptor will include all packets until end.
Assumes that there are SlideStart and SlideEnd packages in data.
"""
slide_start = logdata.Name == 'SlideStart'
slide_end = logdata.Name == 'SlideEnd'
slides = slide_start | slide_end
log_slides = logdata[slides]
time_diffs = log_slides.Timestamp.diff()
sections = []
if log_slides.iloc[0].Name == 'SlideStart':
# Bootstrap condition
sections.append(SectionDescriptor(shortcut.row_index(log_slides.head(1))))
for label, timediff in time_diffs.iteritems():
if not sections:
# If first packet is a SlideEnd, we include all data before
sections.append(SectionDescriptor(0, label, logdata.loc[label].Timestamp))
elif not sections[-1].end:
sections[-1].end = label
sections[-1].duration = timediff
else:
sections.append(SectionDescriptor(label))
last_row = logdata.tail(1).Timestamp
last_label = shortcut.row_index(last_row)
last_timestamp = last_row.values[0]
sections[-1].end = last_label
sections[-1].duration = logdata.loc[last_label].Timestamp - logdata.loc[sections[-1].start].Timestamp
return sections | 3f3d009965449a54a43d4751f15066b798c335d7 | 2,357 |
def etree2dict(element):
"""Convert an element tree into a dict imitating how Yahoo Pipes does it.
"""
i = dict(element.items())
i.update(_make_content(i, element.text, strip=True))
for child in element:
tag = child.tag
value = etree2dict(child)
i.update(_make_content(i, value, tag))
if element.text and not set(i).difference(['content']):
# element is leaf node and doesn't have attributes
i = i.get('content')
return i | e0c14295fb0d8459b7ab3be300213c6a99a43e5e | 2,358 |
def full_chain():
"""
GETing `/chain` will returns the full blockchain.
Returns:
The node's full blockchain list, as a JSON response.
"""
logger.info("Received GET request for the full chain")
return {
"chain": blockchain.chain,
"length": len(blockchain.chain),
} | 9987c95270cbdd89b9578cd987d4be29a2d60433 | 2,359 |
from click import _bashcomplete
def _patched_is_incomplete_option(all_args, cmd_param):
"""Patched version of is_complete_option.
Fixes issue testing a cmd param against the current list of
args. Upstream version does not consider combined short form args
and so a command like `guild check -nt <auto>` doesn't work. The
patched version considers that `t` above is the current param
option.
"""
if not isinstance(cmd_param, _bashcomplete.Option):
return False
if cmd_param.is_flag:
return False
last_option = None
for index, arg_str in enumerate(
reversed([arg for arg in all_args if arg != _bashcomplete.WORDBREAK])
):
if index + 1 > cmd_param.nargs:
break
if _bashcomplete.start_of_option(arg_str):
last_option = arg_str
if not last_option:
return False
if last_option[:2] == "--":
return last_option in cmd_param.opts
assert last_option[:1] == "-", last_option
for i in range(len(last_option), 0, -1):
if "-%s" % last_option[i:] in cmd_param.opts:
return True
return False | f92fa77c7cfa74fec79f853ba948028951b1f736 | 2,360 |
def confirm_install() -> bool:
"""
Confirms that update should be performed on an empty install
"""
message = (
"The pack you are trying to update doesn't have a pack-manifest.json file. "
"Unless you are doing a first install, *THIS SHOULD NOT HAPPEN*. If you are doing a first install, just click 'OK'\n\n"
"Your pack is currently broken from MPM point of view, but should still run."
"\nIf you proceed, the udpate process will duplicate mods and add conflicting overrides:"
" this *WILL BREAK* your pack for minecraft too. It is advised to cancel"
)
root = tk.Tk()
root.withdraw()
try:
return mbox.askokcancel(title="Confirm Update", message=message)
finally:
root.destroy() | 2963e9913c56623064b712e817e6c768a8f600c3 | 2,361 |
def f_cv(x, dt):
""" state transition function for a
constant velocity aircraft"""
F = np.array([[1, dt, 0.5*dt*dt, 0, 0, 0],
[0, 1, dt, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, dt, 0.5*dt*dt],
[0, 0, 0, 0, 1, dt],
[0, 0, 0, 0, 0, 1]], dtype=float)
return np.dot(F, x) | cfb142101598eaa635a6c4a4e45d411043cd99b9 | 2,362 |
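# Illustrative usage sketch (not part of the original snippet). The state
# layout implied by F is [x, vx, ax, y, vy, ay]; numpy is assumed imported
# as `np`.
import numpy as np
x0 = np.array([0.0, 1.0, 0.0, 0.0, 2.0, 0.0])   # moving with vx=1, vy=2
print(f_cv(x0, dt=0.5))                          # positions advance to x=0.5, y=1.0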
import getpass
def prompt_for_password(args):
"""
if no password is specified on the command line, prompt for it
"""
if not args.password:
args.password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
return args | 22fdb01fa07a83e53f0544a23e6ad014f7b21f88 | 2,363 |
def update_hparams(hparams, new_hparams):
""" Update existing with new hyperparameters """
if new_hparams is None:
return hparams
if isinstance(new_hparams, str) and new_hparams.endswith('.json'):
tf.logging.info("Overriding default hparams from JSON")
with open(new_hparams) as fh:
hparams.parse_json(fh.read())
elif isinstance(new_hparams, str):
tf.logging.info("Overriding default hparams from str:")
hparams.parse(new_hparams)
elif isinstance(new_hparams, dict):
tf.logging.info("Overriding default hparams from dict:")
for k, val in new_hparams.items():
if k in hparams:
tf.logging.info(" {} -> {}".format(k, val))
hparams.set_hparam(k, val)
elif isinstance(new_hparams, Namespace):
tf.logging.info("Overriding default hparams from Namespace:")
for k, val in vars(new_hparams).items():
if k in hparams and val is not None:
tf.logging.info(" {} -> {}".format(k, val))
hparams.set_hparam(k, val)
else:
raise ValueError(new_hparams)
return hparams | fa134d1c5b8d9cf406fc9e3133303b9897a59970 | 2,364 |
from typing import Any
import importlib
def relative_subpackage_import(path: str, package: str) -> Any:
"""[summary]
Args:
path (str): [description]
package (str): [description].
Returns:
Any: [description]
"""
if not path.startswith('.'):
path = '.' + path
return importlib.import_module(path, package = package) | 2345267b60947f57098b0678dce845d858f2d2a8 | 2,365 |
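# Illustrative usage sketch (not part of the original snippet): import a
# submodule relative to a package; a leading dot is added when missing.
et = relative_subpackage_import('ElementTree', 'xml.etree')
print(et.__name__)   # 'xml.etree.ElementTree'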
def convertToNpArray(train,test):
"""
Converts the data into numpy arrays
:param train: training data csv path
:param test: test data csv path
:return: training data and labels, test data and labels
"""
train_data = pd.read_csv(train, delimiter=',', quotechar='"',
dtype=None, encoding="ISO-8859-1",
usecols=[0, 5])
train_array = create_train_data_subset(train_data)
np.random.shuffle(train_array)
train_target_array = train_array[:, 0]
train_target_array = np.reshape(train_target_array, (len(train_target_array), 1))
train_data_array = train_array[:, 1]
train_data_array = np.reshape(train_data_array, (len(train_data_array), 1))
test_data = pd.read_csv(test, delimiter=',', quotechar='"',
dtype=None, encoding="ISO-8859-1",
usecols=[0, 5], names=['label', 'tweet'])
test_data = test_data[test_data.label != 2]
test_data = test_data.values
test_data = np.append(test_data, create_test_data_subset(train_data), axis=0)
np.random.shuffle(test_data)
test_target = test_data[:, 0]
test_target_array = np.array(test_target)
test_target_array = np.reshape(test_target_array, (len(test_target_array), 1))
test_data = test_data[:, 1]
test_data_array = np.reshape(test_data, (len(test_data), 1))
return train_data_array,test_data_array,train_target_array,test_target_array | bc375946802e6dc75f9f52cd7b1665fe820287e7 | 2,366 |
import json
def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
    # attachment_dims is a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
    # attachment_values is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
        # For backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(
attachment_outcome_str)
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
    # create the dimensioned measured value and populate it with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name,
num_dimensions=len(dimensions)
)
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome
)
return measurement | ff758924e1dfab00c49e97eefe6548a0de73c257 | 2,367 |
import torch
def similarity_iou_2d(pred_boxes, true_boxes):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (cx, cy, w, h) format.
Arguments:
pred_boxes (Tensor[B, 4, N])
true_boxes (Tensor[B, 4, M])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
"""
def area(boxes):
return (boxes[:, :, 2] - boxes[:, :, 0]) * (boxes[:, :, 3] - boxes[:, :, 1])
pred_boxes = convert_to_corners(pred_boxes).transpose(1, 2) # BN4
true_boxes = convert_to_corners(true_boxes).transpose(1, 2) # BN4
area1 = area(pred_boxes) # BN
area2 = area(true_boxes) # BM
lt = torch.max(pred_boxes[:,:, None, :2], true_boxes[:,:, :2]) # BNM2
rb = torch.min(pred_boxes[:,:, None, 2:], true_boxes[:,:, 2:]) # BNM2
wh = (rb - lt).clamp(min=0) # BNM2
inter = wh[:, :, :, 0] * wh[:, :, :, 1] # BNM
iou = inter / (area1[:, :, None] + area2 - inter) # BNM
return iou | 7081906483860e475fbf7ad3c7801182ba8a5efd | 2,368 |
def get_atom_coords_by_names(residue, atom_names):
"""Given a ProDy Residue and a list of atom names, this attempts to select and return
all the atoms.
If atoms are not present, it substitutes the pad character in lieu of their
coordinates.
"""
coords = []
pad_coord = np.asarray([GLOBAL_PAD_CHAR] * 3)
for an in atom_names:
a = residue.select(f"name {an}")
if a:
coords.append(a.getCoords()[0])
else:
coords.append(pad_coord)
return coords | 7838ab6352a37b3731ee81f7beeebfde7676b24a | 2,369 |
def calculate_chord(radius, arc_degrees):
"""
Please see the wikipedia link for more information on how this works.
https://en.wikipedia.org/wiki/Chord_(geometry)
"""
# Calculate the arc_degrees in radians.
# We need this because sin() expects it.
arc_radians = radians(arc_degrees)
# Calculate the chord.
return radius * (2 * sin(arc_radians / 2)) | be59a06d33e69d5232fdea1cbdf6c5cef20f30fc | 2,370 |
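# Illustrative usage sketch (not part of the original snippet). Assumes
# `radians` and `sin` are imported from `math` at module level, as the
# snippet implies.
print(calculate_chord(radius=1.0, arc_degrees=60.0))    # ~1.0 (equilateral chord)
print(calculate_chord(radius=2.0, arc_degrees=180.0))   # 4.0 (the diameter)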
def broadcast(right, left, left_fk=None, right_pk=None, keep_right_index=False):
"""
Re-indexes a series or data frame (right) to align with
another (left) series or data frame via foreign key relationship.
The index or keys on the right must be unique (i.e. this only supports
    1:1 or 1:m relationships between the right and left).
Parameters:
-----------
right: pandas.DataFrame or pandas.Series
        Columns or set of columns to re-project (broadcast) from.
left: pandas.Series, pandas.Index or pandas.DataFrame
Object to align to.
        if pandas.Series:
Series values are used as the foreign keys.
if pandas.Index:
The index will be used as the foreign keys.
if pandas.DataFrame
            Use the `left_fk` argument to specify one
or more columns to serve as the foreign keys.
left_fk: str or list of str
Only applicable if 'left' is a dataframe.
Column or list of columns to serve as foreign keys.
If not provided the `left's` index will be used.
right_pk: str or list of str, default None
Column or list of columns that uniquely define each row
        in the `right`. If not provided, the `right's` index will be
used.
keep_right_index: bool, optional, default False
If True, and the `right` is a data frame, and a `right_pk` arg is provided,
then column(s) containing the `right's` index values will be
appended to the result.
Returns:
--------
pandas.Series or pandas.DataFrame with column(s) from
right aligned with the left.
"""
update_index = True
    # if we're broadcasting using a data frame, we need to know which column(s)
if isinstance(left, pd.DataFrame) and left_fk is None:
raise ValueError(
'If the left is a DataFrame, must supply the left_fk (column name to join on)')
# if right primary keys are explicitly provided
if right_pk:
if keep_right_index:
right = right.reset_index()
right.set_index(right_pk, inplace=True)
else:
right = right.set_index(right_pk)
# ensure that we can align correctly
if not right.index.is_unique:
raise ValueError("The right's index must be unique!")
# decide how to broadcast based on the type of left provided
if isinstance(left, pd.Index):
update_index = False
# for cases where a left_fk is provided as a list with a single element
if left_fk:
if isinstance(left_fk, list):
if len(left_fk) == 1:
left_fk = left_fk[0]
if isinstance(left, pd.DataFrame):
if left_fk:
left = left[left_fk]
else:
left = left.index
update_index = False
# reindex
a = right.reindex(left)
# update the index if necessary
if update_index:
a.index = left.index.copy()
return a | 0be603caec1530427399a7b816eb63bbad71f239 | 2,371 |
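# Illustrative usage sketch (not part of the original snippet): a 1:m
# broadcast keyed on a foreign-key Series.
import pandas as pd
zones = pd.DataFrame({'area': [10.0, 20.0]}, index=['a', 'b'])   # right, unique index
parcels = pd.Series(['a', 'b', 'a'], name='zone_id')             # left foreign keys
print(broadcast(zones, parcels))   # 'area' re-indexed onto parcels: 10.0, 20.0, 10.0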
def _is_arraylike(arr):
"""Check if object is an array."""
return (
hasattr(arr, "shape")
and hasattr(arr, "dtype")
and hasattr(arr, "__array__")
and hasattr(arr, "ndim")
) | 71bfbb7f93116879ee63bb4fc1ad8b3a3d8807c3 | 2,372 |
def tokenize_docstring(text):
"""Tokenize docstrings.
Args:
text: A docstring to be tokenized.
Returns:
A list of strings representing the tokens in the docstring.
"""
en = spacy.load('en')
    if isinstance(text, bytes):
        text = text.decode('utf8')
    tokens = en.tokenizer(text)
return [token.text.lower() for token in tokens if not token.is_space] | c58c1dcf62d2b3de1f947cfd33f2c344b03532fb | 2,373 |
def conv_output_length(input_length, filter_size,
border_mode, stride, dilation=1):
"""Determines output length of a convolution given input length.
# Arguments
input_length: integer.
filter_size: integer.
border_mode: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
# Returns
The output length (integer).
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
elif border_mode == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride | 9e7f44b44e582f140dafdfd48c363d80fe8c4a46 | 2,374 |
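# Illustrative usage sketch (not part of the original snippet): standard
# length arithmetic for 1-D convolutions.
print(conv_output_length(10, 3, 'valid', stride=1))               # 8
print(conv_output_length(10, 3, 'same', stride=2))                # 5
print(conv_output_length(10, 3, 'valid', stride=1, dilation=2))   # 6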
import os
def source_ccp4():
"""Function to return bash command to source CCP4"""
if os.name == "nt":
return
return "source {}".format(os.path.join(os.environ["CCP4"], "bin", "ccp4.setup-sh")) | 7b3f2920906ff4e6b680e4696a66a67e56c72d03 | 2,375 |
def dblHour():
"""(read-only) Array of doubles containgin time value in hours for time-sampled monitor values; Empty if frequency-sampled values for harmonics solution (see dblFreq)"""
return get_float64_array(lib.Monitors_Get_dblHour) | a437965873c7764e2ddbebb81163887f6ddfca07 | 2,376 |
def select_uuid_like_indexes_on_table(model, cursor):
"""
Gets a list of database index names for the given model for the
uuid-containing fields that have had a like-index created on them.
:param model: Django model
:param cursor: database connection cursor
:return: list of database rows; the first field of each row is an index
name
"""
# VersionedForeignKey fields as well as the id fields have these useless
# like indexes
field_names = ["'%s'" % f.column for f in model._meta.fields if
isinstance(f, VersionedForeignKey)]
field_names.append("'id'")
sql = """
select i.relname as index_name
from pg_class t,
pg_class i,
pg_index ix,
pg_attribute a
where t.oid = ix.indrelid
and i.oid = ix.indexrelid
and a.attrelid = t.oid
and a.attnum = ANY(ix.indkey)
and t.relkind = 'r'
and t.relname = '{0}'
and a.attname in ({1})
and i.relname like '%_like'
""".format(model._meta.db_table, ','.join(field_names))
cursor.execute(sql)
return cursor.fetchall() | c63cbf2e45a6c6e2591e627781b92a93cd101e27 | 2,377 |
def retrieve_jambalaya(request):
"""
Retrieve a jambalaya recipe by name or country of origin
---
serializer: JambalayaSerializer
parameters:
- name: name
description: name as found in recipe
type: string
paramType: query
required: false
- name: origin
type: string
paramType: query
required: false
"""
if request.method == 'GET':
serializer = JambalayaQuerySerializer(data=request.DATA)
if serializer.data['name'] is not None:
j = Jambalaya.objects.filter(recipe__contains='name=%s' % serializer.data['name'])
else:
j = Jambalaya.objects.filter(recipe__contains="country=%s" % serializer.data['origin'])
serializer = JambalayaSerializer(j, many=True)
return Response(serializer.data)
else:
return Response("", status=status.HTTP_400_BAD_REQUEST) | 2263c091b172d9fce5698b40647484e06d2d37bb | 2,378 |
from typing import Union
def get_pymatgen(optimade_structure: OptimadeStructure) -> Union[Structure, Molecule]:
"""Get pymatgen `Structure` or `Molecule` from OPTIMADE structure.
This function will return either a pymatgen `Structure` or `Molecule` based
on the periodicity or periodic dimensionality of OPTIMADE structure.
For bulk, three-dimensional structures, a pymatgen `Structure` is returned.
This means, if the [`dimension_types`][optimade.models.structures.StructureResourceAttributes.dimension_types]
attribute is comprised of all `1`s (or [`Periodicity.PERIODIC`][optimade.models.structures.Periodicity.PERIODIC]s).
Otherwise, a pymatgen `Molecule` is returned.
Parameters:
optimade_structure: OPTIMADE structure.
Returns:
A pymatgen `Structure` or `Molecule` based on the periodicity of the
OPTIMADE structure.
"""
if "optimade.adapters" in repr(globals().get("Structure")):
warn(PYMATGEN_NOT_FOUND, AdapterPackageNotFound)
return None
if all(optimade_structure.attributes.dimension_types):
return _get_structure(optimade_structure)
return _get_molecule(optimade_structure) | 7cb4dbd9395b57931a27db87516c587062e65bb3 | 2,379 |
import torch
def get_meshgrid_samples(lower, upper, mesh_size: tuple, dtype) ->\
torch.Tensor:
"""
Often we want to get the mesh samples in a box lower <= x <= upper.
This returns a torch tensor of size (prod(mesh_size), sample_dim), where
each row is a sample in the meshgrid.
"""
sample_dim = len(mesh_size)
assert (len(upper) == sample_dim)
assert (len(lower) == sample_dim)
assert (len(mesh_size) == sample_dim)
meshes = []
for i in range(sample_dim):
meshes.append(
torch.linspace(lower[i], upper[i], mesh_size[i], dtype=dtype))
mesh_tensors = torch.meshgrid(*meshes)
return torch.cat(
[mesh_tensors[i].reshape((-1, 1)) for i in range(sample_dim)], dim=1) | 98a2c7b064d7b23824b547d0fc0a16eb37cb0923 | 2,380 |
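# Illustrative usage sketch (not part of the original snippet): sample a
# 2 x 3 grid over the box [0, 1] x [0, 2].
import torch
samples = get_meshgrid_samples([0., 0.], [1., 2.], (2, 3), dtype=torch.float64)
print(samples.shape)   # torch.Size([6, 2]), one row per grid point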
from datetime import datetime
import random
def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes,
colors):
"""
Function to return the html-string of the node drawings for the
gantt chart
Parameters
----------
start : datetime.datetime obj
start time for first node
nodes_list : list
a list of the node dictionaries
cores : integer
the number of cores given to the workflow via the 'n_procs'
plugin arg
minute_scale : integer
the scale, in minutes, at which to plot line markers for the
gantt chart; for example, minute_scale=10 means there are lines
drawn at every 10 minute interval from start to finish
space_between_minutes : integer
scale factor in pixel spacing between minute line markers
colors : list
a list of colors to choose from when coloring the nodes in the
gantt chart
Returns
-------
result : string
the html-formatted string for producing the minutes-based
time line markers
"""
# Init variables
result = ""
scale = space_between_minutes / minute_scale
space_between_minutes = space_between_minutes / scale
end_times = [
datetime.datetime(
start.year, start.month, start.day, start.hour, start.minute,
start.second
)
for core in range(cores)
]
# For each node in the pipeline
for node in nodes_list:
# Get start and finish times
node_start = node["start"]
node_finish = node["finish"]
# Calculate an offset and scale duration
offset = (
(node_start - start).total_seconds() / 60
) * scale * space_between_minutes + 220
# Scale duration
scale_duration = (node["duration"] / 60) * scale \
* space_between_minutes
if scale_duration < 5:
scale_duration = 5
scale_duration -= 2
# Left
left = 60
for core in range(len(end_times)):
if end_times[core] < node_start:
left += core * 30
end_times[core] = datetime.datetime(
node_finish.year,
node_finish.month,
node_finish.day,
node_finish.hour,
node_finish.minute,
node_finish.second,
)
break
# Get color for node object
color = random.choice(colors)
if "error" in node:
color = "red"
# Setup dictionary for node html string insertion
node_dict = {
"left": left,
"offset": offset,
"scale_duration": scale_duration,
"color": color,
"node_name": node.get("name", node.get("id", "")),
"node_dur": node["duration"] / 60.0,
"node_start": node_start.strftime("%Y-%m-%d %H:%M:%S"),
"node_finish": node_finish.strftime("%Y-%m-%d %H:%M:%S"),
}
# Create new node string
new_node = (
"<div class='node' style='left:%(left)spx;top:%(offset)spx;"
"height:%(scale_duration)spx;background-color:%(color)s;'"
"title='%(node_name)s\nduration:%(node_dur)s\n"
"start:%(node_start)s\nend:%(node_finish)s'></div>" % node_dict
)
# Append to output result
result += new_node
# Return html string for nodes
return result | 83ed57f8d494154d815ec189c002ff86393b2992 | 2,381 |
def has_understood_request(
sys_nlu: dict, slot: str, domain: str, lowercase_slots: bool = True
) -> bool:
"""Check if the system has understood a user request in a particular domain."""
# assume perfect system if NLU not available
if not sys_nlu:
return True
sys_nlu_requested = get_turn_action_params(
sys_nlu,
act_patterns=metadata.REQUEST_ACT_PATTERNS,
service_patterns=[domain],
include_values=False,
use_lowercase=lowercase_slots,
)[
domain
] # type: list[str]
assert all("-" not in slt for slt in sys_nlu_requested)
sys_nlu_requested = [f"{domain}-{slt}" for slt in sys_nlu_requested]
return slot in sys_nlu_requested | 676e11fe996bd88562d1e1404dd1bdd29f3e7d62 | 2,382 |
def lengthOfLongestSubstring(s):
"""
:type s: str
:rtype: int
"""
res = ""
n = 0
for i in s:
if i not in res:
res = res + i
else:
indexofi = res.find(i)
res = res[indexofi+1::] + i
k = len(res)
if k > n:
n = k
print(res)
return n | 951366d46a47030c5d37026bd6712eeb73c34af9 | 2,383 |
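# Illustrative usage sketch (not part of the original snippet). Note the
# function also prints its sliding window on every iteration.
print(lengthOfLongestSubstring("abcabcbb"))   # 3 ("abc")
print(lengthOfLongestSubstring("bbbbb"))      # 1 ("b")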
async def get_sequence_metadata(checksum: str, accept: str = ""):
"""Return Refget sequence metadata based on checksum value."""
headers = Headers()
url_path = "sequence/" + checksum + "/metadata"
try:
result = await create_request_coroutine(
url_list=metadata_url_list(checksum),
url_path=url_path,
headers=headers,
params={accept: accept},
)
if result == "":
return HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Not Found")
return result
except Exception as e:
logger.log("DEBUG", "Unhandled exception in get_sequence_metadata: " + str(e)) | afe591946644d4723142971519926fcacb2a8aa4 | 2,384 |
from functools import reduce
def getattrs(o, *attrs, **kwargs):
"""
>>> getattrs((), '__iter__', '__name__', 'strip')('_')
'iter'
>>> getattrs((), 'foo', 'bar', default=0)
0
"""
if 'default' in kwargs:
default = kwargs['default']
c = o
for attr in attrs:
try:
c = getattr(c, attr)
except AttributeError:
return default
return c
else:
return reduce(getattr, attrs, o) | 64d55154d2399c7097476a8335eae81749588286 | 2,385 |
def maria_create_account(params):
"""root user and dbuser are created at startup.
grant all to dbuser is all we need to do after the DB starts
:type params: dict
"""
error_msg = 'ERROR: mariadb_util; maria_create_account; '
error_msg += 'action: %s user: %s error: %s'
password = Config.accounts[params['dbtype']]['admin_pass']
iport = int(params['port'])
try:
conn = pymysql.connect(host=Config.container_host, port=iport,
user='root',
password=password)
except pymysql.err.OperationalError as e:
print("ERROR: maria_create_account, connect: %s" % e)
return "connect error"
cur = conn.cursor()
sql_cmd = "GRANT ALL PRIVILEGES ON *.* TO '%s'@'%%' " % params['dbuser']
sql_cmd += "WITH GRANT OPTION"
try:
cur.execute(sql_cmd)
except pymysql.err.InternalError as e:
print(error_msg % ('grant', params['dbuser'], e))
conn.commit()
cur.close()
conn.close()
return 'ok' | e26ba5044689f772b93cdd426d9d2995547ada3a | 2,386 |
def compute_coef_xz(y_val, coef_3d):
"""
compute the 2D polynoimal coefficients for a given x
:param x_val: value of x
:param coef_3d: the original 3D polynomials
:return:
"""
coef_xz = np.zeros((coef_3d.shape[1], coef_3d.shape[2]), dtype=coef_3d.dtype)
max_degree_y = coef_3d.shape[0] - 1
for y_power in range(max_degree_y + 1):
coef_xz += coef_3d[y_power, :, :] * y_val ** (max_degree_y - y_power)
return coef_xz | f65353f41bc142b16578750d15a098ae03458fd1 | 2,387 |
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1(ndarray): shape (n, 4)
bboxes2(ndarray): shape (k, 4)
mode(str): iou (intersection over union) or iof (intersection
over foreground)
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(
y_end - y_start + 1, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious | 665e9478b66b398536de48b250ee21aec16f4be0 | 2,388 |
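# Illustrative usage sketch (not part of the original snippet). Boxes are
# (x1, y1, x2, y2) with the inclusive +1 pixel convention used above.
import numpy as np
b1 = np.array([[0, 0, 10, 10]])
b2 = np.array([[0, 0, 10, 10], [5, 5, 15, 15]])
print(bbox_overlaps(b1, b2))   # [[1.0, ~0.175]]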
def me_length_filter(me_iv_pairs, min_length=100):
"""Returns list of (InsertionVertices, InsertionVertices) tuples
with those containing paths going backwards through the ME sequence
filtered out
"""
filtered = []
for iv_pair in me_iv_pairs:
enter_iv, exit_iv = iv_pair
me_seq_len = exit_iv.exit_ref.pos - enter_iv.enter_ref.pos
if me_seq_len > min_length:
filtered.append(iv_pair)
return filtered | 9c344ee913f60aace3b8d94d04500d95166e67d6 | 2,389 |
def build_big_map_schema(data, schema: Schema) -> BigMapSchema:
""" Generate Big_map schema from the contract storage
:param data: Raw storage (Micheline expression)
:param schema: Storage schema
:returns: Mappings: Big_map id to JSON path and vice versa
:rtype: BigMapSchema
"""
bin_to_id = dict()
id_to_bin = dict()
def scan_big_map_ids(node, path):
if len(path) == 0:
assert node.get('int'), (node, path)
yield int(node['int'])
elif isinstance(node, list):
for item in node:
yield from scan_big_map_ids(item, path)
else:
assert node.get('args'), (node, path)
yield from scan_big_map_ids(node['args'][int(path[0])], path[1:])
for bin_path, prim in schema.bin_types.items():
if prim == 'big_map':
for big_map_id in scan_big_map_ids(data, bin_path[1:]):
bin_to_id[bin_path], id_to_bin[big_map_id] = big_map_id, bin_path
return BigMapSchema(bin_to_id, id_to_bin) | 370936598263897e5c0b2416e44bccfdfb151e6c | 2,390 |
import requests
def _get(session, urlTail):
# type: (Session, str) -> Dict
"""Make an HTTP(s) GET request to Batfish coordinator.
:raises SSLError if SSL connection failed
:raises ConnectionError if the coordinator is not available
"""
headers = {CoordConsts.HTTP_HEADER_BATFISH_APIKEY: session.apiKey,
CoordConsts.HTTP_HEADER_BATFISH_VERSION: pybatfish.__version__}
url = session.get_base_url2() + urlTail
response = requests.get(url, headers=headers, verify=session.verifySslCerts)
response.raise_for_status()
return dict(response.json()) | 2f7fb16117670465f4462cd0259932f1bfcb6e48 | 2,391 |
import functools
def compose(fns):
"""Creates a function composition."""
def composition(*args, fns_):
res = fns_[0](*args)
for f in fns_[1:]:
res = f(*res)
return res
return functools.partial(composition, fns_=fns) | 5c791f52f70707078e941fe169679ddc80a32782 | 2,392 |
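# Illustrative usage sketch (not part of the original snippet): every function
# after the first receives the previous result unpacked with *, so the
# intermediate results must be iterable.
pair_sum = compose([divmod, lambda q, r: (q, r, q + r)])
print(pair_sum(17, 5))   # (3, 2, 5): divmod(17, 5) -> (3, 2), then augmented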
import os
def load_participants_file():
"""
Load participants.tsv file and build pandas DF of participants
This function assumes that the file participants.tsv is present in the -path-results
:return: participants: pandas dataframe
"""
participants = pd.read_csv(os.path.join('participants.tsv'), sep="\t")
return participants | f55c32789563150fc574c8a98df2780c6a5903da | 2,393 |
def clr_tilcmt(*args):
"""
clr_tilcmt(ea)
"""
return _ida_nalt.clr_tilcmt(*args) | d1edcaee63ac58f6d82249ceb3a5fc9e35224fe9 | 2,394 |
import copy
def canarize_service(args, input_yaml, labels={}):
"""
Create a canary for an existing Service.
We do this by:
- adding a '-canary' suffix to the name of the Service
- adding a '-canary' suffix to all the labels in the Service selector
"""
res = []
# append the -canary to the Service name
output_yaml = copy.deepcopy(input_yaml)
canary_service_name = input_yaml["metadata"]["name"] + args.suffix
output_yaml["metadata"]["name"] = canary_service_name
print(f"# Creating canary Service {canary_service_name}")
# append the -canary to all the labels in the selector
for (k, v) in input_yaml["spec"]["selector"].items():
output_yaml["spec"]["selector"][k] = v + args.suffix
if args.namespace:
output_yaml["metadata"]["namespace"] = args.namespace
res += [output_yaml]
if args.gen_mapping:
canary_service_name = output_yaml["metadata"]["name"]
print(
f"# Creating Mapping for Service {canary_service_name} (weight: {args.canary_weight})")
res += [gen_mapping(args, canary_service_name,
weight=args.canary_weight, labels=labels)]
if len(labels) > 0:
if len(output_yaml["metadata"]["labels"]) > 0:
output_yaml["metadata"]["labels"].update(labels)
else:
output_yaml["metadata"]["labels"] = labels
return res | 835d4f2dfd31d5db57254d97645bdd790c3d17dd | 2,395 |
def get_query_results(query):
"""
    Get the data with common fields from Close using the provided query.
:param query: Any Close search query eg. 'lead_status:Potential has:emails'
:return: 2D array with a header and results
"""
api = Client(CLOSE_API_KEY)
leads = api.get('lead', params={'query': query})
values = [[
'id',
'display_name',
'lead_name',
'description',
'url',
'status_id',
'status_label',
'primary_contact_name',
'primary_contact_first_name',
'primary_contact_last_name',
'primary_contact_title',
'primary_contact_primary_phone',
'primary_contact_primary_phone_type',
'primary_contact_other_phones',
'primary_contact_primary_email',
'primary_contact_primary_email_type',
'primary_contact_other_emails',
'primary_contact_primary_url',
'primary_contact_other_urls',
'created_by',
'created_by_name',
'updated_by',
'updated_by_name',
'date_created',
'date_updated',
'html_url'
]]
for lead in leads['data']:
primary_contact = lead['contacts'][0] if lead['contacts'] else None
id = lead['id']
display_name = lead['display_name']
lead_name = lead['name']
description = lead['description']
url = lead['url']
status_id = lead['status_id']
status_label = lead['status_label']
created_by = lead['created_by']
created_by_name = lead['created_by_name']
updated_by = lead['updated_by']
updated_by_name = lead['updated_by_name']
date_created = lead['date_created']
date_updated = lead['date_updated']
html_url = lead['html_url']
primary_contact_name = None
primary_contact_first_name = None
primary_contact_last_name = None
primary_contact_title = None
primary_contact_primary_phone = None
primary_contact_primary_phone_type = None
primary_contact_other_phones = None
primary_contact_email = None
primary_contact_email_type = None
primary_contact_other_emails = None
primary_contact_primary_url = None
primary_contact_other_urls = None
if primary_contact:
primary_contact_name = primary_contact['name'] if primary_contact else None
primary_contact_title = primary_contact['title'] if primary_contact else None
if 'name' in primary_contact:
primary_contact_first_name = primary_contact['name'].split(' ')[0]
if len(primary_contact['name'].split(' ')) > 1:
primary_contact_last_name = primary_contact['name'].split(' ')[1]
if primary_contact['phones']:
primary_contact_primary_phone = primary_contact['phones'][0]['phone']
primary_contact_primary_phone_type = primary_contact['phones'][0]['type']
if len(primary_contact['phones']) > 1:
primary_contact_other_phones = ", ".join(o['phone'] for o in primary_contact['phones'][1:])
if primary_contact['emails']:
primary_contact_email = primary_contact['emails'][0]['email']
primary_contact_email_type = primary_contact['emails'][0]['type']
if len(primary_contact['emails']) > 1:
primary_contact_other_emails = ", ".join(o['email'] for o in primary_contact['emails'][1:])
if primary_contact['urls']:
primary_contact_primary_url = primary_contact['urls'][0]['url']
if len(primary_contact['urls']) > 1:
primary_contact_other_urls = ", ".join(o['url'] for o in primary_contact['urls'][1:])
values.append([
id,
display_name,
lead_name,
description,
url,
status_id,
status_label,
primary_contact_name,
primary_contact_first_name,
primary_contact_last_name,
primary_contact_title,
primary_contact_primary_phone,
primary_contact_primary_phone_type,
primary_contact_other_phones,
primary_contact_email,
primary_contact_email_type,
primary_contact_other_emails,
primary_contact_primary_url,
primary_contact_other_urls,
created_by,
created_by_name,
updated_by,
updated_by_name,
date_created,
date_updated,
html_url
])
return values | 826b6a3fa522a53c9d486aed7fdc38638f12eef0 | 2,396 |
def width_pcc_dmera_2d(n, D, supp):
"""
Optimal width of the circuit for the pcc after compression
Args:
n(int): Number of scales
D(int): Number of cycles per scale
supp(list): List of integers
Returns:
int: Optimal width
"""
supp_con = [convert_2d_to_1d(c,n) for c in supp]
return optimal_width_freeze(pcc_dmera_2d(n,D,supp_con),supp_con) | 0df74985ed16aa4bb0295655d5093fe4a40f03a2 | 2,397 |
def global_delete(key):
"""Delete an entity from the global cache.
Args:
key (bytes): The key to delete.
Returns:
tasklets.Future: Eventual result will be ``None``.
"""
batch = _batch.get_batch(_GlobalCacheDeleteBatch)
return batch.add(key) | b9d9f8189f10c0d287081e6d65505c15271a672d | 2,398 |
def get_drawing_x(image: Image = None) -> float:
"""
Get the x coordinate value of the current drawing position (x,y).
Some drawing functions will use the current pos to draw.(see line_to(),line_rel(),move_to(),move_rel()).
:param image: the target image whose drawing pos is to be gotten. None means it is the target image
(see set_target() and get_target()).
:return: the x coordinate value of the current drawing position
"""
image = _get_target_image(image)
return image.get_x() | 6e176cb609868730bc44131d78fe646dccb84fdb | 2,399 |