| content | sha1 | id |
---|---|---|
| string (lengths 35 to 762k) | string (length 40) | int64 (0 to 3.66M) |
from subprocess import PIPE, Popen
def script(text, interpreter="sh"):
    """Execute a shell script.
    The script is passed to the interpreter via stdin and the return
    code of the interpreter is returned."""
    # text=True so the script body can be passed as a str rather than bytes
    process = Popen(interpreter, stdin=PIPE, text=True)
process.communicate(input=text)
process.wait()
return process.returncode | c4dcbe40f2868099bb8986b82753ab6e00c9a1c5 | 675 |
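A minimal usage sketch for `script` above, assuming a POSIX `sh` on the PATH; with the text-mode `Popen` shown above, the script body can be passed as a plain `str` and the interpreter's exit status comes back.

```python
# The script body is fed to the interpreter on stdin; its exit status is returned.
assert script('echo "hello from the shell"\n') == 0
# A non-zero exit status is propagated unchanged.
assert script('exit 3\n') == 3
```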
import warnings
from typing import Optional
def mask_to_image(
mask: _T_input, batch_first: bool = False,
color: Optional[str] = None,
origin: str = 'lower'
) -> np.ndarray:
"""
Creates an image from a mask `Tensor` or `ndarray`.
For more details of the output shape, see the tensorboardx docs
    Note:
        Clips the mask to the range [0, 1]. Any values outside of this range
        are clipped and a warning is emitted.
    Args:
        mask: Mask to plot
        batch_first: If `True`, `mask` is expected to have shape
            `(batch [optional], frames, features)`. If `False`, the batch axis
            is assumed to be in the second position, i.e.,
            `(frames, batch [optional], features)`.
color: A color map name. The name is forwarded to
`matplotlib.pyplot.cm.get_cmap` to get the color map. If `None`,
grayscale is used.
origin: Origin of the plot. Can be `'upper'` or `'lower'`.
Returns:
Colorized image with shape (color (1 or 3), features, frames)
"""
mask = to_numpy(mask, detach=True)
clipped_values = np.sum((mask < 0) | (mask > 1))
if clipped_values:
warnings.warn(
f'Mask value passed to mask_to_image out of range ([0, 1])! '
f'{clipped_values} values are clipped!'
)
image = np.clip(mask * 255, 0, 255)
image = image.astype(np.uint8)
image = _remove_batch_axis(image, batch_first=batch_first)
return _colorize(_apply_origin(image.T, origin), color) | a4679d78fd9df003fe91742fb0eb0707ca3fd5f8 | 677 |
def lerp(x0, x1, t):
""" Linear interpolation """
return (1.0 - t) * x0 + t * x1 | 82d9ce36dd5879c7aab64dc5615a2fb298471383 | 678 |
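A couple of spot checks for `lerp`: t=0 returns x0, t=1 returns x1, and intermediate values of t blend linearly between the endpoints.

```python
assert lerp(0.0, 10.0, 0.0) == 0.0   # t=0 -> x0
assert lerp(0.0, 10.0, 1.0) == 10.0  # t=1 -> x1
assert lerp(0.0, 10.0, 0.25) == 2.5  # a quarter of the way from x0 to x1
```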
from struct import unpack as _unpack
def read_uint4(f):
"""
>>> import io
>>> read_uint4(io.BytesIO(b'\\xff\\x00\\x00\\x00'))
255
>>> read_uint4(io.BytesIO(b'\\x00\\x00\\x00\\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack('<I', data)[0]
raise ValueError('not enough data in stream to read uint4') | 4a2ffd3f58100d44f0e430847f5a7d8ef1f54a33 | 679 |
def get_onto_class_by_node_type(ont: owlready2.namespace.Ontology, node_label: str):
"""Get an object corresponding to an ontology class given the node label.
`owlready2` doesn't make it easy to dynamically retrieve ontology classes.
This uses some (relatively unsafe) string manipulation to hack together a
solution.
Notes
-----
This should be refactored if/when a better solution is available!
"""
matches = [c for c in ont.classes() if str(c).split(".")[-1] == node_label]
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
return None
else:
raise ValueError(
"Error: Something is wrong with your ontology's class hierarchy! Check for duplicate classes with '{0}' in the name".format(
node_label
)
) | 0da35b5dc63b49cddece1cf0886d53315c77bf43 | 681 |
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq | 407f3c52781a1e6986feaa455f78224d64cb7ca8 | 682 |
def extract_filtered_series(data_frame, column_list):
"""
    Returns a filtered pandas Series (one-dimensional ndarray) from the targeted column(s).
Duplicate values and NaN or blank values are dropped from the result set which is
returned sorted (ascending).
:param data_frame: Pandas DataFrame
:param column_list: list of columns
    :return: pandas Series (one-dimensional ndarray)
"""
return data_frame[column_list].drop_duplicates().dropna(axis=0, how='all').sort_values(
column_list)
# return data_frame[column_list].str.strip().drop_duplicates().dropna().sort_values() | 33120a2abedd5e8a7801bd0dfd89b107b1b593cb | 683 |
# T, s3_rheader_tabs, settings and the HTML helpers (DIV, TABLE, TR, TH) are provided by the web2py/Sahana Eden environment
def event_rheader(r):
""" Resource headers for component views """
rheader = None
if r.representation == "html":
if r.name == "event":
# Event Controller
tabs = [(T("Event Details"), None)]
#if settings.has_module("req"):
# tabs.append((T("Requests"), "req"))
rheader_tabs = s3_rheader_tabs(r, tabs)
event = r.record
if event:
if event.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if event.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % T("Name")),
event.name),
TH("%s: " % T("Comments")),
event.comments,
TR(TH("%s: " % T("Zero Hour")),
event.zero_hour),
TR(closed),
), rheader_tabs)
if r.name == "incident":
# Incident Controller
tabs = [(T("Incident Details"), None)]
if settings.has_module("project"):
tabs.append((T("Tasks"), "task"))
if settings.has_module("hrm"):
tabs.append((T("Human Resources"), "human_resource"))
if settings.has_module("asset"):
tabs.append((T("Assets"), "asset"))
tabs.append((T("Facilities"), "site"))
tabs.append((T("Map Configuration"), "config"))
rheader_tabs = s3_rheader_tabs(r, tabs)
record = r.record
if record:
if record.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if record.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % T("Name")),
record.name),
TH("%s: " % T("Comments")),
record.comments,
TR(TH("%s: " % T("Zero Hour")),
record.zero_hour),
TR(closed),
), rheader_tabs)
return rheader | 8e63b275927f2ae1a98076185e56be4f5e565ce3 | 684 |
def backend_is_up(backend):
"""Returns whether a server is receiving traffic in HAProxy.
:param backend: backend dict, like one of those returned by smartstack_tools.get_multiple_backends.
:returns is_up: Whether the backend is in a state that receives traffic.
"""
return str(backend['status']).startswith('UP') | 9cb729bc14821b97d21d3d864c3ca7a1d6d46085 | 686 |
import glob
def get_bot_files_glob(**kwargs):
"""Returns a `list` with the matching file names using the format string for BOT data """
outdict = {}
kwcopy = kwargs.copy()
test_name = kwcopy.pop('testName').lower()
nfiles = kwcopy.get('nfiles', None)
rafts = get_raft_names_dc(kwcopy['run'], kwcopy.get('teststand', 'bot'))
for raft in rafts:
raftdict = {}
slots = getSlotList(raft)
for slot in slots:
glob_string = BOT_FORMATTER(raft=raft, slot=slot, testName=test_name, **kwcopy)
files = sorted(glob.glob(glob_string))
if nfiles is None:
raftdict[slot] = files
else:
raftdict[slot] = files[0:nfiles]
outdict[raft] = raftdict
return outdict | 818abf757b09866afba712c114d1a87938cad990 | 687 |
def minutiae_selection(minutiae):
""" Selects the subset of most reliable minutiae.
"""
M = np.array([(m['x'], m['y'], m['direction'], m['reliability']) for m in minutiae])
M[:,2] = np.round(np.rad2deg(nbis_idx2angle(M[:,2], N=16)))
M[:,3] = np.round(M[:,3] * 100.0)
M = M.astype(int)
M = M[M[:,3] > np.percentile(M[:,3], 5), :]
return M | 2eff6c4f4f92b395da25aaa83a1de46fb07f4269 | 688 |
def alt_blend_value(data, i, j, k):
"""Computes the average value of the three vertices of a triangle in the
simplex triangulation, where two of the vertices are on the upper
horizontal."""
keys = alt_triangle_coordinates(i, j, k)
return blend_value(data, i, j, k, keys=keys) | 3ddf30c8bb983622d2df99eb76511270fcf62c1b | 689 |
def _BinaryCrossEntropy():
"""Returns a layer that computes prediction-target cross entropies."""
def f(model_output, target_category): # pylint: disable=invalid-name
shapes.assert_same_shape(model_output, target_category)
batch_size = model_output.shape[0]
j = jnp.dot(jnp.transpose(target_category), jnp.log(model_output))
j += jnp.dot(jnp.transpose(1 - target_category), jnp.log(1 - model_output))
j = -1.0/batch_size * jnp.squeeze(j)
return j
return Fn('_BinaryCrossEntropy', f) | 2def2ce8e8e32af94ac67de4d99e28e15ff07622 | 690 |
def normalize(subs, strict):
"""
Normalises subtitles.
:param subs: :py:class:`Subtitle` objects
:param bool strict: Whether to enable strict mode, see
:py:func:`Subtitle.to_srt` for more information
:returns: A single SRT formatted string, with each input
:py:class:`Subtitle` represented as an SRT block
:rtype: str
:raises SRTParseError: If parsing fails.
"""
return _cli.compose_suggest_on_fail(subs, strict) | e0e90be189fe77b123bfe960e4e9b9e63977bbe3 | 691 |
def Stern_Brocot(n):
"""
Another way to iterate over rationals
References:
https://stackoverflow.com/questions/24997970/iterating-over-parts-of-the-stern-brocot-tree-in-python
"""
states = [(0, 1, 1, 1)]
result = []
while len(states) != 0:
a, b, c, d = states.pop()
if a + b + c + d <= n:
result.append((a + c, b + d))
states.append((a, b, a + c, b + d))
states.append((a + c, b + d, c, d))
return result | cdaa919932668b33e8233c59964c3ca9bbc30119 | 692 |
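A small worked example of the traversal above: states (a, b, c, d) are pruned once a+b+c+d exceeds n, so n=4 only yields the fractions 1/2 and 1/3 (as (numerator, denominator) pairs).

```python
assert Stern_Brocot(4) == [(1, 2), (1, 3)]
# Raising the bound admits more mediants of the tree.
assert set(Stern_Brocot(7)) >= {(1, 2), (1, 3), (2, 3)}
```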
def compare_elements(prev_hash_dict, current_hash_dict):
"""Compare elements that have changed between prev_hash_dict and current_hash_dict.
Check if any elements have been added, removed or modified.
"""
changed = {}
for key in prev_hash_dict:
elem = current_hash_dict.get(key, '')
if elem == '':
changed[key] = 'deleted'
elif elem != prev_hash_dict[key]:
changed[key] = 'changed'
for key in current_hash_dict:
elem = prev_hash_dict.get(key, '')
if elem == '':
changed[key] = 'added'
return changed | 2f24863a16aca86ccd3a82a4148b34282349e640 | 693 |
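A quick worked example for `compare_elements`, assuming plain dicts mapping keys to hash strings; the expected classification is shown in the assertion.

```python
prev = {'a': 'h1', 'b': 'h2', 'd': 'h4'}
curr = {'a': 'h1', 'b': 'h2-new', 'c': 'h3'}
# 'a' is unchanged, 'b' got a new hash, 'd' disappeared and 'c' is new.
assert compare_elements(prev, curr) == {'b': 'changed', 'd': 'deleted', 'c': 'added'}
```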
from random import choices
def generator_string(lang_uses: str = 'all', char_count: int = 1,
                     char_size: str = 'lower') -> str:
    """Generate a random string.
    :param lang_uses: character set to draw from
    :type lang_uses: str
    :param char_count: how many characters to return
    :type char_count: int
    :param char_size: character case (e.g. lower/upper)
    :type char_size: str
    :return: str
    """
random_string = ''.join(choices(get_alphabet(lang_uses=lang_uses, char_size=char_size), k=char_count))
return random_string | fc517613ce1df5f3d208b4d6b3e5a356ac2f7e13 | 694 |
def rate_string(rate, work_unit, computer_prefix=False):
"""Return a human-friendly string representing a rate. 'rate' is given
in 'work_unit's per second. If the rate is less than 0.1 then the inverse
is shown.
Examples:
>>> rate_string(200000, "B", True)
'195KB/s'
>>> rate_string(0.01, "file")
'1m40s/file'
>>> rate_string(1.0 / 24 / 3600, "earthrot")
'1d0h/earthrot'
"""
if rate > 0 and rate < 0.1:
return "%s/%s" % (time_string(1.0 / rate), work_unit)
else:
return "%s/s" % (quantity_string(rate, work_unit, computer_prefix)) | 4e7f62684be770525812465014c35dae0ce1806b | 696 |
def get_queue_arn(sqs_client, queue_url: str) -> str:
"""Encapsulates SQS::get_queue_attributes with special attribute QueueArn.
:param sqs_client: The Boto3 AWS SQS client object.
:param queue_url: URL of the queue
:return: The Amazon Resource Name (ARN) of the queue.
"""
try:
response = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["QueueArn"])
queue_arn = response["Attributes"]["QueueArn"]
logger.info("Retrieved queue ARN: '%s' for URL: '%s'.", queue_arn, queue_url)
except ClientError:
logger.exception("Couldn't retrieve ARN for queue URL: %s.", queue_url)
raise
else:
return queue_arn | a8fbf9e8271e8809494da7634fff682769020ecd | 697 |
import psutil
def any_flexloggers_running() -> bool:
"""Returns whether any FlexLogger.exe processes are running."""
for proc in psutil.process_iter(["pid", "name"]):
if proc.info["name"].lower() == "flexlogger.exe":
return True
return False | 3dc7fb6c5120e41ff26fda41b111dec9c08560a3 | 698 |
def _get_non_white_runs(mask):
"""Returns those runs that are delimeted by white cells."""
res = []
in_a_block = False
last_idx = len(mask) - 1
for idx, cell in enumerate(mask):
if cell != WHITE and not in_a_block:
in_a_block = True
start = idx
if cell == WHITE and in_a_block:
in_a_block = False
end = idx - 1
res.append(Block(start, end, length=end - start + 1))
if idx == last_idx and in_a_block:
res.append(Block(start, last_idx, length=last_idx - start + 1))
return res | 0a1c4251b0a86dc95f1cea8962827b88f4945edb | 699 |
def cov(x, y, w):
"""Calculates weighted covariance"""
return np.sum(
w
* (x - np.average(x, axis=0, weights=w))
* (y - np.average(y, axis=0, weights=w))
) / np.sum(w) | b590c43c02321c3503271c56f6eca1b48a3169d8 | 700 |
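A quick numeric check of the weighted covariance above: with equal weights it reduces to the population covariance.

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 4.0, 6.0])
w = np.array([1.0, 1.0, 1.0])
# Deviations are (-1, 0, 1) and (-2, 0, 2), so the mean product is 4/3.
assert np.isclose(cov(x, y, w), 4.0 / 3.0)
```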
from typing import List
from typing import Dict
def eval_metrics_all(
y: List[np.ndarray],
y_hat: List[np.ndarray]
) -> Dict[str, float]:
"""Calculates combined accuracy, f1, precision, recall and AUC scores for
multiple arrays. The arrays are shorted to the minimum length of the
corresponding partner and stacked on top of each other to calculated the
combined scores.
Arguments:
y (np.ndarray): Ground truth.
y_hat (np.ndarray): Prediction.
Returns:
Dict[str, float]: Returns a dict with all scores.
Example:
>>> y = [np.ones((10, 1)), np.zeros((10, 1))]
>>> y_hat = [np.ones((10, 1)), np.zeros((10, 1))]
>>> eval_metrics_all(y, y_hat)
{'accuracy': 1.0, 'precision': 1.0, 'recall': 1.0, 'f1': 1.0,
'roc_auc': 1.0}
"""
if len(y) != len(y_hat):
        raise ValueError('y and y_hat must have the same number of elements.')
# allow 1d or 2d arrays with the 2nd dimension of 1
check_ndim(*y, *y_hat, ndim=2, strict=False)
check_size(*y, *y_hat, size=1, axis=1, strict=False)
y = list(map(lambda x: x.reshape(-1), y))
y_hat = list(map(lambda x: x.reshape(-1), y_hat))
# truncate corresponding arrays to the same length
y_, y_hat_ = np.hstack(list(truncate(*zip(y, y_hat))))
return eval_metrics(y_, y_hat_) | 79374750a7bf648dc8b898b187fd4b19f470bc0d | 701 |
import albumentations as A
def fake_dataset_no_label(path, range1, batch_size=32, shuffle=False):
"""
Create fake dataset with no label
Args:
path (str) : provide the data settings
range1 (tuple) : range of generated images
batch_size (int): number of samples contained in each generated batch
shuffle (bool) : shuffle the data
Returns:
data loader
"""
list_ids = []
labels = {}
for i in range(range1[0], range1[1]):
list_ids.append(path + 'gen_'+str(i)+'.jpg')
labels[path + 'gen_'+str(i)+'.jpg'] = -1
# as per the author's citation, we have transformed the input image
# (resize to 64 * 64, 256 * 256, 224 * 224)
pre_process = [(64, 64), (256, 256), (224, 224)]
mean_normalize = (0.485, 0.456, 0.406)
std_normalize = (0.229, 0.224, 0.225)
transform = A.Compose([
A.Resize(pre_process[0][0], pre_process[0][1]),
A.Resize(pre_process[1][0], pre_process[1][1]),
A.CenterCrop(width=pre_process[2][0], height=pre_process[2][1]),
A.Normalize(mean=mean_normalize, std=std_normalize)
])
loader = data_iterator_celeba(list_ids, labels,
transform=transform, batch_size=batch_size, shuffle=shuffle)
return loader | 24db6921830cf775ee1c9a2a3797dcc521c202bb | 702 |
from os import listdir
from os.path import isfile, join
def number_of_songs_match(folder, songs):
"""
Checks if the number of music files in folder matches the number of tracks
listed in songs.
Arguments:
- folder: path to folder where music files are found
- songs: list of track numbers
Returns:
True / False
"""
files = [f for f in listdir(folder) if isfile(join(folder, f)) and f.endswith('.mp3')]
if len(files) != len(songs):
return False
return True | 0de44cfdce9add35fba61efd0b0351f450df0e9e | 703 |
def spaces_to_pluses(q, city, state):
"""
"""
if city and state:
return split_text(q), split_text(city), split_text(state)
else:
return split_text(q), 'Nationwide', ' ' | 5ad007d7a307fc58812dc5b1fd55542411a7a9dc | 704 |
from typing import Optional
from typing import List
def _check_str_input(var, input_name: str, valid_options: Optional[List[str]] = None) -> str:
"""
_check_str_input
Convenience function to check if an input is a string. If argument valid_options is given, this
function will also check that var is a valid option from the valid_options specified.
Parameters
----------
var
the input variable to check
input_name : str
the name of the variable to include if an error is raised
valid_options: List[str], optional
a list of valid options for var
Returns
-------
str
        the input var after lowering and stripping the string
"""
if not isinstance(var, str):
raise ValueError("Invalid input {0} for {1}. Input {1} must be a string.".format(
var, input_name))
var = var.strip().lower()
if valid_options is not None:
valid_options = [option.strip().lower() for option in valid_options]
if var not in valid_options:
raise ValueError("Invalid input {0} for {1}. Input {1} must be one of the following "
"options: {2}.".format(var, input_name, valid_options))
return var | 357a8516fe65dddb35b7799ddc68b892da75ea02 | 705 |
def run_U_fixed_dynamics(**kwargs):
"""
Run simulation for a given set of parameter values
and generate relevant plots
"""
# Steady state checks
#print('============================== U fixed, U='+str(kwargs['U']))
a = mpde(**kwargs)
#lib.disp_params(a) # display non-array parameters
#t0 = time.time()
a.run()
#t1 = time.time()
#print('*\t Run time',t1-t0)
initial_mass = np.sum(a.sol[0,:])*a.dx
mass_true = lib.mass_fn(a.t,initial_mass,**kwargs)
#lib.disp_norms(a,ground_truth_values)
fig = plt.figure(figsize=(10,5))
ax11 = fig.add_subplot(121)
ax12 = fig.add_subplot(122)
mass_pde = np.sum(a.sol,axis=1)*a.dx
ax11.plot(a.t,mass_true,label='mass true')
ax11.plot(a.t,mass_pde,label='mass pde')
ax12.plot(a.t,np.abs(mass_pde - mass_true),label='|pde-(true)|')
ax11.set_title('mass over time')
ax12.set_title('mass diff')
ax11.set_xlabel('t')
ax12.set_xlabel('t')
ax11.legend()
ax12.legend()
plt.tight_layout()
# include dt
kwargs = {**kwargs, **{'dt':a.dt}}
fname = (DIR_TESTS
+ 'U_fixed_dynamics_'
+ lib.fname_suffix(**kwargs))
plt.savefig(fname)
plt.close()
return np.amax(np.abs(mass_true - mass_pde)) | 735e70c9082ffd92d2e1f5c20b0ca222f5ca25be | 707 |
def removeDuplicates(bookmarks, newBookmarks):
"""Creates and returns a new list of bookmarks
without any duplicates"""
nodup = []
for bmNew in newBookmarks:
foundDup = False
for bm in bookmarks:
if (bm.linkURL == bmNew.linkURL):
foundDup = True
break
if (not foundDup):
nodup.append(bmNew)
return nodup | 12280e827796b95be30f645c5ca0e495379d6a55 | 708 |
def TNaming_Naming_GetID(*args):
"""
* following code from TDesignStd ==============================
:rtype: Standard_GUID
"""
return _TNaming.TNaming_Naming_GetID(*args) | 4b9c6aa4b6b9029d5ac879853b85780e46984d50 | 709 |
def assigned_user_add(request, location_id, destination):
"""
Assigned user add is a POST function where it will ADD a user to a project/task/opportunity/requirement.
:param request:
:param location_id:
:param destination:
:return:
"""
# Load the template
t = loader.get_template('NearBeach/blank.html')
# context
c = {
}
return HttpResponse(t.render(c, request)) | 92f85aad0eb99f867c8c680ed6e6d49be002ee8c | 710 |
import re
def _parse_challenge(header):
# type: (str) -> Dict[str, str]
"""Parse challenge header into service and scope"""
ret = {}
if header.startswith(BEARER):
challenge_params = header[len(BEARER) + 1 :]
matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)
_clean(matches)
ret = {}
for i in range(0, len(matches), 2):
ret[matches[i]] = matches[i + 1]
return ret | dc9044cdfa585a9dfb2cb1de9349d945e7afc985 | 711 |
def test_get_batch(source):
""" Creates an input/target pair for evaluation """
seq_len = len(source) - 1
data = source[:seq_len]
target = source[1:1+seq_len].view(-1)
return data, target | 0c26f9f957063bb136f9fe77ed1a8bbdedc38a15 | 712 |
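A short sketch of what `test_get_batch` returns, assuming a 1-D `torch.Tensor` source: the target is the input shifted by one position, as in standard language-model evaluation.

```python
import torch

source = torch.arange(6)                        # tensor([0, 1, 2, 3, 4, 5])
data, target = test_get_batch(source)
assert torch.equal(data, torch.arange(5))       # [0, 1, 2, 3, 4]
assert torch.equal(target, torch.arange(1, 6))  # [1, 2, 3, 4, 5]
```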
def getReceptorResidues(filename=None, data=None):
"""Accepts a PDB(TQ) file and returns a
nested dictionary of:
chain:residue:atoms
"""
if filename:
lines = getLines(filename)
else:
lines = data
structure = {}
for l in lines:
if l.startswith("ATOM") or l.startswith("HETATM"):
res_t=l[17:20].strip()
res_n=l[22:27].strip()
res=res_t+res_n
chain=l[21].strip()
atom=l[12:17].strip()
if not chain in structure:
structure[chain]={}
if not res in structure[chain]:
structure[chain][res] = []
if not atom in structure[chain][res]:
structure[chain][res].append(atom)
return structure | e409a61bac880bd586a1a21865389b30c1c28838 | 713 |
def extract_first_compute_cell(text):
"""
    INPUT: a block of wiki-like marked up text
    OUTPUT:
- ``meta`` - meta information about the cell (as a
dictionary)
- ``input`` - string, the input text
- ``output`` - string, the output text
- ``end`` - integer, first position after }}} in
text.
"""
# Find the input block
i = text.find('{{{')
if i == -1:
raise EOFError
j = text[i:].find('\n')
if j == -1:
raise EOFError
k = text[i:].find('|')
if k != -1 and k < j:
try:
meta = dictify(text[i+3:i+k])
except TypeError:
meta = {}
i += k + 1
else:
meta = {}
i += 3
j = text[i:].find('\n}}}')
if j == -1:
j = len(text)
else:
j += i
k = text[i:].find('\n///')
if k == -1 or k+i > j:
input = text[i:j]
output = ''
else:
input = text[i:i+k].strip()
output = text[i+k+4:j]
return meta, input.strip(), output, j+4 | 0dabdb5ad7b4b1d6f513d485782d25f134cf3f62 | 714 |
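A worked example on a minimal wiki cell: everything between `{{{` and `///` is the input, the rest up to `}}}` is the output, and `end` points just past the closing braces (no `|` in the header, so `meta` stays empty and `dictify` is never called).

```python
text = "{{{\n2+2\n///\n4\n}}}"
meta, cell_input, cell_output, end = extract_first_compute_cell(text)
assert meta == {}
assert cell_input == "2+2"
assert cell_output == "\n4"
assert end == len(text)
```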
from typing import Union
from typing import IO
from typing import Dict
def check_schema(loader_impl: LoaderImpl) -> LoaderImpl:
"""Wrapper method to check column names and types."""
@wraps(loader_impl)
def wrapped_loader(fp: Union[str, IO], extra_fields: Dict[str, str] = None) -> DataFrame:
name = fp if isinstance(fp, str) else fp.name
data = loader_impl(fp, extra_fields)
schema = FILE_SCHEMA if not extra_fields else {**FILE_SCHEMA, **extra_fields}
for column in list(data.columns):
if column not in schema:
log.info(f'From file ({name}): ignoring column \'{column}\'')
data.drop([column], axis=1, inplace=True)
for column, dtype in schema.items():
if column not in data.columns:
raise RuntimeError(f'From file ({name}): missing column \'{column}\'')
else:
try:
data[column] = data[column].astype(dtype)
except (TypeError, ValueError) as error:
raise RuntimeError(f'From file ({name}), column \'{column}\': {error}') from error
return data
return wrapped_loader | 471153738204a4aabc7219c23f261a8761ff8e91 | 715 |
import logging
import datetime
def get_album_photos(album, offset, vk_session):
"""Retrieves list of photos within given album from VK.com
:param album:
:type album: str
:param offset:
:type offset: int or None
:param vk_session: instance of :class:`vk_api.VkApi`
:type vk_session: :class:`vk_api.VkApi`
:return:
"""
def normpath(filename):
keepcharacters = [' ', '.', '_', ',']
return "".join(c for c in filename
if c.isalnum() or c in keepcharacters).rstrip()
items = []
try:
if USER_PHOTOS_ALBUM_ID == album['id']:
response = vk_session.method(
'photos.getUserPhotos',
values={
'user_id': vk_session.token['user_id'],
'count': 1000,
'offset': offset or 0,
'photo_sizes': 1
})
else:
response = vk_session.method(
'photos.get',
values={
'owner_id': vk_session.token['user_id'],
'album_id': album['id'],
'offset': offset or 0,
'photo_sizes': 1
})
except Exception as e:
logging.error(e)
return items
image_types = {
's': 0,
'm': 1,
'x': 2,
'o': 3,
'p': 4,
'q': 5,
'r': 6,
'y': 7,
'z': 8,
'w': 9
}
if 'items' in response:
for item in response['items']:
sizes = item.get('sizes')
if not sizes:
logging.info('Item skipped!')
continue
newlist = sorted(
sizes,
key=lambda x: image_types.get(x.get('type')),
reverse=True)
image = {
'id': item['id'],
'date': datetime.datetime.fromtimestamp(item['date']),
'url': newlist[0].get('url')
}
if item.get('text'):
image['title'] = normpath(item['text'])
items.append(image)
return items | e6bc3fae5c0c132d10eb0af200e8e6a9872fa04b | 716 |
def get_view_cursor(**kwargs) -> 'XTextViewCursor':
"""
Gets current view cursor which is a XTextViewCursor
Keyword Args:
o_doc (object, optional): current document (xModel)
Returns:
object: View Cursor
"""
o_doc = kwargs.get('o_doc', None)
if o_doc is None:
o_doc = get_xModel()
# https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1text_1_1XTextViewCursor.html
frame: object = o_doc.CurrentController.Frame
current_controler: object = frame.getController() # XController
view_cursor = current_controler.getViewCursor()
return view_cursor | 6fa8ce40425684e2238337da59c52e3004b18787 | 717 |
from sklearn import metrics
def get_common_metrics(test_values, predicted):
"""
Return some common classifier metrics
:param test_values: values to test with
:param predicted: predicted values
:return: accuracy, precision and recall value
"""
accuracy = metrics.accuracy_score(test_values, predicted)
precision = metrics.precision_score(test_values, predicted)
recall = metrics.recall_score(test_values, predicted)
return accuracy, precision, recall | badbe3db4641352c1f36db1996c498f3b467d8f4 | 718 |
def align_jp_and_en_boxes(pd_results) -> pd.DataFrame:
"""boxes are not ordered on the page, so heuristically must match them based on
location on page
"""
japanese_results = pd.DataFrame.copy(
pd_results[pd_results.language == "jp"]).reset_index()
english_results = pd.DataFrame.copy(
pd_results[pd_results.language == "en"]).reset_index()
japanese_vals = japanese_results[["left", "top"]].values
english_vals = english_results[["left", "top"]].values
n = NearestNeighbors(n_neighbors=1)
n.fit((japanese_vals))
dis, index = n.kneighbors(english_vals)
english_results["boxID"] = index.reshape(-1)
return japanese_results.append(english_results).reset_index() | 8292cd2ac91c497ba3e8e737b0194b27ffee4455 | 720 |
def productivity_flag():
"""
Real Name: b'Productivity Flag'
Original Eqn: b'1'
Units: b'Dmnl'
Limits: (None, None)
Type: constant
b''
"""
return 1 | 5de320366584f3e2803172c9e97c5b3b1fc79715 | 721 |
def create_cartpole_network(hidden_layers=2, neurons=56):
"""
Network that can solve gyms 'CartPole-v1' environment.
"""
net = Sequential()
net.add(Dense(
neurons,
input_shape=(4,),
kernel_regularizer=l2(0.001),
kernel_initializer=GlorotNormal(),
activation='relu'),
)
net.add(Dropout(0.1))
for n in range(hidden_layers):
net.add(Dense(
neurons,
kernel_regularizer=l2(0.001),
kernel_initializer=GlorotNormal(),
activation='relu'),
)
net.add(Dropout(0.1))
net.add(Dense(2, activation='relu'))
return net | 52bdf8352595dcd0cb73951c0b2ca575357c38d6 | 722 |
def format_as_rfc2822(*args, **kwargs):
    """Alias of ``format_as_rss()``."""
    return format_as_rss(*args, **kwargs) | 3eb94d85241b8a96ee841233c201acd65fc683f3 | 723 |
def train_model_exponentially(train_images, train_labels, parts, exponent):
"""
Trains a model incrementally, using training data partitions that increase exponentially, and exports it.
:param train_images:
:param train_labels:
:param parts:
:param exponent:
:return: The final model
"""
normal_model = model_handler.cnn_model()
# prepare data
train_images, train_labels = data_manipulator.prepare_visual_data(train_images, train_labels)
# split training data to partitions
partitioned_train_images = partition_data_exponentially(train_images, parts, exponent)
partitioned_train_labels = partition_data_exponentially(train_labels, parts, exponent)
# train model
for part in range(parts):
normal_model.fit(partitioned_train_images[part], partitioned_train_labels[part], epochs=5, batch_size=64)
model_handler.save_model(normal_model, 'normal_model_exponential_part_' + str(part + 1) + '_of_' + str(parts))
return normal_model | 0e107b2e3fa69233679b9c84a7a3caf015ba53e4 | 724 |
def get_config_of(tests, test_name):
"""
Find generic values of test
"""
for test in tests:
if test.name == test_name:
try:
return test._test_case._run._config # pylint: disable=protected-access
except AttributeError:
return test._run._config # pylint: disable=protected-access
raise KeyError(test_name) | 821f0b180a1846b432fd55afc39b1b24b4a80de0 | 725 |
from typing import Union
from typing import List
def transmit_format(func):
"""Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Dataset" = args[0]
args = args[1:]
else:
self: "Dataset" = kwargs.pop("self")
# don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None
unformatted_columns = set(self.column_names) - set(self._format_columns or [])
self_format = {
"type": self._format_type,
"format_kwargs": self._format_kwargs,
"columns": self._format_columns,
"output_all_columns": self._output_all_columns,
}
# apply actual function
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
# re-apply format to the output
for dataset in datasets:
new_format = self_format.copy()
if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns)
# sort the columns to have a deterministic list of columns that we can compare with `out_format`
new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns)
out_format = {
"type": dataset._format_type,
"format_kwargs": dataset._format_kwargs,
"columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None,
"output_all_columns": dataset._output_all_columns,
}
if out_format != new_format: # only apply if there's a change not to update the fingerprint for nothing
dataset.set_format(**new_format)
return out
wrapper._decorator_name_ = "transmit_format"
return wrapper | 3a20f6ad1d5f7b826742f7c55694e07a5c530273 | 726 |
def parse_version(version: str) -> Version:
"""Parses version string to Version class."""
parsed = version.split(".")
try:
return Version(int(parsed[0]), int(parsed[1]), int(parsed[2] if len(parsed) > 2 else -1))
    except (IndexError, ValueError):
return Version(0, 0, -1) | 89a785e97fc40b6e4002f2d35e75777758d665d6 | 727 |
def rate_of_change(x, t_Δ=1):
"""
:param x: a series
:param t_Δ: the intervals between each observation (series or constant)
:return: rate of change for x
"""
diffs = np.diff(x) / t_Δ
return diffs | a6e07fbc8c29a66a6904eb36011d0c093e028d58 | 728 |
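A small check for `rate_of_change`: three observations taken two time units apart rise by 2 and then 4, so the rates are 1 and 2 per unit time.

```python
import numpy as np

assert np.array_equal(rate_of_change(np.array([0.0, 2.0, 6.0]), 2),
                      np.array([1.0, 2.0]))
```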
def draw_cutout(data, title, lower_bound=0, upper_bound=1, is_mobile=False):
""" Draw a cutout data
"""
# Update graph data for stamps
data = np.nan_to_num(data)
data = sigmoid_normalizer(data, lower_bound, upper_bound)
data = data[::-1]
data = convolve(data, smooth=1, kernel='gauss')
if is_mobile:
mask = create_circular_mask(len(data), len(data[0]), center=None, radius=None)
data[~mask] = np.nan
if is_mobile:
zsmooth = 'fast'
else:
zsmooth = False
fig = go.Figure(
data=go.Heatmap(
z=data, showscale=False, hoverinfo='skip', colorscale='Greys_r', zsmooth=zsmooth
)
)
# Greys_r
axis_template = dict(
autorange=True,
showgrid=False, zeroline=False,
linecolor='black', showticklabels=False,
ticks='')
fig.update_layout(
title='',
margin=dict(t=0, r=0, b=0, l=0),
xaxis=axis_template,
yaxis=axis_template,
showlegend=True,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
if not is_mobile:
fig.update_layout(width=150, height=150)
style = {'display': 'inline-block', 'height': '10pc', 'width': '10pc'}
else:
style = {'display': 'inline-block', 'height': '5pc', 'width': '5pc'}
graph = dcc.Graph(
id='{}-stamps'.format(title),
figure=fig,
style=style,
config={'displayModeBar': False}
)
return graph | 016e624fab9aa3b170dafba509a2d61f5444e9bb | 729 |
def _check_sample(sample_pair: dict):
"""
Controls a sample.
Parameters
----------
sample_pair : dict
        Sample dict containing image and mask: {'image': image, 'mask': mask}
Returns
-------
sample : dict
        Sample dict containing image and mask: {'image': image, 'mask': mask}
"""
if isinstance(sample_pair, dict):
if len(sample_pair) != 2:
raise ValueError(
"Sample must contain image and mask: " "{'image': image, 'mask': mask}"
)
else:
raise TypeError("Sample must be a dict like: {'image': image, 'mask': mask}")
return sample_pair | 112e9e46a753f8754d25ddbfe2505535c2f9ac96 | 730 |
def _make_cls(cls, attrs):
"""Make the custom config class."""
return type(f'Custom{cls.__name__}', (cls, ), attrs, ) | bbe1f7694fbb30bdcb3e3c5df0207ae641d022b3 | 731 |
import datetime
def get_date(delta):
"""Build a date object with given day offset"""
date = datetime.datetime.now()
if delta is not None:
offset = datetime.timedelta(days=delta)
date = date + offset
date = date.strftime("%A %-m/%-d")
return date | ef3762a088946e81a8a26165395dad356e103ec9 | 732 |
def mover_alfil(tablero, x_inicial, y_inicial, x_final, y_final):
"""
(list of list, int, int, int, int) -> list of list
:param tablero: list of list que representa el tablero
:param x_inicial: int que representa la posicion inicial en X
:param y_inicial: int que representa la posicion inicial en Y
:param x_final: int que representa la posicion final en X
:param y_final: int que representa la posicion final en Y
:return: list of list que representa un tablero final
"""
tab = tablero.copy()
if ((x_inicial - y_inicial == x_final - y_final) or (x_inicial + y_inicial == x_final + y_final)) and tab[x_inicial][y_final].lower() == 'a':
if (x_inicial != x_final) and (y_inicial != y_final):
for x in range(x_inicial +1, x_final):
if tab[x][y_final] != ' ':
raise ValueError('El camino no es valido')
for y in range(y_inicial +1, y_final):
if tab[x_final][y] != ' ':
raise ValueError('El camino no es valido')
tab[x_final][y_final] = 'a'
tab[x_inicial][y_inicial] = ' '
return tab | 4c31db653fd448878f3e629b1237173f3eb26a56 | 733 |
from scipy.interpolate import UnivariateSpline
def interpolate_atmosphere(data, Z, s=0.25):
""" This module generates a 1d array for the model plasma preesure, plasma
density, temperature and mean molecular weight.
"""
hdata = np.array(u.Quantity(data['Z']).to(u.m))
# interpolate total pressure, temperature and density profiles
pdata_f = UnivariateSpline(hdata,np.array(np.log(data['p'])),k=1, s=s)
Tdata_f = UnivariateSpline(hdata,np.array(np.log(data['T'])),k=1, s=s)
rdata_f = UnivariateSpline(hdata,np.array(np.log(data['rho'])),k=1, s=s)
#s=0.0 to ensure all points are strictly used for ionisation state
muofT_f = UnivariateSpline(hdata,np.array(np.log(data['mu'])),k=1, s=0.0)
outdata = Table()
outdata['Z'] = Z
outdata['p'] = np.exp(pdata_f(Z.to(u.m))) * data['p'].unit
outdata['T'] = np.exp(Tdata_f(Z.to(u.m))) * data['T'].unit
outdata['rho'] = np.exp(rdata_f(Z.to(u.m))) * data['rho'].unit
outdata['mu'] = np.exp(muofT_f(Z.to(u.m))) * u.one
return outdata | 3bddc5972fe0e5d4c814a6311775e7fa9777ca79 | 735 |
def exponential_coulomb_uniform_correlation_density(
density,
amplitude=constants.EXPONENTIAL_COULOMB_AMPLITUDE,
kappa=constants.EXPONENTIAL_COULOMB_KAPPA):
"""Exchange energy density for uniform gas with exponential coulomb.
Equation 24 in the following paper provides the correlation energy per length
for 1d uniform gas with exponential coulomb interaction.
One-dimensional mimicking of electronic structure: The case for exponentials.
Physical Review B 91.23 (2015): 235141.
https://arxiv.org/pdf/1504.05620.pdf
y = pi * density / kappa
correlation energy per length
= -amplitude * kappa * y ** 2 / (pi ** 2) / (
alpha + beta * sqrt(y) + gamma * y + delta * sqrt(y ** 3)
+ eta * y ** 2 + sigma * sqrt(y ** 5)
+ nu * pi * kappa ** 2 / amplitude * y ** 3)
correlation energy density
= correlation energy per length * pi / (kappa * y)
= -amplitude * y / pi / (
alpha + beta * sqrt(y) + gamma * y + delta * sqrt(y ** 3)
+ eta * y ** 2 + sigma * sqrt(y ** 5)
+ nu * pi * kappa ** 2 / amplitude * y ** 3)
Note the correlation energy density converge to zero at high density limit.
Args:
density: Float numpy array with shape (num_grids,).
amplitude: Float, parameter of exponential Coulomb interaction.
kappa: Float, parameter of exponential Coulomb interaction.
Returns:
Float numpy array with shape (num_grids,).
"""
y = jnp.pi * density / kappa
alpha = 2.
beta = -1.00077
gamma = 6.26099
delta = -11.9041
eta = 9.62614
sigma = -1.48334
nu = 1.
# The derivative of sqrt is not defined at y=0, we use two jnp.where to avoid
# nan at 0.
finite_y = jnp.where(y == 0., 1., y)
out = -amplitude * finite_y / jnp.pi / (
alpha + beta * jnp.sqrt(finite_y)
+ gamma * finite_y + delta * finite_y ** 1.5
+ eta * finite_y ** 2 + sigma * finite_y ** 2.5
+ nu * jnp.pi * kappa ** 2 / amplitude * finite_y ** 3
)
return jnp.where(y == 0., -amplitude * y / jnp.pi / alpha, out) | dc2227f10cc64a3aa857494322f29f2b82b68da3 | 736 |
import torch
def val(model, dataloader, use_gpu):
"""val. the CNN model.
Args:
model (nn.model): CNN model.
dataloader (dataloader): val. dataset.
Returns:
tuple(int, in): average of image acc. and digit acc..
"""
model.eval() # turn model to eval. mode(enable droupout layers...)
result_digit = []
result_img = []
for i, (data, label) in enumerate(dataloader):
with torch.no_grad(): # disable autograd
if use_gpu:
input = data.cuda()
score = model(input)
pred = decode(score)
tmp = pred == label.numpy()
result_digit += tmp.tolist()
result_img += np.all(tmp, axis=1).tolist()
i = np.random.randint(0, len(dataloader) - 1)
im_show = np.transpose(input[i].detach().cpu().numpy(), (1, 2, 0))
im_show = np.repeat((im_show * 255).astype(np.uint8), 3, -1)
# turn model back to training mode.
model.train()
return np.mean(result_img), np.mean(result_digit), [im_show, pred[i]] | 8cecc0204855a5267a11edf6936b6415fafc8120 | 737 |
def ps(s):
"""Process String: convert a string into a list of lowercased words."""
return s.lower().split() | 9bf25b31d00544d96f96564ce67ff5def9a16348 | 738 |
def login(username,password):
"""
    Log in with the account (email) and password, with "remember me" enabled.
:param username:
:param password:
:return:
"""
global a
a.get("https://account.fangcloud.com/login")
_token = a.b.find("input",{"name":"_token"})["value"]
_fstate = a.b.find("input",{"name":"_fstate"})["value"]
x=a.post("https://account.fangcloud.com/login?_fstate="+_fstate,
"""{"login":"%s","password":"%s","remember_login":true,"login_type":"web","_fstate":"%s"}"""%(username,password, _fstate),
headers={"X-CSRF-TOKEN":_token,"X-Requested-With":"XMLHttpRequest", "Content-Type":"application/json"})
result=x.json()
if "redirect" not in result:
raise Exception("login failed! maybe password incorrect or need captcha")
url = result["redirect"]
x=a.get(url, result=False, o=True, allow_redirects=True)
assert 'apps/files' in x.url
return True | 7003be533ccb3edaff42d3f47c6882b1646a22d2 | 740 |
def rules():
"""Displays a markdown doc describing the predictive modeling contest.
Note ./content/contest/<url calling path>.md must be modified for contest.
"""
file = open('./contest/content/rules.md', 'r')
rawText = file.read()
file.close()
content = Markup(markdown(rawText,
extensions=['markdown.extensions.fenced_code', 'markdown.extensions.tables']))
return render_template('markdowntemplate.html',
title='Rules',
content=content) | 9a5b44b87fbcee378a958586511851ef455d7988 | 741 |
import typing as t
import requests
def get(server: t.Union[Server, str], view_or_url: str, view_data: Kwargs = None, session: requests.Session = None,
params: Kwargs = None, **kwargs) -> Response:
"""Sends a GET request."""
return request('get', server, view_or_url, view_data=view_data, session=session, params=params, **kwargs) | e9cecdb76f6b340a258c5bb1ca3be8cb9e257764 | 743 |
import pandas
def to_float(dataframe, column):
"""General Function to return floats"""
dataframe[column] = dataframe[column].dropna().astype(float)
dataframe[column] = dataframe[column].where(pandas.notnull(dataframe[column]), None)
return dataframe[column] | 2fdae992ec88e40c1e8c67711373d28390569166 | 746 |
import torch
def dadbt(a: torch.Tensor, diag_mat: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""Batched computation of diagonal entries of (A * diag_mat * B^T) where A and B are batches of square matrices and
diag_mat is a batch of diagonal matrices (represented as vectors containing diagonal entries)
:param a: batch square matrices
    :param diag_mat: batch of diagonal matrices (represented as vectors containing the diagonal entries)
:param b: batch of square matrices
:returns diagonal entries of A * diag_mat * B^T"""
return bmv(a * b, diag_mat) | bb2f0c9858130c556ffc5c7dfd4e337e0437aadd | 749 |
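`bmv` is not defined in this snippet; assuming it is a batched matrix-vector product with the signature sketched below (a stand-in, not the original helper), the identity diag(A·diag(d)·Bᵀ) = (A∘B)·d can be checked against an explicit computation.

```python
import torch

def bmv(mat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
    # Minimal stand-in for the batched matrix-vector product assumed by dadbt.
    return (mat @ vec.unsqueeze(-1)).squeeze(-1)

a = torch.randn(3, 4, 4)
b = torch.randn(3, 4, 4)
d = torch.randn(3, 4)
expected = torch.diagonal(a @ torch.diag_embed(d) @ b.transpose(-1, -2),
                          dim1=-2, dim2=-1)
assert torch.allclose(dadbt(a, d, b), expected, atol=1e-5)
```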
from skimage.util import invert
def local_minima(image, footprint=None, connectivity=None, indices=False,
allow_borders=True):
"""Find local minima of n-dimensional array.
The local minima are defined as connected sets of pixels with equal gray
level (plateaus) strictly smaller than the gray levels of all pixels in the
neighborhood.
Parameters
----------
image : ndarray
An n-dimensional array.
footprint : ndarray, optional
The footprint (structuring element) used to determine the neighborhood
of each evaluated pixel (``True`` denotes a connected pixel). It must
be a boolean array and have the same number of dimensions as `image`.
If neither `footprint` nor `connectivity` are given, all adjacent
pixels are considered as part of the neighborhood.
connectivity : int, optional
A number used to determine the neighborhood of each evaluated pixel.
Adjacent pixels whose squared distance from the center is less than or
equal to `connectivity` are considered neighbors. Ignored if
`footprint` is not None.
indices : bool, optional
If True, the output will be a tuple of one-dimensional arrays
representing the indices of local minima in each dimension. If False,
the output will be a boolean array with the same shape as `image`.
allow_borders : bool, optional
If true, plateaus that touch the image border are valid minima.
Returns
-------
minima : ndarray or tuple[ndarray]
If `indices` is false, a boolean array with the same shape as `image`
is returned with ``True`` indicating the position of local minima
(``False`` otherwise). If `indices` is true, a tuple of one-dimensional
arrays containing the coordinates (indices) of all found minima.
See Also
--------
skimage.morphology.local_maxima
skimage.morphology.h_maxima
skimage.morphology.h_minima
Notes
-----
This function operates on the following ideas:
1. Make a first pass over the image's last dimension and flag candidates
for local minima by comparing pixels in only one direction.
If the pixels aren't connected in the last dimension all pixels are
flagged as candidates instead.
For each candidate:
2. Perform a flood-fill to find all connected pixels that have the same
gray value and are part of the plateau.
3. Consider the connected neighborhood of a plateau: if no bordering sample
has a smaller gray level, mark the plateau as a definite local minimum.
Examples
--------
>>> from skimage.morphology import local_minima
>>> image = np.zeros((4, 7), dtype=int)
>>> image[1:3, 1:3] = -1
>>> image[3, 0] = -1
>>> image[1:3, 4:6] = -2
>>> image[3, 6] = -3
>>> image
array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, -1, -1, 0, -2, -2, 0],
[ 0, -1, -1, 0, -2, -2, 0],
[-1, 0, 0, 0, 0, 0, -3]])
Find local minima by comparing to all neighboring pixels (maximal
connectivity):
>>> local_minima(image)
array([[False, False, False, False, False, False, False],
[False, True, True, False, False, False, False],
[False, True, True, False, False, False, False],
[ True, False, False, False, False, False, True]])
>>> local_minima(image, indices=True)
(array([1, 1, 2, 2, 3, 3]), array([1, 2, 1, 2, 0, 6]))
Find local minima without comparing to diagonal pixels (connectivity 1):
>>> local_minima(image, connectivity=1)
array([[False, False, False, False, False, False, False],
[False, True, True, False, True, True, False],
[False, True, True, False, True, True, False],
[ True, False, False, False, False, False, True]])
and exclude minima that border the image edge:
>>> local_minima(image, connectivity=1, allow_borders=False)
array([[False, False, False, False, False, False, False],
[False, True, True, False, True, True, False],
[False, True, True, False, True, True, False],
[False, False, False, False, False, False, False]])
"""
return local_maxima(
image=invert(image),
footprint=footprint,
connectivity=connectivity,
indices=indices,
allow_borders=allow_borders
) | 9adaee108130b760077ba0c3698d07f37454d474 | 750 |
import torch
def kron(a, b):
"""
Kronecker product of matrices a and b with leading batch dimensions.
    Batch dimensions are broadcast, so the batch shapes must be broadcast-compatible.
:type a: torch.Tensor
:type b: torch.Tensor
:rtype: torch.Tensor
"""
    siz1 = torch.Size(torch.tensor(a.shape[-2:]) * torch.tensor(b.shape[-2:]))
res = a.unsqueeze(-1).unsqueeze(-3) * b.unsqueeze(-2).unsqueeze(-4)
siz0 = res.shape[:-4]
return res.reshape(siz0 + siz1) | b108e123817692f70f0e501c7a515171a3b08270 | 751 |
import datetime
import pytz
def process_query(request):
"""the function is called upon "news/" URL. it processes the query and calls the apifunction to fetch news articles
from third party news APIs.
If a query is new, it makes a fresh request to third party APIs and returns the query results and adds the
query and query results into the database.
Otehrwise, if the query is repeated, it fetches the results from the database; if it has not passed the expiry time( set to 120s).
If it has passed the expiry team a new request is sent to the third party news APIs and the results are updated in the database.
Args:
request (GET)
Returns:
json: returns the list of query results in the form of json object.
"""
if request.method =='POST':
return JsonResponse({'Response': 'Invalid Request type, please use "GET"'}, status=400)
try:
keyword = request.GET.get('query')
request_time = datetime.datetime.now(pytz.UTC)
obj, created = Query.objects.get_or_create(
keyword = keyword
)
if created==True:
add_to_db(obj,keyword)
elif (request_time - obj.query_time).seconds > EXPIRY_TIME:
obj.query_result.all().delete()
Query.objects.filter(keyword = keyword).update(query_time = request_time)
add_to_db(obj,keyword)
response=[]
for item in obj.query_result.all():
response.append(item.to_dict())
return JsonResponse(response, safe = False, status=200)
except Exception as e:
print(e)
return JsonResponse({'Response': 'Something went wrong'}, status=400) | 000b6e4f47e06e29ff0e0ba1ba9d3311e9f40263 | 752 |
def fix_header(params, recipe, infile=None, header=None,
raise_exception=False, **kwargs):
"""
    Instrument-specific header fixes are defined in pseudo_const.py for an
    instrument and called here (the function in pseudo_const.py is HEADER_FIXES)
:param params:
:param infile:
:return:
"""
# deal with no header
if header is None:
header = infile.header
hdict = infile.hdict
filename = infile.filename
has_infile = True
else:
has_infile = False
hdict = Header()
filename = None
# load pseudo constants
pconst = constants.pload(params['INSTRUMENT'])
# use pseudo constant to apply any header fixes required (specific to
# a specific instrument) and update the header
try:
header, hdict = pconst.HEADER_FIXES(params=params, recipe=recipe,
header=header, hdict=hdict,
filename=filename,
**kwargs)
except lang.drs_exceptions.DrsHeaderError as e:
if raise_exception:
raise e
else:
eargs = [e.key, e.filename]
WLOG(params, 'error', TextEntry('01-001-00027', args=eargs))
# if the input was an infile return the infile back
if has_infile:
# return the updated infile
infile.header = header
infile.hdict = hdict
return infile
# else return the header (assuming input was a header only)
else:
# else return the header
return header, hdict | 37550406cb76b77ccb1d64e85f5f192989ad4bcd | 753 |
from typing import List
def encode(df: pd.DataFrame,
cols: List[str],
drop_first: bool = True) -> pd.DataFrame:
"""Do a dummy encoding for the columsn specified
Args:
df: DataFrame
cols: List of columns to perform dummy encoding on
drop_first: parameter for dummy encoding
"""
dfs = []
for col in df.columns:
ds = df[col]
if col not in cols:
dfs.append(ds.to_frame())
else:
dfs.append(pd.get_dummies(ds, prefix=col, drop_first=drop_first))
return pd.concat(dfs, axis=1) | 9299378d67c69ebd964a7187431c67c12556c43b | 754 |
from typing import Optional
def binary_search(pool: list, target) -> Optional[int]:
"""Search for a target in a list, using binary search.
Args:
pool (list): a pool of all elements being searched.
target: the target being searched.
    Returns:
        int: the index of the target in the sorted pool, or None if absent.
    """
    sorted_pool = sorted(pool)
    low = 0
    high = len(sorted_pool) - 1
    while low <= high:
        mid = (low + high) // 2
        if sorted_pool[mid] == target:
            return mid
        if sorted_pool[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return None | 7e7ef70126e02b3dc706b3b88bd950aa6322904e | 755 |
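A usage sketch for the corrected search above; note that the returned index refers to the sorted copy of the pool.

```python
assert binary_search([5, 1, 9, 3], 9) == 3      # sorted pool is [1, 3, 5, 9]
assert binary_search([5, 1, 9, 3], 1) == 0
assert binary_search([5, 1, 9, 3], 4) is None   # absent values return None
```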
def load_array(filename):
"""
Given a valid image, load the image and return the pixels as a numpy array
:param filename: The filename as a string
:returns: A numpy array which stores the pixel data from a snowmap
    Convention is as follows: pixels that read (0, 0, 0, 255) are read as snow-free and get the value 0;
    pixels that read (0, 0, 0, 0) are treated as no-data and get the value -1, and pixels that read (255, 255, 255, 255)
    are read as snow and get the value 1
"""
image = Image.open(filename)
image.load()
height, width = image.size
snowmap = np.zeros((height, width), dtype=int)
for row in range(height):
for col in range(width):
a = image.getpixel((row,col))
if a == (0, 0, 0, 255): # This is no snow
snowmap[row, col] = 0
elif a == (0, 0, 0, 0): # this is no data
snowmap[row, col] = -1
elif a == (255, 255, 255, 255): # that's for snow
snowmap[row, col] = 1
else:
raise ValueError("Unknown Pixel value {}".format(a))
return snowmap | 829e97936fb63486bc1c373bdf283f02dbb833bd | 756 |
def create_anchors_3d_stride(feature_size,
anchor_strides,
sizes=[1.6, 3.9, 1.56],
anchor_offsets=[0, -20, -1], # [0.2, -39.8, -1.78],
rotations=[0, 1.57], # np.pi / 2
dtype=np.float32):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
# almost 2x faster than v1
x_stride, y_stride, z_stride = anchor_strides
x_offset, y_offset, z_offset = anchor_offsets
z_centers = np.arange(feature_size[0], dtype=dtype)
y_centers = np.arange(feature_size[1], dtype=dtype)
x_centers = np.arange(feature_size[2], dtype=dtype)
z_centers = z_centers * z_stride + z_offset
y_centers = y_centers * y_stride + y_offset
x_centers = x_centers * x_stride + x_offset
sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
rotations = np.array(rotations, dtype=dtype)
rets = np.meshgrid(
x_centers, y_centers, z_centers, rotations, indexing='ij')
tile_shape = [1] * 5
tile_shape[-2] = int(sizes.shape[0])
for i in range(len(rets)):
rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
rets[i] = rets[i][..., np.newaxis] # for concat
sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
sizes = np.tile(sizes, tile_size_shape)
rets.insert(3, sizes)
ret = np.concatenate(rets, axis=-1)
return np.transpose(ret, [2, 1, 0, 3, 4, 5]) | 2d6d31a45c5f2f0a9adfe39195ae37719d78fd73 | 757 |
def sample_unit(name='oz'):
"""Create and return a sample unit"""
return Unit.objects.create(name=name) | affa250d46b5b50e69af013035f6b73b45b787b4 | 758 |
def CreateBlendCurve2(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1, multiple=False):
"""
Makes a curve blend between 2 curves at the parameters specified
with the directions and continuities specified
Args:
curve0 (Curve): First curve to blend from
t0 (double): Parameter on first curve for blend endpoint
reverse0 (bool): If false, the blend will go in the natural direction of the curve.
If true, the blend will go in the opposite direction to the curve
continuity0 (BlendContinuity): Continuity for the blend at the start
curve1 (Curve): Second curve to blend from
t1 (double): Parameter on second curve for blend endpoint
reverse1 (bool): If false, the blend will go in the natural direction of the curve.
If true, the blend will go in the opposite direction to the curve
continuity1 (BlendContinuity): Continuity for the blend at the end
Returns:
Curve: The blend curve on success. None on failure
"""
url = "rhino/geometry/curve/createblendcurve-curve_double_bool_blendcontinuity_curve_double_bool_blendcontinuity"
if multiple: url += "?multiple=true"
args = [curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1]
if multiple: args = list(zip(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | 08553ffbc307e792f5f794812c6b54d6bc995766 | 759 |
def make_raster_from_images(modeladmin, request, queryset):
"""Make a raster of the selected `ImageMeta`s.
This is an action on `ImageMeta`
"""
imset = make_image_set_from_images(modeladmin, request, queryset)
return _make_raster_from_image_set(imset) | 1d5d855d986ee37875b85ec119dacd51f4af5e69 | 761 |
def is_rotation(first, second):
"""Given two strings, is one a rotation of the other."""
if len(first) != len(second):
return False
double_second = second + second
return first in double_second | f02576761014e1dc395f88f937dfdd0de15508d2 | 762 |
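The classic rotation check in action: a string is a rotation of another exactly when it occurs inside that other string doubled.

```python
assert is_rotation("waterbottle", "erbottlewat")
assert not is_rotation("waterbottle", "bottlewaterx")  # lengths differ
```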
def bin_entities(uri_set, delimiter="/", splitpos=-1):
""" Takes iteratable elemts and splits them according to the position
(splitpos) of the delimiter. The first part is used as a key,
whereas the second appended to a list connected to the former key.
return: dict {key1: [id11, id12, id13, …], key2: […}}
"""
ent_dict = dict()
for res in uri_set:
# split entity up to splitpos using delimiter
entity = delimiter.join(res.split(delimiter)[:splitpos])
# id_ is the remainder
id_ = delimiter.join(res.split(delimiter)[splitpos:])
if entity in ent_dict:
ent_dict[entity].append(id_)
else:
ent_dict[entity] = [id_]
return ent_dict | fcbcddbff909d74fe14fe7cb3a21560c8ca9549a | 763 |
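A worked example with the default `/` delimiter and `splitpos=-1`, using hypothetical example.org URIs: everything up to the last slash becomes the key, the final segment the id.

```python
uris = [
    "http://example.org/person/1",
    "http://example.org/person/2",
    "http://example.org/place/3",
]
assert bin_entities(uris) == {
    "http://example.org/person": ["1", "2"],
    "http://example.org/place": ["3"],
}
```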
def frequency(state_1, state_2):
""" The frequency interval between state_1 and state_2 in GHz.
"""
return 1e-9 * interval(state_1, state_2) / h | 6276f946e08d9b2e115f004395b5cf420f048c68 | 764 |
from collections import OrderedDict
import dns.resolver
def dac(dns_val=None) -> OrderedDict:
"""
Domain Availability Checker (DNS lookup)
    :param dns_val: DNS name / URL string
:return: Availability [True, False]
"""
ip_values = None
avail = False
if dns_val is None:
raise ValueError("Sorry, DNS is needed")
if isinstance(dns_val, str) is False:
raise TypeError("Sorry, \'DNS\' must be type \'str\'")
try:
output = dns.resolver.resolve(dns_val, 'A')
ip_values = [ipval.to_text() for ipval in output]
except dns.resolver.NXDOMAIN:
avail = True
return OrderedDict([
("DNS", dns_val),
("IP", ip_values),
("AVAIL", avail),
]) | d2c4097686f2edb17fbd674098592ec797ecac46 | 765 |
def display_timestamp(num_seconds):
"""get a string to conveniently display a timestamp"""
seconds = num_seconds % 60
minutes = int(num_seconds / 60) % 60
hrs = int(num_seconds / 3600)
return "{}:{}:{}".format(hrs, minutes, seconds) | bdcc34ade38855df910d5005f6dac9b5e826f543 | 766 |
def get_bloglist(content_dict={}):
"""
    If the incoming command is "-m", return the blog's list of articles.
:param content_dict:
:return:
"""
bloglist = crawlBlog.get_archives(5)
tousername = content_dict["FromUserName"]
fromusername = content_dict["ToUserName"]
return WeixinUtils.make_news(bloglist, tousername, fromusername) | 541fbf7f10f137b995fd0d9a91e8bc651b90b697 | 767 |
from bisect import bisect
import numpy as np
def get_closest(arr, value):
"""
Return the array values closest to the request value, or +/-inf if
the request value is beyond the range of the array
Parameters
----------
arr : sequence
array of values
value : numeric
Returns
-------
2-tuple: largest value in array less than value (or -inf) and
smallest value in array larger than value (or +inf)
"""
arr_sorted = sorted(arr)
index = bisect(arr_sorted, value)
lower_limit = -np.inf if index == 0 else arr_sorted[index - 1]
upper_limit = np.inf if index == len(arr_sorted) else arr_sorted[index]
return lower_limit, upper_limit | e59216c7d0332ae91e75583b7dc42f956c785e4c | 768 |
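Bracketing examples for `get_closest`: values inside the range get their nearest neighbour on each side, values outside fall back to plus or minus infinity.

```python
assert get_closest([1, 5, 10], 7) == (5, 10)
assert get_closest([1, 5, 10], 0) == (float("-inf"), 1)
assert get_closest([1, 5, 10], 12) == (10, float("inf"))
```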
def filename(config, key, ext = '.h5', set = ''):
"""
Get the real file name by looking up the key in the config and suffixing.
:param key: key to use in the config
:type key: str
:param ext: extension to use
:type ext: str
:param set: set name
:type set: str
:return: filepath
:rtype: str
"""
name = config[key] + '_'
if set:
name += set + '_'
    name += str(config['multiplier']) + '_' + str(config['height']) + 'x' + str(config['width']) + 'x' + str(config['depth'])
    if ext:
name += ext
return name | f389a48e7e06a31722423857814149f474e46316 | 769 |
def isUp():
""" Whether this docker container is up """
return 'True' | e99c32dee79c4df516193c1a9d3fb8d34f8b0abc | 771 |
def rand_perm_(img, x, y, x_max, y_max, kernel, flatten):
"""
Applies INPLACE the random permutation defined in `kernel` to the image
`img` on the zone defined by `x`, `y`, `x_max`, `y_max`
:param img: Input image of dimension (B*C*W*H)
:param x: offset on x axis
:param y: offset on y axis
:param x_max: end of the zone to permute on the x axis
:param y_max: end of the zone to permute on the y axis
:param kernel: LongTensor of dim 1 containing one value for each point in
the zone to permute
:return: the permuted image.
"""
assert img.dim() == 4
if img.size(1) != 1:
raise NotImplementedError('Not Implemented for multi-channel images')
zone = img[:, :, x:x_max, y:y_max].contiguous()
img[:, :, x:x_max, y:y_max] = zone.view(zone.size(0), -1)\
.index_select(1, kernel).view(zone.size())
return img.view(img.size(0), -1) if flatten else img | c838840c2428320825486c0cdacf23f5fb40a9a6 | 772 |
import torch
def test(model, data_loader, use_cuda, loss_func):
"""
The function to evaluate the testing data for the trained classifiers
:param model:
:param data_loader:
:param use_cuda:
    :param loss_func: loss function used to accumulate the total test loss
    :return: (results_df, results) with per-slice predictions and aggregate metrics
"""
softmax = torch.nn.Softmax(dim=1)
columns = ['participant_id', 'session_id', 'slice_id', 'true_label', 'predicted_label', 'proba0', 'proba1']
results_df = pd.DataFrame(columns=columns)
total_loss = 0
if use_cuda:
model.cuda()
model.eval() # set the model to evaluation mode
torch.cuda.empty_cache()
with torch.no_grad():
for i, data in enumerate(data_loader):
if use_cuda:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
output = model(imgs)
normalized_output = softmax(output)
loss = loss_func(output, labels)
total_loss += loss.item()
_, predicted = torch.max(output.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
row = [sub, data['session_id'][idx], data['slice_id'][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]
row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
results_df = pd.concat([results_df, row_df])
del imgs, labels, output
torch.cuda.empty_cache()
# calculate the balanced accuracy
results = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
results_df.reset_index(inplace=True, drop=True)
results['total_loss'] = total_loss
torch.cuda.empty_cache()
return results_df, results | 37859b0f477326a8a606205c84ea0827d43925d8 | 773 |
def is_fundamental_error(path, error):
"""
    Returns True if the error is not field related (e.g. it is type related).
"""
return not is_any_field_error(path, error) | 8eef548f9520cbd92ff3989f2f11b180e8099981 | 774 |
from copy import deepcopy
def migrate_to_latest(json_dict, info):
"""Migrates the STAC JSON to the latest version
Args:
json_dict (dict): The dict of STAC JSON to identify.
info (STACJSONDescription): The info from
:func:`~pystac.serialzation.identify.identify_stac_object` that describes
the STAC object contained in the JSON dict.
Returns:
dict: A copy of the dict that is migrated to the latest version (the
version that is pystac.STAC_VERSION)
"""
result = deepcopy(json_dict)
version = info.version_range.latest_valid_version()
if version != STAC_VERSION:
_object_migrations[info.object_type](result, version, info)
for ext in info.common_extensions:
_extension_migrations[ext](result, version, info)
result['stac_version'] = STAC_VERSION
return result | 0e159ea565038a4b8fa8b2525c8adc35cbd97dc6 | 776 |
def contact_infectivity_symptomatic_20x50():
"""
Real Name: b'contact infectivity symptomatic 20x50'
Original Eqn: b'contacts per person symptomatic 20x50*infectivity per contact'
Units: b'1/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_symptomatic_20x50() * infectivity_per_contact() | b6472192451dcf484cbe7ac802c06750c3d63fff | 777 |
def smart_wn_search(wn, query, pos=None, report_file=None, compact=True, lang='eng', with_eng=True):
""" Search synset in WordNet Gloss Corpus by term"""
if report_file is None:
report_file = TextReport() # Default to stdout
report_file.print("Search Wordnet: Query=%s | POS=%s" % (query, pos))
with wn.ctx() as ctx:
synsets = search_wn_full_text(wn, query, pos=pos, lang=lang, ctx=ctx)
if with_eng and lang != 'eng':
synsets_eng = SynsetCollection()
for synset in synsets:
synset_eng = wn.get_synset(synset.ID, lang='eng', ctx=ctx)
synsets_eng.add(synset_eng)
dump_synsets(synsets, synsets_eng, report_file=report_file, compact=compact)
else:
dump_synsets(synsets, report_file=report_file, compact=compact)
return synsets | 4d600ca77c6e4012225dfc4b1212739542817c83 | 778 |
def _parse_integrator(int_method):
"""parse the integrator method to pass to C"""
#Pick integrator
if int_method.lower() == 'rk4_c':
int_method_c= 1
elif int_method.lower() == 'rk6_c':
int_method_c= 2
elif int_method.lower() == 'symplec4_c':
int_method_c= 3
elif int_method.lower() == 'symplec6_c':
int_method_c= 4
elif int_method.lower() == 'dopr54_c':
int_method_c= 5
elif int_method.lower() == 'dop853_c':
int_method_c= 6
else:
int_method_c= 0
return int_method_c | 20a44b596860fdaa72b5aa37c7853bbcf47c3c91 | 779 |
import time
from multiprocessing import Process, cpu_count
from pygit2 import Repository, GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE
def get_diffusion_features(repo_path, branch):
"""
Function that extracts the first commits diffusion features. It then starts
a number of processes(equal to the number of cores on the computer), and then
distributes the remaining commits to them.
"""
repo = Repository(repo_path)
head = repo.references.get(branch)
commits = list(
repo.walk(head.target, GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE))
initial = commits[0]
init_tree = initial.tree
    # Count initial total lines of code
init_total_additions = 0
init_file_addtions = []
init_subdirectories = 0
init_modules = 0
for entry in init_tree:
if entry.type == "tree":
added, file_additions, subdirectories = parse_tree(entry, repo)
init_modules += 1
init_file_addtions.extend(file_additions)
init_total_additions += added
init_subdirectories += subdirectories
else:
try:
additions = len(str(repo[entry.id]).split('\n'))
init_total_additions += additions
init_file_addtions.append(additions)
except:
continue
diffusion_features = []
diffusion_features.append(initial.hex)
diffusion_features.append(init_subdirectories)
diffusion_features.append(init_modules)
diffusion_features.append(
count_entropy(init_file_addtions, init_total_additions))
# Check how many processes that could be spawned
cpus = cpu_count()
print("Using {} cpus...".format(cpus))
    # Divide the commits equally between the processes.
quote, remainder = divmod(len(commits), cpus)
processes = [
Process(
target=parse_diffusion_features,
args=(i, repo_path, branch, i * quote + min(i, remainder),
(i + 1) * quote + min(i + 1, remainder))) for i in range(cpus)
]
for process in processes:
process.start()
start_time = time.time()
for process in processes:
process.join()
end_time = time.time()
print("Done")
print("Overall processing time {}".format(end_time - start_time))
# Assemble the results
features = []
for _, feat in RES.items():
features.extend(feat)
features = list(reversed(features))
features.append(diffusion_features)
return features | cd94b722c0f98d55206a5c5cad32e6f855ae304d | 780 |
import numpy as np
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
"""
Builds model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens. And creates a mask from the two sequences passed
to be used in a sequence-pair classification task.
A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
A BERT sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If only one sequence, only returns the first portion of the mask (0's).
Args:
        example(obj:`list[str]`): List of input data, containing the text and its label if it has one.
tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer`
which contains most of the methods. Users should refer to the superclass for more information regarding methods.
label_list(obj:`list[str]`): All the labels that the data has.
        max_seq_length(obj:`int`): The maximum total input sequence length after tokenization.
Sequences longer than this will be truncated, sequences shorter will be padded.
is_test(obj:`False`, defaults to `False`): Whether the example contains label or not.
Returns:
input_ids(obj:`list[int]`): The list of token ids.
token_type_ids(obj: `list[int]`): List of sequence pair mask.
label(obj:`numpy.array`, data type of int64, optional): The input label if not is_test.
"""
    # NOTE: assumes `example` holds the raw text when `is_test` is True and a
    # (text, label) pair otherwise, per the docstring.
    if is_test:
        text = example
    else:
        text, label = example[0], example[1]
encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length)
input_ids = encoded_inputs["input_ids"]
token_type_ids = encoded_inputs["token_type_ids"]
if not is_test:
# create label maps
label_map = {}
for (i, l) in enumerate(label_list):
label_map[l] = i
label = label_map[label]
label = np.array([label], dtype="int64")
return input_ids, token_type_ids, label
else:
return input_ids, token_type_ids | 986503a34f055b890f5979ca146708c1b45a45fe | 781 |
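# Hedged usage sketch for convert_example(): assumes PaddleNLP is installed and a
# pretrained tokenizer can be downloaded; the label list and the (text, label)
# pair below are illustrative only.
from paddlenlp.transformers import ErnieTokenizer
_tokenizer = ErnieTokenizer.from_pretrained("ernie-1.0")
_labels = ["negative", "positive"]
input_ids, token_type_ids, label = convert_example(
    ("这个产品很好用", "positive"), _tokenizer, _labels, max_seq_length=64)
print(len(input_ids), label)      # e.g. 9 [1]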
import yaml
import json
def load_config_file(filepath):
"""
Load a configuration as an options dict.
Format of the file is given with filepath extension.
:param filepath:
:type filepath:
:return:
:rtype:
"""
if filepath.endswith('.json'):
with open(filepath) as config_file_data:
return json.load(config_file_data)
if filepath.endswith('.yaml') or filepath.endswith('.yml'):
try:
with open(filepath) as config_file_data:
                return yaml.safe_load(config_file_data)
except ImportError: # pragma: no cover
raise ConfigurationException('Configuration file extension is not supported. '
'PyYAML should be installed to support "%s" file' % (
filepath,))
try:
# Try to load input as JSON
return json.loads(filepath)
except: # pylint: disable=bare-except
pass
raise ConfigurationException('Configuration file extension is not supported for "%s" file.' % (filepath,)) | 60b5ea592f8f101be279cfe5897e70fbef11f9b0 | 782 |
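# Usage sketch for load_config_file(): a temporary JSON file and a raw JSON
# string both resolve to the same options dict.
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as _fh:
    json.dump({"workers": 4}, _fh)
print(load_config_file(_fh.name))          # {'workers': 4}
print(load_config_file('{"workers": 4}'))  # string falls back to json.loads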
def coordinator_setup(start_heart=True):
"""
Sets up the client for the coordination service.
URL examples for connection:
zake://
file:///tmp
redis://username:password@host:port
mysql://username:password@host:port/dbname
"""
url = cfg.CONF.coordination.url
lock_timeout = cfg.CONF.coordination.lock_timeout
member_id = get_member_id()
if url:
coordinator = coordination.get_coordinator(url, member_id, lock_timeout=lock_timeout)
else:
# Use a no-op backend
# Note: We don't use tooz to obtain a reference since for this to work we would need to
# register a plugin inside setup.py entry_point and use python setup.py develop for tests
# to work
coordinator = NoOpDriver(member_id)
coordinator.start(start_heart=start_heart)
return coordinator | b39b736e39b6c98badd148ac111b01dae85eea2f | 783 |
def _to_jraph(example):
"""Converts an example graph to jraph.GraphsTuple."""
example = jax.tree_map(lambda x: x._numpy(), example) # pylint: disable=protected-access
edge_feat = example['edge_feat']
node_feat = example['node_feat']
edge_index = example['edge_index']
labels = example['labels']
num_nodes = example['num_nodes']
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
return jraph.GraphsTuple(
n_node=num_nodes,
n_edge=np.array([len(edge_index) * 2]),
nodes=node_feat,
edges=np.concatenate([edge_feat, edge_feat]),
# Make the edges bidirectional
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
# Keep the labels with the graph for batching. They will be removed
# in the processed batch.
globals=np.expand_dims(labels, axis=0)) | e3bc4bb468ae4e6dfbb4387c0a913a87ba76ac26 | 784 |
def get_urls(page_links):
    """Given a list of game page links, return the JSON data URL for each game."""
    urls = []
    for link in page_links:
        # Mask the literal 'v3' so its digit is not picked up as part of the game id.
        masked = link.replace('v3', 'VV')
        game_id = ''.join(char for char in masked if char.isdigit())
        json_url = f'http://www.afa.com.ar/deposito/html/v3/htmlCenter/data/deportes/futbol/primeraa/events/{game_id}.json'
        urls.append(json_url)
    return urls | 68c6796ad5a77676674252a0060776eabc4fb8e0 | 785
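# Usage sketch for get_urls(): the page link below is illustrative; only the digits
# outside the literal 'v3' segment form the game id (here '12345').
_links = ['http://www.afa.com.ar/deposito/html/v3/htmlCenter/pages/evento-12345.html']
print(get_urls(_links)[0])
# -> 'http://www.afa.com.ar/deposito/html/v3/htmlCenter/data/deportes/futbol/primeraa/events/12345.json'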
def KK_RC79_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
    """
    # Series resistance plus 79 parallel RC (Voigt) elements:
    # Z(w) = Rs + sum_k Rk / (1 + j*w*tau_k), k = 1..79
    Rs = params["Rs"]
    return Rs + sum(
        params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k]) for k in range(79)
    ) | 386f84adf3dd4a1b122ef1ef9572f1d3733fb94c | 786
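# Hedged usage sketch for KK_RC79_fit(): in a PyEIS-style fit `params` is an lmfit
# Parameters object, but any mapping with the same keys works for a quick
# evaluation; the values below are illustrative only.
import numpy as np
_w = 2 * np.pi * np.logspace(-1, 5, 7)    # angular frequencies
_taus = np.logspace(-6, 2, 79)            # one time constant per RC element
_params = {"Rs": 10.0}
_params.update({"R%d" % (k + 1): 0.5 for k in range(79)})
_Z = KK_RC79_fit(_params, _w, _taus)
print(_Z.shape, _Z.dtype)                 # (7,) complex128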
def _resampling_from_str(resampling: str) -> Resampling:
"""
Match a rio.warp.Resampling enum from a string representation.
:param resampling: A case-sensitive string matching the resampling enum (e.g. 'cubic_spline')
:raises ValueError: If no matching Resampling enum was found.
:returns: A rio.warp.Resampling enum that matches the given string.
"""
# Try to match the string version of the resampling method with a rio Resampling enum name
for method in rio.warp.Resampling:
if str(method).replace("Resampling.", "") == resampling:
resampling_method = method
break
# If no match was found, raise an error.
else:
raise ValueError(
f"'{resampling}' is not a valid rasterio.warp.Resampling method. "
f"Valid methods: {[str(method).replace('Resampling.', '') for method in rio.warp.Resampling]}"
)
return resampling_method | e0e4020361313205fd0afc90e19bb02ebe0d5abb | 787 |
def _call_twitter_api(query):
"""helper function to call twitter api
Args:
query (str): query string made by _preprocess_query function
Returns:
        generator: generator yielding the scraped tweet objects
"""
return sntwitter.TwitterSearchScraper(query=query).get_items() | 3b75150e7a83e7dfdbc5bd836745af13d3b5a90f | 788 |
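# Hedged usage sketch for _call_twitter_api(): assumes snscrape is importable as
# `snscrape.modules.twitter as sntwitter` (the alias used above) and that Twitter
# search access works; only the first few items of the generator are consumed.
for _i, _tweet in enumerate(_call_twitter_api("from:jack since:2021-01-01")):
    print(_tweet.url)
    if _i >= 4:
        break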
from typing import List
import re
def parse_superfamilies(filepath: str) -> List[Method]:
"""
Parse the CathNames.txt file distributed with CATH-Gene3D releases
:param filepath:
:return:
"""
signatures = []
reg = re.compile(r"^(\d\.\d+\.\d+\.\d+)\s+([a-zA-Z0-9]+)\s+:(.*)$")
with open(filepath, "rt") as fh:
for line in fh:
if line[0] == '#':
continue
m = reg.match(line)
if m is None:
continue
supfam, model, name = m.groups()
accession = f"{_PREFIX}{supfam}"
m = Method(accession, _TYPE_SUPFAM, description=name)
signatures.append(m)
return signatures | db91d288133ed64b27ffdc1852ca5d62390792eb | 789 |
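# Illustrative check of the CathNames.txt regex above (the line layout
# "<node number>  <representative domain>  :<name>" is an assumption); it reuses
# the `re` import from the parser.
_line = "1.10.8.10    1oaiA00    :Helicase, Ruva Protein; domain 3"
_m = re.match(r"^(\d\.\d+\.\d+\.\d+)\s+([a-zA-Z0-9]+)\s+:(.*)$", _line)
print(_m.groups())   # ('1.10.8.10', '1oaiA00', 'Helicase, Ruva Protein; domain 3')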
def Weekday(datetime):
"""Returns a weekday for display e.g. Mon."""
return datetime.strftime('%a') | bae413f0fa86f9e27bd6d7f6ee4480a6ddd564e7 | 790 |