content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
---|---|---|
def _load_edge_data(graph, regions):
"""Load and return all relevant edges from the graph."""
has_seat = _load_edges_from_query(
graph,
'SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Seat')
# The edges in the existing dataset point from parent to child region / settlement.
# In the desired dataset, we want the edge to be the other way, so we switch
# the "in_rid" and "out_rid" names.
has_parent_region = _load_edges_from_query(
graph, '''
SELECT inV().@rid AS out_rid, outV().@rid AS in_rid FROM E WHERE
(
@this INSTANCEOF "Has_Castles" OR
@this INSTANCEOF "Has_Cities" OR
@this INSTANCEOF "Has_Towns" OR
@this INSTANCEOF "Has_Villages" OR
@this INSTANCEOF "Has_Regional+capital" OR
@this INSTANCEOF "Has_Places"
) AND (
inV() INSTANCEOF "Region" OR inV() INSTANCEOF "Settlement"
) AND (
outV() INSTANCEOF "Region" OR outV() INSTANCEOF "Settlement"
)
''') + _load_missing_region_edges(regions)
lives_in = _load_edges_from_query(
graph, '''
SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Place WHERE (
(inV() INSTANCEOF "Region" OR inV() INSTANCEOF "Settlement") AND
outV() INSTANCEOF "Character"
)''')
owes_allegiance_to = _load_edges_from_query(
graph, '''
SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Allegiance WHERE (
(
inV() INSTANCEOF "Character" OR
inV() INSTANCEOF "Noblehouse" OR
inV() INSTANCEOF "Noble_house"
) AND (
outV() INSTANCEOF "Character" OR
outV() INSTANCEOF "Noblehouse" OR
outV() INSTANCEOF "Noble_house"
)
)''')
return set(has_seat), set(has_parent_region), set(lives_in), set(owes_allegiance_to) | d7a002c6214b614e95edc42d850dc9df51a26462 | 3,658,725 |
def get_story_assignee(jira_sheet, process):
""" Accessor for Story Assignee
Accessor method for retrieving the value for Story Assignee on the
JIRA Stories Sheet.
    The process is validated against the set of known QE processes before the
    lookup is performed.
Args:
jira_sheet: A variable holding an Excel Workbook sheet in memory.
process: A variable holding the process of an Issue.
Returns:
        A string value of the Story Assignee
"""
if process in PROCESS_DICT:
return (jira_sheet[PROCESS_DICT.get(process) + "6"].value)
else:
print("""Error: " + process + " is an invalid process.
The following QE processes are acceptable: Complaints, Inquiry,
CAPA, Quality Event, Change Control.\n""") | 3f49c10e540b001cf0f4eebf69a1821a16ec9476 | 3,658,726 |
def predict_mhalo(obs_dsigma, mock_use, logms_mod_tot, logms_mod_inn, sig_logms=None):
"""Halo mass and its scatter in each bin.
Parameters
----------
obs_dsigma: list
List of observed DeltaSigma profiles.
mock_use: numpy array
UniverseMachine mock catalog.
logms_mod_tot : ndarray
Total stellar mass (e.g. M100) predicted by UM.
logms_mod_inn : ndarray
Inner stellar mass (e.g. M10) predicted by UM.
sig_logms: numpy array, optional
Uncertainties of stellar mass. Default: None
"""
# The mock catalog and precomputed mass files for subsamples
return [get_mean_mhalo(mock_use, obs_prof, logms_mod_tot, logms_mod_inn, sig_logms=sig_logms)
for obs_prof in obs_dsigma] | 0c68d773155f997d85361ae663bf0eaae09be258 | 3,658,727 |
def create_agent_model(env, lr=1e-4, h_size=128, epsilon=0.2, beta=1e-3, max_step=5e6, normalize=False, num_layers=2):
"""
Takes a Unity environment and model-specific hyper-parameters and returns the
appropriate PPO agent model for the environment.
:param env: a Unity environment.
:param lr: Learning rate.
    :param h_size: Size of hidden layers.
    :param epsilon: Value for policy-divergence threshold.
    :param beta: Strength of entropy regularization.
    :param max_step: Total number of training steps.
    :return: a sub-class of PPOAgent tailored to the environment.
    """
if num_layers < 1: num_layers = 1
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
if brain.action_space_type == "continuous":
return ContinuousControlModel(lr, brain, h_size, epsilon, max_step, normalize, num_layers)
if brain.action_space_type == "discrete":
return DiscreteControlModel(lr, brain, h_size, epsilon, beta, max_step, normalize, num_layers) | ef43219e9e12ba46c81ed3a39ecb1b82e8953585 | 3,658,728 |
from typing import List
def decode_to_sequence(encoded_sequence: Bytes) -> List[RLP]:
"""
Decodes a rlp encoded byte stream assuming that the decoded data
should be of type `Sequence` of objects.
Parameters
----------
encoded_sequence :
An RLP encoded Sequence.
Returns
-------
decoded : `Sequence[RLP]`
Sequence of objects decoded from `encoded_sequence`.
"""
if encoded_sequence[0] <= 0xF7:
len_joined_encodings = encoded_sequence[0] - 0xC0
ensure(len_joined_encodings < len(encoded_sequence))
joined_encodings = encoded_sequence[1 : 1 + len_joined_encodings]
else:
joined_encodings_start_idx = 1 + encoded_sequence[0] - 0xF7
ensure(joined_encodings_start_idx - 1 < len(encoded_sequence))
        # The length bytes are big-endian and must not have a leading zero
        # byte; a leading zero is a decoding error.
        ensure(encoded_sequence[1] != 0)
len_joined_encodings = Uint.from_be_bytes(
encoded_sequence[1:joined_encodings_start_idx]
)
ensure(len_joined_encodings >= 0x38)
joined_encodings_end_idx = (
joined_encodings_start_idx + len_joined_encodings
)
ensure(joined_encodings_end_idx - 1 < len(encoded_sequence))
joined_encodings = encoded_sequence[
joined_encodings_start_idx:joined_encodings_end_idx
]
return decode_joined_encodings(joined_encodings) | cb33dd9da8deb2096ce3ad205a743c4c22c0f4c8 | 3,658,729 |
def list_field_override_choices(override_map=None, html=True):
"""
This returns either a list of allowable choices, or an HTML-formatted unordered list (default).
"""
if override_map:
        if html:
            choices = '<b>These are the allowable field override choices for field name:</b><ul>'
        else:
            choices = []
        for item in override_map:
            if html:
                choices += '<li>{}</li>'.format(item['field'])
            else:
                choices.append(item['field'])
        if html:
            choices += '</ul>'
        return choices
return None | 9b29493af651d95d67f8bd2c4283f53e737e7c5c | 3,658,730 |
import six
from django.shortcuts import resolve_url
def _safe_resolve_url(url):
"""
Previously, resolve_url_lazy would fail if the url was a unicode object.
See <https://github.com/fusionbox/django-authtools/issues/13> for more
information.
Thanks to GitHub user alanwj for pointing out the problem and providing
this solution.
"""
return six.text_type(resolve_url(url)) | 9b06bc346ebe03b1e5209aa8c108b76aae895089 | 3,658,733 |
def get_metrics(
reset: bool = False, include_custom: bool = True, raise_errors: bool = True,
) -> pd.DataFrame:
"""
Returns table of available metrics used for CV.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> all_metrics = get_metrics()
reset: bool, default = False
When True, will reset all changes made using the ``add_metric``
and ``remove_metric`` function.
include_custom: bool, default = True
Whether to include user added (custom) metrics or not.
raise_errors: bool, default = True
If False, will suppress all exceptions, ignoring models that
couldn't be created.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.get_metrics(
reset=reset, include_custom=include_custom, raise_errors=raise_errors,
) | 1d2ed9372aa6f26cd740e6987a2e94baaef647dc | 3,658,734 |
def get_unique_wikilinks(filepath):
"""Get UNIQUE wikilinks from a md file.
The links' order of appearance in the file IS preserved in the output.
This accounts for:
- Aliases / alt text, so [[Lorem ipsum|L.I.]]
will be represented as 'Lorem ipsum'.
- Header text links, so [[Lorem ipsum#Dummy text]]
will be represented as 'Lorem ipsum'.
Args:
filepath (pathlib Path): Path object representing the file from
which info will be extracted.
Returns:
list of strings
"""
plaintext = _get_ascii_plaintext_from_md_file(filepath, remove_code=True)
wikilinks = _get_unique_wikilinks(plaintext, remove_aliases=True)
return wikilinks | ca02428942d8a555d606a5c4b8190859917c22c7 | 3,658,735 |
def parse_single_example(serialized_example, params):
"""Parses a singel serialized TFExample string."""
decoder = tf_example_decoder.TfExampleDecoder()
data = decoder.decode(serialized_example)
image = data['image']
source_id = data['source_id']
source_id = dataloader_utils.process_source_id(source_id)
height = data['height']
width = data['width']
boxes = data['groundtruth_boxes']
boxes = box_utils.denormalize_boxes(boxes, tf.shape(image)[:2])
classes = data['groundtruth_classes']
is_crowds = data['groundtruth_is_crowd']
areas = data['groundtruth_area']
image = input_utils.normalize_image(image)
image, image_info = input_utils.resize_and_crop_image(
image,
params.retinanet_parser.output_size,
padded_size=input_utils.compute_padded_size(
params.retinanet_parser.output_size, 2 ** params.anchor.max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
anchors = anchor.Anchor(
params.anchor.min_level,
params.anchor.max_level,
params.anchor.num_scales,
params.anchor.aspect_ratios,
params.anchor.anchor_size,
image.get_shape().as_list()[:2])
labels = {
'anchor_boxes': anchors.multilevel_boxes,
'image_info': image_info,
}
groundtruths = {
'source_id': source_id,
'height': height,
'width': width,
'num_detections': tf.shape(classes),
'boxes': boxes,
'classes': classes,
'areas': areas,
'is_crowds': tf.cast(is_crowds, tf.int32),
}
return image, labels, groundtruths | e274a6ebfe7e7aa51dc7bc6b779ef222081a7e47 | 3,658,736 |
def eval_blocking(lamb, mu, k):
"""Finds the blocking probability of a queue.
Args:
lamb (float): The rate into the queue.
mu (float): The rate out of the queue.
k (int): Maximum number of customers able to be in the queue.
"""
rho = lamb/mu
return rho**k*((1-rho)/(1-rho**(k+1))) | 4c1ea7f5f7984fb24c85a5c1c6c77cdbc2e1e76a | 3,658,737 |
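The expression above is the stationary blocking probability of an M/M/1/K queue. A quick illustrative check (numbers invented for the example):

# With lamb=1 and mu=2 the utilisation is rho = 0.5; for k = 2 the blocking
# probability is rho**2 * (1 - rho) / (1 - rho**3) = 0.125 / 0.875.
print(eval_blocking(lamb=1.0, mu=2.0, k=2))  # ~0.1429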
def get_dependent_columns(covar):
"""
Get the list of dependent columns
:param covar: The covariance matrix
:return: Dependent columns
"""
ind_columns = (np.where(~covar.any(axis=1))[0]).tolist()
dep_columns_z = []
for i in range(0, covar.shape[0]):
if i not in ind_columns:
dep_columns_z.append(i)
return exclude_linear_combination_variables(covar, dep_columns_z) | d0145649ce685a4d609809a57d374b1e362c303e | 3,658,738 |
def results_to_answers(guess_hints, answers):
"""Provide remaining valid answers matching a list of guesses and
corresponding hints
"""
gh_stack = guess_hints.copy()
new_ans = answers.copy()
while len(gh_stack) > 0:
gh = gh_stack.pop()
guess = gh[0]
hint = gh[1]
new_ans = answers_guess_hint_to_answers(new_ans, guess, hint)
return new_ans | 243cbaeb2d36c66e49cd570c1487bbca7636cd2c | 3,658,739 |
from typing import Optional
def get_archive_map(data: DataFrame, row_col: Optional[str] = "ROW") -> Series:
"""
Get a series mapping object names to archive names
:param data: Dataset with archive names as ARCHIVE column and object names
in index
:type data: DataFrame
    :param row_col: column with row index, defaults to "ROW". Set to None if
not applicable
:type row_col: str, optional
:return: Series mapping object names to archive names
:rtype: Series
"""
archive_map = data.ARCHIVE.drop_duplicates()
if row_col is not None:
archive_map = archive_map.droplevel(row_col)
return archive_map | 2d66c55c64dab89e7523778411a7bf70ac784bf6 | 3,658,740 |
from typing import Iterable
import math
def gain_ratio(x_mat: ndarray, y_row: ndarray, prop: int, prop_values: Iterable, gain_value: float = None) -> float:
"""
计算使用属性 prop 对样本集进行划分的信息增益率,值越大表示使用属性 prop 进行划分
所获得的纯度提升越大。此方法对可取值数目较少的属性有所偏好
:param x_mat: 特征向量组,行数 m 表示样本数,列数 n 表示特征数
:param y_row: 输出向量。是一个只有一个维度的行向量,要和 x_mat 匹配
:param prop: 进行划分的属性
:param prop_values: 属性的取值
:param gain_value: 信息增益。给出该值是为了避免重复计算。
:return: 信息增益率
"""
prop_x = x_mat[:, prop]
prop_y_num = []
for v in prop_values:
prop_y_num.append(len(y_row[prop_x == v]))
m = y_row.shape[0]
intrinsic_value = 0
for num in prop_y_num:
tmp = num / m
intrinsic_value = intrinsic_value - tmp * (0 if math.isclose(tmp, 0) else math.log2(tmp))
if gain_value is None:
gain_value = gain(x_mat, y_row, prop, prop_values)
return gain_value / intrinsic_value | 08a26ba4c3dc7712ca515f128f7e3039f005b993 | 3,658,741 |
def _LengthError(e: ByteList):
"""Check if the length of the EDID is a multiple of 128.
Args:
e: The list form of the EDID to be checked.
Returns:
A list of error.Error objects, or None.
"""
if not len(e) % 128:
return None
else:
return [
error.Error(
"Overall EDID",
"Invalid length",
"Length % 128 = 0",
"Length %% 128 = %d" % (len(e) % 128),
)
] | 940b6f4b2648eefe79afe69f623b0f1e02583ce1 | 3,658,742 |
def __single_auc_score__(feature_i,
clf,
cv_indices,
X,
y,
sample_weight=None):
"""Method determining the 'area under curve' for a single test set.
This function is intended for internal use.
Parameters
----------
feature_i: int
Index of the tested feature.
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
cv_indices: list of tuples
        Indices for all the cross validation steps. They are passed
        explicitly, so all test sets use the same splitting.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contain the sample weights.
None in the case of no weights.
Returns
-------
feature_i: int
        Index of the tested feature. It is needed as a return value for
        asynchronous parallel processing.
auc_score: float
Returns calculated auc score.
"""
y_pred = np.zeros_like(y, dtype=float)
for i, [train_idx, test_idx] in enumerate(cv_indices):
X_train = X[train_idx]
X_test = X[test_idx]
y_train = y[train_idx]
if sample_weight is None:
sample_weight_train = None
sample_weight_test = None
else:
sample_weight_train = sample_weight[train_idx]
sample_weight_test = sample_weight[test_idx]
clf = clf.fit(X=X_train,
y=y_train,
sample_weight=sample_weight_train)
y_pred[test_idx] = clf.predict_proba(X_test)[:, 1]
    # Score over all samples, weighted by the full sample_weight array.
    auc_score = roc_auc_score(y, y_pred, sample_weight=sample_weight)
return feature_i, auc_score | 51738218bde23aeb3633bcfa47dff918af29c4cd | 3,658,743 |
def IntCurveSurface_ThePolygonToolOfHInter_Bounding(*args):
"""
:param thePolygon:
:type thePolygon: IntCurveSurface_ThePolygonOfHInter &
:rtype: Bnd_Box
"""
return _IntCurveSurface.IntCurveSurface_ThePolygonToolOfHInter_Bounding(*args) | 294da704fcc9a59a8e7fc2042d050255aa45accb | 3,658,744 |
def identity_show(client, resource_group_name, account_name):
"""
Show the identity for Azure Cognitive Services account.
"""
sa = client.get(resource_group_name, account_name)
return sa.identity if sa.identity else {} | 19018c895f3fdf0b2b79788547bf80a400724336 | 3,658,745 |
import re
def normalize_country_code(country_code):
""" Normalize country codes a bit by making capitalization consistent and
removing trailing comments (and other words). """
if not country_code:
return country_code
country_code = re.match(r'^(\w+)', country_code).group(1)
return country_code.upper() | 37dce64b62ae4ec20cb2d9b10c66beeba73c5683 | 3,658,747 |
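For illustration, a few hypothetical inputs and what the function returns for them:

print(normalize_country_code("us"))            # "US"
print(normalize_country_code("de (guessed)"))  # "DE" -- trailing comment dropped
print(normalize_country_code(None))            # None -- falsy values pass through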
import math
def get_angle(p1, p2):
"""Get the angle between two points."""
return math.atan2(p2[1] - p1[1], p2[0] - p1[0]) | a29ea1ed74a6c071cf314d1c38c6e2f920bd1c3a | 3,658,748 |
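A quick illustrative call with (x, y) tuples; the result is in radians:

import math

angle = get_angle((0, 0), (1, 1))
print(angle, math.degrees(angle))  # 0.7853981... 45.0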
def per_application():
"""
:return:
a seeder function that always returns 1, ensuring at most one delegate is ever spawned
for the entire application.
"""
return lambda msg: 1 | 7ecc568846ab484557e768ad372f4faf85238401 | 3,658,749 |
import requests
from bs4 import BeautifulSoup
def get_job_information(url):
""" Uses bs4 to grab the information from each job container based on the url.
Parameters
----------
url : str
Career builder url of any job
Returns
------
job_data : dict
Contains Job Name, Company Name, Job Location, Description, Skills and apply link.
"""
website = requests.get(url).text
job_soup = BeautifulSoup(website, 'html.parser')
job_name = "N/A"
try:
job_name = job_soup.select('h2.h3')[0].getText()
except Exception as err:
print(f"The job tile could not be selected properly")
print(err)
print(f'Skipping {url}...')
company_name = "N/A"
try:
company_name = job_soup.select('.data-details > span:nth-child(1)')[0].getText()
except Exception as err:
print(f"The company name could not be selected properly")
print(err)
print(f'Skipping {url}...')
job_location = "N/A"
try:
job_location = job_soup.select('.data-details > span:nth-child(2)')[0].getText()
except Exception as err:
print(f"The location could not be selected properly")
print(err)
print(f'Skipping {url}...')
job_description = job_soup.select('#jdp_description > div.col-2 > div.col.big.col-mobile-full > p')
job_description_2 = job_soup.select('#jdp_description > div:nth-child(1) > div:nth-child(1)')
desc = [ ]
for idx, paragraph in enumerate(job_description):
desc.append(job_description[idx].text)
if len(desc) == 0:
for idx, paragraph in enumerate(job_description_2):
desc.append(job_description_2[idx].text)
job_skills = [ ]
skills_container = job_soup.findAll("div", {"class": "check-bubble"})
for idx, skill in enumerate(skills_container):
job_skills.append(skills_container[idx].text)
job_data = {'Job Title': job_name,
'Company': company_name,
'Location': job_location,
'Description': desc,
'Skills': job_skills,
'Application Url': url}
return job_data | a5c0b53338dbacc7fe0e7c7eb91b66855968af2b | 3,658,750 |
def idiv(self, other):
"""Compute the element-wise division.
Parameters
----------
other : Union[dragon.Tensor, number]
The value to divide.
Returns
-------
dragon.Tensor
The self.
See Also
--------
`dragon.math.div(...)`_
"""
return _apply_binary_op([self, other], 'Div', [self]) | 05afbc883ec835e06cceaa9a13119fbac0df8f5c | 3,658,751 |
def getAudioMetadata(fileRef):
"""Extract metadata for audio file"""
args = [config.mediaInfoExe]
args.append("--Output=EBUCore")
args.append(fileRef)
# Command line as string (used for logging purposes only)
cmdStr = " ".join(args)
status, out, err = shared.launchSubProcess(args)
# Configure XML parser to get rid of blank lines in MediaInfo output
parser = etree.XMLParser(remove_blank_text=True)
# Parse string to element
outElt = etree.XML(out.encode('utf-8'), parser=parser)
# Main results to dictionary
dictOut = {}
dictOut["cmdStr"] = cmdStr
dictOut["status"] = status
dictOut["outElt"] = outElt
dictOut["stderr"] = err
return dictOut | 4f954d45e6b029b22001a02e49ad453a2f572bb8 | 3,658,752 |
def simulation_test(**kwargs):
"""Decorate a unit test and mark it as a simulation test.
The arguments provided to this decorator will be passed to
:py:meth:`~reviewbot.tools.testing.testcases.BaseToolTestCase
.setup_simulation_test`.
Args:
**kwargs (dict):
Keyword arguments to pass during setup.
Returns:
callable:
The new unit test function.
"""
def _dec(func):
func.simulation_setup_kwargs = kwargs
return func
return _dec | 56aa51374e66bb765bfc3d4da51e3254d06c0b55 | 3,658,753 |
def update_action_state():
""" :type action: dart.model.action.Action """
# we receive a list of {action_id, action_status, workflow_instance_id/status}
# We will update the database for each such entry
try:
action_status_updates = request.get_json()
_logger.info("AWS_Batch: extracted json from request: {0}".format(action_status_updates))
except Exception as err:
_logger.error("AWS_Batch: Failed to extract json from request")
return {'result': str(err)}, 500
try:
for action_status in action_status_updates:
# updating the action state
current_action = action_service().get_action(action_status['action_id'])
if should_update(action_status['action_status'], current_action.data.state):
_logger.info("AWS_Batch: Updating action={0} from {1} to state {2}".format(current_action.id, current_action.data.state, action_status['action_status']))
action_service().update_action_state(current_action, action_status['action_status'])
# if we receive a workflow_instance_id (not empty) then we need to set workflow_instance status.
# we may need to set workflow and datastore status if they need to be deactivated on failure.
if action_status.get('workflow_instance_id'):
wfs = action_status.get('workflow_instance_status')
wf_instance_status = WorkflowInstanceState.FAILED if (wfs == 'FAILED') else WorkflowInstanceState.COMPLETED
_logger.info("AWS_Batch: Updating workflow_instance={0} to state {1}".format(action_status.get('workflow_instance_id'), wf_instance_status))
# Updating workflow_instance with the status sent (success or failure).
wf_instance = workflow_service().get_workflow_instance(action_status.get('workflow_instance_id'))
workflow_service().update_workflow_instance_state(wf_instance, wf_instance_status)
# check if need to deactivate workflow and datastore.
if wf_instance_status == WorkflowInstanceState.FAILED:
workflow_id = wf_instance.data.workflow_id
master_workflow = workflow_service().get_workflow(workflow_id)
# Failed action with deactivate on_failure should deactivate the current workflow.
if current_action.data.on_failure == ActionOnFailure.HALT:
_logger.info("AWS_Batch: Action in workflow={0} failed. Halting on failure and remaining in state {2}".format(master_workflow.id, WorkflowState.ACTIVE))
elif current_action.data.on_failure == ActionOnFailure.DEACTIVATE:
_logger.info("AWS_Batch: Updating workflow={0} to state {2}".format(master_workflow.id, WorkflowState.INACTIVE))
workflow_service().update_workflow_state(master_workflow, WorkflowState.INACTIVE)
if master_workflow.data.on_failure == WorkflowOnFailure.DEACTIVATE:
datastore_id = master_workflow.data.datastore_id
_logger.info("AWS_Batch: Updating datastore={0} to state {2}".format(datastore_id, DatastoreState.INACTIVE))
datastore = datastore_service().get_datastore(datastore_id)
datastore_service().update_datastore_state(datastore, DatastoreState.INACTIVE)
except Exception as err:
_logger.error("AWS_Batch: Failed to update action state. err= {0}".format(err))
return {'result': str(err)}, 501
# if all pass we send success status (200) otherwise we will try again later.
return {'result': "OK"}, 200 | f89142b6877f615cce253d727c001737729394fa | 3,658,754 |
def page_not_found():
"""Directs to error page if user is not logged in.
:return: HTML file for error page.
"""
error = 'You must be logged in to view this page.'
return render_template('error.html', error=error) | ff3cc2c369154bec1303658bb3c691de448d8231 | 3,658,755 |
def mtf_toy_model_parallel():
"""Set of hyperparameters."""
hparams = mtf_toy_base()
hparams.add_hparam("layout", "hidden:0")
return hparams | 74c01e9f8c68f07d332119fd7cead21b92e4de84 | 3,658,756 |
from typing import Union
from pathlib import Path

import numpy as np
import pandas as pd
def to_dataframe(sas7bdat_file: Union[str, Path]) -> pd.DataFrame:
"""Converts a sas7bdat and/or xpt file into a pandas dataframe.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
return:
A pandas dataframe containing the data from the sas7bdat file.
"""
df = pd.read_sas(sas7bdat_file)
# convert binary strings to utf-8
str_df = df.select_dtypes([np.dtype(object)])
if len(str_df.columns) > 0:
str_df = str_df.stack().str.decode("utf-8").unstack()
for col in str_df:
df[col] = str_df[col]
# end conversion to utf-8
return df | 70564f16c43a6c6fdaf65841ee1d0c48d8f550f2 | 3,658,757 |
def shift_scale_rmsf(rmsf_double, phi, cellsize, ccomp, faraday_peak):
"""Shift and scale the RMSF, to the parameters of the found clean component.
Args:
rmsf_double (numpy array): double sized array of complex point spread
function values in Faraday space.
phi (numpy array): array of Faraday depths.
cellsize (float): advised cellsize in Faraday space.
ccomp (float): the complex-valued clean component.
faraday_peak (int): the index of the peak of the clean component.
Returns:
ccomp*rmsf_shifted: the shifted and scaled RMSF.
"""
# Calculate the integer number of pixels required to shift the RMSF:
faraday_shift = phi[faraday_peak]/cellsize
faraday_shift = faraday_shift.astype(int)
# Shift the RMSF and pad with zeros based upon its sign:
if faraday_shift > 0:
rmsf_shifted = np.roll(rmsf_double, faraday_shift)
rmsf_shifted[0:faraday_shift] = 0.0
elif faraday_shift < 0:
rmsf_shifted = np.roll(rmsf_double, faraday_shift)
rmsf_shifted[len(rmsf_shifted)+faraday_shift:len(rmsf_shifted)] = 0.0
elif faraday_shift == 0:
rmsf_shifted = np.copy(rmsf_double)
# The shifted RMSF is double the width of the sampled Faraday space
# to ensure the shifted beam is subtracted correctly.
# Truncate the RMSF so it has same dimension as sampled parameter space:
rmsf_len = len(rmsf_shifted)
rmsf_shifted = np.delete(rmsf_shifted, np.arange((3*((rmsf_len-1)//4))+1,
rmsf_len))
rmsf_shifted = np.delete(rmsf_shifted, np.arange(0, ((rmsf_len-1)//4)))
# Scale the RMSF by the magnitude of the clean component:
return ccomp*rmsf_shifted | d658cfece87276075b7c53b987772906908b5b80 | 3,658,758 |
def region_filter(annos, annotation):
"""filter for Region annotations.
The 'time' parameter can match either 'time' or 'timeEnd' parameters.
"""
result = []
for anno in annos:
time = annotation.get("time")
timeEnd = annotation.get("timeEnd")
for key in ['text', 'tags']:
if anno.get(key) != annotation.get(key):
continue
if anno.get("regionId") == 0:
continue
if anno.get("time") not in [time, timeEnd]:
continue
result.append(anno)
return result | 3ca4c6ba39d44370b3022f5eb17a25e0e1c9f056 | 3,658,759 |
def estimator_mixt_default(sample):
"""Default estimator of mixture distribution
    This estimator returns a tuple with two non-overlapping parts of `sample`
    which are estimated to come from the continuous and discrete parts of the
    mixture distribution. A sample element is assigned to the discrete part if
    it is present at least twice in the input `sample`.
If some part of estimation has no elements, it is represented as `None` in
output.
Parameters
----------
sample : array_like
This should be a valid input to `np.asarray()` so that its output is
numeric.
Returns
-------
sample_cont, sample_disc : tuple with two elements
Elements can be `None` if estimation showed no elements from
corresponding mixture part.
"""
# Detect sample from discrete part
sample = np.asarray(sample)
vals, inverse, counts = np.unique(sample, return_inverse=True, return_counts=True)
disc_inds = np.nonzero(counts >= 2)[0]
sample_is_disc = np.isin(inverse, disc_inds)
# Return separation
if np.all(sample_is_disc):
return (None, sample)
elif np.all(~sample_is_disc):
return (sample, None)
else:
return (sample[~sample_is_disc], sample[sample_is_disc]) | 31394305d9da7afe553f0dab9753d919b6aa7c73 | 3,658,760 |
import inspect
def get_post_processors():
"""
Loads post processors by inspecting members of the 'post_processors' package.
"""
post_processor_classes = []
for _, member in inspect.getmembers(post_processors):
if inspect.isclass(member):
post_processor_classes.append(member)
return post_processor_classes | 6b65c438657230661b189c8851ca5b662714c4df | 3,658,762 |
def vulcanize(name: str) -> str:
"""Add prefixes to names that are similar to the prefixes seen
in Vulcan characters in the Star Trek™ franchise.
:param name: The name to modify.
:return: A :class:str object.
:rtype: str
Usage:
>>> # Seed the RNG to make the example predictable. Don't do
>>> # this if you want the modification to be random.
>>> seed('spam')
>>>
>>> name = 'Bacon'
>>> vulcanize(name)
"T'Bacon"
"""
letter = 't'
if roll('1d6') > 5:
letters = 'd k l m n p s su v'.split()
index = roll(f'1d{len(letters)}') - 1
letter = letters[index]
letter = letter.title()
name = name.title()
return f"{letter}'{name}" | 00cd22427ab873852af519a6657bf9504b945fb3 | 3,658,763 |
def B(j, p, x, knots):
""" Compute B-splines using recursive definition. """
if p == 0:
if knots[j] <= x < knots[j+1]:
return 1.0
else:
return 0.0
else:
left = special_div((x-knots[j])*B(j,p-1,x,knots), knots[j+p]-knots[j])
right = special_div((knots[j+1+p]-x)*B(j+1,p-1,x,knots), knots[j+1+p]-knots[j+1])
return left + right | 1c578e317a3e2ff00f31b8e0b31b4f184e9bd338 | 3,658,764 |
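A usage sketch for B(). The helper `special_div` is not defined in the snippet above, so the stand-in below is an assumption implementing the usual 0/0 := 0 convention of the Cox-de Boor recursion:

def special_div(numerator, denominator):
    # Assumed stand-in: treat division by zero as zero.
    return 0.0 if denominator == 0 else numerator / denominator

knots = [0, 0, 0, 1, 2, 3, 3, 3]   # clamped knot vector, degree p = 2
x = 1.5
values = [B(j, 2, x, knots) for j in range(len(knots) - 3)]
print(values)       # [0.0, 0.125, 0.75, 0.125, 0.0]
print(sum(values))  # 1.0 -- partition of unity inside the knot range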
from typing import TypeVar

T = TypeVar("T")
def not_falsy(item: T, item_name: str) -> T:
"""
Check if a value is falsy and throw an exception if so.
:param item: the item to check for falsiness.
:param item_name: the name of the item to include in any exception.
:raises ValueError: if the item is falsy.
:returns: the item.
"""
if not item:
raise ValueError(f"{item_name} cannot be a value that evaluates to false")
return item | b758d3ffe8f4c30086248fc9df2a9e82e05553d3 | 3,658,765 |
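Illustrative behaviour (values invented for the example):

print(not_falsy("abc", "token"))  # abc
try:
    not_falsy("", "token")
except ValueError as err:
    print(err)                    # token cannot be a value that evaluates to false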
def _apply_limit_abs_unit(x, lim, unit):
"""Return one limit with applied unit(abs(x)). See get_limits."""
if unit is None:
return lim
unit = unit.lower()
if unit == 'near':
return lim * np.nanmin(np.abs(x))
if unit == 'far':
return lim * np.nanmax(np.abs(x))
elif unit == 'median':
return lim * np.nanmedian(np.abs(x))
elif unit == 'mean':
return lim * np.nanmean(np.abs(x))
else:
raise ValueError("Unknown unit %s"%unit) | e3c77192b90b04b4c488ca8bac41f79024517a6b | 3,658,766 |
from astropy.io import fits  # assumed: `fits` here is astropy's FITS module

def load_fits(name):
""" Open a fits file image
Inputs:
name: name of the .fits file (str).
    Output:
        image: a copy of the opened HDUList.
        name: the file name that was successfully opened.
"""
while True:
try:
file = fits.open(name)
image = file.copy()
return image, name
except FileNotFoundError:
print(f"File {name} not found")
name = input('Please enter a different file name: ') | 24a348239e89cc9e565238e9f124875090ffe92b | 3,658,767 |
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup() | 984d2c3b297c47c1ffaec43302cfb741cfe369e4 | 3,658,769 |
def social_bonus_count(user, count):
"""Returns True if the number of social bonus the user received equals to count."""
return user.actionmember_set.filter(social_bonus_awarded=True).count() >= count | b2469833f315410df266cd0a9b36933edb1f9ac6 | 3,658,770 |
def del_category_tag_lib(self,c_uuid,t_uuid):
"""04删除便签或分类"""
if c_uuid:
category = Category.by_uuid(c_uuid)
if category is None:
flash(self, '分类不存在', 'error')
return {'status':False}
if category.articles:
flash(self,'分类下面有文章,请先删除文章','error')
return {'status': False}
self.db.delete(category)
self.db.commit()
flash(self, '分类删除成功', 'success')
return {'status':True}
if t_uuid:
tag = Tag.by_uuid(t_uuid)
if tag is None:
flash(self, '标签不存在', 'error')
return {'status':False}
if tag.articles:
flash(self, '标签下面有文章,请先删除文章', 'error')
return {'status': False}
self.db.delete(tag)
self.db.commit()
flash(self, '标签删除成功', 'success')
return {'status':True}
flash(self, '请输入标签或分类', 'error')
return {'status': False} | db915fe29943d9bb63122d73d59a052715798818 | 3,658,771 |
import math
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two `LocationGlobal` or `LocationGlobalRelative` objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5 | 57a56fac2d0a3a83083b769b5f896cb82d55dc56 | 3,658,772 |
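A quick illustrative check with stand-in objects (any object exposing `.lat` and `.lon` works in place of a LocationGlobal):

from types import SimpleNamespace

p1 = SimpleNamespace(lat=47.397, lon=8.545)
p2 = SimpleNamespace(lat=47.398, lon=8.545)
# 0.001 degrees of latitude corresponds to roughly 111 m on the ground.
print(get_distance_metres(p1, p2))  # ~111.3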
import copy

from prettytable import PrettyTable
def get_export_summary(results):
"""Prints to screen the exporting results of example programs.
    Args:
        results - results of the compilation stage, which is the output of export_repos()
    Returns: Number of failed results
"""
pass_table = PrettyTable()
pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"]
pass_table.align["EXAMPLE NAME"] = "l"
fail_table = copy.deepcopy(pass_table)
failure_counter = 0
for exp, status in list(results.items()):
for summary in status[2]:
pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"])
for summary in status[3]:
fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""])
failure_counter+=1
for summary in status[4]:
fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"])
failure_counter+=1
for summary in status[5]:
pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"])
print("\n\nPassed Example Exporting:")
print(pass_table)
if (failure_counter > 0):
print("\n\nFailed Example Exporting:")
print(fail_table)
print("Number of failures = %d" % failure_counter)
return failure_counter | 0f68e8da955a73c401536f83e18faa223d603d15 | 3,658,774 |
from math import sqrt

import numpy
def _misfitfunc(data, predicted):
"""
Calculate the total data misfit function between the observed and predicted
data.
"""
result = 0.
for d, p, in zip(data, predicted):
residuals = d.observed - p
result += sqrt(numpy.dot(d.weights*residuals, residuals))/d.norm
return result | c21fb4c8d68a2abe20ca155e5776124c69ce2eff | 3,658,775 |
def stream_doi(app, doi):
"""Returns tuple of URL string and a urlopen() return value."""
apikey = app.cfg.get_or_die('api-keys', 'crossref')
url = ('http://crossref.org/openurl/?id=%s&noredirect=true&pid=%s&'
'format=unixref' % (wu.urlquote(doi), wu.urlquote(apikey)))
return url, wu.urlopen(url) | 7c3569c4492b52c68ed13bcaac9dae0b6805bdb6 | 3,658,776 |
from typing import Optional
import getpass
import datetime
import json
def do_evaluation(
*,
input_path,
training_path: Optional[str] = None,
testing_path: Optional[str] = None,
method,
prediction_task,
dimensions: int = 300,
number_walks: int = 8,
walk_length: int = 8,
window_size: int = 4,
p: float = 1.5,
q: float = 2.1,
alpha: float = 0.1,
beta: float = 4,
epochs: int = 5,
kstep: int = 4,
order: int = 3,
embeddings_path: Optional[str] = None,
predictive_model_path: Optional[str] = None,
training_model_path: Optional[str] = None,
evaluation_file: Optional[str] = None,
classifier_type: Optional[str] = None,
weighted: bool = False,
labels_file: Optional[str] = None,
):
"""Train and evaluate an NRL model."""
if prediction_task == 'link_prediction':
node_list = None
labels = None
graph, graph_train, testing_pos_edges, train_graph_filename = create_graphs(
input_path=input_path,
training_path=training_path,
testing_path=testing_path,
weighted=weighted,
)
else:
if not labels_file:
raise ValueError("No input label file. Exit.")
node_list, labels = read_node_labels(labels_file)
train_graph_filename = input_path
graph, graph_train, testing_pos_edges = None, None, None
model = embedding_training(
train_graph_filename=train_graph_filename,
method=method,
dimensions=dimensions,
number_walks=number_walks,
walk_length=walk_length,
window_size=window_size,
p=p,
q=q,
alpha=alpha,
beta=beta,
epochs=epochs,
kstep=kstep,
order=order,
weighted=weighted,
)
if training_model_path is not None:
model.save_model(training_model_path)
if embeddings_path is not None:
model.save_embeddings(embeddings_path)
if method == 'LINE':
embeddings = model.get_embeddings_train()
else:
embeddings = model.get_embeddings()
_results = dict(
input=input_path,
method=method,
dimension=dimensions,
user=getpass.getuser(),
date=datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S'),
)
if prediction_task == 'link_prediction':
auc_roc, auc_pr, accuracy, f1, mcc = do_link_prediction(
embeddings=embeddings,
original_graph=graph,
train_graph=graph_train,
test_pos_edges=testing_pos_edges,
save_model=predictive_model_path,
classifier_type=classifier_type,
)
_results['results'] = dict(
auc_roc=auc_roc,
auc_pr=auc_pr,
accuracy=accuracy,
f1=f1,
mcc=mcc,
)
else:
accuracy, macro_f1, micro_f1, mcc = do_node_classification(
embeddings=embeddings,
node_list=node_list,
labels=labels,
save_model=predictive_model_path,
classifier_type=classifier_type,
)
_results['results'] = dict(
accuracy=accuracy,
macro_f1=macro_f1,
micro_f1=micro_f1,
mcc=mcc,
)
if evaluation_file is not None:
json.dump(_results, evaluation_file, sort_keys=True, indent=2)
return _results | ab5939065d9cf70c6e5ddba8530f91cb2577a31c | 3,658,777 |
import re
def test_structure_fatal_deformities(good_structure, deformity):
"""Make specific checks upon performing single invalidating deformations
of the data of a good structure.
"""
if deformity is None:
return StructureResource(**good_structure)
deformity, message = deformity
good_structure["attributes"].update(deformity)
with pytest.raises(ValidationError, match=fr".*{re.escape(message)}.*"):
StructureResource(**good_structure) | 28acc95fb29564ddbf844de70704e31212e59b9f | 3,658,778 |
def edit_user():
""" 返回待编辑用户信息 """
data = request.json
user_id = data.get('id')
_edit = User.query.filter_by(id=user_id).first()
_data = {'account': _edit.account, 'name': _edit.name, 'role_id': _edit.role_id}
return jsonify({'data': _data, 'status': 1}) | 7423eb2342dd135a219bbb6f34ba7f82740b49d0 | 3,658,779 |
def transactions(request):
"""See all transactions that have been contained in blocks."""
vote_list = Vote.objects.all().order_by('timestamp')
paginator = Paginator(vote_list, 100, orphans=20, allow_empty_first_page=True)
page = request.GET.get('page')
votes = paginator.get_page(page)
hashes = [SHA3_256.new(str(v).encode('utf-8')).hexdigest() for v in votes]
# This happens if you don't use foreign key
block_hashes = []
for i in range(0, len(votes)):
try:
b = Block.objects.get(id=votes[i].block_id)
h = b.h
except:
h = 404
block_hashes.append(h)
# zip the three iters
votes_pg = votes # for pagination
votes = zip(votes, hashes, block_hashes)
# Calculate the voting result of 3 cands, the ugly way
result = []
for i in range(0, 3):
try:
r = Vote.objects.filter(vote=i+1).count()
except:
r = 0
result.append(r)
context = {
'votes': votes,
'result': result,
'votes_pg': votes_pg,
}
return render(request, 'simulation/transactions.html', context) | 7ed0d4a8b997a41112eccfc67a19784283e65fd8 | 3,658,780 |
import pandas
def elections_vote_places_geo(source="xd", folder=".", fLOG=noLOG):
"""
    Retrieves voting place data (bureaux de vote in French)
    with geocodes.
@param source should be None unless you want to use the backup plan ("xd")
@param folder where to download
@param fLOG logging function
@return list of dataframe
"""
if source is None:
raise NotImplementedError("use source='xd'")
url = source
file = "bureauxvotegeo.zip"
data = download_data(file, website=url, whereTo=folder, fLOG=fLOG)
for d in data:
if d.endswith(".txt"):
df = pandas.read_csv(d, sep="\t", encoding="utf-8")
return df
raise DataNotAvailableError(
"Unable to find any csv file in '{0}'".format(file)) | b81abbeeed1968e01477cb71897a373a113ffafb | 3,658,781 |
from typing import Optional
def erfc(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
"""This operator computes the :math:`1-erf(x)`, for more details of `erf` function
please refer to `math.erf`.
Args:
x (oneflow._oneflow_internal.BlobDesc): A Blob
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def erfc_Job(x: tp.Numpy.Placeholder((3,))
) -> tp.Numpy:
return flow.math.erfc(x)
x = np.array([1, 2, 3]).astype(np.float32)
out = erfc_Job(x)
# out [1.5729921e-01 4.6777353e-03 2.2090495e-05]
"""
return build_unary_elemwise_math_op("erfc", x, name) | 0fd6f01b0d6dbbdf7449a5a7dc2ac9ee3f0bce0e | 3,658,782 |
from typing import Union
from typing import Iterable
from typing import Optional
from typing import List
from typing import Dict
from typing import Any
def ner_manual_tokenizers_bert(
dataset: str,
source: Union[str, Iterable[dict]],
loader: Optional[str] = None,
label: Optional[List[str]] = None,
tokenizer_vocab: Optional[str] = None,
lowercase: bool = False,
hide_special: bool = False,
hide_wp_prefix: bool = False,
) -> Dict[str, Any]:
"""Example recipe that shows how to use model-specific tokenizers like the
BERT word piece tokenizer to preprocess your incoming text for fast and
efficient NER annotation and to make sure that all annotations you collect
always map to tokens and can be used to train and fine-tune your model
(even if the tokenization isn't that intuitive, because word pieces). The
selection automatically snaps to the token boundaries and you can double-click
single tokens to select them.
Setting "honor_token_whitespace": true will ensure that whitespace between
tokens is only shown if whitespace is present in the original text. This
keeps the text readable.
    Requires Prodigy v1.10+ and uses the HuggingFace tokenizers library."""
stream = get_stream(source, loader=loader, input_key="text")
# You can replace this with other tokenizers if needed
tokenizer = BertWordPieceTokenizer(tokenizer_vocab, lowercase=lowercase)
sep_token = tokenizer._parameters.get("sep_token")
cls_token = tokenizer._parameters.get("cls_token")
special_tokens = (sep_token, cls_token)
wp_prefix = tokenizer._parameters.get("wordpieces_prefix")
def add_tokens(stream):
for eg in stream:
tokens = tokenizer.encode(eg["text"])
eg_tokens = []
idx = 0
for (text, (start, end), tid) in zip(
tokens.tokens, tokens.offsets, tokens.ids
):
# If we don't want to see special tokens, don't add them
if hide_special and text in special_tokens:
continue
# If we want to strip out word piece prefix, remove it from text
if hide_wp_prefix and wp_prefix is not None:
if text.startswith(wp_prefix):
text = text[len(wp_prefix) :]
token = {
"text": text,
"id": idx,
"start": start,
"end": end,
# This is the encoded ID returned by the tokenizer
"tokenizer_id": tid,
                    # Don't allow selecting special SEP/CLS tokens
"disabled": text in special_tokens,
}
eg_tokens.append(token)
idx += 1
for i, token in enumerate(eg_tokens):
# If the next start offset != the current end offset, we
# assume there's whitespace in between
if i < len(eg_tokens) - 1 and token["text"] not in special_tokens:
next_token = eg_tokens[i + 1]
token["ws"] = (
next_token["start"] > token["end"]
or next_token["text"] in special_tokens
)
else:
token["ws"] = True
eg["tokens"] = eg_tokens
yield eg
stream = add_tokens(stream)
return {
"dataset": dataset,
"stream": stream,
"view_id": "ner_manual",
"config": {
"honor_token_whitespace": True,
"labels": label,
"exclude_by": "input",
"force_stream_order": True,
},
} | 982ddc4ab2e574870a5790dc37854ff5ffec648a | 3,658,783 |
def test_nested_simple_condition() -> None:
"""
Iterates and maps expressions over a complex Condition:
(A=B OR A=B) AND (A=B OR A=B)
"""
c1 = Column(None, "t1", "c1")
c2 = Column(None, "t1", "c2")
co1 = binary_condition(None, ConditionFunctions.EQ, c1, c2)
c3 = Column(None, "t1", "c1")
c4 = Column(None, "t1", "c2")
co2 = binary_condition(None, ConditionFunctions.EQ, c3, c4)
or1 = binary_condition(None, BooleanFunctions.OR, co1, co2)
c5 = Column(None, "t1", "c1")
c6 = Column(None, "t1", "c2")
co4 = binary_condition(None, ConditionFunctions.EQ, c5, c6)
c7 = Column(None, "t1", "c1")
c8 = Column(None, "t1", "c2")
co5 = binary_condition(None, ConditionFunctions.EQ, c7, c8)
or2 = binary_condition(None, BooleanFunctions.OR, co4, co5)
and1 = binary_condition(None, BooleanFunctions.AND, or1, or2)
ret = list(and1)
expected = [c1, c2, co1, c3, c4, co2, or1, c5, c6, co4, c7, c8, co5, or2, and1]
assert ret == expected
cX = Column(None, "t1", "cX")
co1_b = binary_condition(None, ConditionFunctions.EQ, c1, cX)
co2_b = binary_condition(None, ConditionFunctions.EQ, c3, cX)
or1_b = binary_condition(None, BooleanFunctions.OR, co1_b, co2_b)
co4_b = binary_condition(None, ConditionFunctions.EQ, c5, cX)
co5_b = binary_condition(None, ConditionFunctions.EQ, c7, cX)
or2_b = binary_condition(None, BooleanFunctions.OR, co4_b, co5_b)
and1_b = binary_condition(None, BooleanFunctions.AND, or1_b, or2_b)
def replace_col(e: Expression) -> Expression:
if isinstance(e, Column) and e.column_name == "c2":
return cX
return e
and1 = and1.transform(replace_col)
ret = list(and1)
expected = [
c1,
cX,
co1_b,
c3,
cX,
co2_b,
or1_b,
c5,
cX,
co4_b,
c7,
cX,
co5_b,
or2_b,
and1_b,
]
assert ret == expected | f047f916f3ace9142e8940a39fd47d36d43dc108 | 3,658,784 |
import functools
def _deep_setattr(obj, key, val):
"""
Set an attribute `key` on the object. If any of the prefix attributes do
not exist, they are set to :class:`~pyro.nn.PyroModule`.
"""
def _getattr(obj, attr):
obj_next = getattr(obj, attr, None)
if obj_next is not None:
return obj_next
setattr(obj, attr, PyroModule())
return getattr(obj, attr)
lpart, _, rpart = key.rpartition(".")
# Recursive getattr while setting any prefix attributes to PyroModule
if lpart:
obj = functools.reduce(_getattr, [obj] + lpart.split("."))
setattr(obj, rpart, val) | a28b01484de71dc486c73fe9ad01238675b15a04 | 3,658,785 |
def inverse_max_dcg(labels,
gain_fn=lambda labels: tf.pow(2.0, labels) - 1.,
rank_discount_fn=lambda rank: 1. / tf.math.log1p(rank),
topn=None):
"""Computes the inverse of max DCG.
Args:
labels: A `Tensor` with shape [batch_size, list_size]. Each value is the
graded relevance of the corresponding item.
gain_fn: A gain function. By default this is set to: 2^label - 1.
rank_discount_fn: A discount function. By default this is set to:
1/log(1+rank).
topn: An integer as the cutoff of examples in the sorted list.
Returns:
A `Tensor` with shape [batch_size, 1].
"""
ideal_sorted_labels, = sort_by_scores(labels, [labels], topn=topn)
rank = tf.range(tf.shape(input=ideal_sorted_labels)[1]) + 1
discounted_gain = gain_fn(ideal_sorted_labels) * rank_discount_fn(
tf.cast(rank, dtype=tf.float32))
discounted_gain = tf.reduce_sum(
input_tensor=discounted_gain, axis=1, keepdims=True)
return tf.compat.v1.where(
tf.greater(discounted_gain, 0.), 1. / discounted_gain,
tf.zeros_like(discounted_gain)) | 60e5b05af91fbd8e51a58894f9f19a5a8f92d1b5 | 3,658,786 |
def get(url):
"""
用 GET 请求 url 并返回响应,对301进行了处理
:param url:
:return:status_code, headers, body
"""
protocol, host, port, path = parsed_url(url)
s = socket_by_protocol(protocol)
s.connect((host, port))
request = 'GET {} HTTP/1.1\r\nhost: {}\r\nConnection: close\r\n\r\n'.format(path, host)
encoding = 'utf-8'
s.send(request.encode(encoding))
response = response_by_socket(s)
r = response.decode(encoding)
status_code, headers, body = parsed_response(r)
if status_code == 301:
url = headers['Location']
return get(url)
else:
return status_code, headers, body | 3ef816149e8b4953e119c807726112feeacc6eed | 3,658,787 |
def NotEqual(data1, data2, target=utils.CCE):
"""
    check whether data1 is not equal to data2.
Args:
data1 (tvm.tensor.Tensor): Tensor.
data2 (tvm.tensor.Tensor): Tensor.
Returns:
        tvm.tensor.Tensor. True where data1 is not equal to data2, otherwise False.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
if target == utils.CCE:
return _not_equal_ascend(data1, data2)
else:
return _not_equal(data1, data2) | 88be9ea40900644a61dd3f37c0a05f9fa8c3eb76 | 3,658,789 |
def read_labels(labels_path):
"""Reads list of labels from a file"""
with open(labels_path, 'rb') as f:
return [w.strip() for w in f.readlines()] | 3ebc61c76dd1ae83b73aa8b77584661c08a51321 | 3,658,790 |
def calc_distance_two_points(long_from, lat_from, long_to, lat_to):
"""Calculate distance between two points
Parameters
----------
    long_from : float
        Longitude coordinate of the origin point
    lat_from : float
        Latitude coordinate of the origin point
    long_to : float
        Longitude coordinate of the destination point
    lat_to : float
        Latitude coordinate of the destination point
Return
------
distance : float
Distance
"""
    # Note: the haversine package expects points as (lat, lon) tuples.
    distance_in_km = haversine(
        (lat_from, long_from),
        (lat_to, long_to),
        miles=False)
return distance_in_km | 0c35c22458db165684242389470248632f2e1edb | 3,658,792 |
from typing import Counter
def modified_precision(reference_max_counts, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = reference_max_counts[n - 1]
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts.get(ngram, 0)) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False) | cbaf2ca391a6b0ac8bfcf9e2f85aba83f4b585d0 | 3,658,793 |
def bin_spectrum(bin_width, wavelength, doppler_shift, flux, flux_uncertainty,
final_uncertainty='combine'):
"""
    Args:
        bin_width: Width of the Doppler-shift bins.
        wavelength: Wavelengths of the spectrum.
        doppler_shift: Doppler shift of each wavelength value.
        flux: Flux values.
        flux_uncertainty: Uncertainties of the flux values.
        final_uncertainty: Either 'combine' or 'poisson'.
    Returns:
        The binned wavelength, Doppler shift, flux and flux-uncertainty arrays.
    """
bw = bin_width
wv = wavelength
ds = doppler_shift
f = flux
u = flux_uncertainty
v_bins = np.arange(min(ds), max(ds) + bw, bw)
binned_data, edges, inds = binned_statistic(ds, [wv, ds, f], bins=v_bins,
statistic='mean')
wv_bin = binned_data[0]
v_bin = binned_data[1]
f_bin = binned_data[2]
# Combine uncertainties assuming Gaussian regime
if final_uncertainty == 'combine':
u_bin, edges, inds = binned_statistic(ds, u ** 2, bins=v_bins,
statistic='sum')
u_count, edges, inds = binned_statistic(ds, u ** 2, bins=v_bins,
statistic='count')
u_bin = u_bin ** 0.5 / u_count ** 0.5
elif final_uncertainty == 'poisson':
confidence_interval = poisson_conf_interval(f_bin)
u_bin = np.mean(confidence_interval, axis=0)
else:
raise ValueError('This final uncertainty type is not implemented.')
return wv_bin, v_bin, f_bin, u_bin | 3c71977ae845161156ed95b42f68d7de65b80f66 | 3,658,794 |
def scrub(old_fs: Vfs, new_fs: Vfs) -> Vfs:
"""Try to eliminate files which were previously installed but are no longer used."""
old_fs = old_fs.copy()
new_fs = new_fs.copy()
# Look for files in the old log which are no longer present in the new log
for txn in old_fs._log:
if txn[0] == "link" and txn not in new_fs._log:
new_fs.unlink(txn[2])
elif txn[0] == "mkdir" and txn not in new_fs._log:
new_fs.unlink(txn[1])
return new_fs | c1cdfad6c658e481b05658d3041458ab6fd3419c | 3,658,795 |
def get_filename(row):
"""
Assembles the name of the feature file.
Parameters
----------
row : pandas.Series
A row fom the sequence dataframe. Must have the following index values:
"sample_name", "inj_number", "batch_name", "acquisition_date_and_time".
Returns
-------
filename : str
The filename of the feature file.
"""
acquisition = row.acquisition_date_and_time
if pd.isna(acquisition):
acquisition = "1900-01-01_000000"
filename = (
"_".join(
[
str(row.sample_name),
str(row.inj_number),
str(row.batch_name),
acquisition,
]
)
+ ".featureXML"
)
return filename | 68624971527442da110734043bdbaa1c68dc4875 | 3,658,796 |
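A usage sketch with a hypothetical sequence row (field values invented for the example):

import pandas as pd

row = pd.Series({
    "sample_name": "QC01",
    "inj_number": 3,
    "batch_name": "batch_A",
    "acquisition_date_and_time": "2021-05-17_101500",
})
print(get_filename(row))  # QC01_3_batch_A_2021-05-17_101500.featureXML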
def create_plotly_trace(data_x, data_y, namexy, chosen_mode='lines', use_gl = True, swap_xy = False):
"""
Создание одного trace по данным
:param data_x: данные для оси x
:param data_y: данные для оси y
:param namexy: название для trace
:param chosen_mode: настройка отображения 'lines', 'markers'
:return: один trace
"""
if swap_xy:
data_x, data_y = data_y, data_x
hovertemplate = namexy + ": %{x}<extra></extra>"
else:
hovertemplate = namexy + ": %{y}<extra></extra>"
if use_gl == True:
one_trace = go.Scattergl(
x=data_x,
y=data_y,
name=namexy,
mode=chosen_mode,
hovertemplate=hovertemplate
)
else:
one_trace = go.Scatter(
x=data_x,
y=data_y,
name=namexy,
mode=chosen_mode,
hovertemplate=hovertemplate
)
return one_trace | dd90d370c27968053bfaf98f509868d959416d39 | 3,658,797 |
import yaml
def read_metadata() -> dict:
"""Reads and returns raw metadata."""
with open(metadata_path().resolve(), "r") as fd:
return yaml.safe_load(fd) | 0eafc0a722ac5cae69407a7e76d5bf62b7541b69 | 3,658,798 |
def new_token():
"""
Generate an access token for the user.
This endpoint requires basic auth with nickname and password.
"""
return jsonify({'token': generate_token(g.current_user['id'])}) | 07497cebfd29a133ab986c86b72b603975378ed8 | 3,658,800 |
def get_room_info(room_real_id: int, verify: utils.Verify = None, cookies = None):
"""
获取直播间信息(标题,简介等)
:param room_real_id: 真实房间ID
:param verify:
:return:
"""
if verify is None:
verify = utils.Verify()
api = API["live"]["info"]["room_info"]
if cookies is None:
resp = utils.get(api["url"], {"room_id": room_real_id}, cookies=verify.get_cookies())
else:
resp = utils.get(api["url"], {"room_id": room_real_id}, cookies=cookies)
return resp | a6aa07886034a5f8c539026f8afacaa149860252 | 3,658,801 |
def parse_raw(setup, id=None, first_line_is_header=(-1,0,1)):
"""Used in conjunction with lazy_import and parse_setup in order to make alterations
before parsing.
Parameters
----------
setup : dict
Result of h2o.parse_setup
id : str, optional
An id for the frame.
first_line_is_header : int, optional
-1,0,1 if the first line is to be used as the header
Returns
-------
H2OFrame
"""
if id: setup["destination_frame"] = _quoted(id).replace("%",".").replace("&",".")
if first_line_is_header != (-1,0,1):
if first_line_is_header not in (-1, 0, 1): raise ValueError("first_line_is_header should be -1, 0, or 1")
setup["check_header"] = first_line_is_header
fr = H2OFrame()
fr._parse_raw(setup)
return fr | 56d490eeaa28258ee668ed5efcc0f8a869acaa2b | 3,658,802 |
from datetime import datetime
def import_year(year: int = None) -> bool:
"""Downloads, extracts and imports the Losungen of a given year.
The year defaults to the next year."""
session: Session = SessionMaker()
repo = TagesLosungRepository(session)
year = datetime.date.today().year + 1 if year is None else year
losungen = repo.get_by_year(year)
session.close()
if losungen:
return True # Already imported
if download_zip(year):
extract_zip()
import_xml()
logger.info("Successfully imported Losungen for %i", year)
return True
logger.warning("Failed to download zip archive for %i", year)
return False | a0e5933178f5d18332f0b231f7a6ec43c0651714 | 3,658,803 |
import re
def isURL(url: str) -> bool:
""" Check whether a given string is a URL. """
return url is not None and re.match(urlregex, url) is not None | 6d32fee1fa374c07214d2a75cc39b868338ffa1c | 3,658,804 |
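# `urlregex` is referenced but not defined in the snippet above; a minimal,
# hypothetical pattern is assumed here purely to illustrate the call.
urlregex = r"^https?://[^\s/$.?#][^\s]*$"
print(isURL("https://example.com/docs"))  # True
print(isURL("not a url"))                 # False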
import numpy as np
def rmse(Y_true, Y_hat):
"""
returns root mean squared error
Args:
Y_true : true outputs [N,(1)]
Y_hat : predicted outputs [N, (1)]
"""
if Y_true.ndim == 2:
Y_true = Y_true[:, 0]
if Y_hat.ndim == 2:
Y_hat = Y_hat[:, 0]
return np.sqrt(np.mean((Y_true - Y_hat)**2)) | 676d14a5058632fbf1cd40e4d60d5cfb4c46e137 | 3,658,805 |
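# Usage sketch for rmse above with toy arrays.
y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.0, 2.0, 5.0])
print(rmse(y_true, y_pred))  # sqrt(4/3) ~= 1.1547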
def getAllDescWords(itemList):
"""Returns a list of "description words" for each item named in itemList."""
itemList = list(set(itemList)) # make itemList unique
descWords = []
for item in itemList:
descWords.extend(NYCitems[item][DESCWORDS])
return list(set(descWords)) | fb7ea77fac5aae3abc2e6dbcc1c3af7ac404b5c2 | 3,658,806 |
def create_constrained_mechanical_system_from_component(structural_component, constant_mass=False,
constant_damping=False, constraint_formulation='boolean',
**formulation_options):
"""
Create a mechanical system from a component where the constraints are applied by a constraint formulation
Parameters
----------
structural_component : amfe.component.StructuralComponent
Structural component describing the mechanical system
constant_mass : bool
Flag indicating if mass matrix is constant
constant_damping : bool
Flag indicating if damping matrix is constant
constraint_formulation : str {'boolean', 'lagrange', 'nullspace_elimination'}
String describing the constraint formulation that shall be used
formulation_options : dict
options passed to the set_options method of the constraint formulation
Returns
-------
system : amfe.solver.translators.MechanicalSystem
formulation : amfe.constraint.ConstraintFormulation
"""
system_unconstrained = create_mechanical_system_from_structural_component(structural_component)
constraint_formulation = _create_constraint_formulation(system_unconstrained, structural_component,
constraint_formulation, **formulation_options)
if constant_mass:
M = MemoizeConstant(constraint_formulation.M)
else:
M = constraint_formulation.M
if constant_damping:
D = MemoizeConstant(constraint_formulation.D)
else:
D = constraint_formulation.D
f_int = constraint_formulation.f_int
K = constraint_formulation.K
f_ext = constraint_formulation.f_ext
dimension = constraint_formulation.dimension
system = MechanicalSystem(dimension, M, D, K, f_ext, f_int)
return system, constraint_formulation | e661ba16a691266e60b14d4594db16e09d81c2e2 | 3,658,808 |
def parse_certificate_issuer_id(id):
"""
:param id: The resource collection type.
:type id: str
:rtype: KeyVaultId
"""
return parse_object_id('certificates/issuers', id) | 919ad42ede4081c67c38f9d44945045d3f84bf87 | 3,658,809 |
def normalize_whitespace(
text, no_line_breaks=False, strip_lines=True, keep_two_line_breaks=False
):
"""
Given ``text`` str, replace one or more spacings with a single space, and one
or more line breaks with a single newline. Also strip leading/trailing whitespace.
"""
if strip_lines:
text = "\n".join([x.strip() for x in text.splitlines()])
if no_line_breaks:
text = constants.MULTI_WHITESPACE_TO_ONE_REGEX.sub(" ", text)
else:
if keep_two_line_breaks:
text = constants.NONBREAKING_SPACE_REGEX.sub(
" ", constants.TWO_LINEBREAK_REGEX.sub(r"\n\n", text)
)
else:
text = constants.NONBREAKING_SPACE_REGEX.sub(
" ", constants.LINEBREAK_REGEX.sub(r"\n", text)
)
return text.strip() | 46d60967f48cb2b14ee44eaa4979592b87e8d811 | 3,658,810 |
import numpy
def nancumprod(x1, **kwargs):
"""
Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one.
For full documentation refer to :obj:`numpy.nancumprod`.
Limitations
-----------
Parameter ``x`` is supported as :obj:`dpnp.ndarray`.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the functions will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
.. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis.
Examples
--------
>>> import dpnp as np
>>> a = np.array([1., np.nan])
>>> result = np.nancumprod(a)
>>> [x for x in result]
[1.0, 1.0]
>>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]])
>>> result = np.nancumprod(b)
>>> [x for x in result]
[1.0, 2.0, 2.0, 8.0, 8.0, 48.0]
"""
if not use_origin_backend(x1) and not kwargs:
if not isinstance(x1, dparray):
pass
else:
return dpnp_nancumprod(x1)
return call_origin(numpy.nancumprod, x1, **kwargs) | e388081ca78decb8b05a6138173cb487a1c72c58 | 3,658,811 |
def error(data, mn, mx, confidence):
"""
Compute the error components.
:param data: the collected data.
:param mn: the critical value (minimum).
:param mx: the critical value (maximum).
:param confidence: the confidence level.
:return: (Dict) the dictionary of errors.
"""
return errutils.error_two_tails(data, mn, mx, confidence) | 31ba96b58a5017a3bd3a5166b460878a886f2bb3 | 3,658,813 |
import time
def retry_connection(f):
    """Decorator. Reconnect on failure.
    """
    def retry(*args, **kwargs):
        seconds_to_retry = 5
        success = False
        while not success:
            try:
                result = f(*args, **kwargs)
                success = True
                return result
            except Exception:
                print("{0}: {1} --> connection problems. Retrying in {2} seconds.".format(curr_date(), f.__name__, seconds_to_retry))
                time.sleep(seconds_to_retry)
    return retry | d9ccbe725f50a6061f77ac76d02e11c52dd91cb1 | 3,658,814
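# Usage sketch for the retry_connection decorator above (hypothetical flaky
# function; curr_date() is assumed to be defined elsewhere in the original
# module, since the decorator calls it when logging a retry).
import random

@retry_connection
def flaky_fetch():
    # Simulates a call that fails transiently, e.g. a network request.
    if random.random() < 0.5:
        raise ConnectionError("temporary failure")
    return "ok"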
def shift_mean(x_mod, x_org):
"""
Shift the mean value of `x_mod` such that it equals the mean of `x_org`.
Parameters
----------
x_org : ndarray
The array which hold the "true" mean value.
x_mod : ndarray
The modified copy of `x_org` which must have its mean value shifted.
Returns
-------
shifted_x_mod : ndarray
A copy of `x_mod` with the same mean value as `x_org`.
Examples
--------
For example,
>>> import numpy as np
>>> from magni.imaging.visualisation import shift_mean
>>> x_org = np.arange(4).reshape(2, 2)
>>> x_mod = np.ones((2, 2))
>>> print('{:.1f}'.format(x_org.mean()))
1.5
>>> print('{:.1f}'.format(x_mod.mean()))
1.0
>>> shifted_x_mod = shift_mean(x_mod, x_org)
>>> print('{:.1f}'.format(shifted_x_mod.mean()))
1.5
>>> np.set_printoptions(suppress=True)
>>> shifted_x_mod
array([[ 1.5, 1.5],
[ 1.5, 1.5]])
"""
@_decorate_validation
def validate_input():
_numeric('x_mod', ('integer', 'floating', 'complex'), shape=(-1, -1))
_numeric('x_org', ('integer', 'floating', 'complex'),
shape=x_mod.shape)
validate_input()
return x_mod + (x_org.mean() - x_mod.mean()) | 0f04e37a9434548cff77a1c92d7540595ee5a1cf | 3,658,815 |
def conversation_detail(request, pk):
"""
Retrieve, update or delete a conversation.
"""
try:
conversation = Conversation.objects.get(pk=pk)
except Conversation.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = Conv_Serializer(conversation)
return Response("serializer.data")
elif request.method == 'PUT':
serializer = Conv_Serializer(conversation, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
conversation.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | 5c4a0b20f38ca7b75415ecb88f25e9992e2a3e57 | 3,658,816 |
def purchase_products(product_id):
"""Purchase a product"""
app.logger.info("Request to purchase product with id %s", product_id)
check_content_type("application/json")
product = Product.find(product_id)
if not product:
abort(
status.HTTP_404_NOT_FOUND, "product with id '{}' was not found.".format(product_id)
)
return make_response(jsonify(product.serialize()), status.HTTP_200_OK) | c6681110ffaa25cab1ea2fd649c845c513b7b178 | 3,658,817 |
def process_alerts(data):
"""
Returns a Pandas DataFrame from the API call.
:return: A pandas DataFrame.
"""
data_dicts = data.get("data", [])
lines = []
for data_dict in data_dicts:
data_dict["alertDescription"] = helper.extract_json_field(
data_dict.get("alertProps", {}), "description.descriptionId")
description_dict = helper.extract_json_field(
data_dict.get("alertProps", {}), "description.descriptionObj")
data_dict.update(description_dict)
alert_context = helper.extract_json_field(
data_dict.get("keys", {}), "src.keys.alert")
if alert_context:
data_dict.update(alert_context)
lines.append(data_dict)
return pd.DataFrame(lines) | 64a06486ebfde2610f11110b55a73a359fe8d0c0 | 3,658,818 |
def validate(df):
"""Validate the timeseries dataframe
"""
err_msgs = []
warn_msgs = []
# check column names
for col in EXP_COLS:
if col not in df:
err_msgs.append(f"**{col}** column missing")
msgs = {
"errors": err_msgs,
"warnings": warn_msgs
}
is_valid_file = len(err_msgs) == 0
return msgs, is_valid_file | 74480413646d1f7480c7915cdd1d28116ace83c6 | 3,658,819 |
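# Usage sketch for validate above; EXP_COLS is defined elsewhere in the
# original module, so a hypothetical value is assumed here.
import pandas as pd
EXP_COLS = ["date", "value"]
example_df = pd.DataFrame({"date": ["2020-01-01"], "value": [1.0]})
msgs, is_valid = validate(example_df)
print(is_valid)  # True, since both expected columns are present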
def _gcs_uri_rewriter(raw_uri):
"""Rewrite GCS file paths as required by the rewrite_uris method.
The GCS rewriter performs no operations on the raw_path and simply returns
it as the normalized URI. The docker path has the gs:// prefix replaced
with gs/ so that it can be mounted inside a docker image.
Args:
raw_uri: (str) the raw GCS URI, prefix, or pattern.
Returns:
normalized: a cleaned version of the uri provided by command line.
docker_path: the uri rewritten in the format required for mounting inside
a docker worker.
"""
docker_path = raw_uri.replace('gs://', 'gs/', 1)
return raw_uri, docker_path | 6e476860cb175dd2936cc0c080d3be1d09e04b77 | 3,658,820 |
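# Quick check of _gcs_uri_rewriter above with a hypothetical bucket path.
normalized, docker_path = _gcs_uri_rewriter("gs://my-bucket/data/*.vcf")
print(normalized)   # gs://my-bucket/data/*.vcf
print(docker_path)  # gs/my-bucket/data/*.vcf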
def remove_apostrophe(text):
"""Remove apostrophes from text"""
return text.replace("'", " ") | c7d918e56646a247564a639462c4f4d26bb27fc4 | 3,658,821 |
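# Usage sketch for remove_apostrophe above.
print(remove_apostrophe("it's a user's note"))  # "it s a user s note"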
def generate_initials(text):
"""
Extract initials from a string
Args:
text(str): The string to extract initials from
Returns:
str: The initials extracted from the string
"""
if not text:
return None
text = text.strip()
if text:
split_text = text.split(" ")
if len(split_text) > 1:
return (split_text[0][0] + split_text[-1][0]).upper()
else:
return split_text[0][0].upper()
return None | 709e53392c790585588da25290a80ab2d19309f8 | 3,658,822 |
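# Usage sketch for generate_initials above.
print(generate_initials("Ada Lovelace"))  # AL
print(generate_initials("plato"))         # P
print(generate_initials(""))              # None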
def nmf_manifold_vec_update(X, U, V, k_to_W, k_to_D, k_to_L, k_to_feat_inds, n_steps=10, gamma=1.0, delta=1.0, i=0, verbose=False, norm_X=None):
"""
Perform <n_steps> update steps with a fixed Laplacian matrix for each latent factor
Parameters
----------
X : np.array
data to factor
U : np.array
previous setting of U to update
V : np.array
previous setting of V to update
k_to_W : dict
mapping of latent factor to weighted adjacency matrix
k_to_D : dict
mapping of latent factor to diagonal matrix that is the sum of W along a row (or column)
k_to_L : dict
mapping of latent factor to L = D - W
n_steps : int
number of update steps to perform
gamma : float
relative importance of manifold regularization term
delta : float
relative importance of ignoring manifold penalty
i : int
number of previous iterations
verbose : bool
if True, print objective function value after each iteration
norm_X : float or None
stored value of the norm of X
"""
obj_data = None
m, k_latent = U.shape
n, k_latent = V.shape
for n_step in range(n_steps):
U_up_num = X.dot(V)
U_up_denom = U.dot((V.transpose().dot(V))) + U
U = np.multiply(U, np.divide(U_up_num, U_up_denom, out=np.ones_like(U_up_num), where=U_up_denom!=0)) # 0 / 0 := 1
V_up_num_recon = X.transpose().dot(U)
V_up_denom_recon = V.dot((U.transpose().dot(U)))
# update each column vector of V separately to accomodate different Laplacians
V_up_num_man = np.zeros((n, k_latent))
V_up_denom_man = np.zeros((n, k_latent))
V_up_num_ign = np.zeros((n, k_latent))
for k in range(k_latent):
W = k_to_W[k]
D = k_to_D[k]
V_up_num_man[:,k] = gamma * W.dot(V[:,k])
V_up_denom_man[:,k] = gamma * D.dot(V[:,k])
nz_inds = k_to_feat_inds[k]
V_up_num_ign[nz_inds,k] = delta * np.power(V[nz_inds,k] + 1, -2)
V_up_num = V_up_num_recon + (V_up_num_man + V_up_num_ign)
V_up_denom = V_up_denom_recon + V_up_denom_man
V_up_denom[V_up_denom < EPSILON] = EPSILON
V = np.multiply(V, np.divide(V_up_num, V_up_denom, out=np.ones_like(V_up_num), where=V_up_denom!=0))
V[V < EPSILON] = EPSILON
obj_data = nmf_manifold_vec_obj(X, U, V, k_to_L, k_to_feat_inds, gamma=gamma, delta=delta)
print(i+n_step+1, obj_data['obj'])
if(verbose):
print(obj_data)
return U, V, obj_data | f1998d8ccd000892f441341240216ada5fd46a70 | 3,658,823 |
def check_xyz_species_for_drawing(xyz, species):
"""A helper function to avoid repetative code"""
    if species is not None and xyz is None:
        xyz = species.final_xyz
if species is not None and not isinstance(species, ARCSpecies):
raise InputError('Species must be an ARCSpecies instance. Got {0}.'.format(type(species)))
if species is not None and not species.final_xyz:
raise InputError('Species {0} has an empty final_xyz attribute.'.format(species.label))
return xyz | 26caa32c55eee43dab53f85e442775095da92580 | 3,658,824 |
def GetUDPStreamSample(command_out, sending_vm, receiving_vm, request_bandwidth,
network_type, iteration):
"""Get a sample from the nuttcp string results.
Args:
command_out: the nuttcp output.
sending_vm: vm sending the UDP packets.
receiving_vm: vm receiving the UDP packets.
request_bandwidth: the requested bandwidth in the nuttcp sample.
network_type: the type of the network, external or internal.
iteration: the run number of the test.
Returns:
sample from the results of the nuttcp tests.
"""
data_line = command_out.split('\n')[0].split(' ')
data_line = [val for val in data_line if val]
actual_bandwidth = float(data_line[6])
units = data_line[7]
packet_loss = data_line[16]
metadata = {
'receiving_machine_type': receiving_vm.machine_type,
'receiving_zone': receiving_vm.zone,
'sending_machine_type': sending_vm.machine_type,
'sending_zone': sending_vm.zone,
'packet_loss': packet_loss,
'bandwidth_requested': request_bandwidth,
'network_type': network_type,
'iteration': iteration
}
return sample.Sample('bandwidth', actual_bandwidth, units, metadata) | d9f0e75602768ee574d280215ebc78ebd67a520b | 3,658,825 |
def setSwaggerParamDesc(swagger,searchParams):
"""
Set the Swagger GET Parameter Description to what is stored in the search Parameters using helper function
"""
for id in range(len(swagger['tags'])):
# Paths are prefaced with forward slash
idName = '/'+swagger['tags'][id]['name']
# Filter out Capability statement
if idName != '/CapabilityStatement':
for paramId in range(len(swagger['paths'][idName]['get']['parameters'])):
# Get the parameter name to use getParamDesc function
paramName = swagger['paths'][idName]['get']['parameters'][paramId]['name']
# Set description to what is returned from search Parameters
swagger['paths'][idName]['get']['parameters'][paramId]['description'] = getParamDesc(searchParams,idName,paramName)
swagger = removeFormatParam(swagger)
return swagger | e83c4c713718d382e5ce6f2429d029d4eb9ae588 | 3,658,826 |
def parse_args(args=[], doc=False):
"""
Handle parsing of arguments and flags. Generates docs using help from `ArgParser`
Args:
args (list): argv passed to the binary
doc (bool): If the function should generate and return manpage
Returns:
Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage
"""
parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
parser.add_argument("username", help="Username of the new user to add")
parser.add_argument("-p", dest="password", help="Password for the new user")
parser.add_argument("-n", dest="noninteractive", action="store_false", help="Don't ask for user input")
parser.add_argument("--version", action="store_true", help=f"print program version")
args = parser.parse_args(args)
arg_helps_with_dups = parser._actions
arg_helps = []
[arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps]
NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"
for item in arg_helps:
# Its a positional argument
if len(item.option_strings) == 0:
# If the argument is optional:
if item.nargs == "?":
SYNOPSIS += f"[{item.dest.upper()}] "
elif item.nargs == "+":
SYNOPSIS += f"[{item.dest.upper()}]... "
else:
SYNOPSIS += f"{item.dest.upper()} "
else:
# Boolean flag
if item.nargs == 0:
if len(item.option_strings) == 1:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
elif item.nargs == "+":
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"
if doc:
return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
else:
return args, parser | ca77aad1d31287f1394678db90c0857dbdae6a43 | 3,658,827 |
from numpy import array, mean
def interact(u, v):
"""Compute element-wise mean(s) from two arrays."""
return tuple(mean(array([u, v]), axis=0)) | 9dd567568d5301dd62fcf19b7b4ac0130fc5b527 | 3,658,828 |
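# Usage sketch for interact above: element-wise mean of two equal-length vectors.
print(interact([1.0, 2.0], [3.0, 4.0]))  # (2.0, 3.0)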
def part_allocation_count(build, part, *args, **kwargs):
""" Return the total number of <part> allocated to <build> """
return build.getAllocatedQuantity(part) | 84c94ca4e1b1006e293851189d17f63fc992b420 | 3,658,829 |
def stat_threshold(Z,mce='fdr_bh',a_level=0.05,side='two',copy=True):
"""
Threshold z maps
Parameters
----------
mce: multiple comparison error correction method, should be
among of the options below. [defualt: 'fdr_bh'].
The options are from statsmodels packages:
`b`, `bonferroni` : one-step correction
`s`, `sidak` : one-step correction
`hs`, `holm-sidak` : step down method using Sidak adjustments
`h`, `holm` : step-down method using Bonferroni adjustments
`sh`, `simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
`fdr_i`, `fdr_bh` : Benjamini/Hochberg (non-negative)
`fdr_n`, `fdr_by` : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (Benjamini/Hochberg)
'fdr_tsbky' : two stage fdr correction (Benjamini/Krieger/Yekutieli)
'fdr_gbs' : adaptive step-down fdr correction (Gavrilov, Benjamini, Sarkar)
"""
if copy:
Z = Z.copy()
    if side == 'one':
        sideflag = 1
    elif side == 'two' or side == 'double':
        sideflag = 2
Idx = np.triu_indices(Z.shape[0],1)
Zv = Z[Idx]
Pv = sp.norm.cdf(-np.abs(Zv))*sideflag
[Hv,adjpvalsv] = smmt.multipletests(Pv,method = mce)[:2]
adj_pvals = np.zeros(Z.shape)
Zt = np.zeros(Z.shape)
Zv[np.invert(Hv)] = 0
Zt[Idx] = Zv
Zt = Zt + Zt.T;
adj_pvals[Idx] = adjpvalsv
adj_pvals = adj_pvals + adj_pvals.T;
adj_pvals[range(Z.shape[0]),range(Z.shape[0])] = 0
return Zt, binarize(Zt), adj_pvals | 3c582c0a59f8bd5544f8620870732562200f4f0a | 3,658,830 |
def esmf_grid(lon, lat, periodic=False, mask=None):
"""
Create an ESMF.Grid object, for constructing ESMF.Field and ESMF.Regrid.
Parameters
----------
lon, lat : 2D numpy array
Longitute/Latitude of cell centers.
Recommend Fortran-ordering to match ESMPy internal.
Shape should be ``(Nlon, Nlat)`` for rectilinear grid,
or ``(Nx, Ny)`` for general quadrilateral grid.
periodic : bool, optional
Periodic in longitude? Default to False.
Only useful for source grid.
mask : 2D numpy array, optional
Grid mask. According to the ESMF convention, masked cells
are set to 0 and unmasked cells to 1.
Shape should be ``(Nlon, Nlat)`` for rectilinear grid,
or ``(Nx, Ny)`` for general quadrilateral grid.
Returns
-------
grid : ESMF.Grid object
"""
# ESMPy expects Fortran-ordered array.
# Passing C-ordered array will slow down performance.
for a in [lon, lat]:
warn_f_contiguous(a)
warn_lat_range(lat)
# ESMF.Grid can actually take 3D array (lon, lat, radius),
# but regridding only works for 2D array
assert lon.ndim == 2, 'Input grid must be 2D array'
assert lon.shape == lat.shape, 'lon and lat must have same shape'
staggerloc = ESMF.StaggerLoc.CENTER # actually just integer 0
if periodic:
num_peri_dims = 1
else:
num_peri_dims = None
# ESMPy documentation claims that if staggerloc and coord_sys are None,
# they will be set to default values (CENTER and SPH_DEG).
# However, they actually need to be set explicitly,
# otherwise grid._coord_sys and grid._staggerloc will still be None.
grid = ESMF.Grid(
np.array(lon.shape),
staggerloc=staggerloc,
coord_sys=ESMF.CoordSys.SPH_DEG,
num_peri_dims=num_peri_dims,
)
# The grid object points to the underlying Fortran arrays in ESMF.
# To modify lat/lon coordinates, need to get pointers to them
lon_pointer = grid.get_coords(coord_dim=0, staggerloc=staggerloc)
lat_pointer = grid.get_coords(coord_dim=1, staggerloc=staggerloc)
# Use [...] to avoid overwritting the object. Only change array values.
lon_pointer[...] = lon
lat_pointer[...] = lat
# Follows SCRIP convention where 1 is unmasked and 0 is masked.
# See https://github.com/NCPP/ocgis/blob/61d88c60e9070215f28c1317221c2e074f8fb145/src/ocgis/regrid/base.py#L391-L404
if mask is not None:
# remove fractional values
mask = np.where(mask == 0, 0, 1)
# convert array type to integer (ESMF compat)
grid_mask = mask.astype(np.int32)
if not (grid_mask.shape == lon.shape):
raise ValueError(
'mask must have the same shape as the latitude/longitude'
'coordinates, got: mask.shape = %s, lon.shape = %s' % (mask.shape, lon.shape)
)
grid.add_item(ESMF.GridItem.MASK, staggerloc=ESMF.StaggerLoc.CENTER, from_file=False)
grid.mask[0][:] = grid_mask
return grid | 8087cfbf0c4923338984913dcd1a421e3a46dd29 | 3,658,831 |
def convert_to_numeral(decimal_integer: int, roman_format="brackets"):
"""Convert decimal to Roman numeral.
roman_format is a str containing either 'brackets' or 'latex'
The default option, 'brackets', converts 3,000,000,000 to [[MMM]] and
3,000,000 to [MMM].
'latex' outputs a LaTeX formula for the numeral.
"""
def barfunction_latex(prefix: str,
unbarred_string: str,
num_of_bars: int,
separator_size: int = 2):
"""Return a LaTeX-renderable representation of overline bars."""
bars_before = (r"\overline{" * num_of_bars) + r"\text{"
bars_after = r"}" + ("}" * num_of_bars)
if prefix:
separation = f"\\hspace{{{separator_size}pt}}"
else:
separation = ""
return prefix + separation + bars_before + unbarred_string + bars_after
def barfunction_brackets(prefix: str, unbarred_string: str,
num_of_bars: int):
"""Represent bars as (possibly nested) square brackets.
For example, 3,000,000,000 is converted to [[MMM]].
"""
bars_before = ("[" * num_of_bars)
bars_after = ("]" * num_of_bars)
return prefix + bars_before + unbarred_string + bars_after
def latex_surround_with_dollars(string):
"""Surround LaTeX math expression with dollar signs."""
return "$" + string + "$"
def list_occurring_roman_symbols(roman_symbols, integer_value):
"""List symbols that occur in Roman representation of number.
+ roman_symbols is [(int, str)], a list of tuples, each of which
representing one Roman symbol and its corresponding integer value.
For example, (3, 'III').
+ integer_value is the value to be converted.
Return: remainder, list_of_occurring_symbols
+ remainder: what remains from the number, which was too small to
represent with the provided symbols
+ list_of_occurring_symbols: a list of the symbols present in the Roman
representation of the number.
"""
remainder = integer_value
list_of_occurring_symbols = []
for integer_value, str_roman_symbol in roman_symbols:
repetitions, remainder = divmod(remainder, integer_value)
list_of_occurring_symbols.append(str_roman_symbol * repetitions)
return remainder, list_of_occurring_symbols
def apply_barfunction(list_of_occurring_symbols, barfunction,
numeral_string, num_of_bars):
"""Build up Roman numeral representation applying barfunction.
The barfunction is only applied if list_of_occurring_symbols is not
empty, otherwise the original numeral_string is returned untouched.
"""
unbarred_string = "".join(list_of_occurring_symbols)
if unbarred_string:
numeral_string = barfunction(numeral_string, unbarred_string,
num_of_bars)
return numeral_string
if roman_format == 'latex':
barfunction = barfunction_latex
elif roman_format == 'brackets':
barfunction = barfunction_brackets
else:
raise ValueError('roman_format should be either "latex" or "brackets"')
remainder = decimal_integer
numeral_string = ""
for symbolset in ROMAN_NUMERAL_TABLE:
num_of_bars = symbolset["bars"]
symbols = symbolset["symbols"]
remainder, list_of_occurring_symbols = list_occurring_roman_symbols(
symbols, remainder)
numeral_string = apply_barfunction(list_of_occurring_symbols,
barfunction, numeral_string,
num_of_bars)
if roman_format == 'latex':
return latex_surround_with_dollars(numeral_string)
return numeral_string | ebfd2b323879bcca9e20be0d9598104bf0f31e33 | 3,658,833 |
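# Usage sketch for convert_to_numeral above; ROMAN_NUMERAL_TABLE is defined
# elsewhere in the original module, so exact output depends on its contents.
print(convert_to_numeral(3000000000))                     # e.g. [[MMM]]
print(convert_to_numeral(3000000, roman_format="latex"))  # e.g. $\overline{\text{MMM}}$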
import numpy as np
def transpose(x):
"""Tensor transpose """
return np.transpose(x) | 286c7e36629ff8e38ad5d0233bd1f8fd823514f2 | 3,658,834 |
from typing import Optional
from typing import List
from typing import Tuple
def greedy_reduction_flat(m: Mat2) -> Optional[List[Tuple[int, int]]]:
"""Returns a list of tuples (r1,r2) that specify which row should be added to which other row
in order to reduce one row of m to only contain a single 1.
In contrast to :func:`greedy_reduction`, it preforms the brute-force search starting with the
highest indices, and places the row operations in such a way that the resulting depth is log_2
of the number of rows that have to be added together.
Used in :func:`lookahead_extract_base`"""
indicest = find_minimal_sums(m, True)
if indicest is None: return indicest
return flat_indices(m, list(indicest))[0] | 85c8098dd6e727abe64c3d1410c63161309b5135 | 3,658,835 |
def estimate_psd(vec, num_segs=DEFAULT_NUM_SEGS, overlap=DEFAULT_OVERLAP, dt=DEFAULT_DT, tukey_alpha=DEFAULT_TUKEY_ALPHA, one_sided=True):
"""
estimates the PSD using a DFT
divides vec into "num_segs" with a fractional overlap of "overlap" between neighbors
returns the average PSD from these samples (arithmetic mean)
if one_sided, returns the one-sided PSD. Otherwise, returns the two-sided PSD (one half the one-sided PSD).
WARNING: your logic on how to split segments may be fragile...
"""
N = len(vec)
if overlap > N - num_segs:
        raise ValueError("overlap is too big!")
n = N/(1. + (num_segs-1.)*(1.-overlap)) ### compute the number of entries per segment
overlap = int(n*overlap) ### compute the number of overlapping entries
n = int(n)
seglen = dt*n
### compute dfts for each segment separately
    psds = np.empty((n//2, num_segs), complex)
    for segNo in range(num_segs):
start = segNo*(n-overlap)
psds[:,segNo], freqs = dft(vec[start:start+n]*tukey(n, tukey_alpha), dt=dt)
### average
mean_psd = np.sum(psds.real**2 + psds.imag**2, axis=1) / (seglen*num_segs)
if one_sided:
mean_psd *= 2 ### multiply by 2 to account for the power at negative frequencies in the one-sided PSD
return mean_psd, freqs | 1c2d8c51bfd75d617f75dbc4aa3304c05c36e899 | 3,658,836 |