<|fim_prefix|>def <|fim_suffix|>(self):
os.chdir(self.old_cwd)<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Test preloop
"""
self.assertIn("txtai console", self.preloop())<|fim_middle|>test_preloop<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
assert Defaults.get_exclude_list_for_root_data_sync() == [
'image', '.profile', '.kconfig',
'run/*', 'tmp/*',
'.buildenv', 'var/cache/kiwi'
]
assert Defaults.get_exclude_list_for_root_data_sync(no_tmpdirs=False) == [
'image', '.profile', '.kconfig',
'.buildenv', 'var/cache/kiwi'
]<|fim_middle|>test_get_exclude_list_for_root_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(paths, jobs, split, part):
print("Using {}x parallelism".format(jobs))
# Create build folder to prevent process race
cache_dir.mkdir(exist_ok=True, parents=True)
(cache_dir / "config").write_text('{"prefix_len": 2}')
# Validate that paths exist!
invalid_paths = [p for p in paths if not Path(p).exists()]
if invalid_paths: print("Invalid paths:\n- " + "\n- ".join(invalid_paths))
results = len(invalid_paths)
# Find all project files
projects = [p for path in paths for p in Path(path).glob("**/project.xml")]
projects.sort()
# Split projects up into parts
if split > 1:
chunk_size = math.ceil(len(projects) / split)
projects = projects[chunk_size*part:min(chunk_size*(part+1), len(projects))]
# first generate all projects
with ThreadPool(jobs) as pool:
projects = pool.map(generate, projects)
results += projects.count(None)
# Filter projects for successful generation
projects = [p for p in projects if p is not None]
# Then build the successfully generated ones
with ThreadPool(jobs) as pool:
projects = pool.map(build, projects)
results += projects.count(None)
# Filter projects for successful compilation and runnability
projects = [p for p in projects if p is not None and "CI: run" in p.read_text()]
# Then run the successfully compiled ones
with ThreadPool(jobs) as pool:
projects = pool.map(run, projects)
results += projects.count(None)
return results<|fim_middle|>compile_examples<|file_separator|> |
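The record above repeats one pattern three times: map a stage over the remaining items with a ThreadPool, count None results as failures, and keep only the survivors for the next stage. A minimal, self-contained sketch of that pipeline (the stage names and toy data below are invented for illustration):

from multiprocessing.pool import ThreadPool

def run_pipeline(items, jobs, stages):
    failures = 0
    for stage in stages:
        with ThreadPool(jobs) as pool:
            results = pool.map(stage, items)
        failures += results.count(None)
        # keep only the items that survived this stage
        items = [r for r in results if r is not None]
    return failures, items

if __name__ == "__main__":
    def drop_odd(x):   # toy stage: odd numbers "fail" and return None
        return x if x % 2 == 0 else None

    def double(x):     # toy stage: transform the survivors
        return x * 2

    print(run_pipeline(list(range(10)), jobs=4, stages=[drop_odd, double]))
    # prints (5, [0, 4, 8, 12, 16])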
<|fim_prefix|>def <|fim_suffix|>(alert_rule: AlertRule, metric_value: str) -> str:
"""Returns a human readable current status of an incident"""
agg_display_key = alert_rule.snuba_query.aggregate
if CRASH_RATE_ALERT_AGGREGATE_ALIAS in alert_rule.snuba_query.aggregate:
agg_display_key = agg_display_key.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip()
agg_text = QUERY_AGGREGATION_DISPLAY.get(agg_display_key, alert_rule.snuba_query.aggregate)
if agg_text.startswith("%"):
if metric_value is not None:
metric_and_agg_text = f"{metric_value}{agg_text}"
else:
metric_and_agg_text = f"No{agg_text[1:]}"
else:
metric_and_agg_text = f"{metric_value} {agg_text}"
time_window = alert_rule.snuba_query.time_window // 60
interval = "minute" if time_window == 1 else "minutes"
# % change alerts have a comparison delta
if alert_rule.comparison_delta:
metric_and_agg_text = f"{agg_text.capitalize()} {int(metric_value)}%"
higher_or_lower = (
"higher" if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value else "lower"
)
comparison_delta_minutes = alert_rule.comparison_delta // 60
comparison_string = TEXT_COMPARISON_DELTA.get(
comparison_delta_minutes, f"same time {comparison_delta_minutes} minutes ago"
)
return _(
f"{metric_and_agg_text} {higher_or_lower} in the last {time_window} {interval} "
f"compared to the {comparison_string}"
)
return _("%(metric_and_agg_text)s in the last %(time_window)d %(interval)s") % {
"metric_and_agg_text": metric_and_agg_text,
"time_window": time_window,
"interval": interval,
}<|fim_middle|>get_incident_status_text<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Get the current config as a dict and output Javascript."""
data = 'window.config=' + json.dumps(config_p) + ';'
return Response(data, mimetype='application/javascript; charset=utf-8')<|fim_middle|>get_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> bool:
return self.signal_catcher.caught and not self.signal_catcher.killed<|fim_middle|>good<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>( options ):
old_vim_options = test_utils.VIM_OPTIONS.copy()
test_utils.VIM_OPTIONS.update( DEFAULT_CLIENT_OPTIONS )
test_utils.VIM_OPTIONS.update( options )
try:
yield
finally:
test_utils.VIM_OPTIONS = old_vim_options<|fim_middle|>user_options<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
a, theo_mean, theo_stdev, mean_tol=1e-2, stdev_tol=1.0
):
if True:
aa = np.array(a)
average = np.mean(aa)
stdev = np.std(aa)
else: # keeping this path for further investigation
average = num.mean(a)
stdev = num.sqrt(
num.mean((a - average) ** 2)
) # num.std(a) -> does not work
print(
f"average = {average} - theoretical {theo_mean}"
+ f", stdev = {stdev} - theoretical {theo_stdev}\n"
)
assert abs(theo_mean - average) < mean_tol * max(1.0, abs(theo_mean))
# the theoretical standard deviation can't be 0
assert theo_stdev != 0
# TODO: this check is not a good proxy to validating that the samples
# respect the assumed random distribution unless we draw
# extremely many samples. until we find a better validation
# method, we make the check lenient to avoid random
# failures in the CI. (we still need the check to catch
# the cases that are obviously wrong.)
assert abs(theo_stdev - stdev) / min(theo_stdev, stdev) <= stdev_tol<|fim_middle|>assert_distribution<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
#################################################################
# ok_press(self)
# Callback function
# This function will be called when the user clicks the "OK" button in the window.
#################################################################
self.quit()<|fim_middle|>ok_press<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> list[str]:
"""Return the list of branches."""
cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
return self.run_git_split(cmd)<|fim_middle|>get_branches<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
file_manager: FileManager, element_factory, merge_conflict, monkeypatch, resolution
):
replace_merge_conflict_dialog(monkeypatch, resolution)
file_manager.resolve_merge_conflict(merge_conflict)
assert element_factory.size() > 0<|fim_middle|>test_load_model_with_merge_conflict<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss<|fim_middle|>balanced_l1_loss<|file_separator|> |
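The docstring spells out the piecewise form; a quick NumPy transcription (a sketch for checking the math, not the torch implementation above) shows the two branches meeting continuously at diff == beta:

import numpy as np

def balanced_l1_np(diff, beta=1.0, alpha=0.5, gamma=1.5):
    b = np.e ** (gamma / alpha) - 1
    return np.where(
        diff < beta,
        alpha / b * (b * diff + 1) * np.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta,
    )

diff = np.array([0.25, 1.0 - 1e-9, 1.0, 2.0])
print(balanced_l1_np(diff))
# the branches agree at the threshold, so the loss is continuous there
assert np.allclose(balanced_l1_np(np.array([1.0 - 1e-9])),
                   balanced_l1_np(np.array([1.0])), atol=1e-6)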
<|fim_prefix|>def <|fim_suffix|>(state, action_str):
for action in state.legal_actions():
if action_str == state.action_to_string(state.current_player(), action):
return action
return None<|fim_middle|>get_action<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(obj, *, _memo=None, **kwargs):
# Don't bother memoizing primitive types
if isinstance(obj, (bytes, Enum, float, int, str, type(None))):
return obj
if _memo is None:
_memo = {}
if id(obj) in _memo: # Already frozen?
return _memo[id(obj)]
if hasattr(obj, "freeze"):
frozen = obj.METHOD_NAME(_memo=_memo, **kwargs)
else:
# At the moment, I don't have a need for passing extra data into
# items that live inside containers. If we're relaxing this, just
# be sure to add `**kwargs` to each `freeze()` call below.
assert kwargs == {}, kwargs
# This is a lame-o way of identifying `NamedTuple`s. Using
# `deepfrozen` would avoid this kludge.
if (
isinstance(obj, tuple)
and hasattr(obj, "_replace")
and hasattr(obj, "_fields")
and hasattr(obj, "_make")
):
frozen = obj._make(METHOD_NAME(i, _memo=_memo) for i in obj)
elif isinstance(obj, (list, tuple)):
frozen = tuple(METHOD_NAME(i, _memo=_memo) for i in obj)
elif isinstance(obj, dict):
frozen = frozendict(
{METHOD_NAME(k, _memo=_memo): METHOD_NAME(v, _memo=_memo) for k, v in obj.items()}
)
elif isinstance(obj, (set, frozenset)):
frozen = frozenset(METHOD_NAME(i, _memo=_memo) for i in obj)
elif isinstance(obj, DoNotFreeze):
frozen = obj
else:
raise NotImplementedError(type(obj))
_memo[id(obj)] = frozen
return frozen<|fim_middle|>freeze<|file_separator|> |
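A stripped-down sketch of the same recursive freezing idea, with the project-specific frozendict and DoNotFreeze types swapped for the standard library's types.MappingProxyType (that substitution is an assumption made for the sake of a runnable example); the id()-keyed memo is what keeps shared references shared:

from types import MappingProxyType

def freeze_simple(obj, _memo=None):
    if isinstance(obj, (bytes, float, int, str, type(None))):
        return obj
    if _memo is None:
        _memo = {}
    if id(obj) in _memo:          # already frozen -> reuse the same object
        return _memo[id(obj)]
    if isinstance(obj, (list, tuple)):
        frozen = tuple(freeze_simple(i, _memo) for i in obj)
    elif isinstance(obj, dict):
        frozen = MappingProxyType({freeze_simple(k, _memo): freeze_simple(v, _memo)
                                   for k, v in obj.items()})
    elif isinstance(obj, (set, frozenset)):
        frozen = frozenset(freeze_simple(i, _memo) for i in obj)
    else:
        raise NotImplementedError(type(obj))
    _memo[id(obj)] = frozen
    return frozen

shared = [1, 2]
result = freeze_simple({"a": shared, "b": shared})
assert result["a"] is result["b"]   # the memo preserves the shared reference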
<|fim_prefix|>def <|fim_suffix|>(results):
for result in results:
aspect, opinions, sentiment = result["aspect"], result["opinions"], result["sentiment_polarity"]
print(f"aspect: {aspect}, opinions: {opinions}, sentiment_polarity: {sentiment}")
print()<|fim_middle|>format_print<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
check = ConsistencyCheck()
self.assertEqual(check.check_component(self.component), [])
# Add triggering units
unit = self.add_unit(self.translation_1, "one", "One", "Jeden")
self.assertFalse(check.check_target_unit([], [], unit))
unit = self.add_unit(self.translation_2, "one", "One", "Jedna", increment=False)
self.assertTrue(check.check_target_unit([], [], unit))
self.assertNotEqual(check.check_component(self.component), [])<|fim_middle|>test_consistency<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(deps):
infos = [dep[J2ObjcMappingFileInfo] for dep in deps if J2ObjcMappingFileInfo in dep]
transitive_header_mapping_files = [info.header_mapping_files for info in infos]
transitive_class_mapping_files = [info.class_mapping_files for info in infos]
transitive_dependency_mapping_files = [info.dependency_mapping_files for info in infos]
transitive_archive_source_mapping_files = [info.archive_source_mapping_files for info in infos]
return J2ObjcMappingFileInfo(
header_mapping_files = depset([], transitive = transitive_header_mapping_files),
class_mapping_files = depset([], transitive = transitive_class_mapping_files),
dependency_mapping_files = depset([], transitive = transitive_dependency_mapping_files),
archive_source_mapping_files = depset([], transitive = transitive_archive_source_mapping_files),
)<|fim_middle|>mapping_file_provider<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Return Entity Description."""
return self.Query or self.NetworkMessageIds or self.__class__.__name__<|fim_middle|>description_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
where_clause_suffix = f"where table_schema in {SUPPORTED_SCHEMA_SQL_IN_CLAUSE}"
tmp_folder = '/var/tmp/amundsen/table_metadata'
node_files_folder = f'{tmp_folder}/nodes/'
relationship_files_folder = f'{tmp_folder}/relationships/'
job_config = ConfigFactory.from_dict({
f'extractor.athena_metadata.{AthenaMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY}': where_clause_suffix,
f'extractor.athena_metadata.extractor.sqlalchemy.{SQLAlchemyExtractor.CONN_STRING}': connection_string(),
f'extractor.athena_metadata.{AthenaMetadataExtractor.CATALOG_KEY}': "'AwsDataCatalog'",
f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.NODE_DIR_PATH}': node_files_folder,
f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.RELATION_DIR_PATH}': relationship_files_folder,
f'publisher.neo4j.{neo4j_csv_publisher.NODE_FILES_DIR}': node_files_folder,
f'publisher.neo4j.{neo4j_csv_publisher.RELATION_FILES_DIR}': relationship_files_folder,
f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_END_POINT_KEY}': neo4j_endpoint,
f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_USER}': neo4j_user,
f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_PASSWORD}': neo4j_password,
f'publisher.neo4j.{neo4j_csv_publisher.JOB_PUBLISH_TAG}': 'unique_tag', # should use unique tag here like {ds}
})
job = DefaultJob(conf=job_config,
task=DefaultTask(extractor=AthenaMetadataExtractor(), loader=FsNeo4jCSVLoader(),
transformer=NoopTransformer()),
publisher=Neo4jCsvPublisher())
job.launch()<|fim_middle|>create_table_extract_job<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(lines, numberOfLinesToRemove: int = 2):
dict = []
positions = []
keys = []
# Remove the first 2 lines of the output (don't contain necessary information)
lines = lines.split("\n", numberOfLinesToRemove)[-1]
# Detect the columns of the text table
# Simply look for three or more whitespace characters as the separator
for line in lines.splitlines():
keys = re.split(r"\s{3,}", line)
for key in keys:
pos = line.find(key)
positions.append(pos)
break
# Remove the first line (the one with the keys)
dataRows = lines.split("\n", 1)[-1]
for row in dataRows.splitlines():
i = 0
dataInRow = convertStringToJson("{}")
for key in keys:
posStart = positions[i]
posEnd = len(row)
if i + 1 < len(positions):
posEnd = positions[i + 1]
value = row[posStart:posEnd]
value = value.strip()
dataInRow = addKeyValuePair(dataInRow, key, value)
i = i + 1
dict.append(dataInRow)
json = dictToJson(dict)
json = convertStringToJson(json)
return json<|fim_middle|>convert_cloud_foundry_command_output_to_json<|file_separator|> |
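The core trick in the record above is positional: split the header on runs of whitespace to find the column offsets, then slice every data row at those offsets. A self-contained version using only the standard library (the CLI-specific helpers are replaced with json, and the sample table is made up):

import json
import re

def parse_fixed_width_table(text):
    header, *rows = text.splitlines()
    keys = re.split(r"\s{3,}", header.strip())
    positions = [header.find(key) for key in keys]
    records = []
    for row in rows:
        record = {}
        for i, key in enumerate(keys):
            end = positions[i + 1] if i + 1 < len(positions) else len(row)
            record[key] = row[positions[i]:end].strip()
        records.append(record)
    return json.dumps(records)

sample = ("name      state     instances\n"
          "my-app    started   1\n"
          "other     stopped   0")
print(parse_fixed_width_table(sample))
# [{"name": "my-app", "state": "started", "instances": "1"}, ...]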
<|fim_prefix|>def <|fim_suffix|>(s, prefix):
assert s.startswith(prefix), (prefix, s)
return s.removeprefix(prefix)<|fim_middle|>remove_prefix<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
# Empty string
stream = StringStream(b'\x00\x00\x00\x00')
reader = StreamReader(stream, False)
assert reader.get_string32() == ''
# String size but no string contents
stream = StringStream(b'\x01\x00\x00\x00')
reader = StreamReader(stream, False)
assert reader.get_string32() == ''
# String of length 1
stream = StringStream(b'\x01\x00\x00\x00A')
reader = StreamReader(stream, False)
assert reader.get_string32() == 'A'
# String with excess data
stream = StringStream(b'\x01\x00\x00\x00AB')
reader = StreamReader(stream, False)
assert reader.get_string32() == 'A'
# EOF before end of string
stream = StringStream(b'\x04\x00\x00\x00AB')
reader = StreamReader(stream, False)
assert reader.get_string32() == 'AB'
# Preserves null bytes
stream = StringStream(b'\x02\x00\x00\x00\x00\x00')
reader = StreamReader(stream, False)
assert reader.get_string32() == '\x00\x00'<|fim_middle|>test_streamreader_string32<|file_separator|> |
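The byte strings in that test are little-endian 32-bit length prefixes followed by the string bytes. A minimal reader for this layout (an illustration of the format, not the StreamReader under test; latin-1 decoding is an assumption) reproduces the expected results:

import io
import struct

def read_string32(stream):
    raw_len = stream.read(4)
    if len(raw_len) < 4:
        return ''
    (length,) = struct.unpack('<I', raw_len)      # little-endian uint32 prefix
    data = stream.read(length)                    # a short stream just yields fewer bytes
    return data.decode('latin-1')

print(repr(read_string32(io.BytesIO(b'\x00\x00\x00\x00'))))          # ''
print(repr(read_string32(io.BytesIO(b'\x01\x00\x00\x00AB'))))        # 'A' (excess ignored)
print(repr(read_string32(io.BytesIO(b'\x04\x00\x00\x00AB'))))        # 'AB' (EOF before end)
print(repr(read_string32(io.BytesIO(b'\x02\x00\x00\x00\x00\x00'))))  # '\x00\x00'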
<|fim_prefix|>def <|fim_suffix|>(string):
return isinstance(string, text_type)<|fim_middle|>is_unicode_str<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self):
python = self.dir / 'venv' / 'bin' / 'python'
port = _scan_free_port()
logger.info(str(python))
self.process = await asyncio.create_subprocess_exec(
str(python), '-m', 'uvicorn',
'--port', str(port),
'--app-dir', str(self.dir),
'--reload',
f'app:main',
env=dict(H2O_WAVE_APP_ADDRESS=f'http://{_localhost}:{port}')
)<|fim_middle|>start<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
METHOD_NAME = self.base_dist.METHOD_NAME()
return _sum_rightmost(METHOD_NAME, self.reinterpreted_batch_ndims)<|fim_middle|>entropy<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(date, default_tz=None):
"""
Returns a non-naive datetime.datetime.
Interprets numbers as ms-since-epoch.
Parameters
----------
date : `int` or `datetime.datetime`
The datetime to convert
default_tz : tzinfo
The TimeZone to use if none is found. If not supplied, and the
datetime doesn't have a timezone, then we raise ValueError
Returns
-------
Non-naive datetime
"""
if isinstance(date, (int, long)):
return ms_to_datetime(date, default_tz)
elif date.tzinfo is None:
if default_tz is None:
raise ValueError("Must specify a TimeZone on incoming data")
return date.replace(tzinfo=default_tz)
return date<|fim_middle|>to_dt<|file_separator|> |
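A dependency-free illustration of the same conversion rules, assuming integers mean milliseconds since the epoch and substituting UTC where the original delegates to its ms_to_datetime helper (the Python 2 `long` branch is dropped):

from datetime import datetime, timezone

def to_dt_simple(value, default_tz=None):
    if isinstance(value, int):
        # interpret as ms-since-epoch; fall back to UTC if no zone is given
        return datetime.fromtimestamp(value / 1000.0, tz=default_tz or timezone.utc)
    if value.tzinfo is None:
        if default_tz is None:
            raise ValueError("Must specify a TimeZone on incoming data")
        return value.replace(tzinfo=default_tz)
    return value

print(to_dt_simple(1_500_000_000_000))                             # 2017-07-14 02:40:00+00:00
print(to_dt_simple(datetime(2020, 1, 1), default_tz=timezone.utc)) # 2020-01-01 00:00:00+00:00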
<|fim_prefix|>def <|fim_suffix|>(mol, expected_cgmp, label, mtype):
mol.update_geometry()
dAB = mol.to_dict()
test_chgmult(expected_cgmp['AB'], dAB, label + ' AB')
mAB = mtype.from_dict(dAB)
qcdb.compare_molrecs(dAB, mAB.to_dict(), label + ' AB roundtrip', atol=1.e-6)
aB = mol.extract_subsets(2, 1)
daB = aB.to_dict()
test_chgmult(expected_cgmp['aB'], daB, label + ' aB')
maB = mtype.from_dict(daB)
qcdb.compare_molrecs(daB, maB.to_dict(), label + ' aB roundtrip', atol=1.e-6)
Ab = mol.extract_subsets(1, 2)
dAb = Ab.to_dict()
test_chgmult(expected_cgmp['Ab'], dAb, label + ' Ab')
mAb = mtype.from_dict(dAb)
qcdb.compare_molrecs(dAb, mAb.to_dict(), label + ' Ab roundtrip', atol=1.e-6)
A_ = mol.extract_subsets(1)
dA_ = A_.to_dict()
test_chgmult(expected_cgmp['A_'], dA_, label + ' A_')
mA_ = mtype.from_dict(dA_)
qcdb.compare_molrecs(dA_, mA_.to_dict(), label + ' A_ roundtrip', atol=1.e-6)
_B = mol.extract_subsets(2)
d_B = _B.to_dict()
test_chgmult(expected_cgmp['_B'], d_B, label + ' _B')
m_B = mtype.from_dict(d_B)
qcdb.compare_molrecs(d_B, m_B.to_dict(), label + ' _B roundtrip', atol=1.e-6)
qcdb.compare_integers(True, type(mol) == mtype, label + ': AB type')
qcdb.compare_integers(True, type(Ab) == mtype, label + ': Ab type')<|fim_middle|>test_dimer<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(recorder):
result_df = fmp_model.get_rating(symbol="TSLA")
recorder.capture(result_df)<|fim_middle|>test_get_rating<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(username, data):
if not data:
element = Element('span')
element.text = username
return element
rating = data[1]
element = Element('a', {'class': 'rate-group', 'href': reverse('user_page', args=[username])})
if rating:
rating_css = rating_class(rating)
rate_box = Element('span', {'class': 'rate-box ' + rating_css})
rate_box.append(Element('span', {'style': 'height: %3.fem' % rating_progress(rating)}))
user = Element('span', {'class': 'rating ' + rating_css})
user.text = username
element.append(rate_box)
element.append(user)
else:
element.text = username
return element<|fim_middle|>get_user_rating<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
from kolibri.core.auth.models import FacilityUser
return FacilityUser.objects.all().values_list("id", flat=True)<|fim_middle|>learner_ids<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(nuc_data, build_dir=""):
"""Adds the NDS fission yields to the nuc_data library.
Parameters
----------
nuc_data : str
Path to nuclide data file.
"""
build_filename = os.path.join(build_dir, "nds-fpyield.html")
with open(build_filename, "rb") as f:
raw_data = f.read().decode("iso-8859-1")
spdat = raw_data.split("<table>")
alldata = []
for i in range(1, 31, 5):
alldata.append(readtable(i, spdat))
alldata = numpy.lib.recfunctions.stack_arrays(alldata, asrecarray=True)
db = tb.open_file(nuc_data, "a", filters=BASIC_FILTERS)
if not hasattr(db.root, "neutron"):
neutron_group = db.create_group("/", "neutron", "Neutron Data")
fpy_table = db.create_table(
"/neutron/",
"nds_fission_products",
alldata,
"NDS Fission Product Yields, percent [unitless]",
)
fpy_table.flush()
db.close()<|fim_middle|>make_fpy_table<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertEqual(
pikaur("-Qs pkg --repo").returncode, 1,
)
self.assertEqual(
pikaur("-Qs pkg --aur").returncode, 1,
)<|fim_middle|>test_incompatible_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(c, quotetabs, header):
"""Decide whether a particular byte ordinal needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
assert isinstance(c, bytes)
if c in b' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == b'_':
return header
return c == ESCAPE or not (b' ' <= c <= b'~')<|fim_middle|>needsquoting<|file_separator|> |
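That per-byte check mirrors the helper inside the standard library's quopri module; for context, the module's high-level API applies the same rules end to end:

import quopri

raw = b'caf\xe9 ends with a space '
encoded = quopri.encodestring(raw, quotetabs=True)
print(encoded)                               # spaces and the non-ASCII byte become =XX escapes
assert quopri.decodestring(encoded) == raw   # and the encoding round-trips losslessly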
<|fim_prefix|>def <|fim_suffix|>(*method_args):
self, args = method_args
if start_trading and not LiveEventArgsValidator(
self._strategy.log.error).are_start_trading_event_args_valid(args):
return
if not start_trading and not LiveEventArgsValidator(
self._strategy.log.error).are_stop_trading_event_args_valid(args):
return
func(self, args)<|fim_middle|>validate_event<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, env_var_value, flag_value, expect_seg_fault):<|fim_middle|>run_and_verify<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(gnmi_out, certs_out):
""" Extracts dictionary from redis output.
"""
gnmi_list = []
gnmi_list = get_list_stdout(gnmi_out) + get_list_stdout(certs_out)
# Elements in list alternate between key and value. Separate them and combine into a dict.
key_list = gnmi_list[0::2]
value_list = gnmi_list[1::2]
params_dict = dict(list(zip(key_list, value_list)))
return params_dict<|fim_middle|>get_dict_stdout<|file_separator|> |
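The only non-obvious step above is turning a flat, alternating key/value list into a dict; in isolation (with made-up field names):

flat = ["client_auth", "true", "log_level", "DEBUG", "port", "50051"]
params = dict(zip(flat[0::2], flat[1::2]))   # keys at even indices, values at odd ones
print(params)   # {'client_auth': 'true', 'log_level': 'DEBUG', 'port': '50051'}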
<|fim_prefix|>def <|fim_suffix|>(
interp, oldx, oldg, oldy, newx, newg, method="constant"
): # noqa
"""Apply extrapolation to the output of interpolation on quantiles with a given grouping.
Arguments are the same as _interp_on_quantiles_2D.
"""
bnds = _first_and_last_nonnull(oldx)
xp = np.arange(bnds.shape[0])
toolow = newx < np.interp(newg, xp, bnds[:, 0])
toohigh = newx > np.interp(newg, xp, bnds[:, 1])
if method == "constant":
constants = _first_and_last_nonnull(oldy)
cnstlow = np.interp(newg, xp, constants[:, 0])
cnsthigh = np.interp(newg, xp, constants[:, 1])
interp[toolow] = cnstlow[toolow]
interp[toohigh] = cnsthigh[toohigh]
else: # 'nan'
interp[toolow] = np.NaN
interp[toohigh] = np.NaN
return interp<|fim_middle|>extrapolate_on_quantiles<|file_separator|> |
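Worth noting for the record above: np.interp clamps to its first and last y-values outside the given x-range, which is the behaviour the per-group bound and constant lookups rely on. A tiny demonstration:

import numpy as np

xp = np.array([0.0, 1.0, 2.0])
fp = np.array([10.0, 20.0, 30.0])
# inside the range it interpolates linearly; outside it clamps to the end values
print(np.interp([-1.0, 0.5, 5.0], xp, fp))   # [10. 15. 30.]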
<|fim_prefix|>def <|fim_suffix|>(self):
x = Entry(lambda x: 1)
y = Entry(lambda x: 1)
assert x == x
assert x != y<|fim_middle|>test_eq<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
app, client, librarian_martigny,
acq_order_fiction_martigny,
acq_order_line_fiction_martigny,
acq_order_line2_fiction_martigny
):
"""Test order notification preview API."""
login_user_via_session(client, librarian_martigny.user)
acor = acq_order_fiction_martigny
url = url_for('api_order.order_notification_preview', order_pid=acor.pid)
res = client.get(url)
assert res.status_code == 200
data = res.json
assert 'recipient_suggestions' in data and 'preview' in data
assert 'message' not in data
# update the vendor communication_language to force it to an unknown
# related template and retry.
with mock.patch.object(VendorAcquisitionNotificationDumper, 'dump',
mock.MagicMock(return_value={
'name': 'test vendor name',
'email': 'test@vendor.com',
'language': 'dummy'
})):
response = client.get(url)
assert response.status_code == 200
assert all(field in response.json
for field in ['recipient_suggestions', 'preview'])<|fim_middle|>test_order_notification_preview<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(s):
return isinstance(s, str)<|fim_middle|>is_unicode<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, *args, **options):
if options['syntax']:
usage()
exit(1)
abstract = 'No abstract'
title = 'No title'
subjects = 'foo,bar'
subjects = tuple(subjects.split(','))
access = 'private'
oname = 'admin'
owner = user_from_name('admin')
files = None
while options['command']:
command = options['command'].pop(0)
if command == 'title':
title = options['command'].pop(0)
elif command == 'abstract':
abstract = options['command'].pop(0)
elif command == 'subjects':
subjects = options['command'].pop(0)
subjects = tuple(subjects.split(','))
elif command == 'files':
files = options['command'].pop(0)
files = files.split(',')
elif command == 'access':
access = options['command'].pop(0)
if access not in ['private', 'discoverable', 'public']:
print("unrecognized access '{}'".format(access))
usage()
exit(1)
elif command == 'owner':
oname = options['command'].pop(0)
else:
owner = user_from_name(oname)
if owner is None:
print("no owner '{}'".format(oname))
usage()
exit(1)
print("unknown resource attribute '{}'".format(command))
print("creating resource with attributes:")
print(" title={}".format(title))
print(" abstract={}".format(abstract))
print(" subjects={}".format(subjects))
print(" owner={}".format(oname))
print(" access={}".format(access))
print(" files={}".format(files))
fds = tuple([open(f, 'rb') for f in files])
metadata_dict = [
{'description': {'abstract': abstract}},
]
res = create_resource(
resource_type='CompositeResource',
owner=owner,
title=title,
keywords=subjects,
metadata=metadata_dict,
files=fds
)
if access == 'discoverable':
res.set_discoverable(True)
elif access == 'public':
res.set_public(True)
# default is 'private'
print(res.short_id)<|fim_middle|>handle<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)<|fim_middle|>i_n_classd<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, root, *subdir):
ret = None
if subdir:
ret = os.path.join(root, *subdir)
else:
ret = root
return ret<|fim_middle|>build_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(complete_conf):
connect_config = connect_generator.generate_config(complete_conf)
write_file(KAFKA_CONNECT_CFG_PATH, connect_config)
connect_logging = loggers_generator.generate_kafka_connect_logging_config(
complete_conf
)
write_file(LOG4J_CFG_PATH, connect_logging)<|fim_middle|>setup_configs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")<|fim_middle|>last_modified_by<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, filename, disable_constraints=False):
self.ips.update({os.path.abspath(filename): disable_constraints})<|fim_middle|>add_ip<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> str:
"""
Get ID of currently booted deployment
"""
stdout = remote_sudo("rpm-ostree status --booted --json")
return DataFormats.json_parse(stdout)["deployments"][0]["id"]<|fim_middle|>get_booted_deployment_id<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return ["install", "prefix={0}".format(self.prefix)] + self.common_make_opts<|fim_middle|>install_targets<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
version = Version(self.version)
aravis_name = f"aravis-{version.major}.{version.minor}"
self.cpp_info.set_property("pkg_config_name", aravis_name)
self.cpp_info.includedirs = [os.path.join("include", aravis_name)]
self.cpp_info.libs = [aravis_name]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.extend(["dl", "pthread", "m", "resolv"])
elif self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi"])
if self.options.gst_plugin and self.options.shared:
gst_plugin_path = os.path.join(self.package_folder, "lib", "gstreamer-1.0")
self.runenv_info.prepend_path("GST_PLUGIN_PATH", gst_plugin_path)
if self.options.tools:
self.buildenv_info.prepend_path("GST_PLUGIN_PATH", gst_plugin_path)
self.env_info.GST_PLUGIN_PATH.append(gst_plugin_path)
if self.options.tools:
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))<|fim_middle|>package_info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, tag):
"""Reorder a tag's attributes however you want.
By default, attributes are sorted alphabetically. This makes
behavior consistent between Python 2 and Python 3, and preserves
backwards compatibility with older versions of Beautiful Soup.
If `empty_boolean_attributes` is True, then attributes whose
values are set to the empty string will be treated as boolean
attributes.
"""
if tag.attrs is None:
return []
return sorted(
(k, (None if self.empty_attributes_are_booleans and v == '' else v))
for k, v in list(tag.attrs.items())
)<|fim_middle|>attributes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(filename):
base, extension = os.path.splitext(filename)
extension = extension[1:]
type = extensionList.get(extension, "")
return type, extension<|fim_middle|>analyze_file_type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, v1, v2):
floatTypes = [ 'http://www.w3.org/2001/XMLSchema#float','http://www.w3.org/2001/XMLSchema#decimal','http://www.w3.org/2001/XMLSchema#double']
integerTypes = ['http://www.w3.org/2001/XMLSchema#integer']
if v1.datatype in floatTypes and v2.datatype in integerTypes:
return False
elif v2.datatype in floatTypes and v1.datatype in integerTypes:
return False
else:
return True<|fim_middle|>number_type_mismatch<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(obj: Any) -> Any:
"""
Required to handle the situation when multiple references to one and the
same tensor are present in the input. If tensor replication is not done, then
at runtime one and the same tensor could be wrapped by input/output wrappers twice,
which will disrupt the traced graph structure and possibly hook calls.
"""
observed_tensor_object_ids = set() # type: Set[int]
def replicate_fn(tensor: torch.Tensor) -> torch.Tensor:
tensor_object_id = id(tensor)
if tensor_object_id in observed_tensor_object_ids:
with forward_nncf_trace():
return tensor.clone()
observed_tensor_object_ids.add(tensor_object_id)
return tensor
obj = objwalk(obj, is_tensor, replicate_fn)
return obj<|fim_middle|>replicate_same_tensors<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
agent_url = get_trace_url()
_conn = get_connection(agent_url, timeout=get_trace_agent_timeout())
try:
_conn.request("GET", "info", headers={"content-type": "application/json"})
resp = _conn.getresponse()
data = resp.read()
finally:
_conn.close()
if resp.status == 404:
# Remote configuration is not enabled or unsupported by the agent
return None
if resp.status < 200 or resp.status >= 300:
log.warning("Unexpected error: HTTP error status %s, reason %s", resp.status, resp.reason)
return None
return json.loads(ensure_str(data))<|fim_middle|>info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
args: Namespace, slither: SlitherCompilationUnit, kspec_functions: Set[Tuple[str, str]]
) -> None:
# Collect all slither functions
slither_functions = _get_slither_functions(slither)
# Determine which klab specs were not resolved.
slither_functions_set = set(slither_functions)
kspec_functions_resolved = kspec_functions & slither_functions_set
kspec_functions_unresolved: Set[Tuple[str, str]] = kspec_functions - kspec_functions_resolved
kspec_missing: List[Union[FunctionContract, StateVariable]] = []
kspec_present: List[Union[FunctionContract, StateVariable]] = []
for slither_func_desc in sorted(slither_functions_set):
slither_func = slither_functions[slither_func_desc]
if slither_func_desc in kspec_functions:
kspec_present.append(slither_func)
else:
kspec_missing.append(slither_func)
logger.info("## Check for functions coverage")
json_kspec_present = _generate_output(kspec_present, "[✓]", green, args.json)
json_kspec_missing_functions = _generate_output(
[f for f in kspec_missing if isinstance(f, FunctionContract)],
"[ ] (Missing function)",
red,
args.json,
)
json_kspec_missing_variables = _generate_output(
[f for f in kspec_missing if isinstance(f, StateVariable)],
"[ ] (Missing variable)",
yellow,
args.json,
)
json_kspec_unresolved = _generate_output_unresolved(
kspec_functions_unresolved, "[ ] (Unresolved)", yellow, args.json
)
# Handle unresolved kspecs
if args.json:
output.output_to_json(
args.json,
None,
{
"functions_present": json_kspec_present,
"functions_missing": json_kspec_missing_functions,
"variables_missing": json_kspec_missing_variables,
"functions_unresolved": json_kspec_unresolved,
},
)<|fim_middle|>run_coverage_analysis<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self,m):
'''
set EMC mode if possible, else throw LinuxcncError
return current mode
'''
self.s.poll()
if self.s.task_mode == m :
return m
if self.running(do_poll=False):
raise LinuxcncError("interpreter running - can not change mode")
self.c.mode(m)
self.c.wait_complete()
return m<|fim_middle|>set_mode<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Total energy exported in Wh"""
return _RamdiskFile("einspeisungkwh", _float_coder)<|fim_middle|>energy_export<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, endpoint, pid_value=None):
record = get_record_from_legacy(pid_value)
if not record:
abort(404)
serializer = self._get_serializer_from_endpoint(endpoint)
serialized_record = serializer().dump(record)
return jsonify({'data': serialized_record.data})<|fim_middle|>get<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, field_to_split: str = None, split_fields: List[str] = None,
split_proportions: List[float] = None, split_seed: int = None, stratify: bool = None) -> bool:
"""
Split given field of dataset to the given list of fields with corresponding proportions
Args:
field_to_split: field name (out of ``"train", "valid", "test"``) which to split
split_fields: list of names (out of ``"train", "valid", "test"``) of fields to which split
split_proportions: corresponding proportions
split_seed: random seed for splitting dataset
stratify: whether to use stratified split
Returns:
True
"""
if split_seed is None:
split_seed = self.random.randint(0, 10000)
data_to_div = self.data[field_to_split].copy()
data_size = len(self.data[field_to_split])
for i in range(len(split_fields) - 1):
if stratify:
stratify = [sample[1] for sample in data_to_div]
self.data[split_fields[i]], data_to_div = train_test_split(
data_to_div,
test_size=len(data_to_div) - int(data_size * split_proportions[i]),
random_state=split_seed,
stratify=stratify)
self.data[split_fields[-1]] = data_to_div
return True<|fim_middle|>split_data<|file_separator|> |
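A reduced sketch of the same proportional splitting loop, assuming scikit-learn's train_test_split and a plain list instead of the class's data dict (the field names and sizes here are invented):

from sklearn.model_selection import train_test_split

data = list(range(100))
proportions = [0.7, 0.2, 0.1]
fields = {}
remaining, total = data, len(data)
for name, frac in zip(["train", "valid"], proportions[:-1]):
    fields[name], remaining = train_test_split(
        remaining,
        test_size=len(remaining) - int(total * frac),   # keep total*frac items in this field
        random_state=42,
    )
fields["test"] = remaining
print({k: len(v) for k, v in fields.items()})   # {'train': 70, 'valid': 20, 'test': 10}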
<|fim_prefix|>def <|fim_suffix|>(head_outputs: dict[str, list[Tensor]], keys: list[str] | None = None) -> None:
"""
An in-place function. We expect ``head_outputs`` to be Dict[str, List[Tensor]].
Yet if it is Dict[str, Tensor], this func converts it to Dict[str, List[Tensor]].
It will be modified in-place.
Args:
head_outputs: a Dict[str, List[Tensor]] or Dict[str, Tensor], will be modified in-place
keys: the keys in head_output that need to have value type List[Tensor]. If not provided, will use head_outputs.keys().
"""
if keys is None:
keys = list(head_outputs.keys())
for k in keys:
value_k = head_outputs[k] # Tensor or List[Tensor]
# convert value_k to List[Tensor]
if isinstance(value_k, Tensor):
head_outputs[k] = [value_k]
elif isinstance(value_k[0], Tensor):
head_outputs[k] = list(value_k)
else:
raise ValueError("The output of network should be Dict[str, List[Tensor]] or Dict[str, Tensor].")<|fim_middle|>ensure_dict_value_to_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, arg: str) -> bool | None: ...<|fim_middle|>do_tbreak<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request<|fim_middle|>prepare_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls):
return cls.models<|fim_middle|>get_models<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> "FileScope":
return self.contract.METHOD_NAME<|fim_middle|>file_scope<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
xa = self.prog["drgn_test_xarray_one_at_zero"].address_of_()
self.assertIdentical(xa_load(xa, 0), Object(self.prog, "void *", 0x1234))
self.assertIdentical(xa_load(xa, 1), NULL(self.prog, "void *"))
self.assertIdentical(xa_load(xa, 100000), NULL(self.prog, "void *"))<|fim_middle|>test_xa_load_one_at_zero<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(size=None):
"""
Run window widget alone
"""
from peacock.ExodusViewer.ExodusPluginManager import ExodusPluginManager
from .plugins.VTKWindowPlugin import VTKWindowPlugin
from .plugins.FilePlugin import FilePlugin
from .plugins.BlockPlugin import BlockPlugin
from .plugins.GoldDiffPlugin import GoldDiffPlugin
from .plugins.ColorbarPlugin import ColorbarPlugin
from .plugins.MeshPlugin import MeshPlugin
from .plugins.BackgroundPlugin import BackgroundPlugin
from .plugins.ClipPlugin import ClipPlugin
from .plugins.ContourPlugin import ContourPlugin
from .plugins.OutputPlugin import OutputPlugin
from .plugins.CameraPlugin import CameraPlugin
from .plugins.MediaControlPlugin import MediaControlPlugin
plugins = [lambda: VTKWindowPlugin(size=size),
FilePlugin,
BlockPlugin,
MediaControlPlugin,
GoldDiffPlugin,
ColorbarPlugin,
MeshPlugin,
ClipPlugin,
ContourPlugin,
CameraPlugin,
BackgroundPlugin,
OutputPlugin]
widget = ExodusPluginManager(plugins=plugins)
main_window = QtWidgets.QMainWindow()
main_window.setCentralWidget(widget)
menubar = main_window.menuBar()
menubar.setNativeMenuBar(False)
widget.addToMainMenu(menubar)
main_window.show()
return widget, main_window<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(file_assoc_widget):
qtbot, widget = file_assoc_widget
# Test data
assert widget.data == widget.test_data
# Test add invalid associations
extension = 'blooper.foo,'
def interact_with_dialog_1():
qtbot.keyClicks(widget._dlg_input.lineedit, extension)
assert widget._dlg_input.lineedit.text() == extension
assert not widget._dlg_input.button_ok.isEnabled()
qtbot.keyClick(widget._dlg_input.button_cancel, Qt.Key_Return)
_ = create_timer(interact_with_dialog_1)
qtbot.mouseClick(widget.button_add, Qt.LeftButton)
# Test add valid association
extension = '*.zpam,MANIFEST.in'
def interact_with_dialog_2():
qtbot.keyClicks(widget._dlg_input.lineedit, extension)
qtbot.keyClick(widget._dlg_input.button_ok, Qt.Key_Return)
_ = create_timer(interact_with_dialog_2)
qtbot.mouseClick(widget.button_add, Qt.LeftButton)
assert widget.list_extensions.count() == 3
assert widget.list_extensions.item(2).text() == extension
# Test add invalid association programmatically
widget.add_association(value='mehh')
assert widget.list_extensions.count() == 3
# Test add valid association programmatically
widget.add_association(value='*.boom')
assert widget.list_extensions.count() == 4
# Test add repeated association programmatically
widget.add_association(value='*.csv')
assert widget.list_extensions.count() == 4
widget._add_association(value='*.csv')
assert widget.list_extensions.count() == 4
# Test edit association
extension = '*.zpam'
def interact_with_dialog_3():
widget._dlg_input.lineedit.clear()
qtbot.keyClicks(widget._dlg_input.lineedit, extension)
qtbot.keyClick(widget._dlg_input.button_ok, Qt.Key_Return)
_ = create_timer(interact_with_dialog_3)
qtbot.mouseClick(widget.button_edit, Qt.LeftButton)
assert widget.list_extensions.count() == 4
assert widget.list_extensions.item(2).text() == extension
# Test remove associations
qtbot.mouseClick(widget.button_remove, Qt.LeftButton)
assert widget.list_extensions.count() == 3
# Test set default
widget.list_applications.setCurrentRow(1)
qtbot.mouseClick(widget.button_default, Qt.LeftButton)
assert 'App name 2' in widget.list_applications.item(0).text()
# Test add application
def interact_with_dialog_4():
assert not widget._dlg_applications.button_ok.isEnabled()
count = widget._dlg_applications.list.count()
if count > 0:
widget._dlg_applications.list.setCurrentRow(count - 1)
qtbot.keyClick(widget._dlg_applications.button_ok, Qt.Key_Return)
else:
qtbot.keyClick(widget._dlg_applications.button_cancel,
Qt.Key_Return)
_ = create_timer(interact_with_dialog_4)
qtbot.mouseClick(widget.button_add_application, Qt.LeftButton)
count = widget.list_applications.count()
assert count in [2, 3]
# Test add repeated application programmatically
app_name, app_path = widget.test_data['*.csv'][0]
widget._add_application(app_name, app_path)
count = widget.list_applications.count()
assert count in [2, 3]
# Test remove application
widget.list_applications.setCurrentRow(0)
qtbot.mouseClick(widget.button_remove_application, Qt.LeftButton)
count = widget.list_applications.count()
assert count in [1, 2]
assert 'App name 1' in widget.list_applications.item(0).text()<|fim_middle|>test_file_assoc_widget<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, key="", **kwargs):
"""Stores POST26 definitions and data during active session.
APDL Command: KEEP
Parameters
----------
key
State or value
On or 1 - Allows you to exit and reenter /POST26 without losing your current time history
variable information. Keeps a cache of the /POST26
variable information including the active file name
(FILE), variable definitions (NSOL, ESOL, GAPF, RFORCE,
SOLU, and EDREAD) and stored variable data in memory for
the current ANSYS session.
Off or 0 - /POST26 variable information is deleted when you exit /POST26.
Notes
-----
Your variable information is saved in memory only for the current
active ANSYS session. It is deleted when you exit ANSYS. This
information is also deleted when you issue /CLEAR, RESUME, SOLVE, or
RESET.
When you reenter /POST26 all time history variable data is available
for use. When you issue STORE,NEW, variable definitions created by math
operations such as ADD or PROD will not be restored. However, variables
defined with NSOL, ESOL, GAPF, RFORCE, SOLU, and EDREAD will be
restored. Only the last active results file name is kept in memory
(FILE).
Commands such as LAYERP26, SHELL, and FORCE that specify the location
or a component of data to be stored will retain the setting at the time
of exiting /POST26 .
"""
command = f"KEEP,{key}"
return self.run(command, **kwargs)<|fim_middle|>keep<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(ext_dir, ext_file, ext_name):
# Modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/extension.py#L89
WHL_METADATA_FILENAME = 'metadata.json'
zip_ref = zipfile.ZipFile(ext_file, 'r')
zip_ref.extractall(ext_dir)
zip_ref.close()
metadata = {}
dist_info_dirs = [f for f in os.listdir(ext_dir) if f.endswith('.dist-info')]
azext_metadata = _get_azext_metadata(ext_dir)
if not azext_metadata:
raise ValueError('azext_metadata.json for Extension "{}" Metadata is missing'.format(ext_name))
metadata.update(azext_metadata)
for dist_info_dirname in dist_info_dirs:
parsed_dist_info_dir = WHEEL_INFO_RE(dist_info_dirname)
if parsed_dist_info_dir and parsed_dist_info_dir.groupdict().get('name') == ext_name.replace('-', '_'):
whl_metadata_filepath = os.path.join(ext_dir, dist_info_dirname, WHL_METADATA_FILENAME)
if os.path.isfile(whl_metadata_filepath):
with open(whl_metadata_filepath) as f:
metadata.update(json.load(f))
return metadata<|fim_middle|>get_ext_metadata<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(modules: list) -> list:
return frappe.db.get_all("DocType", filters={"module": ("in", modules)}, pluck="name")<|fim_middle|>get_doctypes_by_modules<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(op: Type, i: int, m: OpMethod): # pylint: disable=invalid-name
register_op(ty, op, i)(m)<|fim_middle|>r<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, vpc_pcx_id: str) -> VPCPeeringConnection:
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
# for cross-region peering, the accepter must come from the peer region
pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: # type: ignore[attr-defined]
raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region) # type: ignore[attr-defined]
if vpc_pcx._status.code != "pending-acceptance":
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.accept()
return vpc_pcx<|fim_middle|>accept_vpc_peering_connection<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(mapdl, contact_geom_and_mesh):
assert mapdl.mesh.n_node > 0
assert mapdl.mesh.n_elem > 0
assert mapdl.mesh.nnum_all.size > 0
assert mapdl.mesh.enum_all.size > 0
assert mapdl.mesh.nnum.size > 0
assert mapdl.mesh.enum.size > 0
assert mapdl.mesh.nodes.size > 0
# assert mapdl.mesh.node_angles.size > 0 Not implemented
# This should be a list of arrays.
assert len(mapdl.mesh.elem) > 0
assert mapdl.mesh.elem[0].size > 0
# Using size because it should be non-empty arrays
assert mapdl.mesh.ekey.size > 0
assert mapdl.mesh.et_id.size > 0
assert mapdl.mesh.tshape.size > 0
assert mapdl.mesh.material_type.size > 0
assert mapdl.mesh.etype.size > 0
assert mapdl.mesh.section.size > 0
assert mapdl.mesh.element_coord_system.size > 0
assert mapdl.mesh.elem_real_constant.size > 0
# should be non empty dicts
assert mapdl.mesh.key_option
assert len(mapdl.mesh.key_option.keys()) > 0
assert mapdl.mesh.tshape_key
assert len(mapdl.mesh.tshape_key.keys()) > 0
# assert mapdl.mesh.element_components #Not implemented
# assert mapdl.mesh.node_components # Not implemented
# bools
assert mapdl.mesh._has_elements
assert mapdl.mesh._has_nodes
# Others
assert isinstance(mapdl.mesh.grid, pv.UnstructuredGrid)
assert mapdl.mesh.grid.n_cells > 0
assert mapdl.mesh.grid.n_points > 0<|fim_middle|>test_non_empty_mesh<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request):
"""Get configurations from the module."""
return request.param<|fim_middle|>get_configuration<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
raise SkipTest("Not supported")<|fim_middle|>test_dataset_sort_vdim_hm_alias<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(tmp_path: pathlib.Path) -> None:
def func(*args: int) -> None:
pass # pragma: no cover
funcs = api.ast.extract(source.make_file(tmp_path, func))
assert funcs == {
'func': api.Parameters(
parameters=[], variadic_args=True, variadic_kwargs=False, line=1
)
}<|fim_middle|>test_extract_variadic_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return """\
color
Sets the text font color of unselected points, applied
only when a selection exists.
"""<|fim_middle|>prop_descriptions<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
order_with_lines, address_usa, shipping_zone, site_settings
):
method = shipping_zone.shipping_methods.get()
order_with_lines.shipping_address = order_with_lines.billing_address.get_copy()
order_with_lines.shipping_method_name = method.name
order_with_lines.shipping_method = method
order_with_lines.save()
site_settings.company_address = address_usa
site_settings.save()
config = AvataxConfiguration(
username_or_account=os.environ.get("AVALARA_USERNAME", ""),
password_or_license=os.environ.get("AVALARA_PASSWORD", ""),
use_sandbox=True,
from_street_address="Tęczowa 7",
from_city="WROCŁAW",
from_postal_code="53-601",
from_country="PL",
)
request_data = get_order_request_data(order_with_lines, config)
transaction_url = urljoin(
get_api_url(config.use_sandbox), "transactions/createoradjust"
)
api_post_request_task(
transaction_url, request_data, asdict(config), order_with_lines.id
)<|fim_middle|>test_api_post_request_task_sends_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(service: str) -> str:
normalized_service = service
if service.lower().startswith("http://"):
normalized_service = service[len("http://") :]
elif service.lower().startswith("https://"):
normalized_service = service[len("https://") :]
return normalized_service<|fim_middle|>normalize_service<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, doc):
properties = self.toRST_properties_get()
msg = ", ".join(" **%s** (%s)" % (x, y) for x, y in properties.items())
doc.write(msg + "\n\n")<|fim_middle|>to_rs_t_properties<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(plugin_instance, settings_module):
# Instead of just adding the module path to the settings
# we instantiate an app config object for the plugin
# and explicitly set its label to its module path.
# This way, there is no way for a plugin to collide in its
# label in the Django App Registry with kolibri core apps
# or Kolibri core plugins.
app_config = AppConfig.create(plugin_instance.module_path)
app_config.label = plugin_instance.module_path
app_config.kolibri_plugin = True
# Register the plugin as an installed app
_set_setting_value("INSTALLED_APPS", (app_config,), settings_module)
plugin_instance.INSTALLED_APPS.append(app_config)
# Add in the external plugins' locale paths. Our frontend messages depends
# specifically on the value of LOCALE_PATHS to find its catalog files.
if is_external_plugin(
plugin_instance.module_path
) and i18n.get_installed_app_locale_path(plugin_instance.module_path):
_set_setting_value(
"LOCALE_PATHS",
(i18n.get_installed_app_locale_path(plugin_instance.module_path),),
settings_module,
)<|fim_middle|>apply_base_settings<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(labels, match_indices, mask, mask_val):
"""Gather based on batched labels."""
batch_size = labels.shape[0]
if batch_size == 1:
if mask is not None:
result = _gather_unbatched(
ops.squeeze(labels, axis=0),
ops.squeeze(match_indices, axis=0),
ops.squeeze(mask, axis=0),
mask_val,
)
else:
result = _gather_unbatched(
ops.squeeze(labels, axis=0),
ops.squeeze(match_indices, axis=0),
None,
mask_val,
)
return ops.expand_dims(result, axis=0)
else:
targets = ops.take_along_axis(
labels, ops.expand_dims(match_indices, axis=-1), axis=1
)
if mask is None:
return targets
else:
masked_targets = ops.cast(
mask_val, labels.dtype
) * ops.ones_like(mask, dtype=labels.dtype)
return ops.where(mask, masked_targets, targets)<|fim_middle|>gather_batched<|file_separator|> |
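The batched branch above leans on take_along_axis plus a masked overwrite; a small NumPy analogue with made-up shapes makes the gather explicit:

import numpy as np

labels = np.array([[10, 20, 30],
                   [40, 50, 60]])        # (batch, num_labels)
match_indices = np.array([[2, 0],
                          [1, 1]])       # (batch, num_anchors)
gathered = np.take_along_axis(labels, match_indices, axis=1)
print(gathered)                          # [[30 10]
                                         #  [50 50]]
mask = np.array([[True, False],
                 [False, True]])
print(np.where(mask, -1, gathered))      # masked positions replaced by the fill value -1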
<|fim_prefix|>def <|fim_suffix|>(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True<|fim_middle|>is_program_valid<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self,params):
"""
Parameters will be used to generate the signature of the method, an md5.
So we need signatures to be deterministic. There are two sources of randomness
- Python version, in particular differences in dealing with encodings
- Unsorted sets.
This method will add casting transformations to fix those, only required for testing.
:param params:
:return: a list of parameters that should generate a deterministic md5 signature.
"""
lparams = list(params)
for i, param in enumerate(lparams):
if isinstance(param, dict):
lparams[i] = self._to_ordered_dict(param)
return OneServer.METHOD_NAME(self, lparams)<|fim_middle|>cast_parms<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, filename):
with open(filename, "wb") as f:
f.METHOD_NAME(self.fourcc.encode())
f.METHOD_NAME(struct.pack('B', self.header_version))
for i in range(3):
f.METHOD_NAME(struct.pack('B', self.args.version[i]))
f.METHOD_NAME(struct.pack('I', len(self.data)))
f.METHOD_NAME(struct.pack('B', self.product_family_list[self.args.family]))
f.METHOD_NAME(struct.pack('B', self.product_id_list[self.args.product]))
f.METHOD_NAME(struct.pack('H', CrcCCITT.calc_crc(self.data)))
f.METHOD_NAME(self.data)<|fim_middle|>write<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> bytes:
...<|fim_middle|>read_chunk<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(*args, **kwargs):
class MockOutput:
class MockText:
text = "Response text from gpt3"
choices = [MockText(), MockText()]
return MockOutput()<|fim_middle|>mock_create<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, conf: ConfigTree) -> None:
conf = conf.with_fallback(AthenaMetadataExtractor.DEFAULT_CONFIG)
self._cluster = conf.get_string(AthenaMetadataExtractor.CATALOG_KEY)
self.sql_stmt = AthenaMetadataExtractor.SQL_STATEMENT.format(
where_clause_suffix=conf.get_string(AthenaMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
catalog_source=self._cluster
)
LOGGER.info('SQL for Athena metadata: %s', self.sql_stmt)
self._alchemy_extractor = sql_alchemy_extractor.from_surrounding_config(conf, self.sql_stmt)
self._extract_iter: Union[None, Iterator] = None<|fim_middle|>init<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(obj: Group | GroupEvent) -> str:
ev_metadata = obj.get_event_metadata()
ev_type = obj.get_event_type()
title = obj.title
if ev_type == "error" and "type" in ev_metadata:
title = ev_metadata["type"]
elif ev_type == "csp":
title = f'{ev_metadata["directive"]} - {ev_metadata["uri"]}'
else:
group = getattr(obj, "group", obj)
if isinstance(obj, GroupEvent) and obj.occurrence is not None:
title = obj.occurrence.issue_title
else:
event = group.get_latest_event()
if event is not None and event.occurrence is not None:
title = event.occurrence.issue_title
return title<|fim_middle|>build_attachment_title<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(context):
context.update(COMMON_CONTEXT)
config = load_config(os.path.join(os.path.dirname(__file__), "..", "docker.d", "worker.yml"), context)
passwords_config = load_config(os.path.join(os.path.dirname(__file__), "..", "docker.d", "passwords.yml"), context)
config_schema = load_schema(os.path.join(os.path.dirname(__file__), "..", "src", "signingscript", "data", "config_schema.json"))
passwords_schema = load_schema(os.path.join(os.path.dirname(__file__), "..", "src", "signingscript", "data", "passwords_config_schema.json"))
jsonschema.validate(config, config_schema)
jsonschema.validate(passwords_config, passwords_schema)<|fim_middle|>validate_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""The main function. Hier spielt die Musik.
"""
# parse the command line, exit with UNKNOWN if it fails
try:
args = parse_args()
except SystemExit:
sys.exit(STATE_UNKNOWN)
# fetch data
url = 'https://{}/api/v2/monitor/system/firmware/?access_token={}'.format(
args.HOSTNAME,
urllib.parse.quote(args.PASSWORD),
)
result = lib.base.coe(lib.url.fetch_json(
url,
insecure=args.INSECURE,
no_proxy=args.NO_PROXY,
timeout=args.TIMEOUT,
))
installed_version = result['results']['current']['version'].replace('v', '')
if not installed_version:
lib.base.cu('FortIOS not found.')
try:
installed_major, installed_minor, installed_patch = installed_version.split('.')
except:
installed_patch = '0'
installed_major, installed_minor = installed_version.split('.')
state, msg = lib.version.check_eol(
'https://endoflife.date/api/fortios.json',
'{}.{}'.format(installed_major, installed_minor),
)
# over and out
lib.base.oao(
'FortIOS v{} ({})'.format(installed_version, msg),
state,
lib.base.get_perfdata(
'fortios-version',
'{}.{}{}'.format(installed_major, installed_minor, installed_patch),
None,
None,
None,
0,
None,
),
always_ok=args.ALWAYS_OK,
)<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(wlst, cstates):
for dt, cmt in wlst.iter():
if cmt in cstates:
if not ((cstates[cmt] & _DNC) == _DNC):
return True
return False<|fim_middle|>has_candidates<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> 'outputs.PrivateAtlasPropertiesResponse':
"""
The Private Atlas resource properties.
"""
return pulumi.get(self, "properties")<|fim_middle|>properties<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(data):
"""
Parses a rndstr.in, lat.in or bestsqs.out file into pymatgen's
Structure format.
:param data: contents of a rndstr.in, lat.in or bestsqs.out file
Returns:
Structure object
"""
data = data.splitlines()
data = [x.split() for x in data if x] # remove empty lines
# following specification/terminology given in manual
if len(data[0]) == 6: # lattice parameters
a, b, c, alpha, beta, gamma = map(float, data[0])
coord_system = Lattice.from_parameters(a, b, c, alpha, beta, gamma).matrix
lattice_vecs = np.array(
[
[data[1][0], data[1][1], data[1][2]],
[data[2][0], data[2][1], data[2][2]],
[data[3][0], data[3][1], data[3][2]],
],
dtype=float,
)
first_species_line = 4
else:
coord_system = np.array(
[
[data[0][0], data[0][1], data[0][2]],
[data[1][0], data[1][1], data[1][2]],
[data[2][0], data[2][1], data[2][2]],
],
dtype=float,
)
lattice_vecs = np.array(
[
[data[3][0], data[3][1], data[3][2]],
[data[4][0], data[4][1], data[4][2]],
[data[5][0], data[5][1], data[5][2]],
],
dtype=float,
)
first_species_line = 6
scaled_matrix = np.matmul(lattice_vecs, coord_system)
lattice = Lattice(scaled_matrix)
all_coords = []
all_species = []
for line in data[first_species_line:]:
coords = np.array([line[0], line[1], line[2]], dtype=float)
scaled_coords = np.matmul(coords, np.linalg.inv(lattice_vecs))
all_coords.append(scaled_coords)
species_strs = "".join(line[3:]) # join multiple strings back together
species_strs = species_strs.replace(" ", "") # trim any white space
species_strs = species_strs.split(",") # comma-delimited
species = {}
for species_occ in species_strs:
# gets a species, occupancy pair
species_occ = species_occ.split("=")
if len(species_occ) == 1:
# assume occupancy is 1.0
species_occ = [species_occ[0], 1.0]
if "_" in species_occ[0]:
# see to_string() method in this file, since , and = are not valid
# species names in AT-AT we replace "," with "__" and "=" with "___",
# for pymatgen to parse these back correctly we have to replace them back
species_occ[0] = species_occ[0].replace("___", "=").replace("__", ",")
species[get_el_sp(species_occ[0])] = float(species_occ[1])
all_species.append(species)
return Structure(lattice, all_species, all_coords)<|fim_middle|>structure_from_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""attempt to init model"""
try:
self.ps_conf.initialize_function(
self.model, **self.ps_conf.initialize_kwargs
)
self.update_initialized_state(True)
except TypeError:
# this happens if the optimize_kwargs are misspecified,
# which is an error we want to raise
self.update_solved_state(False)
self.update_initialized_state(False)
raise
except:
self.update_solved_state(False)
self.update_initialized_state(False)<|fim_middle|>init_model<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
tc = CMakeToolchain(self)
tc.variables["JSONCPP_WITH_TESTS"] = False
tc.variables["JSONCPP_WITH_WARNING_AS_ERROR"] = False
tc.variables["JSONCPP_WITH_CMAKE_PACKAGE"] = False
tc.variables["JSONCPP_WITH_STRICT_ISO"] = False
tc.variables["JSONCPP_WITH_PKGCONFIG_SUPPORT"] = False
jsoncpp_version = Version(self.version)
if jsoncpp_version < "1.9.0" or jsoncpp_version >= "1.9.4":
tc.variables["BUILD_STATIC_LIBS"] = not self.options.shared
if jsoncpp_version >= "1.9.3":
tc.variables["JSONCPP_WITH_EXAMPLE"] = False
if jsoncpp_version >= "1.9.4":
tc.variables["BUILD_OBJECT_LIBS"] = False
if jsoncpp_version < "1.9.0":
# Honor BUILD_SHARED_LIBS from conan_toolchain (see https://github.com/conan-io/conan/issues/11840)
tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW"
# No opt-out of ccache
if Version(self.version) < "1.9.3":
tc.cache_variables["CCACHE_FOUND"] = ""
else:
tc.cache_variables["CCACHE_EXECUTABLE"] = ""
tc.METHOD_NAME()<|fim_middle|>generate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, *args):
"""Print to real stdout (for debugging)
"""
self.realOutputFiles()[0].write(' '.join(map(str, args)) + "\n")<|fim_middle|>print<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(user: "User", mode: str) -> Optional[str]:
"""Avatars based on a user attribute"""
avatar = get_path_from_dict(user.attributes, mode[11:], default=None)
return avatar<|fim_middle|>avatar_mode_attribute<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(liste, bool):
if bool: return liste[1]
else: return liste[0]<|fim_middle|>alias<|file_separator|> |