<|fim_prefix|>def <|fim_suffix|>(self, metrics):
limiter = RateLimiter(
storage.MemoryStorage(),
"1 per minute; 1 per hour; 1 per day",
metrics=metrics,
)
current = datetime.datetime.now(tz=datetime.UTC)
stats = iter(
[
(0, 0),
((current + datetime.timedelta(seconds=60)).timestamp(), 0),
((current + datetime.timedelta(seconds=5)).timestamp(), 0),
]
)
limiter._window = pretend.stub(get_window_stats=lambda L, *a: next(stats))
resets_in = limiter.resets_in("foo")
assert resets_in > datetime.timedelta(seconds=0)
assert resets_in <= datetime.timedelta(seconds=5)<|fim_middle|>test_resets_in_expired<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(
model: Input[VertexModel],
export_format_id: str,
output_info: OutputPath(Dict[str, str]),
gcp_resources: OutputPath(str),
artifact_destination: str = '',
image_destination: str = '',<|fim_middle|>model_export<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(sat_maintain, module_capsule_configured, module_sca_manifest, module_stash):
if not module_stash[synced_repos]:
org = sat_maintain.satellite.api.Organization().create()
sat_maintain.satellite.upload_manifest(org.id, module_sca_manifest.content)
# sync custom repo
cust_prod = sat_maintain.satellite.api.Product(organization=org).create()
cust_repo = sat_maintain.satellite.api.Repository(
url=settings.repos.yum_1.url, product=cust_prod
).create()
cust_repo.sync()
# sync RH repo
product = sat_maintain.satellite.api.Product(
name=constants.PRDS['rhae'], organization=org.id
).search()[0]
r_set = sat_maintain.satellite.api.RepositorySet(
name=constants.REPOSET['rhae2'], product=product
).search()[0]
payload = {'basearch': constants.DEFAULT_ARCHITECTURE, 'product_id': product.id}
r_set.enable(data=payload)
result = sat_maintain.satellite.api.Repository(
name=constants.REPOS['rhae2']['name']
).search(query={'organization_id': org.id})
rh_repo_id = result[0].id
rh_repo = sat_maintain.satellite.api.Repository(id=rh_repo_id).read()
rh_repo.sync()
module_stash[synced_repos]['rh_repo'] = rh_repo
module_stash[synced_repos]['cust_repo'] = cust_repo
module_stash[synced_repos]['org'] = org
if type(sat_maintain) is Capsule:
# assign the Library LCE to the Capsule
lce = sat_maintain.satellite.api.LifecycleEnvironment(
organization=module_stash[synced_repos]['org']
).search(query={'search': f'name={constants.ENVIRONMENT}'})[0]
module_capsule_configured.nailgun_capsule.content_add_lifecycle_environment(
data={'environment_id': lce.id}
)
result = module_capsule_configured.nailgun_capsule.content_lifecycle_environments()
assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
# sync the Capsule
sync_status = module_capsule_configured.nailgun_capsule.content_sync()
assert sync_status['result'] == 'success'
yield {
'custom': module_stash[synced_repos]['cust_repo'],
'rh': module_stash[synced_repos]['rh_repo'],
}<|fim_middle|>module_synced_repos<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(service, command, data_dir, num_files, file_size):
"""Run the given TestDFSIO command."""
args = [
'-' + command, '-nrFiles',
str(num_files), '-fileSize',
str(file_size)
]
properties = {'test.build.data': data_dir}
if not (data_dir.startswith(BaseDpbService.HDFS_FS + ':') or
data_dir.startswith('/')):
properties['fs.default.name'] = data_dir
return service.SubmitJob(
classname='org.apache.hadoop.fs.TestDFSIO',
properties=properties,
job_arguments=args,
job_type=dpb_service.BaseDpbService.HADOOP_JOB_TYPE)<|fim_middle|>run_test_dfsio<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, config: Mapping[str, Any]) -> List[Stream]:
"""
:param config: The user-provided configuration as specified by the source's spec.
Any stream construction related operation should happen here.
:return: A list of the streams in this source connector.
"""
args = dict(
credentials=self.get_credentials(config),
partner_id=config.get("partner_id"),
start_date=config.get("start_date"),
end_date=config.get("end_date"),
filters=config.get("filters"),
)
METHOD_NAME = [
Reach(**args),
Standard(**args),
AudienceComposition(**args),
Floodlight(**args),
UniqueReachAudience(**args),
]
return METHOD_NAME<|fim_middle|>streams<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(strategy):
return strategy.id<|fim_middle|>strategy_id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(project, name, slug, glossary, license):
return project.component_set.create(
slug=slug,
name=name,
is_glossary=True,
glossary_name=glossary.name,
glossary_color=glossary.color,
allow_translation_propagation=False,
manage_units=True,
file_format="tbx",
filemask="*.tbx",
vcs="local",
repo="local:",
branch="main",
source_language=glossary.source_language,
license=license,
)<|fim_middle|>create_glossary<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
return get_plugin('OPTIONSET_PROVIDERS', self.provider_key)<|fim_middle|>provider<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
gid = relay.GlobalID(type_name="Fruit", node_id="1")
type_ = gid.resolve_type(fake_info)
assert type_ is Fruit<|fim_middle|>test_global_id_resolve_type<|file_separator|>
<|fim_prefix|> <|fim_suffix|>(cls) -> SupportedSettingType:<|fim_middle|>get_pre_init_settings<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.key1 = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.key2 = AAZStrType(
flags={"read_only": True},
)
return cls._schema_on_200<|fim_middle|>build_schema_on_200<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertFalse(self.s("artist"))
self.s["title"] = "artist - title"
self.s.multisong = False
self.assertEqual(self.s("title"), "title")
self.assertEqual(self.s.get("title"), "title")
self.assertEqual(self.s("artist"), "artist")
self.assertEqual(self.s.get("artist"), "artist")<|fim_middle|>test_title_split_stream<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> AsyncIterator[ConfigId]:
pass<|fim_middle|>list_config_ids<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
device = 'mixed' if args.device == 'gpu' else 'cpu'
jpegs, _ = fn.readers.file(file_root=args.images_dir)
images = fn.decoders.image(jpegs, device=device, output_type=types.RGB,
hw_decoder_load=args.hw_load, preallocate_width_hint=args.width_hint,
preallocate_height_hint=args.height_hint)
return images<|fim_middle|>decoder_pipeline<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
with get_logo_png() as LOGO_PNG:
img = Image.from_file(LOGO_PNG)
assert_equal_hash(img.value, LOGO_PNG_DIGEST)<|fim_middle|>test_from_filename<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)<|fim_middle|>on_200<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
return json.loads(load_file("screens.json"))<|fim_middle|>screens_response<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(")")<|fim_middle|>output_child_post<|file_separator|>
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_random_mix_good_and_bad_gradients<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
root = self.mkdtemp()
expect_hex = self.write_file_and_hash(os.path.join(root, "foo"), "hello\n")
expect_bar_hex = self.write_file_and_hash(
os.path.join(root, "bar"), "different\n"
)
with open(os.path.join(root, ".watchmanconfig"), "w") as f:
f.write(json.dumps({"content_hash_max_items": 1}))
self.watchmanCommand("watch", root)
self.assertFileList(root, [".watchmanconfig", "foo", "bar"])
res = self.watchmanCommand(
"query",
root,
{"path": ["foo", "bar"], "fields": ["name", "content.sha1hex"]},
)
self.assertEqual(
[
{"name": "bar", "content.sha1hex": expect_bar_hex},
{"name": "foo", "content.sha1hex": expect_hex},
],
sorted(res["files"], key=lambda k: k["name"]),
)
stats = self.watchmanCommand("debug-contenthash", root)
# ensure that we pruned the cache back to match the content_hash_max_items
self.assertEqual(stats["size"], 1)
self.assertEqual(stats["cacheHit"], 0)
self.assertEqual(stats["cacheMiss"], 2)
self.assertEqual(stats["cacheStore"], 2)
self.assertEqual(stats["cacheLoad"], 2)<|fim_middle|>test_cache_limit<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(a: Variable, b: Variable) -> None:
# Support for iterating over bin contents is limited in Python.
# So, simply use `identical` even though it does not produce good error messages.
assert a.bins.unit == b.bins.unit
assert identical(a, b)<|fim_middle|>assert_identical_binned_variable_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cmd_invocationInitial : CommandInvocationInitial) -> ParallelizabilityInfo:
return get_parallelizability_info_from_cmd_invocation(cmd_invocationInitial)<|fim_middle|>get_parallelizability_info_from_cmd_invocation_util<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
with temp_directory() as project_dir:
# Write a default configuration and make sure mq is configured.
main(["--directory", project_dir])
project = _check_project_directory(project_dir)
assert "message_queue_url" not in (project.app_config or {})
# Try to re-config with message queue, expect error because files
# already exist.
exit_code = None
try:
main(["--directory", project_dir, "--mq"])
except SystemExit as e:
exit_code = e.code
assert exit_code == 1
# Try re-config again with --force, expect it to work and for MQ to be
# configured.
main(["--directory", project_dir, "--mq", "--force"])
project = _check_project_directory(project_dir)
assert "message_queue_url" in project.app_config<|fim_middle|>test_force<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, roles: typing.Iterable, strict=True) -> list:
parties = []
for role in roles:
if role not in self._role_to_parties:
if strict:
raise RuntimeError(
f"try to get role {role} "
f"which is not configured in `role` in runtime conf({self._role_to_parties})"
)
else:
continue
parties.extend(self._role_to_parties[role])
return parties<|fim_middle|>roles_to_parties<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(
data: Union[pd.Series, pd.DataFrame],
target_column: str = "close",
seasonal_periods: int = 7,
n_predict: int = 5,
start_window: float = 0.85,
forecast_horizon: int = 5,
) -> Tuple[
Optional[List[type[TimeSeries]]],
Optional[List[type[TimeSeries]]],
Optional[List[type[TimeSeries]]],
Optional[float],
Optional[StatsForecast],
Optional[Union[int, str]],<|fim_middle|>get_autoselect_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
tc.reset()
tv1 = tc.fresh_tvar()
tv2 = tc.fresh_tvar()
fn1 = create_Callable([tv1, tv2], bool, {tv1, tv2})
fn2 = create_Callable([int, int], bool)
unify_helper(fn1, fn2, Callable[[int, int], bool])<|fim_middle|>test_simple_polymorphic_call<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
with self.assertRaises(ValueError, msg="Invalid task invalid-task. Make sure it's valid."):
InferenceApi("bert-base-uncased", task="invalid-task")<|fim_middle|>test_inference_overriding_invalid_task<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(mock_repo):
# when
get_diff(mock_repo, "some_ref")
# then
mock_repo.git.diff.assert_called_once_with("some_ref", index=False)<|fim_middle|>test_get_diff<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(kind):
"""Test writing different Spectrums to HDF5 files."""
spec = make_spec(kind, lt=600.0)
fname = os.path.join(TEST_OUTPUTS, "spectrum_io__test_write_h5__" + kind + ".h5")
spec.write(fname)<|fim_middle|>test_write_h5<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, attributes):
if "cards_url" in attributes: # pragma no branch
self._cards_url = self._makeStringAttribute(attributes["cards_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "project_url" in attributes: # pragma no branch
self._project_url = self._makeStringAttribute(attributes["project_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])<|fim_middle|>use_attributes<|file_separator|>
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>test_utilization_report_for_project<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> Union[pandas.DatetimeIndex, None]:
return self._start_times<|fim_middle|>start_times<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(bg_img_path, img_shape):
if bg_img_path is None:
bg = 255 * np.ones(img_shape)
elif not os.path.exists(bg_img_path):
raise Exception('The --bg_img_path is not existed: {}'.format(
bg_img_path))
else:
bg = cv2.imread(bg_img_path)
return bg<|fim_middle|>get_bg_img<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(x_name, y_lim):
plt.figure(figsize=(5.3, 4))
xs = np.asarray(range(5))
for i in range(1, 3):
fwd = np.flip(np.asarray(data_fwd_mean[i]))
com = np.flip(np.asarray(data_comm_mean[i]))
bwd = np.flip(np.asarray(data_bwd_mean[i]))
fwd_stdv = np.flip(np.asarray(data_fwd_stdv[i]))
com_stdv = np.flip(np.asarray(data_comm_stdv[i]))
bwd_stdv = np.flip(np.asarray(data_bwd_stdv[i]))
fwd += com
configs = {
"width": WIDTH,
"color": colors[i],
"edgecolor": "black",
"capsize": 6,
}
plt.bar(xs + (i - 1.5) * WIDTH, fwd, yerr=fwd_stdv, hatch="///", **configs)
plt.bar(
xs + (i - 1.5) * WIDTH,
bwd,
yerr=bwd_stdv,
hatch="\\\\\\",
bottom=fwd,
**configs,
)
color_handles = []
color_handles.append(plt.bar([20], [0], color=colors[1]))
color_handles.append(plt.bar([2], [0], color=colors[2]))
color_names = ["RPC", "Pipeline"]
hatch_handles = []
hatch_handles.append(plt.bar([2], [0], hatch="///", color="white"))
hatch_handles.append(plt.bar([2], [0], hatch="\\\\\\", color="white"))
hatch_names = ["FWD", "BWD"]
def interleave(l1, l2):
return [val for pair in zip(l1, l2) for val in pair]
plt.legend(
handles=interleave(color_handles, hatch_handles),
loc="upper left",
labels=interleave(color_names, hatch_names),
prop={"family": FONT["fontname"], "size": FONT["size"] - 2},
ncol=2,
# bbox_to_anchor=(-0.015, 0.3, 0.5, 0.5)
)
plt.xticks(xs, ["1", "2", "4", "8", "128"], **FONT)
plt.yticks(**FONT)
plt.xlabel(x_name, **FONT)
plt.ylabel("Delay (Second)", **FONT)
plt.ylim(y_lim)
plt.xlim([-0.5, 4.5])
# plt.yscale('log')
# plt.show()
plt.savefig(f"../images/gpt.pdf", bbox_inches="tight")<|fim_middle|>plot_nlp<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
AliasInvalid2 = list[int] # [unsubscriptable-object]
cast_variable2 = [1, 2, 3]
cast_variable2 = typing.cast(list[int], cast_variable2) # [unsubscriptable-object]
var12: list[int] # [unsubscriptable-object]<|fim_middle|>func3<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
return {
"radius": self.radius,
"frame": self.frame.METHOD_NAME,
}<|fim_middle|>data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
batch_size = 8
steps_per_epoch = 6
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.arange(dataset_size).reshape((dataset_size, 1))
y = x * 2
iterator = epoch_iterator.EpochIterator(
x=x,
y=y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
)
steps_seen = []
for step, _ in iterator.enumerate_epoch():
steps_seen.append(step)
self.assertLen(steps_seen, steps_per_epoch - 2)
self.assertIsInstance(iterator, epoch_iterator.EpochIterator)
self.assertTrue(iterator._insufficient_data)<|fim_middle|>test_insufficient_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
os.mkdir(os.path.join(self.tempdir, "src"))
package_xml_path = os.path.join(self.tempdir, "src", "package.xml")
with open(package_xml_path, "w") as f:
f.write(
"""<?xml version="1.0" encoding="UTF-8"?><|fim_middle|>test_list_metadata_types<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(distr, method):
"""Check whether distr has capability of method.
Parameters
----------
distr : BaseDistribution object
method : str
method name to check
Returns
-------
whether distr has capability method, according to tags
capabilities:approx and capabilities:exact
"""
approx_methods = distr.get_tag("capabilities:approx")
exact_methods = distr.get_tag("capabilities:exact")
return method in approx_methods or method in exact_methods<|fim_middle|>has_capability<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(list, name):
for item in list:
if item and is_list(item) and item[0] == 'prop':
if len(item) > 1 and item[1] == name:
return item[2:]
return []<|fim_middle|>getproplist<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
with self.assertRaises(TypeError) as cm:
writer = NEXUSDataWriter(file_name='test_file_name.nxs')
writer.write()
self.assertEqual(str(cm.exception), 'Data to write out must be set.')<|fim_middle|>test_write_throws_when_data_is_none<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Show the input files of the task: input file, submission script and TaskManager
"""
file = self.task.job_file
text = file.read() if file.exists else "Cannot find job_file!"
job_file = pn.pane.Markdown(f"```shell\n{text}\n```")
from .viewers import JSONViewer
json_view = JSONViewer(self.task.manager.as_dict())
def card(title, *items, collapsed=True):
return pn.Card(*items,
title=title,
collapsed=collapsed,
sizing_mode="stretch_width",
header_color="blue",
#header_background="blue",
)
return pn.Column(
f"## Input files of `{repr(self.task)}`",
card("Input file", self.html_with_clipboard_btn(self.task.input), collapsed=False),
pn.layout.Divider(),
card("Submission script", job_file),
pn.layout.Divider(),
card("TaskManager", json_view),
pn.layout.Divider(),
sizing_mode="stretch_width",
)<|fim_middle|>get_inputs_view<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(
begin_time,
end_time,
event_time,
eeg_bads,
status="unedited",
name=None,
pns_bads=None,
nsegs=None,
):
"""Build content for a single segment in categories.xml.
Segments are sorted into categories in categories.xml. In a segmented MFF
each category can contain multiple segments, but in an averaged MFF each
category only contains one segment (the average).
"""
channel_status = [
{"signalBin": 1, "exclusion": "badChannels", "channels": eeg_bads}
]
if pns_bads:
channel_status.append(
{"signalBin": 2, "exclusion": "badChannels", "channels": pns_bads}
)
content = {
"status": status,
"beginTime": begin_time,
"endTime": end_time,
"evtBegin": event_time,
"evtEnd": event_time,
"channelStatus": channel_status,
}
if name:
content["name"] = name
if nsegs:
content["keys"] = {"#seg": {"type": "long", "data": nsegs}}
return content<|fim_middle|>build_segment_content<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(opt):
"""
Dump task data to ACUTE-Eval.
"""
# create repeat label agent and assign it to the specified task
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
task = opt.get('task')
speaker_0_id = opt.get('speaker_0_id') or f'{task}_as_human'
speaker_1_id = opt.get('speaker_1_id') or f'{task}_as_model'
if opt['outfile'] is None:
outfile = tempfile.mkstemp(
prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'
)[1]
else:
outfile = opt['outfile']
num_episodes = (
world.num_episodes()
if opt['num_episodes'] == -1
else min(opt['num_episodes'], world.num_episodes())
)
log_timer = TimeLogger()
print(f'[ starting to convert, saving output to {outfile} ]')
dialogues = []
for _ in range(num_episodes):
episode = []
episode_done = False
while not episode_done:
world.parley()
acts = world.get_acts()
text = acts[0].get('text')
split_text = text.split('\n')
label = random.choice(
acts[0].get('labels', acts[0].pop('eval_labels', None))
)
if not episode and opt.get('prepended_context'):
# first turn
context = split_text[:-1]
text = split_text[-1]
context_turn = [
{'text': context, 'episode_done': False, 'id': 'context'}
for _ in range(2)
]
episode.append(context_turn)
turn = [
{'text': text, 'episode_done': False, 'id': speaker_0_id},
{'text': label, 'episode_done': False, 'id': speaker_1_id},
]
episode.append(turn)
if acts[0].get('episode_done', False):
episode[-1][-1]['episode_done'] = True
episode_done = True
dialogues.append(episode)
if log_timer.time() > opt['log_every_n_secs']:
text, _log = log_timer.log(world.total_parleys, world.num_examples())
print(text)
if world.epoch_done():
break
Conversations.save_conversations(dialogues, outfile, opt)<|fim_middle|>dump_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(state):
"""
Generates some random data for the demo.
:param state: The state for which to create the data.
:return: The newly created data.
"""
return {
'state': state,
'date': datetime.date.today().isoformat(),
'cases': random.randint(1, 1000),
'deaths': random.randint(1, 100)
}<|fim_middle|>generate_random_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, node=None):
"""Check that a user is able to execute `SYSTEM FETCHES` commands if and only if
the privilege has been granted via role.
"""
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"), role(node, f"{role_name}"):
with When("I grant the role to the user"):
node.query(f"GRANT {role_name} TO {user_name}")
Suite(run=check_replicated_privilege,
examples=Examples("privilege on grant_target_name user_name", [
tuple(list(row)+[role_name,user_name]) for row in check_replicated_privilege.examples
], args=Args(name="check privilege={privilege}", format_name=True)))<|fim_middle|>replicated_privileges_granted_via_role<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
"""Returns the default SymbolDatabase."""
return _DEFAULT<|fim_middle|>default<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(pattern=None):
"""
Checks if the name can be parsed back to its original form for an air-by-date format.
:return: true if the naming is valid, false if not.
"""
if pattern is None:
pattern = app.NAMING_PATTERN
logger.log(u'Checking whether the pattern ' + pattern + ' is valid for an air-by-date episode', logger.DEBUG)
valid = validate_name(pattern, abd=True)
return valid<|fim_middle|>check_valid_abd_naming<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Retrieve inputs from pointers to results files.
This mechanism can be easily extended/replaced to retrieve data from
other data sources (e.g., XNAT, HTTP, etc.).
"""
if self._got_inputs: # Inputs cached
return
if not self.input_source: # No previous nodes
self._got_inputs = True
return
prev_results = defaultdict(list)
for key, info in list(self.input_source.items()):
prev_results[info[0]].append((key, info[1]))
logger.debug(
'[Node] Setting %d connected inputs of node "%s" from %d previous nodes.',
len(self.input_source),
self.name,
len(prev_results),
)
for results_fname, connections in list(prev_results.items()):
outputs = None
try:
outputs = load_resultfile(results_fname).outputs
except AttributeError as e:
logger.critical("%s", e)
except FileNotFoundError as e:
if self.allow_missing_input_source:
logger.warning(
f'Missing input file "{results_fname}". '
"This may indicate that errors occured during previous processing steps.",
exc_info=e,
)
else:
raise
if outputs is None:
if self.allow_missing_input_source:
continue
else:
raise RuntimeError(
"""\<|fim_middle|>get_inputs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self, msg: Union[Exception, str], kwargs: MutableMapping[str, Any]
) -> Tuple[str, MutableMapping[str, Any]]:
"""Process the message to append the prefix.
Args:
msg: Message to be prefixed.
kwargs: Keyword args for the message.
"""
return self.prefix_template.format(prefix=self.prefix, msg=msg), kwargs<|fim_middle|>process<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(self, **kwargs: Any) -> _models.OperationListResult:
"""List all available Microsoft.AzureStackHCI provider operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationListResult or the result of cls(response)
:rtype: ~azure.mgmt.azurestackhci.models.OperationListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-09-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("OperationListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized<|fim_middle|>list<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()<|fim_middle|>set_up<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().METHOD_NAME(configuration)
configuration = configuration or self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
return True<|fim_middle|>validate_configuration<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""AWS Batch client."""
if not self._batch:
self._batch = BatchClient()
return self._batch<|fim_middle|>batch<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
G = nx.DiGraph()
G.add_node(0)
def kernel(x):
return x
assert nx.is_isomorphic(gn_graph(1), G)
assert nx.is_isomorphic(gn_graph(1, kernel=kernel), G)
assert nx.is_isomorphic(gnc_graph(1), G)
assert nx.is_isomorphic(gnr_graph(1, 0.5), G)<|fim_middle|>test_parameters<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tree: fdt.FdtParser, devices: List[device.WrappedNode]) -> List[dict]:
cpus = cpu.get_cpus(tree)
PSCI_COMPAT = ['arm,psci-0.2', 'arm,psci-1.0']
psci_node = [n for n in devices if n.has_prop('compatible')
and n.get_prop('compatible').strings[0] in PSCI_COMPAT]
if len(psci_node) > 0:
psci_node = psci_node[0]
else:
psci_node = None
cpu_info = []
for i, cpu_node in enumerate(sorted(cpus, key=lambda a: a.path)):
enable_method = None
if cpu_node.has_prop('enable-method'):
enable_method = cpu_node.get_prop('enable-method').strings[0]
cpuid = i
if cpu_node.has_prop('reg'):
cpuid = cpu_node.parse_address(list(cpu_node.get_prop('reg').words))
extra_data = 0
if enable_method == 'psci' and psci_node:
extra_data = 'PSCI_METHOD_' + psci_node.get_prop('method').strings[0].upper()
elif enable_method == 'spin-table':
extra_data = '0x{:x}'.format(
device.Utils.make_number(2, list(cpu_node.get_prop('cpu-release-addr').words)))
obj = {
'compat': cpu_node.get_prop('compatible').strings[0],
'enable_method': enable_method,
'cpuid': cpuid,
'path': cpu_node.path,
'extra': extra_data,
}
cpu_info.append(obj)
# guarantee that cpus in the same cluster will be consecutive
return sorted(cpu_info, key=lambda a: a['cpuid'])<|fim_middle|>get_elfloader_cpus<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""Disposes of the content of an editor."""
if self.factory.readonly:
# enthought/traitsui#884
_BaseEditor.METHOD_NAME(self)
else:
super().METHOD_NAME()<|fim_middle|>dispose<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
self.prepare_data(grant_type='invalid')
headers = self.create_basic_header(
'password-client', 'password-secret'
)
rv = self.client.post('/oauth/token', data={
'grant_type': 'password',
'username': 'foo',
'password': 'ok',
}, headers=headers)
resp = json.loads(rv.data)
self.assertEqual(resp['error'], 'unauthorized_client')<|fim_middle|>test_invalid_grant_type<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
args = parse_args()
new_quantizable_op_type = []
for item in args.quantizable_op_type:
new_quantizable_op_type.append(''.join(item))
args.quantizable_op_type = new_quantizable_op_type
paddle.enable_static()
quantize(args)<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
hash(Availability(True, True, True))<|fim_middle|>test_availability_hash<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
names = mooseutils.git_committers(mooseutils.__file__)
self.assertIn('Andrew E. Slaughter', names)
with self.assertRaises(OSError) as e:
mooseutils.git_authors('wrong')
names = mooseutils.git_committers(mooseutils.__file__, '--merges')
self.assertIn('Logan Harbour', names)<|fim_middle|>test_committers<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(self, params: DropTipParams) -> DropTipResult:
"""Move to and drop a tip using the requested pipette."""
pipette_id = params.pipetteId
labware_id = params.labwareId
well_name = params.wellName
home_after = params.homeAfter
if params.alternateDropLocation:
well_location = self._state_view.geometry.get_next_tip_drop_location(
labware_id=labware_id,
well_name=well_name,
pipette_id=pipette_id,
)
else:
well_location = params.wellLocation
tip_drop_location = self._state_view.geometry.get_checked_tip_drop_location(
pipette_id=pipette_id, labware_id=labware_id, well_location=well_location
)
position = await self._movement_handler.move_to_well(
pipette_id=pipette_id,
labware_id=labware_id,
well_name=well_name,
well_location=tip_drop_location,
)
await self._tip_handler.drop_tip(pipette_id=pipette_id, home_after=home_after)
return DropTipResult(
position=DeckPoint(x=position.x, y=position.y, z=position.z)
)<|fim_middle|>execute<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
from . import basic_assets
return [load_assets_from_modules([basic_assets]), basic_assets.basic_assets_job]<|fim_middle|>basic_assets_repository<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(conference_factory, graphql_client):
conference = conference_factory()
resp = graphql_client.query(
"""
query($code: String!) {
conference(code: $code) {
keynotes {
title(language: "en")
speakers {
id
}
}
}
}
""",
variables={"code": conference.code},
)
assert "errors" not in resp
assert resp["data"]["conference"]["keynotes"] == []<|fim_middle|>test_get_conference_keynotes_empty<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
global conf
conf = read_config()
token = conf.token if conf.token is not None else None
if token:
gh = Github(token)
else:
gh = Github()
jira = JIRA('https://issues.apache.org/jira')
result = {}
repo = gh.get_repo('apache/lucene')
open_prs = repo.get_pulls(state='open')
out("Lucene Github PR report")
out("============================")
out("Number of open Pull Requests: %s" % open_prs.totalCount)
result['open_count'] = open_prs.totalCount
lack_jira = list(filter(lambda x: not re.match(r'.*\b(LUCENE)-\d{3,6}\b', x.title), open_prs))
result['no_jira_count'] = len(lack_jira)
lack_jira_list = []
for pr in lack_jira:
lack_jira_list.append({'title': pr.title, 'number': pr.number, 'user': pr.user.login, 'created': pr.created_at.strftime("%Y-%m-%d")})
result['no_jira'] = lack_jira_list
out("\nPRs lacking JIRA reference in title")
for pr in lack_jira_list:
out(" #%s: %s %s (%s)" % (pr['number'], pr['created'], pr['title'], pr['user'] ))
out("\nOpen PRs with a resolved JIRA")
has_jira = list(filter(lambda x: re.match(r'.*\b(LUCENE)-\d{3,6}\b', x.title), open_prs))
issue_ids = []
issue_to_pr = {}
for pr in has_jira:
jira_issue_str = re.match(r'.*\b((LUCENE)-\d{3,6})\b', pr.title).group(1)
issue_ids.append(jira_issue_str)
issue_to_pr[jira_issue_str] = pr
resolved_jiras = jira.search_issues(jql_str="key in (%s) AND status in ('Closed', 'Resolved')" % ", ".join(issue_ids))
closed_jiras = []
for issue in resolved_jiras:
pr_title = issue_to_pr[issue.key].title
pr_number = issue_to_pr[issue.key].number
assignee = issue.fields.assignee.name if issue.fields.assignee else None
closed_jiras.append({ 'issue_key': issue.key,
'status': issue.fields.status.name,
'resolution': issue.fields.resolution.name,
'resolution_date': issue.fields.resolutiondate[:10],
'pr_number': pr_number,
'pr_title': pr_title,
'issue_summary': issue.fields.summary,
'assignee': assignee})
closed_jiras.sort(key=lambda r: r['pr_number'], reverse=True)
for issue in closed_jiras:
out(" #%s: %s %s %s: %s (%s)" % (issue['pr_number'],
issue['status'],
issue['resolution_date'],
issue['issue_key'],
issue['issue_summary'],
issue['assignee'])
)
result['closed_jira_count'] = len(resolved_jiras)
result['closed_jira'] = closed_jiras
if conf.json:
print(json.dumps(result, indent=4))
if conf.html:
print(make_html(result))<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
model = nn.Sequential(nn.Conv2d(3, 3, 1))
sample = torch.rand(1, 3, 32, 32)
model(sample).norm().backward()
with pytest.raises(ValueError, match="some parameter already has gradient"):
balance_by_time(1, model, sample, device="cpu")<|fim_middle|>test_already_has_grad<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(self, query: str) -> List[float]:
del query
return [0, 0, 1, 0, 0]<|fim_middle|>aget_query_embedding<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(using_opengl_renderer, tmp_path):
"""Test that the frame size can be set via config file."""
np.testing.assert_allclose(
config.aspect_ratio, config.pixel_width / config.pixel_height
)
np.testing.assert_allclose(config.frame_height, 8.0)
with tempconfig({}):
tmp_cfg = tempfile.NamedTemporaryFile("w", dir=tmp_path, delete=False)
tmp_cfg.write(
"""
[CLI]
pixel_height = 10
pixel_width = 10
""",
)
tmp_cfg.close()
config.digest_file(tmp_cfg.name)
# aspect ratio is set using pixel measurements
np.testing.assert_allclose(config.aspect_ratio, 1.0)
# if not specified in the cfg file, frame_width is set using the aspect ratio
np.testing.assert_allclose(config.frame_height, 8.0)
np.testing.assert_allclose(config.frame_width, 8.0)
with tempconfig({}):
tmp_cfg = tempfile.NamedTemporaryFile("w", dir=tmp_path, delete=False)
tmp_cfg.write(
"""
[CLI]
pixel_height = 10
pixel_width = 10
frame_height = 10
frame_width = 10
""",
)
tmp_cfg.close()
config.digest_file(tmp_cfg.name)
np.testing.assert_allclose(config.aspect_ratio, 1.0)
# if both are specified in the cfg file, the aspect ratio is ignored
np.testing.assert_allclose(config.frame_height, 10.0)
np.testing.assert_allclose(config.frame_width, 10.0)<|fim_middle|>test_frame_size<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(n_samples, noise_std=0.0, rotations=1.0):
ts = torch.linspace(0, 1, n_samples, device=DEVICE)
rs = ts**0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,), device=DEVICE) * 2 - 1
labels = (signs > 0).to(torch.long).to(DEVICE)
xs = (
rs * signs * torch.cos(thetas)
+ torch.randn(n_samples, device=DEVICE) * noise_std
)
ys = (
rs * signs * torch.sin(thetas)
+ torch.randn(n_samples, device=DEVICE) * noise_std
)
points = torch.stack([xs, ys], dim=1)
return points, labels<|fim_middle|>make_spirals<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, name: str) -> bytes:
return self._serialize(InsType.EIP712_SEND_STRUCT_IMPL,
P1Type.COMPLETE_SEND,
P2Type.STRUCT_NAME,
self._string_to_bytes(name))<|fim_middle|>eip712_send_struct_impl_root_struct<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Test that storage.set() sets value both in in-memory cache and
in persist storage
"""
context = self.get_storage_context()
persist_storage = LocalDiskCacheStorage(context)
wrapped_storage = InMemoryCacheStorageWrapper(
persist_storage=persist_storage, context=context
)
persist_storage.set("some-key", b"some-value")
with patch.object(
persist_storage, "set", wraps=persist_storage.set
) as mock_persist_set:
wrapped_storage.set("some-key", b"some-value")
mock_persist_set.assert_called_once_with("some-key", b"some-value")
self.assertEqual(wrapped_storage.get("some-key"), b"some-value")<|fim_middle|>test_in_memory_cache_storage_wrapper_set<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
s = "foo"
resp = self._comm("greeting name={0}".format(s))
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0], "Hello '{0}'".format(s))
self.assertTrue(self.ldmsd.is_running())<|fim_middle|>test_recv_1_rec_resp<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tmp_home, tmp_prefix):
"""Explicit export must have dependencies before dependent packages."""
helpers.install("python=3.10", "pip", "jupyterlab")
lines = helpers.run_env("export", "--explicit").splitlines()
indices = {
"libzlib": 0,
"python": 0,
"wheel": 0,
"pip": 0,
"jupyterlab": 0,
}
for i, l in enumerate(lines):
for pkg in indices.keys():
if pkg in l:
indices[pkg] = i
assert indices["libzlib"] < indices["python"]
assert indices["python"] < indices["wheel"]
assert indices["wheel"] < indices["pip"]
assert indices["python"] < indices["jupyterlab"]<|fim_middle|>test_explicit_export_topologically_sorted<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(v):
try:
return float(v)
except ValueError:
return None<|fim_middle|>to_float<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, value):
self.__float_value = value<|fim_middle|>set_float_value<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""Returns the status of the task that will provide the value
for this future. This may not be in-sync with the result state
of this future - for example, task_status might return 'done' but
self.done() might not be true (which in turn means self.result()
and self.exception() might block).
The actual status description strings returned by this method are
likely to change over subsequent versions of parsl, as use-cases
and infrastructure are worked out.
It is expected that the status values will be from a limited set
of strings (so that it makes sense, for example, to group and
count statuses from many futures).
It is expected that there might be a non-trivial cost in acquiring the
status in future (for example, by communicating with a remote
worker).
Returns: str
"""
return self.task_def['status'].name<|fim_middle|>task_status<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)<|fim_middle|>send_request<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, version, encoding, standalone) -> None: ...<|fim_middle|>xml_decl_handler<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(api):
'''
test to get access group filters
'''
filters = api.filters.access_group_filters()
assert isinstance(filters, dict)
for data in filters:
check(filters[data], 'choices', list, allow_none=True)
check(filters[data], 'operators', list)
check(filters[data], 'pattern', str, allow_none=True)<|fim_middle|>test_access_group_filters<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, expected, args,
sep=NotDefined, end=NotDefined, file=NotDefined):
# Capture sys.stdout in a StringIO. Call print with args,
# and with sep, end, and file, if they're defined. Result
# must match expected.
# Look up the actual function to call, based on if sep, end,
# and file are defined.
fn = dispatch[(sep is not NotDefined,
end is not NotDefined,
file is not NotDefined)]
with support.captured_stdout() as t:
fn(args, sep, end, file)
self.assertEqual(t.getvalue(), expected)<|fim_middle|>check<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[Sequence['outputs.NetworkManagerDeploymentStatusResponse']]:
"""
Gets a page of Network Manager Deployment Status
"""
return pulumi.get(self, "value")<|fim_middle|>value<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, response):
schema_text = response.css(
"script[type='application/ld+json']::text"
).extract()[-1]
schema_json = json.loads(schema_text)
if isinstance(schema_json, list):
item = schema_json[0]
else:
item = schema_json
start = self._parse_dt(item["startDate"])
title = self._parse_title(item["name"])
meeting = Meeting(
title=title,
description="",
classification=self._parse_classification(title),
start=start,
end=self._parse_dt(item["endDate"]),
all_day=False,
time_notes="",
location=self._parse_location(item),
links=self.link_date_map[start.date()],
source=response.url,
)
meeting["status"] = self._get_status(meeting, text=schema_text)
meeting["id"] = self._get_id(meeting)
yield meeting<|fim_middle|>parse_detail<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, container_id, *args, **kwargs):
return self._metadata[container_id]<|fim_middle|>load_metadata<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(prop):
return hasattr(prop, 'columns') and isinstance(prop.columns[0], Label)<|fim_middle|>is_column_property<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(client, jobflow_id, job_name, jar_path, main_class, extra_args):
"""Submits single spark job to a running cluster"""
spark_job = {
'Name': job_name,
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar'
}
}
spark_args = ['spark-submit', "--deploy-mode", "cluster"]
if main_class:
spark_args.extend(['--class', main_class])
spark_args.extend([jar_path])
spark_args.extend(extra_args)
spark_job['HadoopJarStep']['Args'] = spark_args
try:
response = client.add_job_flow_steps(
JobFlowId=jobflow_id,
Steps=[spark_job],
)
except ClientError as e:
print(e.response['Error']['Message'])
exit(1)
step_id = response['StepIds'][0]
print("Step Id {} has been submitted".format(step_id))
return step_id<|fim_middle|>submit_spark_job<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(api_versions):
for api_version in sorted(api_versions.keys(), reverse=True):
swagger_files = api_versions[api_version]
print(_PY_NAMESPACE.format(
api_version=api_version,
ns="v"+api_version.replace("-", "_"))
)<|fim_middle|>print_python_namespace<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.syncdb("settings.py", migrate=True)
mock.assert_called_once_with(
"django-admin.py syncdb --settings=settings.py --migrate --noinput",
python_shell=False,
env=None,
runas=None,
)<|fim_middle|>test_django_admin_cli_syncdb_migrate<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""get_retry_interval() returns the right retry interval"""
job = Job.create(func=fixtures.say_hello)
# Handle case where self.retry_intervals is None
job.retries_left = 2
self.assertEqual(job.get_retry_interval(), 0)
# Handle the most common case
job.retry_intervals = [1, 2]
self.assertEqual(job.get_retry_interval(), 1)
job.retries_left = 1
self.assertEqual(job.get_retry_interval(), 2)
# Handle cases where number of retries > length of interval
job.retries_left = 4
job.retry_intervals = [1, 2, 3]
self.assertEqual(job.get_retry_interval(), 1)
job.retries_left = 3
self.assertEqual(job.get_retry_interval(), 1)
job.retries_left = 2
self.assertEqual(job.get_retry_interval(), 2)
job.retries_left = 1
self.assertEqual(job.get_retry_interval(), 3)<|fim_middle|>test_get_retry_interval<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertHolidayName(
"Easter Monday",
"2019-04-22",
"2020-04-13",
"2021-04-05",
"2022-04-18",
"2023-04-10",
)<|fim_middle|>test_easter_monday<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(value: str) -> "RuntimeProcessorType":
"""Returns an instance of RuntimeProcessorType corresponding to the given value.
Raises KeyError if parameter is not a value in the enumeration.
"""
for instance in RuntimeProcessorType.__members__.values():
if instance.value == value:
return instance
raise KeyError(f"'{value}'")<|fim_middle|>get_instance_by_value<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Test :func:`colour.models.rgb.transfer_functions.dji_d_log.\<|fim_middle|>test_nan_log_decoding_d_log<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
anc = pdl.ApplyNativeConstraintOp("anc", [type_val])
assert anc.attributes["name"] == StringAttr("anc")
assert anc.args == (type_val,)<|fim_middle|>test_build_anc<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>():
data = DerivedNoInherit(uid=str(time()), value=2, status=1)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert "uid" not in data.__syft_serializable__
assert "value" not in data.__syft_serializable__
assert de.status == data.status<|fim_middle|>test_derived_without_inherit<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( profile_2d, dx1, dx2, deg1, deg2 ):
"""
Compute error bound for spline approximation of 2D analytical profile.
Parameters
----------
profile_2d : 2D analytical profile
Must provide 'max_norm( n1,n2 )' method to compute max norm of its
mixed derivative of degree (n1,n2) over domain.
dx1 : float
Grid spacing along 1st dimension.
dx2 : float
Grid spacing along 2nd dimension.
deg1 : int
Spline degree along 1st dimension.
deg2 : int
Spline degree along 2nd dimension.
Result
------
max_error : float
Error bound: max-norm of error over domain should be smaller than this.
"""
# Max norm of highest partial derivatives in x1 and x2 of analytical profile
max_norm1 = profile_2d.max_norm( deg1+1, 0 )
max_norm2 = profile_2d.max_norm( 0 , deg2+1 )
# Error bound on function value
max_error = f_tihomirov_error_bound( dx1, deg1, max_norm1 ) \
+ f_tihomirov_error_bound( dx2, deg2, max_norm2 )
# Empirical correction: for linear interpolation increase estimate by 5%
if (deg1 == 1 or deg2 == 1):
max_error = 1.05 * max_error
return max_error<|fim_middle|>spline_2d_error_bound<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(sample_features):
# test the scenario when previous feature importance df is none
prev_df, curr_df = None, unevaluated_fi_df_template(sample_features)
assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df<|fim_middle|>test_merge_importance_dfs_base<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(servicer, server):
rpc_method_handlers = {
'setRole': grpc.unary_unary_rpc_method_handler(
servicer.setRole,
request_deserializer=uac_dot_RoleV2__pb2.SetRoleV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.SetRoleV2.Response.SerializeToString,
),
'deleteRole': grpc.unary_unary_rpc_method_handler(
servicer.deleteRole,
request_deserializer=uac_dot_RoleV2__pb2.DeleteRoleV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.DeleteRoleV2.Response.SerializeToString,
),
'searchRoles': grpc.unary_unary_rpc_method_handler(
servicer.searchRoles,
request_deserializer=uac_dot_RoleV2__pb2.SearchRolesV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.SearchRolesV2.Response.SerializeToString,
),
'getRole': grpc.unary_unary_rpc_method_handler(
servicer.getRole,
request_deserializer=uac_dot_RoleV2__pb2.GetRoleV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.GetRoleV2.Response.SerializeToString,
),
'getEnabledActions': grpc.unary_unary_rpc_method_handler(
servicer.getEnabledActions,
request_deserializer=uac_dot_RoleV2__pb2.GetEnabledActions.FromString,
response_serializer=uac_dot_RoleV2__pb2.GetEnabledActions.Response.SerializeToString,
),
'getSelfAllowedActionsBatchForWorkspace': grpc.unary_unary_rpc_method_handler(
servicer.getSelfAllowedActionsBatchForWorkspace,
request_deserializer=uac_dot_RoleV2__pb2.GetSelfAllowedActionsBatchForWorkspace.FromString,
response_serializer=uac_dot_RoleV2__pb2.GetSelfAllowedActionsBatchForWorkspace.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ai.verta.uac.RoleServiceV2', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))<|fim_middle|>add_role_service_v2_servicer_to_server<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, user: User):
return self.user_id == user.id<|fim_middle|>can_edit<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Resource Id represents the complete path to the resource.
"""
return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(dbpath: Path) -> str:
with initialised_database_at(str(dbpath)):
new_experiment(sample_name="fivehundredtest_sample",
name="fivehundredtest_name")
p1 = Parameter('Voltage', set_cmd=None)
p2 = Parameter('Current', get_cmd=np.random.randn)
meas = Measurement()
meas.register_parameter(p1).register_parameter(p2, setpoints=[p1])
with meas.run() as datasaver:
for v in np.linspace(0, 2, 250):
p1(v)
datasaver.add_result((p1, cast(float, p1())),
(p2, cast(float, p2())))
guid = datasaver.dataset.guid
datasaver.flush_data_to_database(block=True)
return guid<|fim_middle|>generate_local_run<|file_separator|>