<|fim_prefix|>def <|fim_suffix|>(
kfp_client,
experiment_id,
s3_client,
sagemaker_client,
s3_data_bucket,
test_file_dir,
):
download_dir = utils.mkdir(os.path.join(test_file_dir + "/generated"))
test_params = utils.load_params(
utils.replace_placeholders(
os.path.join(test_file_dir, "config.yaml"),
os.path.join(download_dir, "config.yaml"),
)
)
# Generate a random prefix for the model/job name to avoid errors if resources with the same name exist
test_params["Arguments"]["model_name"] = test_params["Arguments"][
"job_name"
] = input_job_name = (
utils.generate_random_string(5) + "-" + test_params["Arguments"]["model_name"]
)
print(f"running test with model/job name: {input_job_name}")
# Generate unique location for output since output filename is generated according to the content_type
test_params["Arguments"]["output_location"] = os.path.join(
test_params["Arguments"]["output_location"], input_job_name
)
_, _, workflow_json = kfp_client_utils.compile_run_monitor_pipeline(
kfp_client,
experiment_id,
test_params["PipelineDefinition"],
test_params["Arguments"],
download_dir,
test_params["TestName"],
test_params["Timeout"],
)
outputs = {"sagemaker-batch-transformation": ["output_location"]}
output_files = minio_utils.artifact_download_iterator(
workflow_json, outputs, download_dir
)
# Verify Job was successful on SageMaker
response = sagemaker_utils.describe_transform_job(sagemaker_client, input_job_name)
assert response["TransformJobStatus"] == "Completed"
assert response["TransformJobName"] == input_job_name
# Verify output location from pipeline matches job output and that the transformed file exists
output_location = utils.read_from_file_in_tar(
output_files["sagemaker-batch-transformation"]["output_location"]
)
print(f"output location: {output_location}")
assert output_location == response["TransformOutput"]["S3OutputPath"]
# Get relative path of file in S3 bucket
# The URI follows the format s3://<bucket_name>/relative/path/to/file;
# the split below extracts the part after the bucket name
file_key = os.path.join(
"/".join(output_location.split("/")[3:]), test_params["ExpectedOutputFile"]
)
assert s3_utils.check_object_exists(s3_client, s3_data_bucket, file_key)
assert not argo_utils.error_in_cw_logs(
workflow_json["metadata"]["name"]
), "Found the CloudWatch error message in the log output. Check SageMaker to see if the job has failed."
utils.remove_dir(download_dir)<|fim_middle|>test_transform_job<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
) -> Callable[
[policytagmanagerserialization.ExportTaxonomiesRequest],
Union[
policytagmanagerserialization.ExportTaxonomiesResponse,
Awaitable[policytagmanagerserialization.ExportTaxonomiesResponse],
],<|fim_middle|>export_taxonomies<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# self.test_root_dir = os.path.realpath(os.path.join(TEST_DIRNAME, RELATIVE_PATH))
self.test_root_dir = Path(TEST_DIRNAME) / RELATIVE_PATH
integration = BcPlatformIntegration()
metadata_integration.bc_integration = integration
integration.get_public_run_config()
metadata_integration.pre_scan()
definitions, definitions_raw = get_folder_definitions(str(self.test_root_dir), None)
self.definitions_context = build_definitions_context(definitions, definitions_raw)<|fim_middle|>set_up<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(ceph_cluster, **kw):
"""
Automates OSD re-balance test scenarios.
1. Create replicated and/or erasure pool/pools
2. Identify the osd to be removed
3. Fetch the host by daemon_type=osd and osd id
4. Fetch container id and device path
5. Mark osd out and wait for pgs to be active+clean
6. Remove OSD
7. Zap device and wait for device not present
8. Add OSD and wait for device present and pgs to be active+clean
"""
log.info(METHOD_NAME.__doc__)
config = kw["config"]
rhbuild = config.get("rhbuild")
cephadm = CephAdmin(cluster=ceph_cluster, **config)
rados_obj = RadosOrchestrator(node=cephadm)
client_node = ceph_cluster.get_nodes(role="client")[0]
timeout = config.get("timeout", 10800)
log.info("Running create pool test case")
if config.get("create_pools"):
pools = config.get("create_pools")
for each_pool in pools:
cr_pool = each_pool["create_pool"]
if cr_pool.get("pool_type", "replicated") == "erasure":
method_should_succeed(
rados_obj.create_erasure_pool, name=cr_pool["pool_name"], **cr_pool
)
else:
method_should_succeed(rados_obj.create_pool, **cr_pool)
if cr_pool.get("rados_put", False):
do_rados_put(mon=client_node, pool=cr_pool["pool_name"], nobj=100)
else:
method_should_succeed(rados_obj.bench_write, **cr_pool)
pool = random.choice(pools)["create_pool"]
if not pool:
log.error("Failed to retrieve pool details")
return 1
# Set recover threads configurations
if not rhbuild.startswith("6"):
rados_obj.change_recover_threads(config=pool, action="set")
# Set mclock_profile
if rhbuild.startswith("6") and config.get("mclock_profile"):
rados_obj.set_mclock_profile(profile=config["mclock_profile"])
acting_pg_set = rados_obj.get_pg_acting_set(pool_name=pool["pool_name"])
log.info(f"Acting set {acting_pg_set}")
if not acting_pg_set:
log.error("Failed to retrieve acting pg set")
return 1
osd_id = acting_pg_set[0]
host = rados_obj.fetch_host_node(daemon_type="osd", daemon_id=osd_id)
if not host:
log.error("Failed to fetch host details")
return 1
# fetch container id
out, _ = host.exec_command(sudo=True, cmd="podman ps --format json")
container_id = [
item["Names"][0]
for item in json.loads(out)
if item.get("Command") and f"osd.{osd_id}" in item["Command"]
][0]
if not container_id:
log.error("Failed to retrieve container id")
return 1
# fetch device path by osd_id
volume_out, _ = host.exec_command(
sudo=True,
cmd=f"podman exec {container_id} ceph-volume lvm list --format json",
)
dev_path = [
v[0]["devices"][0]
for k, v in json.loads(volume_out).items()
if str(k) == str(osd_id)
][0]
if not dev_path:
log.error("Failed to get device path")
return 1
log.debug(
f"device path : {dev_path}, osd_id : {osd_id}, host.hostname : {host.hostname}"
)
utils.set_osd_devices_unmanaged(ceph_cluster, osd_id, unmanaged=True)
method_should_succeed(utils.set_osd_out, ceph_cluster, osd_id)
method_should_succeed(wait_for_clean_pg_sets, rados_obj, timeout)
utils.osd_remove(ceph_cluster, osd_id)
if cr_pool.get("rados_put", False):
do_rados_get(client_node, pool["pool_name"], 1)
method_should_succeed(wait_for_clean_pg_sets, rados_obj, timeout)
method_should_succeed(utils.zap_device, ceph_cluster, host.hostname, dev_path)
method_should_succeed(wait_for_device, host, osd_id, action="remove")
utils.add_osd(ceph_cluster, host.hostname, dev_path, osd_id)
method_should_succeed(wait_for_device, host, osd_id, action="add")
method_should_succeed(wait_for_clean_pg_sets, rados_obj, timeout)
do_rados_put(mon=client_node, pool=pool["pool_name"], nobj=1000)
method_should_succeed(wait_for_clean_pg_sets, rados_obj, timeout)
if cr_pool.get("rados_put", False):
do_rados_get(client_node, pool["pool_name"], 1)
utils.set_osd_devices_unmanaged(ceph_cluster, osd_id, unmanaged=False)
rados_obj.change_recover_threads(config=pool, action="rm")
if config.get("delete_pools"):
for name in config["delete_pools"]:
method_should_succeed(rados_obj.detete_pool, name)
log.info("deleted all the given pools successfully")
return 0<|fim_middle|>run<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
with config.change_flags(cxx="", compute_test_value="raise"):
yield<|fim_middle|>set_pytensor_flags<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Points(df, vdims=['x'])
self.assertEqual(ds.kdims, [Dimension('y'), Dimension('z')])
self.assertEqual(ds.vdims, [Dimension('x')])<|fim_middle|>test_dataset_extract_kdims_with_vdims_defined<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
config = configparser.ConfigParser()
config.optionxform = str
config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertEqual(
self.latest_profile.to_json({self.app.get_id: self.app.name}),
{
'id': self.latest_profile.id,
'app_id': self.app.get_id,
'active': True,
'version': self.v2_build.version,
'build_profile_id': self.build_profile_id,
'app_name': 'foo',
'profile_name': 'English only'
}
)<|fim_middle|>test_latest_profile_serialize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target
):
def to_np(x):
return None if x is None else x.detach().cpu().numpy()
sample_ids = [dataset.ids[i] for i in sample["id"].tolist()]
texts = sample["src_texts"]
attns = [to_np(hypo["attn"]) for hypo in hypos]
eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos]
feat_preds = [to_np(hypo["feature"]) for hypo in hypos]
wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos]
if dump_target:
feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos]
wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos]
else:
feat_targs = [None for _ in hypos]
wave_targs = [None for _ in hypos]
return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds,
feat_targs, wave_targs)<|fim_middle|>postprocess_results<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(db_session):
mine = MineFactory(
minimal=True
)
MineDocument.query.delete()
yield dict(
mine=mine
)<|fim_middle|>setup_info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, definition_id: str, action_key: str) -> None:
"""Add a supported action to a machine."""
if action_key in self._machine_actions:
if definition_id in self._supported_actions:
if self._machine_actions[action_key] not in self._supported_actions[definition_id]:
self._supported_actions[definition_id].append(self._machine_actions[action_key])
else:
self._supported_actions[definition_id] = [self._machine_actions[action_key]]
else:
Logger.log("w", "Unable to add %s to %s, as the action is not recognised", action_key, definition_id)<|fim_middle|>add_supported_action<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(offline_event_factory):
EVENT_DATE = parse("2020-01-05 17:00:00 UTC")
ACTION_DATE = EVENT_DATE - timedelta(hours=EVENT_STARTING_HOURS)
CURRENT_DATE = ACTION_DATE + timedelta(minutes=30)
content_type = ContentType.objects.get_for_model(OfflineEvent)
event = offline_event_factory(
date=EVENT_DATE,
)
project = event.project
project.is_draft = True
project.save()
project.refresh_from_db()
action_count = Action.objects.filter(
verb=START, obj_content_type=content_type
).count()
assert action_count == 0
with freeze_time(CURRENT_DATE):
call_command("create_offlineevent_system_actions")
action_count = Action.objects.filter(
verb=START, obj_content_type=content_type
).count()
assert action_count == 0
# NotifyFollowersOnUpcomingEventEmail
assert len(mail.outbox) == 0<|fim_middle|>test_event_soon_draft_no_email<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(test_client_factory):
client = test_client_factory(app)
response = client.get("/schema")
assert response.headers["Content-Type"] == "application/vnd.oai.openapi"
assert response.text.strip() == EXPECTED_SCHEMA.strip()<|fim_middle|>test_schema_endpoint<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, source, **kwargs):
"""Helper that parses source without requiring files."""
return self.StoreClass(BytesIO(source), **kwargs)<|fim_middle|>parse_store<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(result: PingScanData):
return PingScanEvent(
source=AGENT_ID,
target=HOST_IP,
timestamp=TIMESTAMP,
tags=frozenset(),
response_received=result.response_received,
os=result.os,
)<|fim_middle|>get_ping_scan_event<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args):
options = parser.parse_args(args)
input_text = codecs.open(options.input[0], "r", encoding="utf-8").read()
python_globals = dict(chain(*options.defines))
output_text = PREAMBLE.format(template=options.input[0], generator=sys.argv[0]) + preprocess(input_text, python_globals, options.input[0])
txt_changed = True
if os.path.exists(options.output):
with codecs.open(options.output, "r", encoding="utf-8") as output_file:
txt_changed = output_file.read() != output_text
if txt_changed:
with codecs.open(options.output, "w", encoding="utf-8") as output_file:
output_file.write(output_text)<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(polygon: Polygon):
"""Constructs a compound matplotlib path from a Shapely or GeoJSON-like
geometric object
Args:
polygon (Polygon): polygon
Returns:
Path: compound matplotlib path
"""
this = Polygon(polygon)
assert this.geom_type == 'Polygon'
def coding(ob):
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(getattr(ob, 'coords', None) or ob)
vals = ones(n, dtype=Path.code_type) * Path.LINETO
vals[0] = Path.MOVETO
return vals
vertices = concatenate([asarray(this.exterior.coords)[:, :2]] +
[asarray(r.coords)[:, :2] for r in this.interiors])
codes = concatenate([coding(this.exterior)] +
[coding(r) for r in this.interiors])
return Path(vertices, codes)<|fim_middle|>polygon_path<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, file_object):<|fim_middle|>open<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request<|fim_middle|>prepare_request<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, info: Info):<|fim_middle|>parse_player_joined<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.pre_operations()
yield self.ExternalNetworksDelete(ctx=self.ctx)()
self.post_operations()<|fim_middle|>execute_operations<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, sandbox_name: str, **kwargs) -> Response:
""" reset all changes in specified sandbox
:param sandbox_name: str
:return: response
"""
url = format_url("/api/v1/Sandboxes('{}')/tm1.DiscardChanges", sandbox_name)
return self._rest.POST(url=url, **kwargs)<|fim_middle|>reset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
script="testscript", enabled=True, task_type="stream", db="testdb", rp="default"
):
return {
"script": script,
"enabled": enabled,
"type": task_type,
"dbrps": [{"db": db, "rp": rp}],
}<|fim_middle|>task<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, bufsiz: int, flags: int | None = None) -> bytes: ...<|fim_middle|>recv<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
folder = self._get_cmd_folder() / "node"
folder_must_exist(folder)
return folder<|fim_middle|>get_cmd_node_folder<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self, uuid):
try:
self.dataset_state[uuid]
except KeyError:
self.set_status(404, "dataset with uuid %s not found" % uuid)
return
await self.dataset_state.remove(uuid)
msg = Message(self.state).delete_dataset(uuid)
log_message(msg)
self.event_registry.broadcast_event(msg)
self.write(msg)<|fim_middle|>delete<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, event):
self._set_color_setter_boolean()
if self.is_color_setter_running:
return
button = event["button"]
if button == self.button_up:
self._set_color_temperature(+1)
elif button == self.button_down:
self._set_color_temperature(-1)
elif button == self.button_toggle:
self._set_color_temperature()
else:
self.py3.prevent_refresh()<|fim_middle|>on_click<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(before_timestamp: float):
"""Deleting old files inside : archives and tmp folders.
Only files inside the following folders are considered :
- {LOG_FOLDER_PATH}/{ARCHIVES_FOLDER_NAME}
- {LOG_FOLDER_PATH}/{TMP_FOLDER_NAME}
Args:
before_timestamp (float): Timestamp before which files are considered expired.
"""
archives_directory = get_log_sub_dir(name=ARCHIVES_FOLDER_NAME)
tmp_directory = get_log_sub_dir(name=TMP_FOLDER_NAME)
expired_archives_file_list = get_expired_file_list(
directory=archives_directory,
before_timestamp=before_timestamp,
)
expired_tmp_file_list = get_expired_file_list(
directory=tmp_directory,
before_timestamp=before_timestamp,
)
remove_file_list(file_list=expired_archives_file_list)
remove_file_list(file_list=expired_tmp_file_list)<|fim_middle|>clean_expired_files<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, expected_output_np, output_layout):
return transform_numpy(expected_output_np, "nhw", output_layout)<|fim_middle|>transformed_expected_output_np<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(l):
# Lazy person's mock: just transform the argument in a way in which we
# can check that this function was indeed called.
return [x * 2 for x in l]<|fim_middle|>dummy_function<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, options):<|fim_middle|>set_options<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
grp = EmitterGroup(source=self, em1=Event)
grp.connect(self.record_event)
self.result = None
ev = grp.em1(test_key=1)
self.assert_result(
event=ev,
source=self,
sources=[
self,
self],
test_key=1)<|fim_middle|>test_group_connect<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(xml_file, xml_file_name, report_folder, title, report_name):
logfunc(f'{title} found')
tree = ET.parse(xml_file)
data_headers = ('Key', 'Value')
data_list = []
root = tree.getroot()
for node in root:
# skip not relevant keys
if '.' in node.attrib['name']:
continue
value = None
try:
value = node.attrib['value']
except:
value = node.text
data_list.append((node.attrib['name'], value))
tl_bool = False
_make_reports(f'{APP_NAME} - {report_name}', data_headers, data_list, report_folder, xml_file_name, tl_bool)<|fim_middle|>parse_xml<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
mvn_dist, k, sample_shape, L21, omega1, L11, L22=0.8, L33=0.9, omega2=0.75
):
if mvn_dist == "OMTMultivariateNormal" and k > 1:
return
omega = torch.tensor([omega1, omega2, 0.0])
loc = torch.zeros(3, requires_grad=True)
zero_vec = [0.0, 0.0, 0.0]
off_diag = torch.tensor([zero_vec, [L21, 0.0, 0.0], zero_vec], requires_grad=True)
L = torch.diag(torch.tensor([L11, L22, L33])) + off_diag
if mvn_dist == "OMTMultivariateNormal":
dist = OMTMultivariateNormal(loc, L)
elif mvn_dist == "AVFMultivariateNormal":
CV = (1.1 * torch.rand(2, k, 3)).requires_grad_(True)
dist = AVFMultivariateNormal(loc, L, CV)
z = dist.rsample(sample_shape)
torch.cos((omega * z).sum(-1)).mean().backward()
computed_grad = off_diag.grad.cpu().data.numpy()[1, 0]
analytic = analytic_grad(L11=L11, L22=L22, L21=L21, omega1=omega1, omega2=omega2)
assert off_diag.grad.size() == off_diag.size()
assert loc.grad.size() == loc.size()
assert torch.triu(off_diag.grad, 1).sum() == 0.0
assert_equal(
analytic,
computed_grad,
prec=0.005,
msg="bad cholesky grad for %s (expected %.5f, got %.5f)"
% (mvn_dist, analytic, computed_grad),
)<|fim_middle|>test_mean_gradient<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(table_oid, attnum, engine, metadata, connection_to_use=None):
sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)
column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)
sa_column = sa_table.columns[column_name]
return sa_column<|fim_middle|>get_column_from_oid_and_attnum<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, mock_call, mock_run):
mock_run.side_effect = Exception
self.manager.request_package('vim')
with raises(KiwiRequestError):
self.manager.process_delete_requests(force=True)
mock_run.assert_called_once_with(
['chroot', 'root-dir', 'rpm', '-q', 'vim']
)<|fim_middle|>test_process_delete_requests_package_missing<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Check if element_id is validated."""
self.assertRaises(ValueError, AnyByteDataModelElement, "") # empty element_id
self.assertRaises(TypeError, AnyByteDataModelElement, None) # None element_id
self.assertRaises(TypeError, AnyByteDataModelElement, b"path") # bytes element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, True) # boolean element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, 123) # integer element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, 123.22) # float element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, {"id": "path"}) # dict element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, ["path"]) # list element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, []) # empty list element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, ()) # empty tuple element_id is not allowed
self.assertRaises(TypeError, AnyByteDataModelElement, set()) # empty set element_id is not allowed<|fim_middle|>test3element_id_input_validation<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return tf.initializers.VarianceScaling(
distribution='uniform', mode='fan_out', scale=0.333)<|fim_middle|>uniform_initializer<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(__n: SupportsIndex, __k: SupportsIndex) -> int: ...<|fim_middle|>comb<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, fn, filename,
expected_exception=FileNotFoundError,
check_filename=True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
if check_filename:
self.assertEqual(exc_filename, filename, "Function '%s(%a) failed "
"with bad filename in the exception: %a" %
(fn.__name__, filename, exc_filename))<|fim_middle|>apply_failure<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
shape=None,
batch_size=None,
dtype=None,
sparse=None,
batch_shape=None,
name=None,
tensor=None,
):
"""Used to instantiate a Keras tensor.
A Keras tensor is a symbolic tensor-like object, which we augment with
certain attributes that allow us to build a Keras model just by knowing the
inputs and outputs of the model.
For instance, if `a`, `b` and `c` are Keras tensors,
it becomes possible to do:
`model = Model(input=[a, b], output=c)`
Args:
shape: A shape tuple (tuple of integers or `None` objects),
not including the batch size.
For instance, `shape=(32,)` indicates that the expected input
will be batches of 32-dimensional vectors. Elements of this tuple
can be `None`; `None` elements represent dimensions where the shape
is not known and may vary (e.g. sequence length).
batch_size: Optional static batch size (integer).
dtype: The data type expected by the input, as a string
(e.g. `"float32"`, `"int32"`...)
sparse: A boolean specifying whether the expected input will be sparse
tensors. Note that, if `sparse` is `False`, sparse tensors can still
be passed into the input - they will be densified with a default
value of 0. This feature is only supported with the TensorFlow
backend. Defaults to `False`.
name: Optional name string for the layer.
Should be unique in a model (do not reuse the same name twice).
It will be autogenerated if it isn't provided.
tensor: Optional existing tensor to wrap into the `Input` layer.
If set, the layer will use this tensor rather
than creating a new placeholder tensor.
Returns:
A Keras tensor.
Example:
```python
# This is a logistic regression in Keras
x = Input(shape=(32,))
y = Dense(16, activation='softmax')(x)
model = Model(x, y)
```
"""
layer = InputLayer(
shape=shape,
batch_size=batch_size,
dtype=dtype,
sparse=sparse,
batch_shape=batch_shape,
name=name,
input_tensor=tensor,
)
return layer.output<|fim_middle|>input<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(x):
return x[0] * x[1]<|fim_middle|>map_mul<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
return f"arn:aws:ec2:{self.ec2_backend.region_name}:{self.ec2_backend.account_id}:transit-gateway/{self.id}"<|fim_middle|>arn<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
num_devices=1, ps_device_type="cpu", worker_device="/cpu:0", ps_ops=None, ps_strategy=None
):
if ps_ops is None:
ps_ops = ["Variable", "VariableV2", "VarHandleOp"]
if ps_strategy is None:
ps_strategy = device_setter._RoundRobinStrategy(num_devices)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
def _local_device_chooser(op):
current_device = pydev.DeviceSpec.from_string(op.device or "")
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if node_def.op in ps_ops:
ps_device_spec = pydev.DeviceSpec.from_string("/{}:{}".format(ps_device_type, ps_strategy(op)))
ps_device_spec.merge_from(current_device)
return ps_device_spec.to_string()
else:
worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
worker_device_spec.merge_from(current_device)
return worker_device_spec.to_string()
return _local_device_chooser<|fim_middle|>local_device_setter<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
src_data = [-100, -99, -98, -97, -96, -1]
expected_result = [max(src_data), ]
src = blocks.vector_source_s(src_data)
s2v = blocks.stream_to_vector(gr.sizeof_short, len(src_data))
op = blocks.max_ss(len(src_data))
dst = blocks.vector_sink_s()
self.tb.connect(src, s2v, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)<|fim_middle|>stest_s002<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
drop_table(self.connection, self.table1_path)<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return _max(product(seq) for seq in list_seqs(NUMS))<|fim_middle|>solve<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(builder, numElems):
return builder.StartVector(4, numElems, 4)<|fim_middle|>object_start_fields_vector<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
np_offsets = np.array([7, 10, 10, 200])
np_content = np.array([6.6, 4.4, 5.5, 7.7, 3.3, 2.2, 1.1, 8.8])
array = ak.contents.ListOffsetArray(
ak.index.Index(np_offsets),
ak.contents.numpyarray.NumpyArray(np_content),
)
assert array.nbytes == np_offsets.nbytes + np_content.nbytes<|fim_middle|>test_list_offset_array_nbytes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self,
rule: OnlineCTCEndpointRule,
rule_name: str,
decoding_something: bool,
trailine_silence: int,
utterance_length: int) -> bool:
ans = (
decoding_something or (not rule.must_contain_nonsilence)
) and trailine_silence >= rule.min_trailing_silence and utterance_length >= rule.min_utterance_length
if (ans):
logger.info(f"Endpoint Rule: {rule_name} activated: {rule}")
return ans<|fim_middle|>rule_activated<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(x, axis=-1):
"""Perform log softmax activation on the data
Parameters
----------
data : tvm.te.Tensor
N-D input data
Returns
-------
output : tvm.te.Tensor
N-D output with same shape
"""
shape = x.shape
if axis < 0:
axis = len(shape) + axis
if axis >= len(shape):
raise ValueError("axis parameter should be less than input dim")
k1 = te.reduce_axis((0, shape[axis]), name="k")
k2 = te.reduce_axis((0, shape[axis]), name="k")
def insert_reduce_index(indices, reduce_index):
return indices[:axis] + (reduce_index,) + indices[axis:]
def get_non_reduce_indices(indices):
return tuple([var for (i, var) in enumerate(indices) if i != axis])
def _compute_max(*indices):
eval_range = insert_reduce_index(indices, k1)
return tvm.te.max(x[eval_range], axis=k1)
def _compute_expsum(max_elem, *indices):
eval_range = insert_reduce_index(indices, k2)
return te.sum(te.exp(x[eval_range] - max_elem[indices]), axis=k2)
def _normalize(max_elem, expsum, *indices):
non_reduce_indices = get_non_reduce_indices(indices)
return x[indices] - max_elem[non_reduce_indices] - te.log(expsum[non_reduce_indices])
reduced_shape = tuple([dim for (i, dim) in enumerate(shape) if i != axis])
max_elem = te.compute(reduced_shape, _compute_max, name="T_softmax_maxelem")
expsum = te.compute(reduced_shape, lambda *indices: _compute_expsum(max_elem, *indices))
return te.compute(
shape,
lambda *indices: _normalize(max_elem, expsum, *indices),
attrs={"axis": axis},
)<|fim_middle|>log_softmax<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
TestController.METHOD_NAME(self)
self.delete_all_policies()
self.delete_all_token()
self.delete_all_realms()
self.delete_all_resolvers()
self.create_common_resolvers()
self.create_common_realms()<|fim_middle|>set_up<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> int:
return int(math.ceil(float(self.get_length_in_bits() / 8.0)))<|fim_middle|>length_in_bytes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(id,override):
return BackuppcDatabase().METHOD_NAME(id,override)<|fim_middle|>edit_backup_profile<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
def get_option(term_slug: TermEnum, option_type: str) -> TermOption:
return [
option
for option in terms_dict[term_slug].options
if option.option_type == option_type
][0]
def get_consent(term_slug: TermEnum, person: Person) -> Consent:
return [
consent
for consent in old_consents
if consent.term.slug == term_slug and consent.person == person
][0]
self.admin_1 = Person.objects.create_superuser(
username="admin1",
personal="Super",
family="User",
email="sudo1@example.org",
password="admin",
)
consent_to_all_required_consents(self.admin_1)
self.admin_1.airport = Airport.objects.first()
self.admin_1.save()
self.admin_2 = Person.objects.create_superuser(
username="admin_2",
personal="Super",
family="User",
email="sudo@example.org",
password="admin",
)
consent_to_all_required_consents(self.admin_2)
self.admin_2.airport = Airport.objects.first()
self.admin_2.save()
terms = (
Term.objects.filter(
slug__in=[TermEnum.MAY_CONTACT, TermEnum.PUBLIC_PROFILE]
)
.active()
.prefetch_active_options()
)
terms_dict = {term.slug: term for term in terms}
may_contact_agree = get_option(TermEnum.MAY_CONTACT, TermOptionChoices.AGREE)
may_contact_decline = get_option(
TermEnum.MAY_CONTACT, TermOptionChoices.DECLINE
)
public_profile_agree = get_option(
TermEnum.PUBLIC_PROFILE, TermOptionChoices.AGREE
)
public_profile_decline = get_option(
TermEnum.PUBLIC_PROFILE, TermOptionChoices.DECLINE
)
old_consents = (
Consent.objects.filter(
person__in=[self.admin_1, self.admin_2],
term__slug__in=[TermEnum.MAY_CONTACT, TermEnum.PUBLIC_PROFILE],
)
.active()
.select_related("term", "person")
)
Consent.reconsent(
consent=get_consent(TermEnum.MAY_CONTACT, self.admin_1),
term_option=may_contact_agree,
)
Consent.reconsent(
consent=get_consent(TermEnum.MAY_CONTACT, self.admin_2),
term_option=may_contact_decline,
)
Consent.reconsent(
consent=get_consent(TermEnum.PUBLIC_PROFILE, self.admin_1),
term_option=public_profile_agree,
)
Consent.reconsent(
consent=get_consent(TermEnum.PUBLIC_PROFILE, self.admin_2),
term_option=public_profile_decline,
)
self.client.login(username="admin1", password="admin")<|fim_middle|>set_up<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, start_status, db_session):
em = EmailMessageFactory.create(status=EmailStatuses(start_status))
status = EmailStatus.load(em)
assert status.save().status == EmailStatuses(start_status)<|fim_middle|>test_load<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>():
return
yield<|fim_middle|>g2<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, url):
video_id = self._match_id(url)
content = self._call_api(
'1.5', 'IN/CONTENT/VIDEOURL/VOD/' + video_id, video_id)
if content.get('isEncrypted'):
raise ExtractorError('This video is DRM protected.', expected=True)
dash_url = content['videoURL']
headers = {
'x-playback-session-id': '%s-%d' % (uuid.uuid4().hex, time.time() * 1000)
}
formats = self._extract_mpd_formats(
dash_url, video_id, mpd_id='dash', headers=headers, fatal=False)
formats.extend(self._extract_m3u8_formats(
dash_url.replace('.mpd', '.m3u8').replace('/DASH/', '/HLS/'),
video_id, 'mp4', m3u8_id='hls', headers=headers, fatal=False))
for f in formats:
f.setdefault('http_headers', {}).update(headers)
self._sort_formats(formats)
metadata = self._call_api(
'1.6', 'IN/DETAIL/' + video_id, video_id)['containers'][0]['metadata']
title = metadata['title']
episode = metadata.get('episodeTitle')
if episode and title != episode:
title += ' - ' + episode
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': content.get('posterURL'),
'description': metadata.get('longDescription') or metadata.get('shortDescription'),
'timestamp': int_or_none(metadata.get('creationDate'), 1000),
'duration': int_or_none(metadata.get('duration')),
'season_number': int_or_none(metadata.get('season')),
'episode': episode,
'episode_number': int_or_none(metadata.get('episodeNumber')),
'release_year': int_or_none(metadata.get('year')),
}<|fim_middle|>real_extract<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Return option value of `processManagement.pidFilePath` if a yaml conf
and `pidFilePath` if a key-value pair conf.
"""
if self.is_yaml:
return self.get('processManagement', {}).get('pidFilePath')
else:
return self.get('pidfilepath')<|fim_middle|>pidfilepath<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, plugin_ctx):
return dict(max_kwic_words=self._max_kwic_words, load_chunk_size=self._load_chunk_size)<|fim_middle|>export<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(result: QCSObjectsForNotebook):
assert not result.signed_in
assert result.is_simulator
assert result.project_id == 'fake_project'<|fim_middle|>assert_simulated_values<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Returns following residue in the atom group."""
return self._hv._getResidue(self.getResindex()+1)<|fim_middle|>get_next<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(input_file, output_file=None, format=None):
"""Crop to non-zero area of the image
:param input_file: Name of the file to manipulate
:param output_file: filename for the new file (same as input by default)
:param format: format to be used for the new file (if different from extension)
"""
if PIL is None:
raise RuntimeError(_("Install PIL or Pillow to use this function"))
if not output_file:
output_file = input_file
img = Image.open(input_file)
box = img.getbbox()
cropped_image = img.crop(box)
cropped_image.save(output_file, format)<|fim_middle|>crop_image<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(ctx: tanjun.abc.Context) -> None:
await ctx.respond("I don't know how to describe Europe... small?")<|fim_middle|>europe_command<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
ifm: te.Tensor,
lut: te.Tensor,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ofm_channels: int,
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
ifm_layout: str,
ofm_layout: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of unary_elementwise for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
lut : te.Tensor
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the unary elementwise operator.
"ABS"
"CLZ"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ofm_channels : int
The number of OFM channels.
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
te.Tensor
The OFM tensor.
"""
assert ifm.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
# Changing the ifm and ofm scale to conform with that expected by Vela API
if ofm_scale != 0:
ofm_scale = ifm_scale / ofm_scale
ifm_scale = 1.0
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ofm_channels, (0, 0, 0, 0)
)
# Unary elementwise compute operation
ofm_height = dmaed_ifm.shape[1]
ofm_width = dmaed_ifm.shape[2]
unary_elementwise_attrs = {
"op": "ethosu_unary_elementwise",
"operator_type": operator_type,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
}
def clz_imp(inp):
# Assuming that it's a 32 bit int
return 32 - te.log2(inp)
operators = {"ABS": te.abs, "CLZ": clz_imp}
unary_elementwise = te.compute(
(1, ofm_height, ofm_width, ofm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype)
),
name="ethosu_unary_elementwise",
attrs=unary_elementwise_attrs,
)
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ofm_channels))
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
)
propagator_attrs = {"ifm_propagator": ifm_propagator}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
unary_elementwise,
ofm_layout,
ofm_zero_point,
ofm_scale,
ofm_channels,
attrs=propagator_attrs,
)<|fim_middle|>unary_elementwise_compute<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
test_users: dict[str, int],
user_based_filter_set_dict: dict[str, Any],
valid_filter_set_data_for_update: dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, FILTER_SET_OWNER_USERNAME)
# act
response = call_delete_filter_set(client, user_based_filter_set_dict)
# assert
assert response.status_code == 200
assert_filterset_deleted(user_based_filter_set_dict)<|fim_middle|>test_when_caller_is_filterset_owner_200<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, context):
self.layout.operator(ROR_OT_exporter.bl_idname, text="Truck (.truck)")<|fim_middle|>menu_func<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
al = RandomSampling()
category_id = 0
doc = generate_simple_doc("dummy_dataset", category_id, num_sentences=100)
predictions = [Prediction(True, random.random()) for _ in doc.text_elements]
sorted_items_for_labeling1 = al.get_recommended_items_for_labeling("dummy_workspace", "dummy_dataset",
category_id, doc.text_elements,
predictions, sample_size=100)
sorted_items_for_labeling2 = al.get_recommended_items_for_labeling("dummy_workspace", "dummy_dataset",
category_id, doc.text_elements,
predictions, sample_size=100)
self.assertEqual(sorted_items_for_labeling1, sorted_items_for_labeling2)<|fim_middle|>test_random_reproducibility<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Ask for isolation options and isolate elements"""
element_cats = isolate_config.load_configs()
select_options = sorted(x.Name for x in element_cats) + [
"Room Tags",
"Model Groups",
"Painted Elements",
"Model Elements",
]
selected_switch = forms.CommandSwitchWindow.show(
select_options, message="Temporarily isolate elements of type:"
)
if selected_switch:
curview = revit.active_view
with revit.TransactionGroup("Isolate {}".format(selected_switch)):
with revit.Transaction("Reset temporary hide/isolate"):
# reset temporary hide/isolate before filtering elements
curview.DisableTemporaryViewMode(
DB.TemporaryViewMode.TemporaryHideIsolate
)
element_to_isolate = get_isolation_elements(selected_switch)
with revit.Transaction("Isolate {}".format(selected_switch)):
# now that list of elements is ready,
# let's isolate them in the active view
curview.IsolateElementsTemporary(element_to_isolate)<|fim_middle|>ask_for_options<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertCallableWorks(GoodDuckProcessor)<|fim_middle|>test_class_duck_typed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Test the value of a dataset"""
title12 = h5py_read_dataset(self.h5f["/1.2/title"])
self.assertEqual(title12,
u"aaaaaa")<|fim_middle|>test_title<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['case_id_external'].label, 'Case external ID')<|fim_middle|>test_case_id_external_form_label<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.__tau_syn_E<|fim_middle|>tau_syn_e<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Checks that standardization execution passes"""
self.assertModule(
"r.learn.train",
group=self.group,
training_map=self.labelled_pixels,
model_name="RandomForestClassifier",
n_estimators=100,
save_model=self.model_file,
flags="s",
)
self.assertFileExists(filename=self.model_file)
self.assertModule(
"r.learn.predict",
group=self.group,
load_model=self.model_file,
output=self.output,
)
self.assertRasterExists(self.output, msg="Output was not created")
estimator, y, class_labels = joblib.load(self.model_file)
trans = estimator.named_steps["preprocessing"].transformers[0]
self.assertIsInstance(trans[1], StandardScaler)
estimator = None<|fim_middle|>test_standardization<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return sjson.dump(self.errors)<|fim_middle|>json_string<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters<|fim_middle|>header_parameters<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> CompressionStage:
return CompressionStage.FULLY_COMPRESSED<|fim_middle|>compression_stage<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(
self, request: executor_pb2.CreateStructRequest
-> executor_pb2.CreateStructResponse:<|fim_middle|>create_struct<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Proxy Resource name associated with the resource.
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(rewriter: PatternRewriter) -> list[SSAValue]:
"""
Add cast operations just before the targeted operation
if the operands were not already int registers
"""
new_ops = list[Operation]()
new_operands = list[SSAValue]()
for operand in rewriter.current_operation.operands:
if not isinstance(
operand.type, riscv.IntRegisterType | riscv.FloatRegisterType
):
new_type = register_type_for_type(operand.type)
cast_op = builtin.UnrealizedConversionCastOp.get(
(operand,), (new_type.unallocated(),)
)
new_ops.append(cast_op)
operand = cast_op.results[0]
new_operands.append(operand)
rewriter.insert_op_before_matched_op(new_ops)
return new_operands<|fim_middle|>cast_operands_to_regs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(row: Row):
buffer.append(self.mapper.write_row(row, with_metadata=True))
if len(buffer) > settings.BUFFER_SIZE:
conn.execute(sa.insert(table), buffer)
buffer.clear()
on_row(row) if on_row else None<|fim_middle|>process_row<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Create a L-intersection in 3D
"""
f_1 = np.array([[1, 3, 3, 1], [1, 1, 1, 1], [1, 1, 3, 3]])
f_2 = np.array([[1, 1, 1, 1], [1, 3, 3, 1], [1, 1, 3, 3]])
f_set = [f_1, f_2]
mdg = pp.meshing.cart_grid(f_set, [3, 3, 3])
mdg.compute_geometry()
list_sd_3 = mdg.subdomains(dim=3)
list_sd_2 = mdg.subdomains(dim=2)
list_sd_1 = mdg.subdomains(dim=1)
list_sd_0 = mdg.subdomains(dim=0)
self.assertTrue(len(list_sd_3) == 1)
self.assertTrue(len(list_sd_2) == 2)
self.assertTrue(len(list_sd_1) == 1)
self.assertTrue(len(list_sd_0) == 0)
g_all = np.hstack([list_sd_3, list_sd_2, list_sd_1, list_sd_0])
for g in g_all:
f_p = g.frac_pairs
if g.dim == 3:
f_p_shape_true = 8
else:
f_p_shape_true = 0
self.assertTrue(f_p.shape[1] == f_p_shape_true)
self.assertTrue(
np.allclose(g.face_centers[:, f_p[0]], g.face_centers[:, f_p[1]])
)<|fim_middle|>test_l_intersection_3d<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()<|fim_middle|>close<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, query: str) -> Response:
"""
Receive raw query and act upon it somehow.
Args:
query (Any): query in SurrealQL to execute
Returns:
HandlerResponse
"""
need_to_close = self.is_connected is False
conn = self.connect()
cur = conn.cursor()
try:
cur.execute(query)
result = cur.fetchall()
if result:
response = Response(
RESPONSE_TYPE.TABLE,
data_frame=pd.DataFrame(
result,
columns=[x[0] for x in cur.description],
)
)
else:
response = Response(RESPONSE_TYPE.OK)
except Exception as e:
log.logger.error(f'Error running query: {query} on SurrealDB!')
response = Response(
RESPONSE_TYPE.ERROR,
error_message=str(e)
)
cur.close()
if need_to_close is True:
self.disconnect()
return response<|fim_middle|>native_query<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# To read soil moisture, read 2 bytes from register 0
return self.get_reg(0)<|fim_middle|>moist<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}<|fim_middle|>acc_and_f1<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
metadata_extractors_by, testfiledir, filecount, expected, unexpected
):
is_two_files = filecount == 2
source_filename = "f-markdown-{0}-nikola.md".format(filecount)
metadata_filename = "f-markdown-{0}-nikola.meta".format(filecount)
source_path = os.path.join(testfiledir, source_filename)
metadata_path = os.path.join(testfiledir, metadata_filename)
post = FakePost(source_path, metadata_path, {}, None, metadata_extractors_by)
assert os.path.exists(source_path)
if is_two_files:
assert os.path.exists(metadata_path)
meta, extractor = get_meta(post, None)
assert extractor is metadata_extractors_by["name"]["nikola"]
assert meta["title"] == "T: Markdown, {0}, Nikola".format(filecount)
assert meta["slug"] == "s-markdown-{0}-nikola".format(filecount)
assert expected in meta["tags"]
assert unexpected not in meta["tags"]
assert "meta" in meta["tags"]
assert "Nikola" in meta["tags"]
assert "Markdown" in meta["tags"]
assert meta["date"] == "2017-07-01 00:00:00 UTC"<|fim_middle|>test_nikola_meta_markdown<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
prefix = "%s." % self.namespace
for scoped_name in self.scoped_keys():
name = scoped_name.replace(prefix, "", 1)
yield name<|fim_middle|>keys<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Uses Orca log files to test that
molecular zero point energy can be properly read.
"""
log = OrcaLog(os.path.join(self.data_path, "Orca_opt_freq_test.log"))
assert abs(log.load_zero_point_energy() - 55502.673180815) < 1e-3
log = OrcaLog(os.path.join(self.data_path, "Orca_TS_test.log"))
assert abs(log.load_zero_point_energy() - 93500.08860598055) < 1e-3<|fim_middle|>test_load_zero_point_energy_from_orca<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(verify_sizes=True):
'''Get train, valid, test retro datasets.'''
args = get_args()
retro_args = get_retro_args()
# DB dataset.
db_dataset = get_db_dataset()
# Retro datasets.
chunk_ds_info_map = get_chunk_dataset_map()
retro_dataset_map = {}
for data_key, chunk_ds_info in chunk_ds_info_map.items():
chunk_dataset = chunk_ds_info["data"]
neighbor_dir = chunk_ds_info["neighbor_dir"]
neighbor_path_map = BlockPathMap.from_dir(neighbor_dir,
retro_args.retro_block_size)
# Verify dataset prefixes.
expected_dir = get_neighbor_dirname(data_key, chunk_dataset.sample_dataset)
assert expected_dir == neighbor_dir, \
"inconsistent dataset source; '%s' vs. '%s'." % \
(expected_dir, neighbor_dir)
# Verify num chunks.
n_sample_chunks = len(chunk_dataset)
n_neighbor_chunks = neighbor_path_map.max_idx
if not os.path.isdir(neighbor_dir):
if torch.distributed.get_rank() == 0:
raise Exception("neighbor directory '%s' not found; please "
"compare --train-samples, --seq-length, --seed, "
"--eval-iters, and --eval-interval, with "
"retro preprocessing args." %
neighbor_dir)
torch.distributed.barrier()
exit()
if verify_sizes and n_sample_chunks != n_neighbor_chunks:
if torch.distributed.get_rank() == 0:
print("neighbor_dir : %s" % neighbor_dir)
print("neighbor_path_map : %s" % neighbor_path_map)
raise Exception("num sampled chunks (%d) != num neighbor chunks "
"(%d); did you complete querying the entire "
"pretraining dataset?"
% (n_sample_chunks, n_neighbor_chunks))
torch.distributed.barrier()
exit()
# Retro dataset.
retro_dataset_map[data_key] = RetroDataset(
num_neighbors=args.retro_num_neighbors,
num_retrieved_chunks=args.retro_num_retrieved_chunks,
block_size=retro_args.retro_block_size,
db_dataset=db_dataset,
chunk_dataset=chunk_dataset,
neighbor_path_map=neighbor_path_map,
)
# Extract datasets.
train_ds = retro_dataset_map.get("train", None)
valid_ds = retro_dataset_map.get("valid", None)
test_ds = retro_dataset_map.get("test", None)
return train_ds, valid_ds, test_ds<|fim_middle|>get_retro_datasets<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if not Widgets.has_webkit:
return
self.browser.go_back()<|fim_middle|>back_cb<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec(template % ((name,)*7), d)
prop_cache[name] = d[name]
return d[name]<|fim_middle|>make_property<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(*, resp, **_):
"""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param resp: response, response from url call
:return: pandas dataframe of original source data
"""
# Convert response to dataframe
# read all files in the stat canada zip
with zipfile.ZipFile(io.BytesIO(resp.content), "r") as f:
# read in file names
for name in f.namelist():
# if filename does not contain "MetaData", then create dataframe
if "MetaData" not in name:
data = f.open(name)
df = pd.read_csv(data, header=0)
return df<|fim_middle|>sc_call<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(provided, name):
# delete all existing
results = provided.query(properties=[('name', name)])
for res in results['features']:
provided.delete(res['id'])<|fim_middle|>delete_by_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""test Language creation"""
language = Language(self.sample["language_key"], self.sample["language_name"])
self.assertIsNotNone(language)
self.assertIsNotNone(language.key)
self.assertIsNone(language.concepts)<|fim_middle|>test_language_init<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
node: Any,
key: Optional[Union[int, str]],
parent: Any,
path: List[Union[int, str]],
ancestors: List[Any],
):
if isinstance(node, OperationDefinition):
self.enter_operation_definition(node, key, parent, path, ancestors)<|fim_middle|>enter<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(apps, schema_editor):
_manage_permission(apps, schema_editor, assign_perm)<|fim_middle|>assign_object_permissions<|file_separator|> |