text (string, lengths 67–7.88k) |
---|
<|fim_prefix|>def <|fim_suffix|>(event, saltenv="base", test=None):
"""
Delete a reactor
CLI Example:
.. code-block:: bash
salt-run reactor.delete 'salt/cloud/*/destroyed'
"""
if not _reactor_system_available():
raise CommandExecutionError("Reactor system is not running.")
with salt.utils.event.get_event(
"master",
__opts__["sock_dir"],
opts=__opts__,
listen=True,
) as sevent:
master_key = salt.utils.master.get_master_key("root", __opts__)
__jid_event__.fire_event(
{"event": event, "key": master_key}, "salt/reactors/manage/delete"
)
res = sevent.get_event(wait=30, tag="salt/reactors/manage/delete-complete")
return res.get("result")<|fim_middle|>delete<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Test the post-login hook"""
req, info = self._generate_req_info(self.app_settings['openid.provider'])
remember_me(None, req, info)
# The user should now exist, and be a member of the releng group
user = models.User.get('lmacken')
assert user.name == 'lmacken'
assert user.email == 'lmacken@fp.o'
assert len(user.groups) == 1
assert user.groups[0].name == 'releng'<|fim_middle|>test_new_user<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Connection name.",
required=True,
id_part="name",
)
return cls._args_schema<|fim_middle|>build_arguments_schema<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self._get(self._interfaces)['result']<|fim_middle|>interfaces<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(a : 'int', b : 'int'):
return (a-b), (a+b)<|fim_middle|>multi_return_vars_expr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Runs generate.main() which should merge source files,
then compile all sources in all configured languages.
Validates output by checking all .mo files in all configured languages.
.mo files should exist, and be recently created (modified
after start of test suite)
"""
# Change dummy_locales to only contain Esperanto.
self.configuration.dummy_locales = ['eo']
# Clear previous files.
for locale in self.configuration.dummy_locales:
for filename in ('django', 'djangojs'):
mofile = filename + '.mo'
path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
if os.path.exists(path):
os.remove(path)
# Regenerate files.
generate.main(verbosity=0, strict=False)
for locale in self.configuration.dummy_locales:
for filename in ('django', 'djangojs'):
mofile = filename + '.mo'
path = os.path.join(self.configuration.get_messages_dir(locale), mofile)
exists = os.path.exists(path)
assert exists, (f'Missing file in locale {locale}: {mofile}')
assert datetime.fromtimestamp(os.path.getmtime(path), UTC) >= \
self.start_time, ('File not recently modified: %s' % path)
# Segmenting means that the merge headers don't work the way they
# used to, so don't make this check for now. I'm not sure if we'll
# get the merge header back eventually, or delete this code eventually.
# self.assert_merge_headers(locale)<|fim_middle|>test_main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[str]:
"""
Windows IoT Device Service notes.
"""
return pulumi.get(self, "notes")<|fim_middle|>notes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# Should fit in the context window since we subtracted the number of tokens of the test prompt
# from the max request length of 2049
assert self.window_service.fits_within_context_window(TEST_PROMPT, self.window_service.max_request_length - 51)
# Should not fit within the max request length because we're expecting one more extra token in the completion
assert not self.window_service.fits_within_context_window(
TEST_PROMPT, self.window_service.max_request_length - 51 + 1
)<|fim_middle|>test_fits_within_context_window<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> list[str]:
"""Return the ids of this metric's issues."""
return self.get("issue_ids", [])<|fim_middle|>issue_ids<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, *filepaths):<|fim_middle|>resolve_conflict<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Any:
"""
Dataset properties.
"""
return pulumi.get(self, "properties")<|fim_middle|>properties<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(site_id, api_key=None):
cycle_point = os.environ['CYLC_TASK_CYCLE_POINT']
# Try to get the information from SYNOPS.
dist = 0.0
badsites = []
fails = True
obs = None
while dist < 1 and fails:
try:
obs = synop_grab(site_id, cycle_point)
fails = False
except NoDataException:
badsites.append(f'{int(site_id):05d}')
site_id, dist = get_nearby_site(site_id, badsites)
if obs is None:
if api_key:
print('Attempting to get weather data from DataPoint...')
data = get_datapoint_data(site_id, cycle_point, api_key)
else:
print('No API key provided, falling back to archived data')
data = get_archived_data(site_id, cycle_point)
obs = extract_observations(data)
# Write observations.
with open('wind.csv', 'w+') as data_file:
data_file.write(', '.join(obs))<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config):
org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster = \
build_model_from_hybrid_plugin(model_fn, loss_fn, test_config)
org_loss, org_output, sharded_loss, sharded_output = \
run_forward_backward_with_hybrid_plugin(
org_model,
sharded_model,
sharded_optimizer,
data_gen_fn,
output_transform_fn,
criterion,
booster)
stage_manager = booster.plugin.stage_manager
tp_group = booster.plugin.tp_group
# unwrap model
vit_model = unwrap_model(org_model, 'ViTModel', 'vit')
shard_vit_model = unwrap_model(sharded_model, 'ViTModel', 'vit')
# check grad
row_layer_for_check = ['encoder.layer[0].attention.attention.query', 'embeddings.patch_embeddings.projection']
col_layer_for_check = ['encoder.layer[0].attention.output.dense']
# Save gradient tensors for comparison between the original model and the sharded model before optimizer step.
grads_to_check = {}
if (stage_manager is None or stage_manager.is_first_stage()) and booster.plugin.zero_stage == 0:
if test_config['precision'] == 'fp32':
atol, rtol = 1e-5, 1e-3
else:
atol, rtol = 5e-3, 5e-3
row_layer_grads = get_grad_tensors_for_check(vit_model,
shard_vit_model,
row_layer_for_check,
tp_group,
atol=atol,
rtol=rtol,
dim=0,
verbose=False)
col_layer_grads = get_grad_tensors_for_check(vit_model,
shard_vit_model,
col_layer_for_check,
tp_group,
atol=atol,
rtol=rtol,
dim=1,
verbose=False)
grads_to_check.update(col_layer_grads)
grads_to_check.update(row_layer_grads)
# optimizer executes step
org_optimizer.step()
sharded_optimizer.step()
# check last hidden state & loss
if stage_manager is None or stage_manager.is_last_stage():
if test_config['precision'] == 'fp32':
atol, rtol = 1e-5, 1e-3
else:
atol, rtol = 5e-3, 5e-3
if org_model.__class__.__name__ == 'ViTModel':
check_output_hidden_state(org_output, sharded_output, stage_manager, atol=atol, rtol=rtol)
check_loss(org_loss, sharded_loss, atol=atol, rtol=rtol)
# check weights
if stage_manager is None or stage_manager.is_first_stage():
if test_config['precision'] == 'fp32':
atol, rtol = 5e-3, 1e-3
else:
atol, rtol = 5e-3, 5e-3
check_weight(vit_model,
shard_vit_model,
col_layer_for_check,
tp_group,
atol=atol,
rtol=rtol,
dim=1,
verbose=False)
# check grads
check_all_grad_tensors(grads_to_check)
torch.cuda.empty_cache()<|fim_middle|>check_forward_backward<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return True<|fim_middle|>has_product_component<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
for i in range(3):
self.op.setConstant(i)
aimg = self.renderScene(self.scene, "/tmp/a_%03d.png" % i)
assert numpy.all(aimg[:, :, 0] == i), "!= %d, [0,0,0]=%d" % (i, aimg[0, 0, 0])
self.op.setConstant(42)
self.op.setDelay(1)
aimg = self.renderScene(self.scene, joinRendering=False, exportFilename="/tmp/x_%03d.png" % i)
# this should be "i", not 255 (the default background for the imagescene)
assert numpy.all(aimg[:, :, 0] == i), "!= %d, [0,0,0]=%d" % (i, aimg[0, 0, 0])
# Now give the scene time to update before we change it again...
self.scene.joinRenderingAllTiles(viewport_only=False)<|fim_middle|>test_lazy<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Do not make a request if the archive format is unsupported."""
self.instance.archive(format="clearly fake")
assert self.session.get.called is False<|fim_middle|>test_unsupported_archive<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self, app):
await self.session.close()<|fim_middle|>on_cleanup<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(plugin, item_id, category_title, **kwargs):
"""
Build programs listing
- Les feux de l'amour
- ...
"""
resp = urlquick.get(URL_SHOWS)
root = resp.parse("div",
attrs={"id": "accordion-horizontal"})
for category_datas in root.iterfind("./div"):
if category_title in category_datas.find(".//h2").text:
for sub_category_datas in category_datas.findall(".//div[@class='entry-section clearfix']"):
sub_category_title = sub_category_datas.find("./p/strong").text
item = Listitem()
item.label = sub_category_title
item.set_callback(list_programs,
item_id=item_id,
category_title=category_title,
sub_category_title=sub_category_title)
item_post_treatment(item)
yield item<|fim_middle|>list_sub_categories<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request, lccn, date, edition, sequence):
"""
api/chronam/lccn/<date>/ed-<edition>/seq-<sequence>.json
Retrieve a page's info
"""
try:
_year, _month, _day = date.split("-")
_date = datetime.date(int(_year), int(_month), int(_day))
title = models.Title.objects.get(lccn=lccn)
issue = title.issues.filter(date_issued=_date, edition=edition).order_by("-created").first()
METHOD_NAME = issue.pages.filter(sequence=int(sequence)).order_by("-created").first()
except ValueError as e:
return JsonResponse({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)
except (AttributeError, IndexError, ObjectDoesNotExist):
return JsonResponse({'detail': 'Page does not exist'}, status=status.HTTP_404_NOT_FOUND)
if METHOD_NAME is None:
return JsonResponse({'detail': 'Page does not exist'}, status=status.HTTP_404_NOT_FOUND)
serializer = rest_serializers.PageSerializer(METHOD_NAME, context={'request': request})
return JsonResponse(serializer.data, safe=False)<|fim_middle|>page<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# Mutating attribute with a Tensor type
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.Tensor(3, 2)
def forward(self, x):
self.a = self.a.to(torch.float64)
return x.sum() + self.a.sum()
self.check_failure_on_export(Foo(), torch.Tensor(3, 2))<|fim_middle|>test_module_attribute_mutation_violation_positive_1<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return [
{
"Value": "/path/to/certs/testsite1.crt",
"Key": "test-shared/sites/testsite1/ssl/certs/SSLCertificateFile",
},
{
"Value": "/path/to/certs/testsite1.key",
"Key": "test-shared/sites/testsite1/ssl/certs/SSLCertificateKeyFile",
},
{"Value": None, "Key": "test-shared/sites/testsite1/ssl/certs/"},
{"Value": "True", "Key": "test-shared/sites/testsite1/ssl/force"},
{"Value": None, "Key": "test-shared/sites/testsite1/ssl/"},
{
"Value": "salt://sites/testsite1.tmpl",
"Key": "test-shared/sites/testsite1/template",
},
{"Value": "test.example.com", "Key": "test-shared/sites/testsite1/uri"},
{"Value": None, "Key": "test-shared/sites/testsite1/"},
{"Value": None, "Key": "test-shared/sites/"},
{"Value": "Test User", "Key": "test-shared/user/full_name"},
{"Value": "adm\nwww-data\nmlocate", "Key": "test-shared/user/groups"},
{"Value": '"adm\nwww-data\nmlocate"', "Key": "test-shared/user/dontsplit"},
{"Value": "yaml:\n key: value\n", "Key": "test-shared/user/dontexpand"},
{"Value": None, "Key": "test-shared/user/blankvalue"},
{"Value": "test", "Key": "test-shared/user/login"},
{"Value": None, "Key": "test-shared/user/"},
]<|fim_middle|>base_pillar_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"Vertical polarization should decay in (cos2θ)**2 in vertical plane and no correction in horizontal one"
self.assertTrue(abs(self.ai.polarization(factor=-1)[6] - numpy.ones(13)).max() == 0, "No correction in the horizontal plane")
self.assertTrue(abs(self.ai.polarization(factor=-1)[:, 6] - (numpy.cos((2 * self.rotX)) + 1) / 2).max() < self.epsilon, "cos(2th)^2 like in the vertical plane")<|fim_middle|>test_vert_pol<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, data):
"""Handle a pong message."""
self.last_pong = ioloop.IOLoop.current().time()<|fim_middle|>on_pong<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""
This helps trim the library a bit by excluding rates with
products with more protons than the endpoint, heavier than the
endpoint, or with relatively high or low neutron percentages.
"""
# Proton number bounds
Zlo, Zhi = 6, endpoint.Z
# Nucleon number bounds
Alo, Ahi = 12, endpoint.A
# Bounds on A / Z ratio to drop peripheral nuclei
Rlo, Rhi = 1.69, 2.2
def limit_products(r):
meet_conds = \
(
(Zlo <= p.Z <= Zhi and
Alo <= p.A <= Ahi and
Rlo <= p.A / p.Z <= Rhi and
(p.N, p.Z) in bintable.energies) or
(p.Z, p.A) == (1, 1) or
(p.Z, p.A) == (2, 4)
for p in r.products
)
return all(meet_conds)
return limit_products<|fim_middle|>product_limiter<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, y):<|fim_middle|>inverse_log_det_jacobian<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Path to the environment file"""
return self._env_file<|fim_middle|>env_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, load_system_with_callbacks):
scwc = load_system_with_callbacks
# Don't register this rod
mock_rod = self.MockRod(2, 3, 4, 5)
with pytest.raises(ValueError) as excinfo:
scwc.collect_diagnostics(mock_rod)
assert "was not found, did you" in str(excinfo.value)<|fim_middle|>test_callback_with_unregistered_system_throws<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(voigt_notation):
return voigt_notation_to_stiffness_tensor(
np.divide(voigt_notation, voigt_compliance_factors)
)<|fim_middle|>voigt_notation_to_compliance_tensor<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Get a valid Bearer token for the Name Request Service."""
token_url = current_app.config.get('NAMEX_AUTH_SVC_URL')
client_id = current_app.config.get('NAMEX_SERVICE_CLIENT_USERNAME')
client_secret = current_app.config.get('NAMEX_SERVICE_CLIENT_SECRET')
data = 'grant_type=client_credentials'
# get service account token
res = requests.post(url=token_url,
data=data,
headers={'content-type': 'application/x-www-form-urlencoded'},
auth=(client_id, client_secret))
try:
return res.json().get('access_token')
except Exception: # noqa B902; pylint: disable=W0703;
logger.error('Failed to get nr token')
return None<|fim_middle|>get_nr_bearer_token<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
METHOD_NAME = abstracts_settings.get(self.event, 'announcement')
render_mode = abstracts_settings.get(self.event, 'announcement_render_mode')
return RENDER_MODE_WRAPPER_MAP[render_mode](METHOD_NAME)<|fim_middle|>announcement<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, field, value, idx_start, idx_end=None):<|fim_middle|>set<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
self.optim_step_progress.increment_ready()
self.trainer.profiler.start("optimizer_step")<|fim_middle|>on_before_step<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertEqual(
format_exception(
"Hello world, this is a very long message that will "
"inevitably wrap onto another line."
),
"Hello world, this is a very long message that will\n"
" inevitably wrap onto another line.",
)<|fim_middle|>test_long_basic_message<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return [
self.InputItem(
name=_("业务 ID"),
key="biz_cc_id",
type="string",
schema=StringItemSchema(description=_("当前操作所属的 CMDB 业务 ID")),
),
self.InputItem(
name=_("定时作业名称"),
key="job_cron_name",
type="string",
schema=StringItemSchema(description=_("待创建的定时作业名称")),
),
self.InputItem(
name=_("定时规则"),
key="job_cron_expression",
type="string",
schema=StringItemSchema(description=_("待创建的定时作业定时规则")),
),
self.InputItem(
name=_("定时作业状态"),
key="job_cron_status",
type="int",
schema=IntItemSchema(description=_("待创建的定时作业状态,暂停(1) 启动(2)"), enum=[1, 2]),
),
]<|fim_middle|>inputs_format<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self, organization: RpcOrganization, data: Mapping[str, str]
) -> Mapping[str, Any]:
return {
"name": data["name"],
"external_id": data["external_id"],
"url": data["url"],
"config": {
"instance": data["instance"],
"project": data["project"],
"name": data["name"],
},
"integration_id": data["installation"],
}<|fim_middle|>build_repository_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, source, mapped, Model):
for field in Model._meta.fields:
column = field.column
if hasattr(source, column):
if Model in self.overrides and column in self.overrides[Model]:
self.assertEqual(self.overrides[Model][column], getattr(mapped, column))
else:
self.assertEqual(getattr(source, column), getattr(mapped, column))<|fim_middle|>assert_model<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
policy = {"timeout": 0.1}
user = "example-test"
try:
self.client.admin_query_user(user, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "timeout is invalid"<|fim_middle|>test_query_user_with_invalid_timeout_policy<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, df):
"""
Generic data loader function
:param df: Input tensor
:return: Returns the constructed dataloader
"""
return DataLoader(
df, batch_size=self.args["batch_size"], num_workers=self.args["num_workers"]
)<|fim_middle|>create_data_loader<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, environment=None):
if self._sys_path is not None:
return self._sys_path
# The sys path has not been set explicitly.
if environment is None:
environment = self.get_environment()
sys_path = list(environment.get_sys_path())
try:
sys_path.remove('')
except ValueError:
pass
return sys_path<|fim_middle|>get_base_sys_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(n_messages, messages, p_response, app_data):
"""
Simple conversation function that responds to any
prompt where the echo is off with the supplied password
"""
# Create an array of n_messages response objects
addr = CALLOC(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = STRDUP(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0<|fim_middle|>my_conv<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(theyear: int, w: int = 2, l: int = 1, c: int = 6, m: int = 3) -> str: ...<|fim_middle|>calendar<|file_separator|> |
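A brief aside: this looks like a type stub for the standard library's calendar.calendar, which renders a whole year as a multi-column text calendar. A minimal, hedged illustration of the call the stub describes (the parameter values below simply repeat the defaults shown in the stub):

```python
# Minimal illustration of the standard-library call the stub above annotates.
import calendar

# w: day column width, l: lines per week, c: spacing between month columns,
# m: months per row -- the same defaults the stub shows.
year_text = calendar.calendar(2024, w=2, l=1, c=6, m=3)
print(year_text.splitlines()[0])  # the year heading line
```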
<|fim_prefix|>def <|fim_suffix|>(temp_salt_minion, tmp_path):
opts = temp_salt_minion.config.copy()
opts["master_uri"] = "tcp://127.0.0.1:12323"
grains = salt.loader.grains(opts)
pillar = salt.pillar.RemotePillar(
opts,
grains,
temp_salt_minion.id,
"base",
)
mock = MagicMock()
mock.side_effect = salt.exceptions.SaltReqTimeoutError()
pillar.channel.crypted_transfer_decode_dictentry = mock
with pytest.raises(salt.exceptions.SaltClientError):
pillar.compile_pillar()<|fim_middle|>test_remote_pillar_timeout<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(num: int):
if num < 1:
raise ValueError("test")
METHOD_NAME(num - 1)<|fim_middle|>exception_fun<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, aligned):
a = self._get_array(2e300, aligned)
with pytest.raises(TypeError,
match="error raised inside the core-loop: non-finite factor!"):
a.astype(SF(2e-300))<|fim_middle|>test_sfloat_cast_internal_errors<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(incident_comment_id: Optional[str] = None,
incident_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIncidentCommentResult:
"""
Gets an incident comment.
:param str incident_comment_id: Incident comment ID
:param str incident_id: Incident ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['incidentCommentId'] = incident_comment_id
__args__['incidentId'] = incident_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230701preview:getIncidentComment', __args__, opts=opts, typ=GetIncidentCommentResult).value
return AwaitableGetIncidentCommentResult(
author=pulumi.get(__ret__, 'author'),
created_time_utc=pulumi.get(__ret__, 'created_time_utc'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
last_modified_time_utc=pulumi.get(__ret__, 'last_modified_time_utc'),
message=pulumi.get(__ret__, 'message'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))<|fim_middle|>get_incident_comment<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls):
return "%s.%s" % (cls.__module__, cls.__name__)<|fim_middle|>strclass<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(m, init_func, obj):
if len(list(m.children())) > 0:
if m == obj:
return
METHOD_NAME(m, init_func, m)
else:
try:
init_func(m)
except Exception as e:
print('initialize layer {} failed, exception is :{}'.format(m, e))<|fim_middle|>recursive_init<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""The main function. Hier spielt die Musik.
"""
# parse the command line, exit with UNKNOWN if it fails
try:
args = parse_args()
except SystemExit:
sys.exit(STATE_UNKNOWN)
# fetch data
if args.TEST is None:
result = lib.base.coe(lib.url.fetch_json(
args.URL + '/info/info.json', timeout=args.TIMEOUT
))
else:
# do not call the command, put in test data
stdout, stderr, retc = lib.test.test(args.TEST)
try:
result = json.loads(stdout)
except:
lib.base.cu('ValueError: No JSON object could be decoded')
# init some vars
msg = ''
state = STATE_OK
perfdata = ''
# get thresholds automatically
max_conn = result['licenseInfo']['connections']
max_conn_warn = max_conn * 0.9
max_conn_crit = max_conn * 0.95
# build the message and perfdata
msg += 'Max {} {}, '.format(max_conn, lib.txt.pluralize('connection', max_conn))
# license; expired?
now = lib.time.now(as_type='iso')
msg += 'licensed' if result['licenseInfo']['hasLicense'] else 'no license'
if result['licenseInfo']['endDate']:
if result['licenseInfo']['endDate'][0:10] <= now[0:10]:
msg += ' (expired) [WARNING]'
state = lib.base.get_worst(STATE_WARN, state)
expires_in = lib.time.timestrdiff(result['licenseInfo']['endDate'][0:10], now[0:10], pattern1='%Y-%m-%d', pattern2='%Y-%m-%d')
if expires_in <= 10 * 24 * 60 * 60:
msg += ' (expires in {}) [WARNING]'.format(lib.human.seconds2human(expires_in))
state = lib.base.get_worst(STATE_WARN, state)
msg += ', '
view_state = lib.base.get_state(result['connectionsStat']['hour']['view']['max'], max_conn_warn, max_conn_crit, _operator='ge')
state = lib.base.get_worst(view_state, state)
msg += 'last hour: {}/{}/{} views{} '.format(
result['connectionsStat']['hour']['view']['min'],
result['connectionsStat']['hour']['view']['avr'],
result['connectionsStat']['hour']['view']['max'],
lib.base.state2str(view_state, prefix=' ')
)
perfdata += lib.base.get_perfdata('conn_hour_view_min', result['connectionsStat']['hour']['view']['min'], None, max_conn_warn, max_conn_crit, 0, result['licenseInfo']['connections'])
perfdata += lib.base.get_perfdata('conn_hour_view_avr', result['connectionsStat']['hour']['view']['avr'], None, max_conn_warn, max_conn_crit, 0, result['licenseInfo']['connections'])
perfdata += lib.base.get_perfdata('conn_hour_view_max', result['connectionsStat']['hour']['view']['max'], None, max_conn_warn, max_conn_crit, 0, result['licenseInfo']['connections'])
edit_state = lib.base.get_state(result['connectionsStat']['hour']['edit']['max'], max_conn_warn, max_conn_crit, _operator='ge')
state = lib.base.get_worst(edit_state, state)
msg += 'and {}/{}/{} edits{} (min/avr/max), '.format(
result['connectionsStat']['hour']['edit']['min'],
result['connectionsStat']['hour']['edit']['avr'],
result['connectionsStat']['hour']['edit']['max'],
lib.base.state2str(edit_state, prefix=' ')
)
perfdata += lib.base.get_perfdata('conn_hour_edit_min', result['connectionsStat']['hour']['edit']['min'], None, max_conn_warn, max_conn_crit, 0, result['licenseInfo']['connections'])
perfdata += lib.base.get_perfdata('conn_hour_edit_avr', result['connectionsStat']['hour']['edit']['avr'], None, max_conn_warn, max_conn_crit, 0, result['licenseInfo']['connections'])
perfdata += lib.base.get_perfdata('conn_hour_edit_max', result['connectionsStat']['hour']['edit']['max'], None, max_conn_warn, max_conn_crit, 0, result['licenseInfo']['connections'])
if 'usersInfo' in result:
msg += '{} unique {}, '.format(result['usersInfo']['uniqueUserCount'], lib.txt.pluralize('user', result['usersInfo']['uniqueUserCount']))
perfdata += lib.base.get_perfdata('unique_users', result['usersInfo']['uniqueUserCount'], None, None, None, 0, None)
msg += 'v{}, '.format(result['serverInfo']['buildVersion'])
# over and out
lib.base.oao(msg[:-2], state, perfdata)<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(table, column, profiling_result) -> RecommendedAssertion:
if column is None:
return None
schema_type = profiling_result['tables'][table]['columns'][column].get('schema_type')
if schema_type is None:
return None
test_function_name = 'assert_column_schema_type'
assertion_values = {
'schema_type': schema_type
}
assertion = RecommendedAssertion(test_function_name, None, assertion_values)
return assertion<|fim_middle|>recommended_column_schema_type_assertion<|file_separator|> |
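A hedged sketch of a call to this helper; the nested profiling_result layout is inferred from the dictionary lookups in the function above, and the table and column names are invented for illustration:

```python
# Hypothetical input, shaped to match the lookups performed by the function above.
profiling_result = {
    "tables": {
        "orders": {
            "columns": {
                "amount": {"schema_type": "NUMERIC"},
            },
        },
    },
}

assertion = recommended_column_schema_type_assertion("orders", "amount", profiling_result)
# Per the body above this builds:
# RecommendedAssertion('assert_column_schema_type', None, {'schema_type': 'NUMERIC'})
```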
<|fim_prefix|>def <|fim_suffix|>(rng, dt, sr):
ns = T.NS[dt][sr]
ok = True
attdet = AttackDetector(dt, sr)
state_c = initial_state()
x_c = np.zeros(ns+6)
for run in range(100):
### Generate noise, and an attack at random point
x = ((2 * rng.random(ns)) - 1) * (2 ** 8 - 1)
x[(ns * rng.random()).astype(int)] *= 2 ** 7
### Check Implementation
f_att = attdet.run(100, x)
x_c = np.append(x_c[-6:], x)
f_att_c = lc3.attdet_run(dt, sr, 100, state_c, x_c)
ok = ok and f_att_c == f_att
ok = ok and np.amax(np.abs(1 - state_c['en1']/attdet.en1)) < 2
ok = ok and np.amax(np.abs(1 - state_c['an1']/attdet.an1)) < 2
ok = ok and state_c['p_att'] == attdet.p_att
return ok<|fim_middle|>check_unit<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dirpath: str, privkey_pem: bytes, cert_pem: bytes,
entropy: bytes, counter: int) -> None:
"""
Write the setup directory.
Args:
dirpath: The directory path.
privkey_pem: The private key PEM.
cert_pem: The certificate PEM.
entropy: The 48 bytes of entropy.
counter: The counter value.
"""
# Directory
os.mkdir(dirpath)
# Private key
with open(f'{dirpath}/private-key.pem', 'bw') as f:
f.write(privkey_pem)
# Certificate
with open(f'{dirpath}/certificate.pem', 'bw') as f:
f.write(cert_pem)
# Entropy
with open(f'{dirpath}/entropy', 'wb') as f:
f.write(entropy)
# Counter
with open(f'{dirpath}/counter', 'w') as f:
f.write(f'{str(counter)}\n')<|fim_middle|>write_setup_dir<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cnt_viewmodel):
"""
Dispalays information about all the services of container.
:param cnt_viewmodel: ContainerViewModel: contains view info about container
:return: None
"""
io.echo('Platform:', cnt_viewmodel.soln_stk)
for i, service_info in enumerate(cnt_viewmodel.service_infos):
_print_service_details(service_info)
if i != len(cnt_viewmodel.service_infos) - 1:
io.echo()<|fim_middle|>print_container_details<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>( self ):<|fim_middle|>test_include_cache_cached_new_mtime<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(_):
return '[Ping Devices]'<|fim_middle|>parse_ping_devices<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(micros: int, logical: int) -> int:
return (micros << HybridTime.kBitsForLogicalComponent) | logical<|fim_middle|>repr_from_micros_and_logical<|file_separator|> |
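The one-liner above packs a physical microsecond timestamp and a logical counter into a single integer. A standalone sketch of the round trip, with the bit width as an explicit assumption (the real value lives in HybridTime.kBitsForLogicalComponent and is not shown in the snippet):

```python
# Illustrative sketch only: K_BITS is an assumed width, not taken from the snippet above.
K_BITS = 12

def pack_hybrid_time(micros: int, logical: int) -> int:
    # High bits carry the physical (microsecond) clock, low bits the logical counter.
    return (micros << K_BITS) | logical

def unpack_hybrid_time(packed: int) -> tuple:
    return packed >> K_BITS, packed & ((1 << K_BITS) - 1)

assert unpack_hybrid_time(pack_hybrid_time(1_700_000_000_000_000, 7)) == (1_700_000_000_000_000, 7)
```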
<|fim_prefix|>def <|fim_suffix|>(self):
return self.settings.arch in ["x86", "x86_64"]<|fim_middle|>has_sse_support<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(enums, item):
for en in enums:
if item in en:
value = en[1] if wx4 else en[0]
return value<|fim_middle|>find_enum<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")<|fim_middle|>kind<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls):
cls.use_temp_region()
cls.runModule("g.region", raster="elevation")<|fim_middle|>set_up_class<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, ufunc, stride_in0, stride_in1,
stride_out, dtype):
ufunc(self.ufunc_args[0], self.ufunc_args[1][0], *self.ufunc_args[2:])<|fim_middle|>time_binary_scalar_in1<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(xmlfile, operation_model, expected):
response_body = _get_raw_response_body(xmlfile)
response = {'body': response_body, 'status_code': 200, 'headers': {}}
for case in SPECIAL_CASES:
if case in xmlfile:
print("SKIP: %s" % xmlfile)
return
if 'errors' in xmlfile:
response['status_code'] = 400
# Handle the special cased __headers__ key if it exists.
if b'__headers__' in response_body:
loaded = json.loads(response_body.decode('utf-8'))
response['headers'] = loaded.pop('__headers__')
response['body'] = json.dumps(loaded).encode('utf-8')
protocol = operation_model.service_model.protocol
parser_cls = parsers.PROTOCOL_PARSERS[protocol]
parser = parser_cls(timestamp_parser=lambda x: x)
parsed = parser.parse(response, operation_model.output_shape)
parsed = _convert_bytes_to_str(parsed)
expected['ResponseMetadata']['HTTPStatusCode'] = response['status_code']
expected['ResponseMetadata']['HTTPHeaders'] = response['headers']
d2 = parsed
d1 = expected
if d1 != d2:
log.debug('-' * 40)
log.debug("XML FILE:\n" + xmlfile)
log.debug('-' * 40)
log.debug("ACTUAL:\n" + pprint.pformat(parsed))
log.debug('-' * 40)
log.debug("EXPECTED:\n" + pprint.pformat(expected))
if not d1 == d2:
# Borrowed from assertDictEqual, though this doesn't
# handle the case when unicode literals are used in one
# dict but not in the other (and we want to consider them
# as being equal).
print(d1)
print()
print(d2)
pretty_d1 = pprint.pformat(d1, width=1).splitlines()
pretty_d2 = pprint.pformat(d2, width=1).splitlines()
diff = '\n' + '\n'.join(difflib.ndiff(pretty_d1, pretty_d2))
raise AssertionError("Dicts are not equal:\n%s" % diff)<|fim_middle|>test_parsed_response<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(
src: StrOrBytesPath,
dst: StrOrBytesPath,
*,
src_dir_fd: int | None = ...,
dst_dir_fd: int | None = ...,
follow_symlinks: bool = ...,
loop: AbstractEventLoop | None = ...,
executor: Any = ...,
) -> None: ...<|fim_middle|>link<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(using: str, max_csv_amount: int = 30) -> None:
"""
rotate csv zip file in the {now:%Y} folder
:returns:
"""
storage = _get_storage_backend(using=using)
now = timezone.now()
src_folder = f'{storage.location}/{now:%Y}'
if os.path.exists(src_folder):
list_of_files = glob(f'{src_folder}/**/*.zip', recursive=True)
if len(list_of_files) > max_csv_amount:
list_of_files.sort(key=os.path.getmtime)
for file_to_be_deleted in list_of_files[:len(list_of_files) - max_csv_amount]:
os.remove(file_to_be_deleted)<|fim_middle|>rotate_zip_files<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Returns the description for this plugin. This is shown on the plugin configuration
page.
>>> plugin.get_description()
"""
return self.description<|fim_middle|>get_description<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(data, filename):
return _problem_directory_file(data.problem.code, filename)<|fim_middle|>problem_directory_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, exe, args, csv_enabled):
self.exe_path = exe
self.exe_args = args
self.has_csv = csv_enabled<|fim_middle|>set_command<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(bytes_io, output_folder_name):
with ZipFile(bytes_io) as zf:
if output_folder_name:
os.makedirs(output_folder_name, exist_ok=True)
zf.extractall(output_folder_name)
os.remove(os.path.join(output_folder_name, 'metadata_v0.json'))
os.rename(
os.path.join(output_folder_name, 'metadata_v1.json'),
os.path.join(output_folder_name, METADATA_FILENAME)
)
else:
in_memory_directory = {}
for name in zf.namelist():
in_memory_directory[name] = zf.read(name)
return in_memory_directory<|fim_middle|>extract_data<|file_separator|> |
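A hedged usage sketch for the extractor above, exercising the in-memory branch (no folder name) with a tiny archive built on the fly; file names other than the two metadata files are invented:

```python
# Build a small zip in memory and read it back without touching disk.
import io
from zipfile import ZipFile

buf = io.BytesIO()
with ZipFile(buf, "w") as zf:
    zf.writestr("metadata_v0.json", "{}")
    zf.writestr("metadata_v1.json", "{}")
    zf.writestr("table.csv", "a,b\n1,2\n")

contents = extract_data(io.BytesIO(buf.getvalue()), None)
assert contents["table.csv"] == b"a,b\n1,2\n"
```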
<|fim_prefix|>def <|fim_suffix|>(args: List[str]) -> Optional[Dict[str, Any]]:
try:
opts, items = parse_broadcast_args(args[1:])
except SystemExit as e:
if e.code != 0:
print(e.args[0], file=sys.stderr)
input(_('Press Enter to quit'))
return None
sys.stdout.flush()
loop = Loop()
handler = Broadcast(opts, items)
try:
loop.loop(handler)
finally:
if handler.session_started:
sys.stdout.buffer.write(session_command(handler.payload, False))
sys.stdout.buffer.flush()
return None<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls) -> List[str]:
"""
Return a list of metadata fields that can be used to order an entity
list.
"""
return ["duration", "publication_date"]<|fim_middle|>get_allowed_meta_order_fields<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, combined_system, molecule_name):
combined_system.remove_molecule_type(molecule_name)
# Just a sanity check
combined_system.to_files(prefix=molecule_name, decimal=8)
get_intermol_defaults(periodic=True).write_mdp_file("tmp.mdp")
_process(
_run_gmx_energy(f"{molecule_name}.top", f"{molecule_name}.gro", "tmp.mdp"),
)<|fim_middle|>test_remove_basic<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return 1<|fim_middle|>eccentricity<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
client, phase_factory, idea_factory, user, category_factory, organisation
):
phase, module, project, idea = setup_phase(
phase_factory, idea_factory, phases.IssuePhase
)
category = category_factory(module=module)
url = reverse(
"a4_candy_ideas:idea-create",
kwargs={"organisation_slug": organisation.slug, "module_slug": module.slug},
)
with freeze_phase(phase):
client.login(username=user.email, password="password")
response = client.get(url)
assert_template_response(response, "a4_candy_ideas/idea_create_form.html")
idea = {
"name": "Idea",
"description": "description",
"category": category.pk,
"organisation_terms_of_use": True,
}
response = client.post(url, idea)
assert redirect_target(response) == "idea-detail"<|fim_middle|>test_create_view<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return {group: {}}<|fim_middle|>setup_loader_modules<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if self.mt_dict is None:
print("Cannot save GMT, please read a ModEM data and/or response file first")
return
self.data_array = np.zeros((len(self.mt_dict), 7))
for i in range(len(self.mt_dict)):
for ii, att in enumerate(['lon', 'lat', self.colorby, 'azimuth', 'phimin', 'phimax', 'skew']):
self.data_array[i, ii] = getattr(self.mt_dict[i], att)<|fim_middle|>build_data_array<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, available_ports):
"""
Combines all ports that should be added to the
Dockerfile into one array
"""
port_list = []
for p in available_ports:
if p.get("expose", True):
port_list.append(p.get("value"))
return port_list<|fim_middle|>ports<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
data = self.load_hdf5_file(self.events_file)
data = self.calc_rho36(data)
if any(key.startswith("unc_est") for key in self.cuts):
data = self.calc_uncertainties(data)
data = self.apply_cuts(data, self.cuts)
for name in self.output_names:
container = Container(name)
nubar = -1 if "bar" in name else 1
if "e" in name:
flav = 0
n_files = int(self.files_per_flavor[0])
if "mu" in name:
flav = 1
n_files = int(self.files_per_flavor[1])
if "tau" in name:
flav = 2
n_files = int(self.files_per_flavor[2])
pdg = nubar * (12 + 2 * flav)
mask = data["pdg_id"] == pdg
if "cc" in name:
mask = np.logical_and(mask, data["interaction_type"] == 1)
else:
mask = np.logical_and(mask, data["interaction_type"] == 2)
events = {key: value[mask] for key, value in data.items()}
weight_dict = events["I3MCWeightDict"]
primary = events["MCInIcePrimary"]
container["true_energy"] = primary["energy"].astype(FTYPE)
container["true_coszen"] = np.cos(primary["zenith"]).astype(FTYPE)
container["pdg_code"] = primary["pdg_encoding"].astype(FTYPE)
container["interaction"] = weight_dict["InteractionType"].astype(FTYPE)
CM2_TO_M2 = 1e-4
derived_weight = (
CM2_TO_M2
* weight_dict["OneWeight"]
/ n_files
/ weight_dict["gen_ratio"]
/ weight_dict["NEvents"]
)
container["weighted_aeff"] = derived_weight.astype(FTYPE)
reco = self.reco
reco_total_energy = (
events[f"{reco}_cascade_energy"] + events[f"{reco}_track_energy"]
)
container["reco_energy"] = reco_total_energy.astype(FTYPE)
container["reco_coszen"] = np.cos(events[f"{reco}_zenith"]).astype(FTYPE)
container["reco_z"] = events[f"{reco}_z"].astype(FTYPE)
container["reco_rho"] = events["rho_36"].astype(FTYPE)
if self.track_E_cut is None:
container["pid"] = events["L7_PIDClassifier_ProbTrack"].astype(FTYPE)
else:
pid = events[f"{reco}_track_energy"] > float(self.track_E_cut)
container["pid"] = pid.astype(FTYPE)
container["weights"] = np.ones(container.size, dtype=FTYPE)
container["initial_weights"] = np.ones(container.size, dtype=FTYPE)
container.set_aux_data("nubar", nubar)
container.set_aux_data("flav", flav)
self.data.add_container(container)
if len(self.data.names) == 0:
raise ValueError(
"No containers created during data loading for some reason."
)<|fim_middle|>setup_function<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return MonitorAlarmShieldStrategyComponent<|fim_middle|>component_cls<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(head: Object) -> Iterator[Object]:
"""
Iterate over all of the nodes in a list.
:param head: ``struct list_head *``
:return: Iterator of ``struct list_head *`` objects.
"""
head = head.read_()
pos = head.next.read_()
while pos != head:
yield pos
pos = pos.next.read_()<|fim_middle|>list_for_each<|file_separator|> |
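A usage sketch only, assuming a drgn session where prog is a drgn.Program attached to a Linux kernel target; "modules" is the kernel's global module list, and container_of comes from drgn itself:

```python
# Hedged usage sketch: needs a drgn Program `prog` bound to a live kernel or vmcore.
from drgn import container_of

head = prog["modules"].address_of_()   # struct list_head *
for pos in list_for_each(head):        # the generator defined above
    mod = container_of(pos, "struct module", "list")
    print(mod.name.string_().decode())
```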
<|fim_prefix|>def <|fim_suffix|>(monkeypatch, clean_env):
patch_platforms(monkeypatch, [])
with pytest.raises(
RuntimeError,
match=r"No Toga backend could be loaded.",
):
_get_platform_factory()<|fim_middle|>test_no_platforms<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, title, file_output):
save_plot_file(file_output, title, self._get_plotly_figs(title))<|fim_middle|>save_to_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
y = [None, 1, 2, None, None, 5, None, 7, None]
filled = _fill_missing_data(y, max_gap_fraction=0.5)
assert filled == [None, 1, 2, 3, 4, 5, 6, 7, None]
filled = _fill_missing_data(y, max_gap_fraction=0.1)
assert filled == [None, 1, 2, None, None, 5, 6, 7, None]<|fim_middle|>test_fill_missing_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args):
tapsets = tapset_dir(args.binary)
if args.verbose:
print("Using tapset dir '%s' for binary '%s'" % (tapsets, args.binary))
def print_probes(verbose, name):
prefix = probe_prefix(args.binary)
offset = len(prefix) + 1
script = prefix + "." + name
if verbose:
print("Listing probes with name '%s'" % script)
proc = subprocess.Popen(["stap", "-I", tapsets, "-l", script],
stdout=subprocess.PIPE,
universal_newlines=True)
out, err = proc.communicate()
if proc.returncode != 0:
print("No probes found, are the tapsets installed in %s" % tapset_dir(args.binary))
sys.exit(1)
for line in out.splitlines():
if line.startswith(prefix):
print("%s" % line[offset:])
if len(args.probes) == 0:
print_probes(args.verbose, "*")
else:
for probe in args.probes:
print_probes(args.verbose, probe)<|fim_middle|>cmd_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(function, parameters, expected_error, expected_message):
"""
Test `trace` function of `Tracer` class with bad parameters.
"""
with pytest.raises(expected_error) as excinfo:
Tracer.trace(function, parameters)
assert str(excinfo.value) == expected_message<|fim_middle|>test_tracer_bad_trace<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Similar situation as with `mongodb-bin` above but the opposite."""
pkg_name = "guitar-pro"
correct_arch_dep_name = "lib32-portaudio"
self.remove_if_installed(pkg_name, correct_arch_dep_name)
fake_pikaur(f"-S {pkg_name}")
self.assertInstalled(pkg_name)
self.assertInstalled(correct_arch_dep_name)<|fim_middle|>test_aur_rpc_didnt_fully_parsed_srcinfo<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The provisioning state of the mobile network resource.
"""
return pulumi.get(self, "provisioning_state")<|fim_middle|>provisioning_state<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.OperationsListResult"]
"""List available operations.
List the available operations supported by the Microsoft.Maintenance resource provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~maintenance_management_client.models.OperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-09-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.METHOD_NAME.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.MaintenanceError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)<|fim_middle|>list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
t = TransactionMetaData()
# Can't get data that wasn't set:
with self.assertRaises(KeyError) as c:
t.data(self)
self.assertEqual(c.exception.args, (self,))
data = dict(a=1)
t.set_data(self, data)
self.assertEqual(t.data(self), data)
# Can't get something we haven't stored.
with self.assertRaises(KeyError) as c:
t.data(data)
self.assertEqual(c.exception.args, (data,))<|fim_middle|>test_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(subjects_list: Iterable[Subject]) -> None:
# Check that it's an iterable
try:
iter(subjects_list)
except TypeError as e:
message = f'Subject list must be an iterable, not {type(subjects_list)}'
raise TypeError(message) from e
# Check that it's not empty
if not subjects_list:
raise ValueError('Subjects list is empty')
# Check each element
for subject in subjects_list:
if not isinstance(subject, Subject):
message = (
'Subjects list must contain instances of torchio.Subject,'
f' not "{type(subject)}"'
)
raise TypeError(message)<|fim_middle|>parse_subjects_list<|file_separator|> |
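A small, hedged usage sketch for the validator above; it assumes the torchio and torch packages and uses in-memory tensor images so no files are needed:

```python
# Hedged usage sketch: tensor-backed images avoid needing image files on disk.
import torch
import torchio as tio

subjects = [
    tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 8, 8, 8)))
    for _ in range(3)
]
parse_subjects_list(subjects)       # valid input: returns None
# parse_subjects_list([])           # would raise ValueError: Subjects list is empty
# parse_subjects_list([object()])   # would raise TypeError (not a torchio.Subject)
```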
<|fim_prefix|>async def <|fim_suffix|>(self, connection: SendConnection, req: Subscribe) -> None:
self._check_topics(req.topics)
self._maybe_add_session(connection)
for topic in req.topics:
self._topic_info[topic].subscribers.add(connection.session_id)<|fim_middle|>subscribe<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(h, f):
if h[:4] == '.snd':
f = get_long_be
elif h[:4] in ('\0ds.', 'dns.'):
f = get_long_le
else:
return None
type = 'au'
hdr_size = f(h[4:8])
data_size = f(h[8:12])
encoding = f(h[12:16])
rate = f(h[16:20])
nchannels = f(h[20:24])
sample_size = 1 # default
if encoding == 1:
sample_bits = 'U'
elif encoding == 2:
sample_bits = 8
elif encoding == 3:
sample_bits = 16
sample_size = 2
else:
sample_bits = '?'
frame_size = sample_size * nchannels
return type, rate, nchannels, data_size//frame_size, sample_bits<|fim_middle|>test_au<|file_separator|> |
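To make the header layout concrete, a standalone sketch that packs a minimal big-endian Sun .au header and notes what the parser above would report for it; the get_long_be/get_long_le helpers are assumed to decode four bytes as unsigned big- and little-endian integers:

```python
# Standalone illustration of the .au header fields the parser above reads.
import struct

header = struct.pack(
    ">4s5L",
    b".snd",   # magic for a big-endian .au file
    24,        # header size in bytes
    16000,     # data size in bytes
    3,         # encoding: 16-bit linear PCM, so sample_size becomes 2
    8000,      # sample rate in Hz
    1,         # number of channels
)
# Mirroring the parser: frame_size = 2 * 1 and frames = 16000 // 2,
# so the result would be ('au', 8000, 1, 8000, 16).
```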
<|fim_prefix|>def <|fim_suffix|>(self):
graph = Graph()
opExport = OpFormattedDataExport(graph=graph)
data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
data = vigra.taggedView(data, vigra.defaultAxistags("xy"))
opExport.Input.setValue(data)
sub_roi = [(10, 0), (None, 80)]
opExport.RegionStart.setValue(sub_roi[0])
opExport.RegionStop.setValue(sub_roi[1])
opExport.ExportDtype.setValue(numpy.uint8)
opExport.InputMin.setValue(0.0)
opExport.InputMax.setValue(100.0)
opExport.ExportMin.setValue(100)
opExport.ExportMax.setValue(200)
opExport.OutputFormat.setValue("hdf5")
opExport.OutputFilenameFormat.setValue(self._tmpdir + "/export_x{x_start}-{x_stop}_y{y_start}-{y_stop}")
opExport.OutputInternalPath.setValue("volume/data")
opExport.TransactionSlot.setValue(True)
assert opExport.ImageToExport.ready()
assert opExport.ExportPath.ready()
assert opExport.ImageToExport.meta.drange == (100, 200)
# print "exporting data to: {}".format( opExport.ExportPath.value )
assert opExport.ExportPath.value == self._tmpdir + "/" + "export_x10-100_y0-80.h5/volume/data"
opExport.run_export()
opRead = OpInputDataReader(graph=graph)
try:
opRead.FilePath.setValue(opExport.ExportPath.value)
# Compare with the correct subregion and convert dtype.
sub_roi[1] = (100, 80) # Replace 'None' with full extent
expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
expected_data = expected_data.astype(numpy.uint8)
expected_data += 100 # see renormalization settings
assert opRead.Output.meta.shape == expected_data.shape
assert opRead.Output.meta.dtype == expected_data.dtype
read_data = opRead.Output[:].wait()
# Due to rounding errors, the actual result and the expected result may differ by 1
# e.g. if the original pixel value was 32.99999999
# Also, must promote to signed values to avoid unsigned rollover
# See issue ( https://github.com/ilastik/lazyflow/issues/165 ).
expected_data_signed = expected_data.astype(numpy.int16)
read_data_signed = read_data.astype(numpy.int16)
difference_from_expected = expected_data_signed - read_data_signed
assert (numpy.abs(difference_from_expected) <= 1).all(), "Read data didn't match exported data!"
finally:
opRead.cleanUp()<|fim_middle|>test_basic<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, context: RuleContext) -> Optional[LintResult]:
"""Looking for DISTINCT before a bracket.
Look for DISTINCT keyword immediately followed by open parenthesis.
"""
seq = None
anchor = None
children = FunctionalContext(context).segment.children()
if context.segment.is_type("select_clause"):
# Look for `select_clause_modifier`
modifier = children.select(sp.is_type("select_clause_modifier"))
first_element = children.select(sp.is_type("select_clause_element")).first()
expression = (
first_element.children(sp.is_type("expression")).first()
or first_element
)
bracketed = expression.children(sp.is_type("bracketed")).first()
# is the first element only an expression with only brackets?
if modifier and bracketed:
# If there's nothing else in the expression, remove the brackets.
if len(expression[0].segments) == 1:
anchor, seq = self._remove_unneeded_brackets(context, bracketed)
# Otherwise, still make sure there's a space after the DISTINCT.
else:
anchor = modifier[0]
seq = ReflowSequence.from_around_target(
modifier[0],
context.parent_stack[0],
config=context.config,
sides="after",
)
elif context.segment.is_type("function"):
# Look for a function call DISTINCT() whose parent is an expression
# with a single child.
anchor = context.parent_stack[-1]
if not anchor.is_type("expression") or len(anchor.segments) != 1:
return None
function_name = children.select(sp.is_type("function_name")).first()
bracketed = children.first(sp.is_type("bracketed"))
if (
not function_name
or function_name[0].raw_upper != "DISTINCT"
or not bracketed
):
return None
# Using ReflowSequence here creates an unneeded space between CONCAT
# and "(" in the test case test_fail_distinct_concat_inside_count:
# SELECT COUNT(DISTINCT(CONCAT(col1, '-', col2, '-', col3)))
#
# seq = ReflowSequence.from_around_target(
# anchor,
# context.parent_stack[0],
# config=context.config,
# ).replace(
# anchor,
# (KeywordSegment("DISTINCT"), WhitespaceSegment())
# + self.filter_meta(bracketed[0].segments)[1:-1],
# )
# Do this until we have a fix for the above.
return LintResult(
anchor=anchor,
fixes=[
LintFix.replace(
anchor,
(KeywordSegment("DISTINCT"), WhitespaceSegment())
+ self.filter_meta(bracketed[0].segments)[1:-1],
)
],
)
if seq and anchor:
# Get modifications.
fixes = seq.respace().get_fixes()
if fixes:
return LintResult(
anchor=anchor,
fixes=fixes,
)
return None<|fim_middle|>eval<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path):
from pathlib import Path
return Path(__file__).parent / path<|fim_middle|>here<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args=None):
"""Entry point for nuc_data_make utility."""
print(message(pyne_logo))
make_funcs = [
("atomic_mass", make_atomic_mass),
("scattering_lengths", make_scattering_lengths),
("decay", make_decay),
("simple_xs", make_simple_xs),
("cinder", make_cinder),
("materials", make_materials_library),
("q_values", make_q_value),
("dose_factors", make_dose_factors),
("eaf", make_eaf),
("wimsd_fpy", wimsdfpy.make_fpy),
("nds_fpy", ndsfpy.make_fpy),
]
make_map = dict(make_funcs)
make_open = set(
[
"atomic_mass",
"scattering_lengths",
"simple_xs",
"materials",
"wimsd_fpy",
"nds_fpy",
"q_values",
"dose_factors",
]
)
# Parse the command line arguments
parser = argparse.ArgumentParser(description="Make a nuclear data library.")
parser.add_argument(
"-o",
dest="nuc_data",
action="store",
default=nuc_data,
help="path to the output database file.",
)
parser.add_argument(
"-b",
dest="build_dir",
action="store",
default=build_dir,
help="path to the build directory.",
)
parser.add_argument(
"--datapath", dest="datapath", action="store", default="", help="MCNP DATAPATH."
)
parser.add_argument(
"--fetch-prebuilt",
dest="fetch_prebuilt",
action="store",
type=lambda s: "t" in s.lower() or "y" in s.lower(),
default=True,
help="grab partially assembled file [y/n].",
)
parser.add_argument(
"--make-open-only",
dest="make_open_only",
action="store",
type=lambda s: "t" in s.lower() or "y" in s.lower(),
default=False,
help="only add open data to file [y/n].",
)
parser.add_argument(
"-m",
dest="make",
action="store",
default="all",
help="comma-separated parts of nuc_data to make: "
+ ", ".join([mf[0] for mf in make_funcs])
+ ", all, and none.",
)
parser.add_argument(
"--check",
dest="hash_check",
action="store_true",
help="check hashes against built-in ones",
)
parser.add_argument(
"--clean",
dest="clean",
type=int,
default=0,
help="""level to clean up existing files.
0: no cleaning (default).
1: clean nuc_data.
2: clean nuc_data and build_dir.""",
)
args = parser.parse_args(args=args)
# clean nuc data
if args.clean in [1, 2]:
print("Removing nuc_data from {0}".format(args.nuc_data))
try:
os.remove(args.nuc_data)
except OSError:
pass
# Make the build dir
if args.clean == 2 and os.path.exists(args.build_dir):
print("Removing build_dir from {0}".format(args.build_dir))
remove_tree(args.build_dir)
mkpath(args.build_dir)
# Determine what to make
if args.make == "none":
make_order = []
elif args.make == "all":
make_order = [mf[0] for mf in make_funcs]
else:
make_order = args.make.replace(" ", "").split(",")
if args.make_open_only:
make_order = [mo for mo in make_order if mo in make_open]
# fetch prebuilt data library if possible
if args.fetch_prebuilt:
_fetch_prebuilt(args)
# Make the various tables
print("Making nuc_data at {0}".format(args.nuc_data))
for mo in make_order:
make_map[mo](args)
if args.hash_check:
print("Checking hashes")
result = check_hashes(args.nuc_data)
print("Results:")
badsum = False
for name, value in result:
if value:
print(" node " + name + " checksum matches")
else:
badsum = True
print(" node " + name + " checksum doesn't match!!")
if badsum is True:
print(
"""You may need to try building the data from scratch using:\n
nuc_data_make --fetch-prebuilt False
"""
)<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
parser = argparse.ArgumentParser(description="MiSoC port to the Papilio Pro")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
soc = BaseSoC(**soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build()<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(filter_dir=False, r_list=False):
res = _get_dir_source_name(ConfigLoader.get_items_dir(), filter_dir=filter_dir)
if res:
if r_list:
res = list(res)
return res
else:
return []<|fim_middle|>get_all_items_sources<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self: "T") -> "T":
raise NotImplementedError<|fim_middle|>find_debug<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, operation, value):
self.changes.append((operation, value))<|fim_middle|>apply_changelog_event<|file_separator|> |