text |
---|
<|fim_prefix|>def <|fim_suffix|>(self, p, n):
"""
Neumann boundary condition
Parameters
----------
p: (NQ, NE, 2)
n: (NE, 2)
grad*n : (NQ, NE, 2)
"""
grad = self.gradient(p) # (NQ, NE, 2)
val = np.sum(grad*n, axis=-1) # (NQ, NE)
return val<|fim_middle|>neumann<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Retrieve the CA's files location"""
# Grid-Security
retVal = gConfig.getOption(f"{g_SecurityConfPath}/Grid-Security")
if retVal["OK"]:
casPath = f"{retVal['Value']}/certificates"
if os.path.isdir(casPath):
return casPath
# CAPath
retVal = gConfig.getOption(f"{g_SecurityConfPath}/CALocation")
if retVal["OK"]:
casPath = retVal["Value"]
if os.path.isdir(casPath):
return casPath
# Look up the X509_CERT_DIR environment variable
if "X509_CERT_DIR" in os.environ:
casPath = os.environ["X509_CERT_DIR"]
return casPath
# rootPath./etc/grid-security/certificates
casPath = f"{DIRAC.rootPath}/etc/grid-security/certificates"
if os.path.isdir(casPath):
return casPath
# /etc/grid-security/certificates
casPath = "/etc/grid-security/certificates"
if os.path.isdir(casPath):
return casPath
# No CA's location found
return False<|fim_middle|>get_c_as_location<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
export_conandata_patches(self)<|fim_middle|>export_sources<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(codelist, column):
assert codelist.has_categories
by_category = {}
for code, category in codelist:
by_category.setdefault(category, []).append(code)
clauses = "\n".join(
[
f"WHEN {column} IN ({codelist_to_sql(codes)}) THEN {quote(category)}"
for category, codes in by_category.items()
]
)
return f"""
CASE
{clauses}
END
"""<|fim_middle|>categorised_codelist_to_case_expression<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.G_ = []
for i in range(len(self.G)):
self.G_.append([])
for j in range(len(self.G[i])):
self.G_[i].append([k for k in self.G[i][j] if not isinstance(k, AttributeRule)])<|fim_middle|>copy_grammar_without_attrs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, sym: str):
return self._idx[sym]<|fim_middle|>index<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(header, label):
for i in range(len(header)):
if label == header[i]:
return i
else:
return -1<|fim_middle|>get_csv_index<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self):
"""
Make RPC calls to 'GetServiceInfo' functions of other services, to
get current status.
"""
for service in list(self._service_info):
# Check whether service provides service303 interface
if service in self._config['non_service303_services']:
continue
try:
chan = ServiceRegistry.get_rpc_channel(
service, ServiceRegistry.LOCAL,
)
except ValueError:
# Service can't be contacted
logging.error('Cant get RPC channel to %s', service)
continue
client = Service303Stub(chan)
try:
future = client.GetServiceInfo.future(
Void(),
self.GET_STATUS_TIMEOUT,
)
info = await grpc_async_wrapper(future, self._loop)
self._service_info[service].update(
info.start_time_secs,
info.status,
)
self._service_info[service].continuous_timeouts = 0
except grpc.RpcError as err:
logging.error(
"GetServiceInfo Error for %s! [%s] %s",
service,
err.code(),
err.details(),
extra=EXCLUDE_FROM_ERROR_MONITORING if indicates_connection_error(err) else None,
)
self._service_info[service].continuous_timeouts += 1<|fim_middle|>get_service_info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args: Arguments) -> None:
log_file_path = Path(args.log_name).resolve()
if not log_file_path.exists() or not log_file_path.is_file():
raise CLIError(f"Log does not exist: {log_file_path}")
ignore_users = []
if args.ignore_users:
ignore_users_path = Path(args.ignore_users).resolve()
if not ignore_users_path.exists() or not ignore_users_path.is_file():
raise CLIError(f"Ignore users file does not exist: {ignore_users_path}")
with ignore_users_path.open("r", encoding="utf-8") as f:
ignore_users = [line.strip() for line in f]
n = 0
ips: Dict[str, Dict[str, Dict[str, Any]]] = {}
users: Dict[str, Dict[str, Any]] = {}
with log_file_path.open("r", encoding="utf-8") as f:
for line in f:
# s = line.rstrip("\n")
n += 1
# print(n, s)
dt, user, op = get_time_user_op_rights(line)
if dt and user:
if user in ignore_users:
continue
usr = users.get(user)
if not usr:
usr = {"add": "", "rem": "", "ips": {}}
users[user] = usr
if op == "added":
usr["add"] = dt
if op == "removed":
usr["rem"] = dt
continue
dt, ip, user = get_time_ip_and_user(line)
if not dt or not ip or not user:
continue
if user in ignore_users:
continue
# print(ip, user)
if not ips.get(ip):
ips[ip] = {}
usr = ips[ip].get(user)
if not usr:
usr = {"n": 0, "first": dt}
ips[ip][user] = usr
usr["n"] += 1
usr["last"] = dt
usr = users.get(user)
if not usr:
usr = {"add": "", "rem": "", "ips": {}}
users[user] = usr
uip = usr["ips"].get(ip)
if not uip:
uip = {"n": 0, "first": dt}
usr["ips"][ip] = uip
uip["n"] += 1
uip["last"] = dt
for ip in ips:
usrs = ips[ip]
if len(usrs) <= 1:
continue
s = ip + " "
for user in usrs:
s += " " + user
s += ": ("
sep = ""
for user in usrs:
usr = usrs[user]
s += sep + str(usr["n"])
sep = ", "
s += ") ("
sep = ""
for user in usrs:
usr = usrs[user]
s += sep + usr["first"] + "-" + usr["last"]
sep = ","
s += ")"
print(s)
for user in users:
usr = users[user]
ipsip = usr["ips"]
if not usr["add"] or not usr["rem"] or user == "nemaaho":
print(user + " " + usr["add"] + " - " + usr["rem"])
if len(ipsip) <= 1 or user == "Anonymous":
continue
s = user + " "
for ip in ipsip:
if ip:
s += " " + ip
print(s)
print(n)<|fim_middle|>run<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Test an empty dict with a missing resp.reason."""
resp, content = fake_response(b"}NOT OK", {"status": "400"}, reason=None)
error = HttpError(resp, content)
self.assertEqual(
str(error),
'<HttpError 400 when requesting None returned "". Details: "}NOT OK">',
)<|fim_middle|>test_missing_reason<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Gets or sets primaryKey of the created AuthorizationRule.
"""
return pulumi.get(self, "primary_key")<|fim_middle|>primary_key<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
xs_errors.XML_DEFS = self._xmldefs<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
input = InputCell(1)
plus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
minus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] - 1,
)
cb1_observer = []
cb2_observer = []
callback1 = self.callback_factory(cb1_observer)
callback2 = self.callback_factory(cb2_observer)
plus_one.add_callback(callback1)
minus_one.add_callback(callback2)
input.value = 10
self.assertEqual(cb1_observer[-1], 11)
self.assertEqual(cb2_observer[-1], 9)<|fim_middle|>test_callbacks_can_fire_from_multiple_cells<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(index):
"""Called when a player left the server."""
# Unmute the player, so the next player who gets this index won't be muted
mute_manager.unmute_player(index)<|fim_middle|>on_client_disconnect<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(s):
"""Take any reasonable date string, and return an ApproximateDate for it
>>> ad = parse_approximate_date('2014-02-17')
>>> type(ad)
<class 'django_date_extensions.fields.ApproximateDate'>
>>> ad
2014-02-17
>>> parse_approximate_date('2014-02')
2014-02-00
>>> parse_approximate_date('2014')
2014-00-00
>>> parse_approximate_date('future')
future
"""
for regexp in [
r"^(\d{4})-(\d{2})-(\d{2})$",
r"^(\d{4})-(\d{2})$",
r"^(\d{4})$",
]:
m = re.search(regexp, s)
if m:
return ApproximateDate(*(int(g, 10) for g in m.groups()))
if s == "future":
return ApproximateDate(future=True)
if s:
dt = parser.parse(
s,
parserinfo=localparserinfo(),
dayfirst=settings.DD_MM_DATE_FORMAT_PREFERRED,
)
return ApproximateDate(dt.year, dt.month, dt.day)
raise ValueError("Couldn't parse '{}' as an ApproximateDate".format(s))<|fim_middle|>parse_approximate_date<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(FunctionalModule_nums, nn_module):
print(f"{FunctionalModule_nums} functional modules detected.")
supported = []
unsupported = []
not_fully_supported = []
for key, value in nn_module.items():
if value == 1:
supported.append(key)
elif value == 2:
unsupported.append(key)
elif value == 3:
not_fully_supported.append(key)
def fun(info, l):
print(info)
for v in l:
print(v)
# Fully Supported Ops: All related test cases of these ops have been exported
# Semi-Supported Ops: Part of related test cases of these ops have been exported
# Unsupported Ops: None of related test cases of these ops have been exported
for info, l in [
[f"{len(supported)} Fully Supported Operators:", supported],
[
f"{len(not_fully_supported)} Semi-Supported Operators:",
not_fully_supported,
],
[f"{len(unsupported)} Unsupported Operators:", unsupported],
]:
fun(info, l)<|fim_middle|>print_stats<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
try:
dsp = ossaudiodev.open('w')
except (ossaudiodev.error, IOError), msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
dsp.close()
test_support.run_unittest(__name__)<|fim_middle|>test_main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return any([
self.text not in self.FALSE_TEXT,
self.option,
self.file,
self.external_id != ''
])<|fim_middle|>is_true<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(count, offset) -> None:
ptrsize = pwndbg.gdblib.typeinfo.ptrsize
telescope.repeat = METHOD_NAME.repeat
telescope(address=pwndbg.gdblib.regs.sp + offset * ptrsize, count=count)<|fim_middle|>stack<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
data_array_validator, datetime_pandas, dates_array
):
df = pd.DataFrame({"d": datetime_pandas})
res = data_array_validator.validate_coerce(df)
# Check type
assert isinstance(res, np.ndarray)
# Check dtype
assert res.dtype == "object"
# Check values
np.testing.assert_array_equal(res, dates_array.reshape(len(dates_array), 1))<|fim_middle|>test_data_array_validator_dates_dataframe<|file_separator|> |
<|fim_prefix|>nc def <|fim_suffix|>(self):<|fim_middle|>count<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, data: Any) -> None:
self.value = data<|fim_middle|>save<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path):
def json_deps(jsondata):
ret = {}
deps = jsondata.get('dependencies', {})
for key in deps.keys():
key = key.lower()
devonly = deps[key].get('dev', False)
if not devonly:
if key not in ret.keys():
depname = key.replace('/', '-')
if depname[0] == '@':
depname = depname[1:]
ret[depname] = {'name': depname, 'version': deps[key]['version']}
ret.update(json_deps(deps[key]))
return ret
with open('%s/package-lock.json' % path) as f:
jsondata = json.load(f)
return json_deps(jsondata)<|fim_middle|>read_ui_requirements<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(simulation, volume_name):
"""
Return the size of the bounding box of the given volume
"""
pMin, pMax = get_volume_bounding_limits(simulation, volume_name)
return [pMax[0] - pMin[0], pMax[1] - pMin[1], pMax[2] - pMin[2]]<|fim_middle|>get_volume_bounding_box_size<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(fixturedef, request):
trace("\n%s: start", get_proc_name())
trace("{}".format(fixturedef))
trace("{}".format(request))
stf.fixture_setup(fixturedef, request)
yield
stf.fixture_setup_finish(fixturedef, request)
trace("\n%s: end", get_proc_name())
trace("{}".format(fixturedef))
trace("{}".format(request))<|fim_middle|>pytest_fixture_setup<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Tests address without aliases."""
acct = _AccountTestClass(address="foo@example.com")
self.assertTrue(acct.matches_address("foo@example.com"))
self.assertFalse(acct.matches_address("bar@example.com"))<|fim_middle|>test_matches_address<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
shapeletsInterp = Shapelets(interpolation=True)
x, y = 0.99, 0
beta = 0.5
flux_full = self.shapelets.function(
x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0
)
flux_interp = shapeletsInterp.function(
x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0
)
npt.assert_almost_equal(flux_interp, flux_full, decimal=10)<|fim_middle|>test_interpolate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, R, amp, sigma, e1, e2):
"""
:param R:
:param amp:
:param sigma:
:param e1:
:param e2:
:return:
"""
return self.spherical.METHOD_NAME(R, amp, sigma)<|fim_middle|>mass_3d_lens<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
data_shape,
kernel_size,
num_filter,
in_dtype,
strides,
padding,
groups,
dilation,
data_layout,
kernel_layout,
out_layout,
schedule_name,
):
"""Test a subgraph with a single conv2d operator."""
ref_input_data = np.random.randint(low=-128, high=127, size=data_shape, dtype=in_dtype)
ref_input_var = relay.var("input", relay.TensorType(data_shape, in_dtype)) # NHWC layout
kernel_shape = (*kernel_size, data_shape[-1] // groups, num_filter) # HWIO layout
ref_kernel_data = np.random.randint(low=-10, high=10, size=kernel_shape, dtype=in_dtype)
"""Our x86 depthwise implementation only supports HWOI with NHWC, so we need to change our
kernel layout to work around this. We can't just change the whole thing to HWIO or
something else, as then group conv2d would not work. Eventually, we should switch to using
TensorFlow to create the reference output so we can ensure our implementation is right.
See https://github.com/apache/tvm/issues/13137 for details."""
ref_relay_op = relay.op.nn.conv2d(
ref_input_var,
relay.const(change_ndarray_layout(ref_kernel_data, "HWIO", self.ref_kernel_layout)),
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
dilation=(dilation, dilation),
data_layout="NHWC",
kernel_layout=self.ref_kernel_layout,
out_dtype="int32",
out_layout="NHWC",
)
ref_module = tvm.IRModule.from_expr(relay.Function([ref_input_var], ref_relay_op))
ref_outputs = generate_ref_data(ref_module, {"input": ref_input_data})
# Reshape output dictionary to match out_layout
assert len(ref_outputs) == 1
output_tensor_name, output_tensor = next(iter(ref_outputs.items()))
ref_outputs[output_tensor_name] = change_ndarray_layout(output_tensor, "NHWC", out_layout)
test_input_data = change_ndarray_layout(ref_input_data, "NHWC", data_layout)
test_input_var = relay.var("input", relay.TensorType(test_input_data.shape, in_dtype))
test_kernel_data = change_ndarray_layout(ref_kernel_data, "HWIO", kernel_layout)
test_relay_op = relay.op.nn.conv2d(
test_input_var,
relay.const(test_kernel_data),
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
dilation=(dilation, dilation),
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype="int32",
out_layout=out_layout,
)
test_function = relay.Function([test_input_var], test_relay_op)
test_model = AOTTestModel(
module=tvm.IRModule.from_expr(test_function),
inputs={"input": test_input_data},
outputs=ref_outputs,
)
compile_and_run(
test_model,
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)<|fim_middle|>test_conv2d<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
segment, segment_rule, mocker
):
# Given
condition = Condition.objects.create(
rule=segment_rule,
property="foo",
operator=EQUAL,
value="bar",
created_with_segment=True,
)
mock_history_instance = mocker.MagicMock()
# When
msg = condition.get_create_log_message(mock_history_instance)
# Then
assert msg is None<|fim_middle|>test_condition_get_create_log_message_for<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(init_context) -> PySparkResource:
"""This resource provides access to a PySpark SparkSession for executing PySpark code within Dagster.
Example:
.. code-block:: python
@op(required_resource_keys={"pyspark"})
def my_op(context):
spark_session = context.resources.pyspark.spark_session
dataframe = spark_session.read.json("examples/src/main/resources/people.json")
my_pyspark_resource = pyspark_resource.configured(
{"spark_conf": {"spark.executor.memory": "2g"}}
)
@job(resource_defs={"pyspark": my_pyspark_resource})
def my_spark_job():
my_op()
"""
context_updated_config = init_context.replace_config(
{"spark_config": init_context.resource_config["spark_conf"]}
)
return PySparkResource.from_resource_context(context_updated_config)<|fim_middle|>pyspark_resource<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, coordinates: AnnotationPosition) -> None:
start = coordinates.start
end = coordinates.end
self.offset_start = start.offset
self.depth_start = start.depth
self.node_start = start.node
self.offset_end = end.offset
self.depth_end = end.depth
self.node_end = end.node
element_path_start = start.el_path
if element_path_start is not None:
self.element_path_start = str(element_path_start)
element_path_end = end.el_path
if element_path_end is not None:
self.element_path_end = str(element_path_end)
self.paragraph_id_start = start.par_id
self.hash_start = start.t
self.paragraph_id_end = end.par_id
self.hash_end = end.t<|fim_middle|>set_position_info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path_to_nwbfile, nwbfile_with_ecephys_content):
"""
Test that the property is retrieved from the electrodes table ONLY from the corresponding
region of the electrical series
"""
electrical_series_name_list = ["ElectricalSeries1", "ElectricalSeries2"]
for electrical_series_name in electrical_series_name_list:
recording_extractor = NwbRecordingExtractor(path_to_nwbfile, electrical_series_name=electrical_series_name)
nwbfile = nwbfile_with_ecephys_content
electrical_series = nwbfile.acquisition[electrical_series_name]
electrical_series_electrode_indices = electrical_series.electrodes.data[:]
electrodes_table = nwbfile.electrodes.to_dataframe()
sub_electrodes_table = electrodes_table.iloc[electrical_series_electrode_indices]
expected_property = sub_electrodes_table["property"].values
extracted_property = recording_extractor.get_property("property")
assert np.array_equal(extracted_property, expected_property)<|fim_middle|>test_nwb_extractor_property_retrieval<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, id: int) -> ProjectColumn: ...<|fim_middle|>get_project_column<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
parser = argparse.ArgumentParser(
description='Generate training and val set of MTWI.')
parser.add_argument('root_path', help='Root dir path of MTWI')
parser.add_argument(
'--val-ratio', help='Split ratio for val set', default=0.0, type=float)
parser.add_argument(
'--nproc', default=1, type=int, help='Number of process')
args = parser.METHOD_NAME()
return args<|fim_middle|>parse_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, config, blob, data):
raise NotImplementedError<|fim_middle|>blobs_update<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, component: Component, request: Request) -> None:
"""
Mutate the component in place for the given request context.
"""
# if there is no plugin registered for the component, return the input
if (component_type := component["type"]) not in self:
return
# invoke plugin if exists
rewriter = self[component_type].rewrite_for_request
if rewriter is None:
return
rewriter(component, request)<|fim_middle|>update_config_for_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
icon_path = os.path.join(CfdTools.getModulePath(), "Gui", "Icons", "scalartransport.svg")
return {'Pixmap': icon_path,
'MenuText': QtCore.QT_TRANSLATE_NOOP("Cfd_ScalarTransportFunction",
"Cfd scalar transport function"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Cfd_ScalarTransportFunction",
"Create a scalar transport function")}<|fim_middle|>get_resources<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsResolvers/{dnsResolverName}/outboundEndpoints/{outboundEndpointName}",
**self.url_parameters
)<|fim_middle|>url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(case, case_property_name, start=0, per_page=50):
"""Return paged changes to case properties, and last transaction index checked
"""
def iter_transactions(transactions):
for i, transaction in enumerate(transactions):
property_changed_info = property_changed_in_action(
case.domain,
transaction,
case.case_id,
case_property_name
)
if property_changed_info:
yield property_changed_info, i + start
num_actions = len(case.actions)
if start > num_actions:
return [], -1
case_transactions = iter_transactions(
sorted(case.actions, key=lambda t: t.server_date, reverse=True)[start:]
)
infos = []
last_index = 0
while len(infos) < per_page:
try:
info, last_index = next(case_transactions)
infos.append(info)
except StopIteration:
last_index = -1
break
return infos, last_index<|fim_middle|>get_paged_changes_to_case_property<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(input_path, store, tz, sort_index=True):
"""
Parameters
----------
input_path : str
The root path of the REFIT dataset.
store : DataStore
The NILMTK DataStore object.
tz : str
Timezone e.g. 'US/Eastern'
sort_index : bool
"""
check_directory_exists(input_path)
# Iterate though all houses and channels
# house 14 is missing!
houses = [1,2,3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19,20,21]
nilmtk_house_id = 0
prefix = ''
suffix = '_'
version_checked = False
for house_id in houses:
nilmtk_house_id += 1
print("Loading house", house_id, end="... ")
stdout.flush()
csv_filename = join(input_path, prefix + 'House' + suffix + str(house_id) + '.csv')
if not version_checked:
version_checked = True
if exists(csv_filename):
print('Using original filenames (House_XX.csv)')
else:
prefix = 'CLEAN_'
suffix = ''
csv_filename = join(input_path, prefix + 'House' + suffix + str(house_id) + '.csv')
print('Using CLEAN filenames (CLEAN_HouseXX.csv)')
if not exists(csv_filename):
raise RuntimeError('Could not find REFIT files. Please check the provided folder.')
# The clean version already includes header, so we
# just skip the text version of the timestamp
usecols = ['Unix','Aggregate','Appliance1','Appliance2','Appliance3','Appliance4','Appliance5','Appliance6','Appliance7','Appliance8','Appliance9']
df = _load_csv(csv_filename, usecols, tz)
if sort_index:
df = df.sort_index() # might not be sorted...
chan_id = 0
for col in df.columns:
chan_id += 1
print(chan_id, end=" ")
stdout.flush()
key = Key(building=nilmtk_house_id, meter=chan_id)
chan_df = pd.DataFrame(df[col])
chan_df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
# Modify the column labels to reflect the power measurements recorded.
chan_df.columns.set_names(LEVEL_NAMES, inplace=True)
store.put(str(key), chan_df)
print('')<|fim_middle|>convert<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# Test sendall() timeout
# couldn't figure out how to test it
pass<|fim_middle|>test_sendall<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
sys_gpio_dir = "/sys/class/gpio"
self.GPIO_OFFSET = 0
gpiochip_no = 0
for d in os.listdir(sys_gpio_dir):
if "gpiochip" in d:
try:
gpiochip_no = int(d[8:], 10)
except ValueError as e:
print("Error: %s" % str(e))
if gpiochip_no > 255:
self.GPIO_OFFSET = 256
return True
return True<|fim_middle|>set_gpio_offset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
relationships: List[Relationship], packages: List[Package], files: List[File]
) -> Tuple[List, Dict]:
contained_files_by_package_id = dict()
relationships_to_write = []
files_by_spdx_id = {file.spdx_id: file for file in files}
packages_spdx_ids = [package.spdx_id for package in packages]
for relationship in relationships:
if (
relationship.relationship_type == RelationshipType.CONTAINS
and relationship.spdx_element_id in packages_spdx_ids
and relationship.related_spdx_element_id in files_by_spdx_id.keys()
):
contained_files_by_package_id.setdefault(relationship.spdx_element_id, []).append(
files_by_spdx_id[relationship.related_spdx_element_id]
)
if relationship.comment:
relationships_to_write.append(relationship)
elif (
relationship.relationship_type == RelationshipType.CONTAINED_BY
and relationship.related_spdx_element_id in packages_spdx_ids
and relationship.spdx_element_id in files_by_spdx_id
):
contained_files_by_package_id.setdefault(relationship.related_spdx_element_id, []).append(
files_by_spdx_id[relationship.spdx_element_id]
)
if relationship.comment:
relationships_to_write.append(relationship)
else:
relationships_to_write.append(relationship)
return relationships_to_write, contained_files_by_package_id<|fim_middle|>scan_relationships<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(refer_nodes, refer_layer_orders):
"""
@summary: Compute the median from the nodes in the adjacent layer that have connections
@param refer_nodes:
@param refer_layer_orders:
@return:
"""
layer_orders_index = sorted([refer_layer_orders.index(ref) for ref in refer_nodes])
refer_len = len(layer_orders_index)
# Nodes with no adjacent vertices have their median value set to -1, so they keep their original position
if refer_len == 0:
return -1
elif refer_len % 2 == 1:
return layer_orders_index[refer_len // 2]
else:
return (layer_orders_index[(refer_len // 2) - 1] + layer_orders_index[refer_len // 2]) / 2<|fim_middle|>median_value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, request, form):
try:
category = models.GareaCategory.objects.get(id=request.POST["category"])
nnew, nold = self._process_uploaded_shapefile(
category, request.FILES["file"]
)
except IntegrityError as e:
messages.add_message(request, messages.ERROR, str(e))
else:
messages.add_message(
request,
messages.INFO,
_(
"Replaced {} existing objects in category {} with {} new objects"
).format(nold, category.descr, nnew),
)
return HttpResponseRedirect("")<|fim_middle|>process_uploaded_form<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(sqlite_sample_tap: SQLTap):
"""Run standard tap tests against Countries tap."""
tests = get_standard_tap_tests(
type(sqlite_sample_tap),
dict(sqlite_sample_tap.config),
)
for test in tests:
test()<|fim_middle|>test_sqlite_tap_standard_tests<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(client):
# Validating a validated token should update the token attribute
user = f.UserFactory.create()
application = f.ApplicationFactory(next_url="http://next.url")
token = f.ApplicationTokenFactory(
auth_code="test-auth-code",
state="test-state",
application=application,
token="existing-token")
url = reverse("application-tokens-validate")
client.login(user)
data = {
"application": token.application.id,
"auth_code": "test-auth-code",
"state": "test-state"
}
response = client.json.post(url, json.dumps(data))
assert response.status_code == 200
token = models.ApplicationToken.objects.get(id=token.id)
assert token.token == "existing-token"<|fim_middle|>test_token_validate_validated<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
return f"{self.tag.name}-{self.tag.version}"<|fim_middle|>export_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(pipeline_response):
deserialized = self._deserialize("OperationListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)<|fim_middle|>extract_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(compiler):
"""Do platform-specific customizations of compilers on unix platforms."""
if compiler.compiler_type == "unix":
(cc, cxx, cflags) = get_config_vars("CC", "CXX", "CFLAGS")
if "CC" in os.environ:
cc = os.environ["CC"]
if "CXX" in os.environ:
cxx = os.environ["CXX"]
if "CFLAGS" in os.environ:
cflags = cflags + " " + os.environ["CFLAGS"]
cc_cmd = cc + " " + cflags
# We update executables in compiler to take advantage of distutils arg splitting
compiler.set_executables(compiler=cc_cmd, compiler_cxx=cxx)<|fim_middle|>customize_compiler<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
cases = [
('target_os = "windows"', cfg.Equal('', cfg.Identifier('', "target_os"), cfg.String('', "windows"))),
('target_arch = "x86"', cfg.Equal('', cfg.Identifier('', "target_arch"), cfg.String('', "x86"))),
('target_family = "unix"', cfg.Equal('', cfg.Identifier('', "target_family"), cfg.String('', "unix"))),
('any(target_arch = "x86", target_arch = "x86_64")',
cfg.Any(
'', [
cfg.Equal('', cfg.Identifier('', "target_arch"), cfg.String('', "x86")),
cfg.Equal('', cfg.Identifier('', "target_arch"), cfg.String('', "x86_64")),
])),
('all(target_arch = "x86", target_os = "linux")',
cfg.All(
'', [
cfg.Equal('', cfg.Identifier('', "target_arch"), cfg.String('', "x86")),
cfg.Equal('', cfg.Identifier('', "target_os"), cfg.String('', "linux")),
])),
('not(all(target_arch = "x86", target_os = "linux"))',
cfg.Not(
'',
cfg.All(
'', [
cfg.Equal('', cfg.Identifier('', "target_arch"), cfg.String('', "x86")),
cfg.Equal('', cfg.Identifier('', "target_os"), cfg.String('', "linux")),
]))),
]
for data, expected in cases:
with self.subTest():
self.assertEqual(cfg.parse(iter(cfg.lexer(data)), ''), expected)<|fim_middle|>test_parse<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, ch):
ch.write("abc")
assert ch.parent.method_calls == [mock.call.write('abc')]<|fim_middle|>test_write<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(tracks):
depths = []
samples = []
for track in tracks:
depths.append(track.params.max_depth)
if "subsample" not in track.params_dict.keys():
samples.append(1.0)
continue
samples.append(track.params.subsample)
depths = set(depths)
samples = set(samples)
table_tracks = {(depth, subsample): [] for depth in depths for subsample in samples}
for track in tracks:
subsample = track.params.subsample if "subsample" in track.params_dict.keys() else 1.
table_tracks[(track.params.max_depth, subsample)].append(track)
return table_tracks<|fim_middle|>split_tracks<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
today = date.today()
assert daterange_start(today) == today<|fim_middle|>test_parse_daterange_start_date<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, **cfg):
super().METHOD_NAME(**cfg)<|fim_middle|>run_stages<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, method, parameter):<|fim_middle|>annotate_parameter<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
download_dir = "./"
train_df, tune_df = shopee_dataset(download_dir=download_dir)
predictor = MultiModalPredictor(label="label")
hyperparameters = [
"model.names=[timm_image]",
"model.timm_image.checkpoint_name=ghostnet_100",
"env.num_workers=0",
"env.num_workers_evaluation=0",
"optimization.top_k_average_method=best",
"optimization.val_check_interval=1.0",
]
predictor.fit(
train_data=train_df,
tuning_data=tune_df,
hyperparameters=hyperparameters,
time_limit=2,
)<|fim_middle|>test_hyperparameters_in_terminal_format<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, timeout=RECV_TIMEOUT_DEFAULT):
raw_length = self._recvall(self.HEADER_LENGTH, timeout)
length, = struct.unpack("!I", raw_length)
payload = self._recvall(length)
return payload<|fim_middle|>receive<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>is_ready<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, activationWindow:'ApplicationWindow', activateOnMessage=True) -> None:
self._activationWindow = activationWindow
self._activateOnMessage = activateOnMessage<|fim_middle|>set_activation_window<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(engine_with_schema):
engine, _ = engine_with_schema
sel = select(
sa_call_sql_function(
email.EMAIL_LOCAL_PART,
text("'test@example.com'"),
return_type=PostgresType.TEXT
)
)
with engine.begin() as conn:
res = conn.execute(sel)
assert res.fetchone()[0] == "test"<|fim_middle|>test_local_part_func_wrapper<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
x = [2]
x[0] += 1
x[0] *= 2
x[0] **= 2
x[0] -= 8
x[0] //= 5
x[0] %= 3
x[0] &= 2
x[0] |= 5
x[0] ^= 1
x[0] /= 2
self.assertEqual(x[0], 3.0)<|fim_middle|>test_in_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, img, bin_type):
"""Find the bins and their orientations."""
assert (
bin_type in self.bins[bin_type]
), f"Bins_2d does not know bin color: {bin_type}"
if img is not None:
kernel = np.ones((2, 2), np.float32) / 4
img = cv2.filter2D(img, -1, kernel)
debug_image = np.copy(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, img = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(
np.copy(img),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE,
)
contours = contour_sort(contours)
"""This finds the bins and looks for the one that is orange or is not
orange. Each bin is given an orangeness rating and either the most
or least orange bin is selected.
"""
if len(contours) > 0:
bins = 2
orangeness = 0 if self.bin_type == "orange" else 100000
if len(contours) < bins:
bins = len(contours)
for i in range(0, bins + 1):
x, y, w, h = cv2.boundingRect(contours[i])
roi = debug_image[y : y + h, x : x + w]
temp = evaluate_bin(roi)
if (orangeness > temp and self.bin_type == "norange") or (
orangeness < temp and self.bin_type == "orange"
):
orangeness = temp
M = cv2.moments(contours[i])
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
img_h, img_w, _ = np.shape(debug_image)
point = (cx, cy)
(_, _), (_, _), rad = cv2.fitEllipse(contours[i])
cv2.rectangle(debug_image, (x, y), (x + w, y + h), (127), 2)
ellipse = cv2.fitEllipse(contours[i])
cv2.ellipse(debug_image, ellipse, (170), 2)
if point is not None:
cv2.circle(debug_image, point, 5, (0, 0, 255), -1)
pixels = np.copy(point)
point = [cx - (img_w / 2), cy - (img_h / 2)]
tuple_center = (point[0], point[1], 0)
rad = ((rad) * np.pi) / 180.0
P = np.asarray(self.image_sub.camera_info.P).reshape(3, 4)
_P = np.linalg.pinv(P)
pixels = np.asarray([pixels[0], pixels[1], 1])
ray = _P.dot(pixels)
tuple_center = self.range * ray
tuple_center[2] = (
-tuple_center[2] + 0.45 + 1
) # height of the bin and some buffer
self.last_draw_image = debug_image
return tuple_center, rad<|fim_middle|>find_single_bin<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request):
id_ = request.matchdict["sponsor_id"]
try:
sponsor = request.db.query(Sponsor).filter(Sponsor.id == id_).one()
except NoResultFound:
raise HTTPNotFound
form = SponsorForm(request.POST if request.method == "POST" else None, sponsor)
if request.method == "POST":
if _color_logo_url := _upload_image("color_logo", request, form):
form.color_logo_url.data = _color_logo_url
if _white_logo_url := _upload_image("white_logo", request, form):
form.white_logo_url.data = _white_logo_url
if form.validate():
form.populate_obj(sponsor)
request.session.flash("Sponsor updated", queue="success")
return HTTPSeeOther(location=request.current_route_path())
return {"sponsor": sponsor, "form": form}<|fim_middle|>edit_sponsor<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)<|fim_middle|>send_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(hpid, realm):
realmid = realm["id"]
types = get_waste_types(hpid, realmid)
text = "{ \n"
text += f'"hpid": {hpid},\n'
text += f'"realm": {realmid},\n'
text += f'"name": "{realm["name"]}",\n'
text += '"icons": {\n'
for t in types:
text += f'"{t["id"]}": "", # {t["name"]}\n'
text += '}, \n'
text += '}, \n'
return text<|fim_middle|>gen_icons<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 20)), 2 * tf.ones((40, 40, 20))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=5)
xs = layer(xs, training=True)
self.assertTrue(tf.math.reduce_any(xs[0] == 3.0))
self.assertTrue(tf.math.reduce_any(xs[1] == 2.0))<|fim_middle|>test_channel_shuffle_call_results_multi_channel<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(msgid, msgstr, line_warns):
"""check if quote count is the same"""
return check_count(msgid, msgstr, '"', line_warns)<|fim_middle|>check_quotes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path):
print 'todo'
return []<|fim_middle|>get_dependencies_by_objdump<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
id = self.obtain().data["id"]
wrong_solution = "most certainly wrong!"
self.assertFalse(self.verify(id, wrong_solution))<|fim_middle|>test_verify_incorrect<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")<|fim_middle|>private_link_service_connection_state<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request: Request, exception: Exception) -> None:
"""Logs information about an incoming request and the associated exception.
Args:
request (Request): The incoming request to be logged.
exception (Exception): The exception that occurred during the handling of the request.
Returns:
None
""" # noqa: E501
quiet = getattr(exception, "quiet", False)
noisy = getattr(request.app.config, "NOISY_EXCEPTIONS", False)
if quiet is False or noisy is True:
try:
url = repr(request.url)
except AttributeError: # no cov
url = "unknown"
error_logger.exception(
"Exception occurred while handling uri: %s", url
)<|fim_middle|>log<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(obj, value):
np.testing.assert_allclose(value, obj.result().numpy(), atol=1e-6)<|fim_middle|>check_results<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, fSortChildren):
self._node.METHOD_NAME(fSortChildren)
self._node.update()<|fim_middle|>set_f_sort_children<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(spec, state):
rng = random.Random(501)
yield from _test_harness_for_randomized_test_case(
spec,
state,
participation_fn=lambda comm: [rng.choice(comm)],
)<|fim_middle|>test_random_only_one_participant_without_duplicates<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, n_samples):
delimiter = None
if self.example_file.endswith(".tsv"):
delimiter = "\t"
if self.example_file.endswith(".csv"):
delimiter = ","
with open(self.example_file, "r") as f:
reader = csv.reader(f, delimiter=delimiter)
R = []
for r in reader:
R += [r]
idxs = [i for i in range(len(R))]
idxs = random.choices(idxs, k=n_samples)
D = []
for idx in idxs:
r = R[idx]
D += [{"key": r[0], "input": r[2], "text": r[2]}]
return D<|fim_middle|>sample_example_singlets<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args):
"""
Only runs linter for monitors generation, without writing the actual file.
"""
Linter.shared = Linter()
all_monitors = read_all_monitors(tests_dir=args.tests_dir)
renderer = MainTF.load_from_templates(
main_template_path='monitors-gen/templates/main.tf.src',
logs_monitor_template_path='monitors-gen/templates/monitor-logs.tf.src',
apm_monitor_template_path='monitors-gen/templates/monitor-apm.tf.src',
rum_monitor_template_path='monitors-gen/templates/monitor-rum.tf.src'
)
_ = renderer.render(monitors=all_monitors)
Linter.shared.print(strict=False) # Just print linter events, without aborting<|fim_middle|>lint<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, x, y, u, P, Q=None, R=None, t=None):
r'''
Performs one step estimation.
Args:
x (:obj:`Tensor`): estimated system state of previous step.
y (:obj:`Tensor`): system observation at current step (measurement).
u (:obj:`Tensor`): system input at current step.
P (:obj:`Tensor`): state estimation covariance of previous step.
Q (:obj:`Tensor`, optional): covariance of system transition model.
Default: ``None``.
R (:obj:`Tensor`, optional): covariance of system observation model.
Default: ``None``.
t (:obj:`int`, optional): set system timestamp for estimation.
If ``None``, current system time is used. Default: ``None``.
Return:
list of :obj:`Tensor`: posteriori state and covariance estimation
'''
# Upper cases are matrices, lower cases are vectors
Q = Q if Q is not None else self.Q
R = R if R is not None else self.R
self.model.set_refpoint(state=x, input=u, t=t)
n = x.size(-1)
xp = self.generate_particles(x, n * P)
xs, ye = self.model(xp, u)
q = self.relative_likelihood(y, ye, R)
xr = self.resample_particles(q, xs)
x = xr.mean(dim=-2)
ex = xr - x
P = self.compute_cov(ex, ex, Q)
return x, P<|fim_middle|>forward<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, mock_RpmDataBase):
rpmdb = mock.Mock()
rpmdb.has_rpm.return_value = True
mock_RpmDataBase.return_value = rpmdb
self.manager.post_process_install_requests_bootstrap()
rpmdb.rebuild_database.assert_called_once_with()
rpmdb.set_database_to_image_path.assert_called_once_with()<|fim_middle|>test_post_process_install_requests_bootstrap<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(source, query):
"""Get records from source database.
Args:
source: File path to the source database where we want to extract the
data from.
query: The query string to be executed in order to retrieve relevant
attributes as (datetime, url, time) from the source database according
to the browser chosen.
"""
try:
conn = open_db(source)
cursor = conn.cursor()
cursor.execute(query)
history = cursor.fetchall()
conn.close()
return history
except sqlite3.OperationalError as op_e:
raise Error('Could not perform queries on the source database: '
'{}'.format(op_e))<|fim_middle|>extract<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
s = GiftCardTransaction.objects.filter(
card=OuterRef('pk')
).order_by().values('card').annotate(s=Sum('value')).values('s')
return self.request.organizer.reusable_media.prefetch_related(
Prefetch(
'linked_orderposition',
queryset=OrderPosition.objects.select_related(
'order', 'order__event', 'order__event__organizer', 'seat',
).prefetch_related(
Prefetch('checkins', queryset=Checkin.objects.all()),
'answers', 'answers__options', 'answers__question',
)
),
Prefetch(
'linked_giftcard',
queryset=GiftCard.objects.annotate(
cached_value=Coalesce(Subquery(s), Decimal('0.00'))
)
)
)<|fim_middle|>get_queryset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
parameters = {
**self.serialize_query_param(
"api-version", "2019-09-01",
required=True,
),
}
return parameters<|fim_middle|>query_parameters<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(sdfg):
return utils.distributed_compile(sdfg, commworld)<|fim_middle|>compile<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, wallet_basename, encrypt=False):
self.nodes[0].createwallet(wallet_name=f"{wallet_basename}_base", descriptors=False, blank=True)
self.nodes[0].createwallet(wallet_name=f"{wallet_basename}_test", descriptors=False, blank=True)
base_wallet = self.nodes[0].get_wallet_rpc(f"{wallet_basename}_base")
test_wallet = self.nodes[0].get_wallet_rpc(f"{wallet_basename}_test")
# Setup both wallets with the same HD seed
seed = get_generate_key()
base_wallet.sethdseed(True, seed.privkey)
test_wallet.sethdseed(True, seed.privkey)
if encrypt:
# Encrypting will generate a new HD seed and flush the keypool
test_wallet.encryptwallet("pass")
else:
# Generate a new HD seed on the test wallet
test_wallet.sethdseed()
return base_wallet, test_wallet<|fim_middle|>prepare_wallets<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
ex_river_routing(1,1)<|fim_middle|>test_river_routing_1_1<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(filename, port1, port2, per_listener):
with open(filename, 'w') as f:
f.write("per_listener_settings %s\n" % (per_listener))
f.write("check_retain_source true\n")
f.write("port %d\n" % (port1))
f.write("allow_anonymous true\n")
f.write("acl_file %s\n" % (filename.replace('.conf', '.acl')))
f.write("persistence true\n")
f.write("persistence_file %s\n" % (filename.replace('.conf', '.db')))
f.write("listener %d\n" % (port2))
f.write("allow_anonymous true\n")<|fim_middle|>write_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
@jitclass
class MyInt:
x: int
def __init__(self, value):
self.x = value
self.assertEqual(as_numba_type(MyInt), MyInt.class_type.instance_type)<|fim_middle|>test_jitclass_registers<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional['outputs.VulnerabilityAssessmentRecurringScansPropertiesResponse']:
"""
The recurring scans settings
"""
return pulumi.get(self, "recurring_scans")<|fim_middle|>recurring_scans<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>( # type: ignore[override]
self,
df: pandas.DataFrame,
with_default_function_args: bool = True,
function_prefix: str = "function.",
seed_name: str = "seed",
y_name: str = "y",
):
"""Load data from a `pandas.DataFrame`.
If ``with_default_function_args`` is True, then ``learner.function``'s
default arguments are set (using `functools.partial`) from the values
in the `pandas.DataFrame`.
Parameters
----------
df : pandas.DataFrame
The data to load.
with_default_function_args : bool, optional
The ``with_default_function_args`` used in ``to_dataframe()``,
by default True
function_prefix : str, optional
The ``function_prefix`` used in ``to_dataframe``, by default "function."
TODO
"""
raise NotImplementedError<|fim_middle|>load_dataframe<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, emulator: Emulator):
"""!
@brief Install SCION on router, control service and host nodes.
"""
super().METHOD_NAME(emulator)
reg = emulator.getRegistry()
for ((scope, type, name), obj) in reg.getAll().items():
if type == 'rnode':
rnode: ScionRouter = obj
if not issubclass(rnode.__class__, ScionRouter):
rnode.__class__ = ScionRouter
rnode.initScionRouter()
self.__install_scion(rnode)
name = rnode.getName()
rnode.appendStartCommand(_CommandTemplates['br'].format(name=name), fork=True)
elif type == 'csnode':
csnode: Node = obj
self.__install_scion(csnode)
self.__append_scion_command(csnode)
name = csnode.getName()
csnode.appendStartCommand(_CommandTemplates['cs'].format(name=name), fork=True)
elif type == 'hnode':
hnode: Node = obj
self.__install_scion(hnode)
self.__append_scion_command(hnode)<|fim_middle|>configure<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(inst, basedn, log, args):
log = log.getChild('referint_del_config')
targetdn = args.DN
config = ReferentialIntegrityConfig(inst, targetdn)
config.delete()
log.info("Successfully deleted the %s", targetdn)<|fim_middle|>referint_del_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dd):
dict_class = []
dict_idx = []
for ii, jj in enumerate(dd.symbols):
dict_idx.append(ii)
dict_class.append(jj)
dict_idx = np.array(dict_idx)
dict_class = np.array(dict_class)
return dict_class, dict_idx<|fim_middle|>dict_to_nparr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
with session.begin():
lc = data_setup.create_labcontroller()
another_lc = data_setup.create_labcontroller()
data_setup.unique_name(u'lab%s.testdata.invalid')
try:
run_client(['bkr', 'labcontroller-modify',
'--fqdn', another_lc.fqdn,
lc.fqdn])
self.fail('Must error out')
except ClientError as e:
self.assertIn('FQDN %s already in use' % another_lc.fqdn,
e.stderr_output)<|fim_middle|>test_change_fqdn_being_used_by_another<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
set_toolchangedata('0')
global change_tool
change_tool = '0'<|fim_middle|>tool_changed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
@abc.abstractproperty
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
class C:
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def foo(self): return 3
class D(C):
@property
def foo(self): return super(D, self).foo
self.assertEqual(D().foo, 3)<|fim_middle|>test_abstractproperty_basics<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, query: str) -> str:
raise NotImplementedError("Explain not yet implemented in Oracle")<|fim_middle|>explain_as_text<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.regmap.METHOD_NAME()
return super().METHOD_NAME()<|fim_middle|>reloadme<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, backend, params):
key1 = params["key1"]
key2 = params["key2"]
key3 = params["key3"]
key = key1 + key2 + key3
message = params["message"]
output = params["output"]
cmac = CMAC(TripleDES(binascii.unhexlify(key)), backend)
cmac.update(binascii.unhexlify(message))
cmac.verify(binascii.unhexlify(output))<|fim_middle|>test_3des_verify<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, sample: dict) -> Tuple[str, List[str]]:
"""
Given an example in dataset format, create the prompt and the list of
correct references.
"""
prompt = ""
prompt += f"Title: {sample['title']}\n\n"
prompt += f"Background: {sample['background']}\n\n"
prompt += f"Section: {sample['section_title']}\n"
dialogue = sample["paragraphs"][0]
context = dialogue["context"]
assert context[-13:] == " CANNOTANSWER"
context = context[:-13]
prompt += f"Passage: {context}\n\n"
qas = dialogue["qas"]
num_qas = len(qas)
k = random.randint(3, num_qas) - 1 # allow at least two QAs in dialogue
for i in range(k - 1):
prompt += f"Question: {qas[i]['question']}\n"
prompt += f"Answer: {qas[i]['orig_answer']['text']}\n\n"
prompt += f"Question: {qas[k]['question']}"
answers = [ans["text"] for ans in qas[k]["answers"]]
# De-duplicate the list with dict.fromkeys, which preserves the list order
answers = list(dict.fromkeys(answers))
if answers == ["CANNOTANSWER"]:
answers.extend(["Not enough information", "Cannot answer", "Do not know"])
return prompt, answers<|fim_middle|>create_prompt<|file_separator|> |