text (string, lengths 67 to 7.88k) |
---|
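Each row below is a fill-in-the-middle (FIM) training string: a Python snippet split into a prefix, a suffix, and a masked middle (here, the function or method name), delimited by the `<|fim_prefix|>`, `<|fim_suffix|>`, `<|fim_middle|>` and `<|file_separator|>` sentinels. The training target is the text after `<|fim_middle|>`; everything before it is the model's context. As a minimal illustrative sketch (the regex and helper are not part of the dataset), assuming the standard prefix/suffix/middle layout, a row can be spliced back into its original snippet like this:

```python
import re

# Sentinels used by the rows below (illustrative parser, not part of the dataset).
FIM_ROW = re.compile(
    r"<\|fim_prefix\|>(?P<prefix>.*?)"
    r"<\|fim_suffix\|>(?P<suffix>.*?)"
    r"<\|fim_middle\|>(?P<middle>.*?)"
    r"<\|file_separator\|>",
    re.DOTALL,
)

def reassemble(row: str) -> str:
    """Splice the masked middle back between prefix and suffix of one row."""
    m = FIM_ROW.search(row)
    if m is None:
        raise ValueError("row does not follow the prefix/suffix/middle layout")
    return m["prefix"] + m["middle"] + m["suffix"]

example = (
    "<|fim_prefix|>def <|fim_suffix|>(self, data):\n"
    "    self.stack[-1][2].append(data)"
    "<|fim_middle|>add_character_data<|file_separator|>"
)
print(reassemble(example))  # def add_character_data(self, data): ...
```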
<|fim_prefix|>def <|fim_suffix|>(self, data):
self.stack[-1][2].append(data)<|fim_middle|>add_character_data<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, inputs, *args, **kwargs):<|fim_middle|>call<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None<|fim_middle|>is_present<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
with patch.dict(chef.__opts__, {"test": True}):
yield<|fim_middle|>test_mode<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(title):
print()
print('*' * 30)
print(f'{title}...')
print('*' * 30)
print()<|fim_middle|>print_header<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return Frozen(_fix_attributes(self.ds.attributes))<|fim_middle|>get_attrs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(x, *args):
return numpy.cos(x) - x<|fim_middle|>prob18<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.num_nodes = 1
self.extra_args = [["-checkaddrman=1"]] # Do addrman checks on all operations.<|fim_middle|>set_test_params<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(base_net_oos_with_pumps):
net = copy.deepcopy(base_net_oos_with_pumps)
j11 = pandapipes.create_junction(net, 1.05, 293.15, name="Junction 11", geodata=(14, 0))
j12 = pandapipes.create_junction(net, 1.05, 293.15, name="Junction 12", geodata=(14, -2))
j10 = net.junction.loc[net.junction.name == "Junction 10"].index[0]
pandapipes.create_flow_controls(net, [j10, j10], [j11, j12], 0.5, 0.1, in_service=[True, False])
collections = plot.create_simple_collections(net, plot_sinks=True, plot_sources=True)
assert len(collections) == len([comp for comp in net["component_list"]
if not net[comp.table_name()].empty])
assert len(collections["junction"].get_paths()) == len(net.junction[net.junction.in_service])
assert len(collections["pipe"].get_paths()) == len(net.pipe[net.pipe.in_service])
assert len(collections["ext_grid"].get_paths()) == len(net.ext_grid[net.ext_grid.in_service])
assert len(collections["source"]) == 2
assert isinstance(collections["source"][0], PatchCollection)
assert isinstance(collections["source"][1], LineCollection)
assert len(collections["source"][0].get_paths()) == len(net.source[net.source.in_service])
assert len(collections["source"][1].get_paths()) == 3 * len(net.source[net.source.in_service])
assert len(collections["sink"]) == 2
assert isinstance(collections["sink"][0], PatchCollection)
assert isinstance(collections["sink"][1], LineCollection)
assert len(collections["sink"][0].get_paths()) == len(net.sink[net.sink.in_service])
assert len(collections["sink"][1].get_paths()) == len(net.sink[net.sink.in_service])
assert len(collections["valve"]) == 2
assert isinstance(collections["valve"][0], PatchCollection)
assert isinstance(collections["valve"][1], LineCollection)
assert len(collections["valve"][0].get_paths()) == 2 * len(net.valve)
assert len(collections["valve"][1].get_paths()) == 2 * len(net.valve)
assert len(collections["heat_exchanger"]) == 2
assert isinstance(collections["heat_exchanger"][0], PatchCollection)
assert isinstance(collections["heat_exchanger"][1], LineCollection)
assert len(collections["heat_exchanger"][0].get_paths()) == 2 * len(net.heat_exchanger[
net.heat_exchanger.in_service])
assert len(collections["heat_exchanger"][1].get_paths()) == 2 * len(net.heat_exchanger[
net.heat_exchanger.in_service])
assert len(collections["pump"]) == 2
assert isinstance(collections["pump"][0], PatchCollection)
assert isinstance(collections["pump"][1], LineCollection)
assert len(collections["pump"][0].get_paths()) == len(net.pump[net.pump.in_service])
assert len(collections["pump"][1].get_paths()) == 4 * len(net.pump[net.pump.in_service])
assert len(collections["circ_pump_pressure"]) == 2
assert isinstance(collections["circ_pump_pressure"][0], PatchCollection)
assert isinstance(collections["circ_pump_pressure"][1], LineCollection)
assert len(collections["circ_pump_pressure"][0].get_paths()) == len(net.circ_pump_pressure[
net.circ_pump_pressure.in_service])
assert len(collections["circ_pump_pressure"][1].get_paths()) == 4 * len(net.circ_pump_pressure[
net.circ_pump_pressure.in_service])
assert len(collections["circ_pump_mass"]) == 2
assert isinstance(collections["circ_pump_mass"][0], PatchCollection)
assert isinstance(collections["circ_pump_mass"][1], LineCollection)
assert len(collections["circ_pump_mass"][0].get_paths()) == \
len(net.circ_pump_mass[net.circ_pump_mass.in_service])
assert len(collections["circ_pump_mass"][1].get_paths()) == \
4 * len(net.circ_pump_mass[net.circ_pump_mass.in_service])
assert len(collections["flow_control"]) == 2
assert isinstance(collections["flow_control"][0], PatchCollection)
assert isinstance(collections["flow_control"][1], LineCollection)
assert len(collections["flow_control"][0].get_paths()) == \
3 * len(net.flow_control[net.flow_control.in_service])
assert len(collections["flow_control"][1].get_paths()) == \
2 * len(net.flow_control[net.flow_control.in_service])<|fim_middle|>test_simple_collections_out_of_service<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path: path_type):
# Extract OSS configuration from the encoded URL.
str_path = stringify_path(path)
parse_result = oss2.urlparse(str_path)
if parse_result.scheme != "oss":
raise ValueError(
f"Except scheme oss, but got scheme: {parse_result.scheme}"
f" in path: {str_path}"
)
bucket = parse_result.hostname
if not (parse_result.username and parse_result.password):
raise RuntimeError(r"Please use build_oss_path to add OSS info")
param_dict = url_to_dict(parse_result.username)
access_key_id = param_dict["access_key_id"]
access_key_secret = parse_result.password
end_point = param_dict["end_point"]
key = parse_result.path
key = key[1:] if key.startswith("/") else key
return bucket, key, access_key_id, access_key_secret, end_point<|fim_middle|>parse_osspath<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Test basic modules start and stop
"""
self.render_config_template(
reload=True,
reload_path=self.working_dir + "/configs/*.yml",
reload_type="modules",
inputs=False,
)
proc = self.start_beat()
os.mkdir(self.working_dir + "/logs/")
logfile = self.working_dir + "/logs/test.log"
os.mkdir(self.working_dir + "/configs/")
with open(self.working_dir + "/configs/system.yml.test", 'w') as f:
f.write(moduleConfigTemplate.format(self.working_dir + "/logs/*"))
os.rename(self.working_dir + "/configs/system.yml.test",
self.working_dir + "/configs/system.yml")
with open(logfile, 'w') as f:
f.write("Hello world\n")
self.wait_until(lambda: self.output_lines() == 1, max_timeout=10)
print(self.output_lines())
# Remove input
with open(self.working_dir + "/configs/system.yml", 'w') as f:
f.write("")
# Wait until input is stopped
self.wait_until(
lambda: self.log_contains("Stopping runner:"),
max_timeout=15)
with open(logfile, 'a') as f:
f.write("Hello world\n")
# Wait to give it a chance to pick up the new line (it shouldn't)
time.sleep(1)
self.wait_until(lambda: self.output_lines() == 1, max_timeout=5)
proc.check_kill_and_wait()<|fim_middle|>test_start_stop<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, on):
pass
# No joystick used<|fim_middle|>joystick_on_off<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(applications_stream):
response = create_response({"link": f'<https://harvest.greenhouse.io/v1/applications?per_page={100}&since_id=123456789>; rel="next"'})
next_page_token = applications_stream.retriever._next_page_token(response=response)
request_params = applications_stream.retriever._request_params(next_page_token=next_page_token, stream_state={})
path = applications_stream.retriever._paginator_path()
assert "applications?per_page=100&since_id=123456789" == path
assert request_params == {"per_page": 100}<|fim_middle|>test_request_params_next_page_token_is<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
p = Plot(["a", "b", "c"], [1, 0, 2]).add(Bar()).plot()
ax = p._figure.axes[0]
assert len(ax.patches) == 2<|fim_middle|>test_zero_height_skipped<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
pins = pinutils.scan_pin_file([], 'stm32f401.csv', 5, 8, 9)
pins = pinutils.scan_pin_af_file(pins, 'stm32f401_af.csv', 0, 1)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])<|fim_middle|>get_pins<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
addr = account.Address('UŠer', 'example.com', case_sensitive=True)
self.assertNotEqual(addr, 'ušer@example.com')<|fim_middle|>test_eq_unicode_case_sensitive<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(x):
return Xdot(x) - mdot(x)<|fim_middle|>matvec<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, notebook):
imageList = wx.ImageList(24, 24)
image = wx.Image(PPRZ_HOME + "/data/pictures/gray_led24.png")
bitmap = wx.BitmapFromImage(image)
imageList.Add(bitmap)
image = wx.Image(PPRZ_HOME + "/data/pictures/green_led24.png")
bitmap = wx.BitmapFromImage(image)
imageList.Add(bitmap)
notebook.AssignImageList(imageList)<|fim_middle|>setup_image_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(payload: WildValue) -> str:
return RADARR_MESSAGE_TEMPLATE_MOVIE_IMPORTED.format(
movie_title=payload["movie"]["title"].tame(check_string)
)<|fim_middle|>get_body_for_movie_imported_event<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, meta, form_id, status):
self.stdout.write(
f'{meta.domain},{meta.case_id},{meta.referenced_id},{meta.index_identifier},{form_id},{status}'
)<|fim_middle|>write_output<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(h, f):
if h.startswith(b'\0\0'):
rate = get_short_le(h[2:4])
if 4000 <= rate <= 25000:
return 'sndr', rate, 1, -1, 8<|fim_middle|>test_sndr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(box_dict):
return np.array([
[
box_dict['Lx'], box_dict['Ly'] * box_dict['xy'],
box_dict['Lz'] * box_dict['xz']
],
[0, box_dict['Ly'], box_dict['Lz'] * box_dict['yz']],
[0, 0, box_dict['Lz']],
])<|fim_middle|>expected_matrix<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
cls,
type: Literal["atlas", "modes"],
name: str,
image_path: Path | str,
labels_path: Path | str,
) -> Self:
image_path = Path(image_path)
labels_frame = read_spreadsheet(labels_path)
labels: dict[int, str] = dict()
for label_tuple in labels_frame.itertuples(index=False):
# First column is the index, second is the name.
labels[int(label_tuple[0])] = format_like_bids(str(label_tuple[1]))
image = nib.loadsave.load(image_path)
return cls(type, name, image, labels)<|fim_middle|>from_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""This write a function of object Chip"""
if "external" in self:
return ""
code = self.symtab.codeFormatter()
# Generate function header
void_type = self.symtab.find("void", Type)
return_type = self.return_type.c_ident
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
if "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
params = ", ".join(self.param_strings)
code(
"""<|fim_middle|>generate_code<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(api_name: str):
if not api_name.isidentifier():
raise InvalidArgument(
"Invalid API name: '{}', a valid identifier may only contain letters,"
" numbers, underscores and not starting with a number.".format(api_name)
)
if api_name in RESERVED_API_NAMES:
raise InvalidArgument(
"Reserved API name: '{}' is reserved for infra endpoints".format(
api_name
)
)<|fim_middle|>validate_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Close training areas map - be quiet"""
verbosity = G_verbose()
G_set_verbose(0)
DisplayDriver.METHOD_NAME(self)
G_set_verbose(verbosity)<|fim_middle|>close_map<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_na_n<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, model_id, locally, output):
if locally:
is_loaded, model_path = ModelDeploy(self.ctx, self.project).verify_local_model(model_id)
if not is_loaded:
raise AugerException('Model should be deployed locally.')
return ModelReview({'model_path': model_path}).METHOD_NAME(
data_path=self.ctx.config.get("source"), output=output)
else:
raise Exception("Not Implemented.")<|fim_middle|>build_review_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(seq, refmodels):
refmodels.Reference.objects.all().delete()
project1 = factories.ProjectFactory.create()
seqname1 = refmodels.make_sequence_name(project1)
project2 = factories.ProjectFactory.create()
seqname2 = refmodels.make_sequence_name(project2)
seq.alter(seqname1, 100)
seq.alter(seqname2, 200)
issue = factories.IssueFactory.create(project=project1)
assert issue.ref == 101
issue.subject = "other"
issue.save()
assert issue.ref == 101
issue.project = project2
issue.save()
assert issue.ref == 201<|fim_middle|>test_regenerate_issue_reference_on_project_change<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Prints the daemon status."""
if self.is_running():
self.echo("Daemon is running")
else:
self.echo("Daemon is not running")<|fim_middle|>status<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(test_case, shape, dtype, device):
np_input = np.random.randint(3, size=shape)
np_other = np.random.randint(3, size=shape)
input = flow.tensor(np_input, dtype=dtype, device=flow.device(device))
other = flow.tensor(np_other, dtype=dtype, device=flow.device(device))
of_out = flow.logical_and(input, other)
np_out = np.logical_and(np_input, np_other)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
x = torch.ones(3).byte()
y = torch.ones(3).byte()
z = (x & ~y).bool()
test_case.assertTrue(np.array_equal(z.numpy(), [False, False, False]))<|fim_middle|>test_logical_and<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return jsonify_data(settings_box=self.render_settings_box(),
right_header=render_event_management_header_right(self.event))<|fim_middle|>jsonify_success<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(url) -> str:
filename = list(filter(lambda x: x!='', url.split('/')))[-1]
download_path = f"{TEMPDIR}/{filename}"
with open(download_path, 'wb') as f:
f.write(requests.get(url).content)
return download_path<|fim_middle|>download<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, input: Tensor, target: Tensor) -> Tensor:
fake_img, latents = self.generator(input, return_latents=True)
path_loss, self.mean_path_length, path_lengths = self.g_path_regularize(
fake_img, latents, self.mean_path_length)
return path_loss<|fim_middle|>train_forward_g_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dmId):
nameFilePath = os.path.join(getSysfsPath(dmId), "dm", "name")
with open(nameFilePath, "r") as f:
return f.readline().rstrip("\n")<|fim_middle|>get_dev_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(buffer_connection):
from moler.cmd.unix import iperf
buffer_connection.remote_inject_response([iperf.COMMAND_OUTPUT_basic_server])
iperf_cmd = iperf.Iperf(connection=buffer_connection.moler_connection,
**iperf.COMMAND_KWARGS_basic_server)
assert iperf_cmd() == iperf.COMMAND_RESULT_basic_server<|fim_middle|>test_iperf_correctly_parses_basic_udp_server<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path, mode):
parts = str(path).split(".")
assert len(parts) > 1, "Extension needed to figure out serialization format"
if len(parts) == 2:
db_format = parts[-1]
compression = None
else:
db_format = parts[-2]
compression = parts[-1]
assert compression is None or compression in COMPRESSION_FORMATS
assert db_format in SERIALIZATION_FORMATS
store_constructor = SERIALIZATION_FORMATS[db_format]
if compression == "gz":
with gzip.GzipFile(path, mode) as f:
yield store_constructor(f)
elif compression == "zstd":
if "w" in mode or "a" in mode:
cctx = zstandard.ZstdCompressor()
with open(path, mode) as f:
with cctx.stream_writer(f) as writer:
yield store_constructor(writer)
else:
dctx = zstandard.ZstdDecompressor()
with open(path, mode) as f:
with dctx.stream_reader(f) as reader:
yield store_constructor(reader)
else:
with open(path, mode) as f:
yield store_constructor(f)<|fim_middle|>db_open<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(d):
file_list = []
for dirname, dirs, names in os.walk(d):
file_list.append((dirname, filter(lambda x, d=dirname: is_file_or_link(d, x),
names)))
return file_list<|fim_middle|>list_files_recursive<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(text):
return text_attribute(text, 'cyan')<|fim_middle|>cyan<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Path:
"""Path to PUDL output directory."""
return Path(self.pudl_output)<|fim_middle|>output_dir<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
tcl = self.interp
filename = "testEvalFile.tcl"
fd = open(filename,'w')
script = """set a 1
set b 2
set c [ expr $a + $b ]
"""
fd.write(script)
fd.close()
tcl.evalfile(filename)
os.remove(filename)
self.assertEqual(tcl.eval('set a'),'1')
self.assertEqual(tcl.eval('set b'),'2')
self.assertEqual(tcl.eval('set c'),'3')<|fim_middle|>test_eval_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args, model, device, train_loader, optimizer, epoch):
model.METHOD_NAME()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
if args.dry_run:
break<|fim_middle|>train<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.request('/store-api/country', name='countries')<|fim_middle|>countries<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>( filepath, fileposition ):
with open( filepath, encoding = 'utf8' ) as f:
if fileposition:
f.seek( fileposition )
return f.read(), f.tell()<|fim_middle|>read_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, js_name):
with open(os.path.join(TEST_DIR, js_name)) as f:
js = f.read()
# WebKit.run_javascript() needs some serializable return value
js += '\ntrue\n'
result = None
def on_done(_group, name, value):
# cockpit-client resets the value to '' initially, to guarantee that a Changed signal happens
# even when two consecutive run-js calls have the same result
v = value.get_string()
if v == '':
return
nonlocal result
result = v
def on_timeout():
nonlocal result
result = "timed out waiting for JavaScript result"
handler = self.win_actions.connect('action-state-changed::run-js', on_done)
self.win_actions.activate_action("run-js", GLib.Variant.new_string(self.testlib + js))
main = GLib.MainContext.default()
GLib.timeout_add_seconds(JS_TIMEOUT, on_timeout)
while not result:
main.iteration(may_block=True)
self.win_actions.disconnect(handler)
return result<|fim_middle|>run_js<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
'''
Stops (Unregisters) the client.
'''
self.sub.unregister()<|fim_middle|>stop<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(pipeline_response):
deserialized = self._deserialize("OperationsList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)<|fim_middle|>extract_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
data_util = datetime(2023, 4, 17)
self.assertTrue(self.calendar.data_eh_dia_util_bancario(data_util))
data_nao_util = datetime(2023, 4, 15)
self.assertFalse(self.calendar.data_eh_dia_util_bancario(data_nao_util))
data_nao_util = datetime(2023, 4, 16)
self.assertFalse(self.calendar.data_eh_dia_util_bancario(data_nao_util))
data_feriado = datetime(2023, 4, 21)
self.assertFalse(self.calendar.data_eh_dia_util_bancario(data_feriado))
<|fim_prefix|>def <|fim_suffix|>(self, node):
return (node.y - self.min_y) * self.x_width + (node.x - self.min_x)<|fim_middle|>calc_index<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, library: "ElementLibrary"):
library_name = library.get_name()
if self.__module is None:
_TaipyLogger._get_logger().info(
f"Python API for extension library '{library_name}' will not be available. To fix this, import 'taipy.gui.builder' before importing the extension library."
)
return
library_module = getattr(self.__module, library_name, None)
if library_module is None:
library_module = types.ModuleType(library_name)
setattr(self.__module, library_name, library_module)
for element_name in library.get_elements().keys():
setattr(
library_module,
element_name,
_ElementApiGenerator().createControlElement(element_name, f"{library_name}.{element_name}"),
)<|fim_middle|>add_library<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(filters, kernel_size, name=None):
return Sequential([
ReLU(),
SeparableConv2D(filters, kernel_size, padding='same'),
BatchNormalization(trainable=True),
], name)<|fim_middle|>build_sep_conv<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, point, index=np.s_[:], p=None, scaled=True):
smlphi = self.smspace.METHOD_NAME(point, index=index, p=p)
ldof = self.number_of_local_dofs(p=p, doftype='cell')
shape = point.shape[:-1] + (ldof, 2)
lphi = np.zeros(shape, dtype=self.ftype)
lphi[..., :ldof//2, 0] = smlphi
lphi[..., -ldof//2:, 1] = smlphi
return lphi<|fim_middle|>laplace_basis<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
error = APIException({
'non_field_errors': ['Error message.']
})
response = exception_handler(error, None)
assert response.data == {
'code': 'error',
'message': 'Error message.',
}<|fim_middle|>test_validation_error_field_single<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(working_dir, bundler_dir, thresh, new_bundles):
if path_exists(join(working_dir, '.git')):
last_bundle = None
last_bundlename = newest_bundle_filename(bundler_dir)
# "git gc" before bundling is *very* important, since
# otherwise the bundles mushroom for no good reason.
os.chdir(working_dir)
cmd(['git', 'commit', '-a', '-m', 'Salvus save'], ignore_errors=True)
cmd(['git', 'gc'])
# Try making a new bundle first
cmd([diffbundler, 'create', working_dir, bundler_dir], dry_run=False)
new_bundlename = newest_bundle_filename(bundler_dir)
if new_bundlename != last_bundlename:
# There were changes to the repo that we had not already bundled up.
# First, check if we should merge the last two bundles.
if thresh > 0 and last_bundlename is not None and getsize(last_bundlename)/1000000.0 < thresh:
os.unlink(last_bundlename)
os.unlink(new_bundlename)
cmd([diffbundler, 'create', working_dir, bundler_dir], dry_run=False)
new_bundles.append(last_bundlename)
else:
new_bundles.append(new_bundlename)
for path in listdir(working_dir):
p = join(working_dir, path)
if isdir(p) and not islink(p):
METHOD_NAME(p, join(bundler_dir, path), thresh, new_bundles)<|fim_middle|>create_multidiffbundle<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
import os
import subprocess
# Get the repository directory
repo_dir = os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")))
# Attempt to get the configured username from the local Git
try:
result = subprocess.run(["git", "config", "user.username"], stdout=subprocess.PIPE, cwd=repo_dir)
result.check_returncode() # Check if the command was executed successfully
username = result.stdout.decode().rstrip()
return username
except subprocess.CalledProcessError as ex:
# Handle errors if the git config command fails
print(f"Error fetching Git username: {ex}")
return None<|fim_middle|>git_username_detect<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
lx = guess_lexer(get_input('easytrieve', 'example.ezt'))
assert lx.__class__.__name__ == 'EasytrieveLexer'
lx = guess_lexer(get_input('easytrieve', 'example.mac'))
assert lx.__class__.__name__ == 'EasytrieveLexer'<|fim_middle|>test_guess_lexer_easytrieve<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(data):
if not isinstance(data, bytes) and hasattr(data, 'encode'):
data = data.encode('utf-8')
# Don't bail out with an exception if data is None
return data if data is not None else b''<|fim_middle|>coerce_to_bytes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
import torch.nn as nn
class Net(ModelSpace):
def __init__(self):
super().__init__()
self.repeat = Repeat(
lambda index: LayerChoice([nn.Identity(), nn.Identity()], label=f'layer{index}'),
(3, 5), label='rep')
def forward(self, x):
return self.module(x)
net = Net()
assert net.contains({'rep': 3, 'layer0': 0, 'layer1': 0, 'layer2': 0})
assert not net.contains({'rep': 4, 'layer0': 0, 'layer1': 0, 'layer2': 0})
assert net.contains({'rep': 3, 'layer0': 0, 'layer1': 0, 'layer2': 0, 'layer3': 0})<|fim_middle|>test_repeat_contains<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(backend):
return getattr(getattr(backend, "AUTH_BACKEND", backend), "name", None)<|fim_middle|>get_backend_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(module, min_num_params: Optional[int] = None, **kwargs):
"""
Helper to wrap layers/modules in FSDP. This falls back to a no-op if
fairscale is not available.
Args:
module (nn.Module): module to (maybe) wrap
min_num_params (int, Optional): minimum number of layer params to wrap
"""
try:
from fairscale.nn import wrap
if min_num_params is not None:
num_params = sum(p.numel() for p in module.parameters())
if num_params >= min_num_params:
return wrap(module, **kwargs)
else:
return module
else:
return wrap(module, **kwargs)
except ImportError:
return module<|fim_middle|>fsdp_wrap<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""The PUSH socket for use in the zmq message destructor callback."""
if getattr(self, "_stay_down", False):
raise RuntimeError("zmq gc socket requested during shutdown")
if not self.is_alive() or self._push is None:
self._push = self.context.socket(zmq.PUSH)
self._push.connect(self.url)
return self._push<|fim_middle|>push_socket<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>create_dataset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
workspace_process_context: WorkspaceProcessContext,
query: str,
variables: Optional[Mapping[str, object]] = None,
):
check.inst_param(
workspace_process_context, "workspace_process_context", WorkspaceProcessContext
)
check.str_param(query, "query")
check.opt_mapping_param(variables, "variables")
query = query.strip("'\" \n\t")
context = workspace_process_context.create_request_context()
result = create_schema().execute(
query,
context_value=context,
variable_values=variables,
)
result_dict = result.formatted
# Here we detect if this is in fact an error response
# If so, we iterate over the result_dict and the original result
# which contains a GraphQLError. If that GraphQL error contains
# an original_error property (which is the exception the resolver
# has thrown, typically) we serialize the stack trace of that exception
# in the 'stack_trace' property of each error to ease debugging
if "errors" in result_dict:
result_dict_errors = check.list_elem(result_dict, "errors", of_type=Exception)
result_errors = check.is_list(result.errors, of_type=Exception)
check.invariant(len(result_dict_errors) == len(result_errors))
for python_error, error_dict in zip(result_errors, result_dict_errors):
if hasattr(python_error, "original_error") and python_error.original_error:
error_dict["stack_trace"] = get_stack_trace_array(python_error.original_error)
return result_dict<|fim_middle|>execute_query<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Install procedure for Perl modules: using either Makefile.Pl or Build.PL."""
prefix_opt = self.cfg.get('prefix_opt')
# Perl modules have two possible installation procedures: using Makefile.PL and Build.PL
# configure, build, test, install
if os.path.exists('Makefile.PL'):
if prefix_opt is None:
prefix_opt = 'PREFIX'
install_cmd = ' '.join([
self.cfg['preconfigopts'],
'perl',
'Makefile.PL',
'%s=%s' % (prefix_opt, self.installdir),
self.cfg['configopts'],
])
run_cmd(install_cmd)
ConfigureMake.build_step(self)
ConfigureMake.test_step(self)
ConfigureMake.install_step(self)
elif os.path.exists('Build.PL'):
if prefix_opt is None:
prefix_opt = '--prefix'
install_cmd = ' '.join([
self.cfg['preconfigopts'],
'perl',
'Build.PL',
prefix_opt,
self.installdir,
self.cfg['configopts'],
])
run_cmd(install_cmd)
run_cmd("%s perl Build build %s" % (self.cfg['prebuildopts'], self.cfg['buildopts']))
if self.cfg['runtest']:
run_cmd('perl Build %s' % self.cfg['runtest'])
run_cmd('%s perl Build install %s' % (self.cfg['preinstallopts'], self.cfg['installopts']))<|fim_middle|>install_perl_module<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
client, cluster, aws_nodes = create_and_validate_import_cluster()
cluster_cleanup(client, cluster, aws_nodes)<|fim_middle|>test_import_rke_cluster<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(fn):
listen(target, identifier, fn, *args, **kw)
return fn<|fim_middle|>decorate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.copy("LICENSE_1_0.txt", dst="licenses", src=self._source_subfolder)
if self.options.c_api:
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
else:
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
self.copy("*.hpp", dst="include", src=os.path.join(self._source_subfolder, "include"))<|fim_middle|>package<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(LMS: ArrayLike) -> NDArrayFloat:
"""
Callable applying the forward non-linearity to the :math:`LMS`
colourspace array.
"""
with domain_range_scale("ignore"):
return eotf_inverse_ST2084(LMS)<|fim_middle|>lm_s_to_lm_s_p_callable<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_basics<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, obj, attrs, user, *args, **kwargs):
raw_data = {option.key: option.value for option in attrs}
data = {}
for key, uo in USER_OPTION_SETTINGS.items():
val = raw_data.get(uo["key"], uo["default"])
if uo["type"] == bool:
data[key.value] = bool(int(val)) # '1' is true, '0' is false
elif uo["type"] == int:
data[key.value] = int(val)
data["weeklyReports"] = True # This cannot be overridden
return data<|fim_middle|>serialize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(org_user):
"""Everybody is considered to have viewer perms."""
return org_user.role_level >= ROLE_VIEWER<|fim_middle|>requires_viewer<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
export_conandata_patches(self)<|fim_middle|>export_sources<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
constraint = gpytorch.constraints.GreaterThan([1.0, 2.0])
v = torch.tensor([-3.0, -2.0])
value = constraint.transform(v)
actual_value = v.clone()
actual_value[0] = softplus(v[0]) + 1.0
actual_value[1] = softplus(v[1]) + 2.0
self.assertAllClose(value, actual_value)<|fim_middle|>test_transform_tensor_greater_than<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(data):
"""For a 44-year-old, the api should
always return an age, a full retirement age
and a value for benefits at age 70
"""
if (
data["current_age"] == 44
and data["data"]["full retirement age"] == "67"
and data["data"]["benefits"]["age 70"]
):
return "OK"
else:
return "BAD DATA"<|fim_middle|>check_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(parent, dest):
dest["author_detail"] = detail = FeedParserDict()
if "name" in parent:
dest["author"] = detail["name"] = parent["name"]
if "url" in parent:
if parent["url"].startswith("mailto:"):
detail["email"] = parent["url"][7:]
else:
detail["href"] = parent["url"]<|fim_middle|>parse_author<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(list1, list2):
"""
Examples:
>>> issubset([], [65, 66, 67])
True
>>> issubset([65], [65, 66, 67])
True
>>> issubset([65, 66], [65, 66, 67])
True
>>> issubset([65, 67], [65, 66, 67])
False
"""
n = len(list1)
for startpos in range(len(list2) - n + 1):
if list2[startpos:startpos+n] == list1:
return True
return False<|fim_middle|>issubset<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>() -> None:
@get("/")
def handler() -> None:
return None
config = RateLimitConfig(rate_limit=("second", 1))
cache_key = "RateLimitMiddleware::testclient"
app = Litestar(route_handlers=[handler], middleware=[config.middleware])
store = app.stores.get("rate_limit")
with TestClient(app=app) as client:
response = client.get("/")
assert response.status_code == HTTP_200_OK
cached_value = await store.get(cache_key)
assert cached_value
cache_object = CacheObject(**decode_json(value=cached_value))
assert cache_object.reset == int(time() + 1)
cache_object.reset -= 2
await store.set(cache_key, encode_json(cache_object))
response = client.get("/")
assert response.status_code == HTTP_200_OK<|fim_middle|>test_reset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
o = Organizer.objects.create(name='Dummy', slug='dummy')
with scope(organizer=o):
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), live=True
)
o1 = Order.objects.create(
code='FOOBAR', event=event, email='dummy@dummy.test',
status=Order.STATUS_PENDING,
datetime=now(), expires=now() + timedelta(days=10),
total=Decimal('13.37'),
)
shirt = Item.objects.create(event=event, name='T-Shirt', default_price=12)
shirt_red = ItemVariation.objects.create(item=shirt, default_price=14, value="Red")
OrderPosition.objects.create(
order=o1, item=shirt, variation=shirt_red,
price=12, attendee_name_parts={}, secret='1234'
)
OrderPosition.objects.create(
order=o1, item=shirt, variation=shirt_red,
price=12, attendee_name_parts={}, secret='5678'
)
yield event, o1, shirt<|fim_middle|>env<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
expected = [
("can't assign to keyword", "None", 80, self.FatalError),
]
actual = self.compile_file("None = 42")
self.assertEqual(expected, actual)<|fim_middle|>test_assignment_to_none<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, request):
args = request.args
chdict = {
"revision": args.get(b'revision'),
"repository": args.get(b'_repository') or '',
"project": args.get(b'project') or '',
"codebase": args.get(b'codebase')
}
return ([chdict], None)<|fim_middle|>get_changes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(region):
"""
Return the region argument.
"""
return " --region {r}".format(r=region)<|fim_middle|>region<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(
src: StrOrBytesPath,
dst: StrOrBytesPath,
*,
src_dir_fd: int | None = None,
dst_dir_fd: int | None = None,
loop: AbstractEventLoop | None = ...,
executor: Any = ...,
) -> None: ...<|fim_middle|>replace<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(project_ids, state, credentials):
"""Returns the summary of recommendations on all the given projects.
Args:
project_ids: List(str) projects for which recommendations are needed.
state: state of recommendations
credentials: client credentials.
"""
recommender = build("recommender",
"v1",
credentials=credentials,
cache_discovery=False)
def get_metric(project_id):
recommendation_metric = common.get_recommendations(
project_id,
recommender=recommender,
state=state,
credentials=credentials)
return accounts_can_made_safe(project_id, state, recommendation_metric)
recommendation_stats = common.rate_limit_execution(get_metric, RATE_LIMIT,
project_ids)
recommendation_stats_sorted = sorted(
recommendation_stats, key=lambda metric: -sum(metric["stats"].values()))
return recommendation_stats_sorted<|fim_middle|>get_recommendation_summary_of_projects<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, application_paused):
self.application_paused = application_paused<|fim_middle|>application_paused_callback<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(queue: str, chunk_info):
"""Manually queue chunk when a job is stuck for whatever reason."""
from .cluster import _create_atomic_chunk
from .cluster import create_parent_chunk
from .utils import chunk_id_str
redis = get_redis_connection()
imanager = IngestionManager.from_pickle(redis.get(r_keys.INGESTION_MANAGER))
layer = chunk_info[0]
coords = chunk_info[1:]
queue = imanager.get_task_queue(queue)
if layer == 2:
func = _create_atomic_chunk
args = (coords,)
else:
func = create_parent_chunk
args = (layer, coords)
queue.enqueue(
func,
job_id=chunk_id_str(layer, coords),
job_timeout=f"{int(layer * layer)}m",
result_ttl=0,
args=args,
)<|fim_middle|>ingest_chunk<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(value_str):
try:
value = int(value_str)
if value > 0: return value
except ValueError:
pass
raise argparse.ArgumentTypeError('must be a positive integer (got {!r})'.format(value_str))<|fim_middle|>positive_int_arg<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, idx: int, key: str) -> Optional[str]:
return self._request_processor[idx].get_request_property(key)<|fim_middle|>get_request_header<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, inInputFilter):<|fim_middle|>create_para_view_filter<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, url):<|fim_middle|>open<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> dict | None:
"""
Return structured config for patch_panel
"""
if not self.shared_utils.network_services_l1:
return None
patches = []
for tenant in self._filtered_tenants:
if "point_to_point_services" not in tenant:
continue
for point_to_point_service in natural_sort(tenant["point_to_point_services"], "name"):
if subifs := point_to_point_service.get("subinterfaces", []):
subifs = [subif for subif in subifs if subif.get("number") is not None]
for endpoint in point_to_point_service.get("endpoints", []):
if self.shared_utils.hostname not in endpoint.get("nodes", []):
continue
node_index = list(endpoint["nodes"]).index(self.shared_utils.hostname)
interface = endpoint["interfaces"][node_index]
if get(endpoint, "port_channel.mode") in ["active", "on"]:
channel_group_id = "".join(re.findall(r"\d", interface))
interface = f"Port-Channel{channel_group_id}"
if subifs:
for subif in subifs:
patch = {
"name": f"{point_to_point_service['name']}_{subif['number']}",
"enabled": True,
"connectors": [
{
"id": 1,
"type": "interface",
"endpoint": f"{interface}.{subif['number']}",
},
],
}
if point_to_point_service.get("type") == "vpws-pseudowire":
patch["connectors"].append(
{
"id": 2,
"type": "pseudowire",
"endpoint": f"bgp vpws {tenant['name']} pseudowire {point_to_point_service['name']}_{subif['number']}",
}
)
append_if_not_duplicate(
list_of_dicts=patches,
primary_key="name",
new_dict=patch,
context="Patches defined under point_to_point_services",
context_keys=["name"],
)
else:
patch = {
"name": f"{point_to_point_service['name']}",
"enabled": True,
"connectors": [
{
"id": 1,
"type": "interface",
"endpoint": f"{interface}",
}
],
}
if point_to_point_service.get("type") == "vpws-pseudowire":
patch["connectors"].append(
{
"id": 2,
"type": "pseudowire",
"endpoint": f"bgp vpws {tenant['name']} pseudowire {point_to_point_service['name']}",
}
)
append_if_not_duplicate(
list_of_dicts=patches,
primary_key="name",
new_dict=patch,
context="Patches defined under point_to_point_services",
context_keys=["name"],
)
if patches:
return {"patches": patches}
return None<|fim_middle|>patch_panel<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> None:
default_processes = max(1, cpu_count())
parser = argparse.ArgumentParser(description=__doc__, parents=[DB().argparser])
if db.nmap is None:
fltnmap = None
else:
fltnmap = db.nmap.flt_empty
if db.passive is None:
fltpass = None
else:
fltpass = db.passive.flt_empty
_from: List[Generator[Record, None, None]] = []
parser.add_argument(
"--view-category",
metavar="CATEGORY",
help="Choose a different category than the default",
)
parser.add_argument(
"--test",
"-t",
action="store_true",
help="Give results in standard output instead of "
"inserting them in database.",
)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="For test output, print out formatted results.",
)
parser.add_argument(
"--no-merge",
action="store_true",
help="Do **not** merge with existing results for same host and source.",
)
parser.add_argument(
"--to-db",
metavar="DB_URL",
help="Store data to the provided URL instead of the default DB for view.",
)
parser.add_argument(
"--processes",
metavar="COUNT",
type=int,
help=f"The number of processes to use to build the records. Default on this system is {default_processes}.",
default=default_processes,
)
subparsers = parser.add_subparsers(
dest="view_source",
help=("Accepted values are 'nmap' and 'passive'. None or 'all' will do both"),
)
if db.nmap is not None:
subparsers.add_parser("nmap", parents=[db.nmap.argparser])
if db.passive is not None:
subparsers.add_parser("passive", parents=[db.passive.argparser])
subparsers.add_parser("all")
args = parser.parse_args()
view_category = args.view_category
if not args.view_source:
args.view_source = "all"
if args.view_source == "all":
_from = []
if db.nmap is not None:
fltnmap = db.nmap.parse_args(args, flt=fltnmap)
_from.append(nmap_to_view(fltnmap, category=view_category))
if db.passive is not None:
fltpass = db.passive.parse_args(args, flt=fltpass)
_from.append(passive_to_view(fltpass, category=view_category))
elif args.view_source == "nmap":
if db.nmap is None:
parser.error('Cannot use "nmap" (no Nmap database exists)')
fltnmap = db.nmap.parse_args(args, fltnmap)
_from = [nmap_to_view(fltnmap, category=view_category)]
elif args.view_source == "passive":
if db.passive is None:
parser.error('Cannot use "passive" (no Passive database exists)')
fltpass = db.passive.parse_args(args, fltpass)
_from = [passive_to_view(fltpass, category=view_category)]
if args.test:
args.processes = 1
outdb = db.view if args.to_db is None else DBView.from_url(args.to_db)
# Output results
if args.processes > 1:
nprocs = max(args.processes - 1, 1)
with Pool(
nprocs,
initializer=worker_initializer,
initargs=(args.to_db, args.no_merge),
) as pool:
for _ in pool.imap(merge_and_output, to_view_parallel(_from)):
pass
for _ in pool.imap(worker_destroyer, [None] * nprocs):
pass
else:
if args.test:
def output(host: Record) -> None:
return displayfunction_json([host], outdb)
elif args.no_merge:
output = outdb.store_host
else:
output = outdb.store_or_merge_host
try:
datadb = outdb.globaldb.data
except AttributeError:
datadb = None
outdb.start_store_hosts()
for record in to_view(_from, datadb):
output(record)
outdb.stop_store_hosts()<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Verify G2 instruction construction"""
self.assertEqual(str(MNVR("G2X2Y2I1")), "G2{'I': 1.0, 'X': 2.0, 'Y': 2.0}")
self.assertEqual(len(MNVR("G2X2Y2I1").instr), 1)
self.assertEqual(type(MNVR("G2X2Y2I1").instr[0]), PathLanguage.MoveArcCW)<|fim_middle|>test20<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> bool:
"""
Returns: True if the channel is closed, False otherwise.
"""
return self.get() == "close"<|fim_middle|>is_closed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, path=None):
return self.manager.list_prod("list_output", path)<|fim_middle|>list_output<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
self._queue = deque(self._items)<|fim_middle|>reset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, labels: Dict[str, str]) -> Metadata:
""" Get manifest from image labels.
Args:
labels: key, value string pairs
Returns:
Metadata
Raises:
MetadataError
"""
metadata_dict = translate_plain_to_tree(labels)
try:
sonic_metadata = metadata_dict['com']['azure']['sonic']
except KeyError:
raise MetadataError('No metadata found in image labels')
try:
manifest_string = sonic_metadata['manifest']
except KeyError:
raise MetadataError('No manifest found in image labels')
try:
manifest_dict = json.loads(manifest_string)
except (ValueError, TypeError) as err:
raise MetadataError(f'Failed to parse manifest JSON: {err}')
components = {}
if 'versions' in sonic_metadata:
for component, version in sonic_metadata['versions'].items():
try:
components[component] = Version.parse(version)
except ValueError as err:
raise MetadataError(f'Failed to parse component version: {err}')
labels_yang_modules = sonic_metadata.get('yang-module')
yang_modules = []
if isinstance(labels_yang_modules, str):
yang_modules.append(labels_yang_modules)
log.debug("Found one YANG module")
elif isinstance(labels_yang_modules, dict):
yang_modules.extend(labels_yang_modules.values())
log.debug(f"Found YANG modules: {labels_yang_modules.keys()}")
else:
log.debug("No YANG modules found")
return Metadata(Manifest.marshal(manifest_dict), components, yang_modules)<|fim_middle|>from_labels<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, num):
pages = num // self.per_page
self.page_start += pages
return pages * self.per_page<|fim_middle|>skip<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request): # pragma: no cover
"""This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
"""
class AwaitablePayload(object):
def __init__(self, payload):
self.payload = payload or b''
async def read(self, length=None):
if length is None:
r = self.payload
self.payload = b''
else:
r = self.payload[:length]
self.payload = self.payload[length:]
return r
uri_parts = urlsplit(request.url)
environ = {
'wsgi.input': AwaitablePayload(request.body),
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'sanic',
'REQUEST_METHOD': request.method,
'QUERY_STRING': uri_parts.query or '',
'RAW_URI': request.url,
'SERVER_PROTOCOL': 'HTTP/' + request.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'sanic',
'SERVER_PORT': '0',
'sanic.request': request
}
for hdr_name, hdr_value in request.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ<|fim_middle|>translate_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
query = {"operations": {"actions": [
"create_message",
"add_qnode(name=glucose, key=n0)",
"add_qnode(name=diabetes, key=n1)",
"connect(action=connect_nodes, max_path_length=3, shortest_path=false)",
"resultify()",
"filter_results(action=limit_number_of_results, max_results=30)",
"return(message=true, store=true)"
]}}
[response, message] = _do_arax_query(query)
assert response.status == 'OK'
assert len(message.query_graph.edges) >= 3
assert len(message.results) > 0<|fim_middle|>test_1881<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
labeled_data,
annotations_output_dir,
images_output_dir,
label_format="WKT",
):
"""Convert Labelbox JSON export to Pascal VOC format.
Args:
labeled_data (str): File path to Labelbox JSON export of label data.
annotations_output_dir (str): File path of directory to write Pascal VOC
annotation files.
images_output_dir (str): File path of directory to write images.
label_format (str): Format of the labeled data.
Valid options are: "WKT" and "XY", default is "WKT".
Todo:
* Add functionality to allow use of a local copy of an image instead of
downloading it each time.
"""
# make sure annotation output directory is valid
try:
annotations_output_dir = os.path.abspath(annotations_output_dir)
assert os.path.isdir(annotations_output_dir)
except AssertionError:
logging.exception("Annotation output directory does not exist")
return None
# read labelbox JSON output
with open(labeled_data) as f:
lines = f.readlines()
label_data = json.loads(lines[0])
for data in label_data:
try:
write_label(
data["ID"],
data["Labeled Data"],
data["Label"],
label_format,
images_output_dir,
annotations_output_dir,
)
except requests.exceptions.MissingSchema:
logging.exception(
'"Labeled Data" field must be a URL. '
"Support for local files coming soon",
)
continue
except requests.exceptions.ConnectionError:
logging.exception(
"Failed to fetch image from {}".format(data["Labeled Data"]),
)
continue<|fim_middle|>from_json<|file_separator|> |