<|fim_prefix|>def <|fim_suffix|>(obj, event):
if IRelationBrokenEvent.providedBy(event):
# these trigger too much!
return
try:
tasks.file_edited.delay(obj)
except CannotGetPortalError:
pass<|fim_middle|>on_file_edit<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self, collection: "Collection[ITEM]", topological: bool = True
) -> None:
datastruct = self.deserialize_raw(collection.collection_types())
if topological and isinstance(datastruct, list): # type: ignore
datastruct.sort(key=lambda x: x.get("depth", 1))
try:
if isinstance(datastruct, dict):
# This is currently the corner case for the settings type.
collection.from_dict(datastruct) # type: ignore
elif isinstance(datastruct, list): # type: ignore
collection.from_list(datastruct)
except Exception as exc:
logger.error(
"Error while loading a collection: %s. Skipping collection %s!",
exc,
collection.collection_type(),
)<|fim_middle|>deserialize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
txt = self.py3.command_output(["ip", "address", "show"]).splitlines()
data = {}
for line in txt:
iface = self.iface_re.match(line)
if iface:
cur_iface = iface.group("iface")
if not self.remove_empty:
data[cur_iface] = {}
continue
ip4 = self.ip_re.match(line)
if ip4:
data.setdefault(cur_iface, {}).setdefault("ip4", []).append(ip4.group("ip4"))
continue
ip6 = self.ip6_re.match(line)
if ip6:
data.setdefault(cur_iface, {}).setdefault("ip6", []).append(ip6.group("ip6"))
continue
return data<|fim_middle|>get_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, x, codewords, scale):
num_codes, channels = paddle.shape(codewords)
reshaped_scale = scale.reshape([1, 1, num_codes])
expanded_x = paddle.tile(x.unsqueeze(2), [1, 1, num_codes, 1])
reshaped_codewords = codewords.reshape([1, 1, num_codes, channels])
scaled_l2_norm = paddle.multiply(
reshaped_scale,
(expanded_x - reshaped_codewords).pow(2).sum(axis=3))
return scaled_l2_norm<|fim_middle|>scaled_l2<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self):
"""Test that the measurement for the source is returned."""
junit_url2 = "https://junit2"
self.sources["junit2"] = {"type": "junit", "parameters": {"url": junit_url2}}
response = await self.collect(get_request_text=self.JUNIT_XML)
self.assert_measurement(response, value="2", api_url=junit_url2, landing_url=junit_url2, source_index=1)<|fim_middle|>test_multiple_sources<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(session_backend_config: "BaseBackendConfig") -> None:
@post("/create-session")
def create_session_handler(request: Request) -> None:
request.set_session({"foo": "bar"})
@post("/empty-session")
def empty_session_handler(request: Request) -> None:
request.set_session(Empty)
with create_test_client(
route_handlers=[create_session_handler, empty_session_handler],
middleware=[session_backend_config.middleware],
session_config=session_backend_config,
) as client:
client.post("/create-session")
client.post("/empty-session")
assert not client.get_session_data()<|fim_middle|>test_set_empty<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
c = self.get_cache()
c["foo"] = ""
c["bar"] = ""
c["baz"] = ""
c.clear()
eq_(0, len(c))
assert "foo" not in c
assert "baz" not in c
assert "bar" not in c<|fim_middle|>test_clear<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, client_port):
class ClientThread(threading.Thread):
def __init__(self, client_port):
threading.Thread.__init__(self)
self.client_port = client_port
def run(self):
class HandleRequestInput:
def RequestInput(self):
client_thread.requested_input = True
return 'RequestInput: OK'
def NotifyFinished(self, *args, **kwargs):
client_thread.notified_finished += 1
return 1
handle_request_input = HandleRequestInput()
from _pydev_bundle import pydev_localhost
self.client_server = client_server = SimpleXMLRPCServer((pydev_localhost.get_localhost(), self.client_port), logRequests=False)
client_server.register_function(handle_request_input.RequestInput)
client_server.register_function(handle_request_input.NotifyFinished)
client_server.serve_forever()
def shutdown(self):
return
self.client_server.shutdown()
client_thread = ClientThread(client_port)
client_thread.requested_input = False
client_thread.notified_finished = 0
client_thread.daemon = True
client_thread.start()
return client_thread<|fim_middle|>start_client_thread<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Test all value errors."""
signal = pf.signals.impulse(10)
# signal and sampling rate are both None
with pytest.raises(ValueError, match="Either signal or sampling_rate"):
pfilt.low_shelve_cascade(None, 1e3, sampling_rate=None)
# signal is of wrong type
with pytest.raises(ValueError, match="signal must be a pyfar Signal"):
pfilt.low_shelve_cascade(1, 1e3, sampling_rate=None)
# frequency_type has wrong value
with pytest.raises(ValueError, match="frequency_type is 'mid'"):
pfilt.low_shelve_cascade(signal, 1e3, 'mid', 10, -5)
# lower characteristic frequency is 0 Hz
with pytest.raises(ValueError, match="The lower characteristic frequency"):
pfilt.low_shelve_cascade(signal, 0, 'lower', 10, None, 2)
# lower characteristic frequency exceeds Nyquist
with pytest.raises(ValueError, match="The lower characteristic frequency"):
pfilt.low_shelve_cascade(signal, 40e3, 'lower', 10, None, 2)
# UPPER characteristic frequency exceeds Nyquist
with pytest.raises(ValueError, match="The upper characteristic frequency"):
pfilt.low_shelve_cascade(signal, 40e3, 'upper', 10, None, 2)<|fim_middle|>test_shelve_cascade_errors<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigServerResult:
"""
Get the config server and its properties.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230501preview:getConfigServer', __args__, opts=opts, typ=GetConfigServerResult).value
return AwaitableGetConfigServerResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))<|fim_middle|>get_config_server<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(root: str, split: Union[Tuple[str], str], language_pair: Tuple[str] = ("de", "en")):
"""Multi30k dataset
.. warning::
using datapipes is still currently subject to a few caveats. if you wish
to use this dataset with shuffling, multi-processing, or distributed
learning, please see :ref:`this note <datapipes_warnings>` for further
instructions.
For additional details refer to https://www.statmt.org/wmt16/multimodal-task.html#task1
Number of lines per split:
- train: 29000
- valid: 1014
- test: 1000
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: ('train', 'valid', 'test')
language_pair: tuple or list containing src and tgt language. Available options are ('de','en') and ('en', 'de')
:return: DataPipe that yields tuple of source and target sentences
:rtype: (str, str)
"""
assert len(language_pair) == 2, "language_pair must contain only 2 elements: src and tgt language respectively"
assert tuple(sorted(language_pair)) == (
"de",
"en",
), "language_pair must be either ('de','en') or ('en', 'de')"
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at https://github.com/pytorch/data"
)
url_dp = IterableWrapper([URL[split]])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root, split),
hash_dict={_filepath_fn(root, split): MD5[split]},
hash_type="sha256",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_compressed_dp_1, cache_compressed_dp_2 = cache_compressed_dp.fork(num_instances=2)
src_cache_decompressed_dp = cache_compressed_dp_1.on_disk_cache(
filepath_fn=partial(_decompressed_filepath_fn, root, split, language_pair, 0)
)
src_cache_decompressed_dp = (
FileOpener(src_cache_decompressed_dp, mode="b")
.load_from_tar()
.filter(partial(_filter_fn, split, language_pair, 0))
)
src_cache_decompressed_dp = src_cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
tgt_cache_decompressed_dp = cache_compressed_dp_2.on_disk_cache(
filepath_fn=partial(_decompressed_filepath_fn, root, split, language_pair, 1)
)
tgt_cache_decompressed_dp = (
FileOpener(tgt_cache_decompressed_dp, mode="b")
.load_from_tar()
.filter(partial(_filter_fn, split, language_pair, 1))
)
tgt_cache_decompressed_dp = tgt_cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
src_data_dp = FileOpener(src_cache_decompressed_dp, encoding="utf-8").readlines(
return_path=False, strip_newline=True
)
tgt_data_dp = FileOpener(tgt_cache_decompressed_dp, encoding="utf-8").readlines(
return_path=False, strip_newline=True
)
return src_data_dp.zip(tgt_data_dp).shuffle().set_shuffle(False).sharding_filter()<|fim_middle|>multi30k<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, labeled_tensor):<|fim_middle|>decode<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.__dict__.METHOD_NAME()<|fim_middle|>items<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> typing.List[str]:
"""
return list of templates names as string like "text.odt"
"""
try:
is_dir_exist(self._config.COLLABORATIVE_DOCUMENT_EDITION__FILE_TEMPLATE_DIR)
is_dir_readable(self._config.COLLABORATIVE_DOCUMENT_EDITION__FILE_TEMPLATE_DIR)
except NotADirectoryError as exc:
raise FileTemplateNotAvailable from exc
template_filenames = [
entry
for entry in os.listdir(self._config.COLLABORATIVE_DOCUMENT_EDITION__FILE_TEMPLATE_DIR)
if isfile(
join(
self._config.COLLABORATIVE_DOCUMENT_EDITION__FILE_TEMPLATE_DIR,
entry,
)
)
]
if not self._config.COLLABORATIVE_DOCUMENT_EDITION__ENABLED_EXTENSIONS:
return template_filenames
return [
filename
for filename in template_filenames
if os.path.splitext(filename)[1][1:]
in self._config.COLLABORATIVE_DOCUMENT_EDITION__ENABLED_EXTENSIONS
]<|fim_middle|>get_template_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
mock_subprocess,
mock_sh_zip,
mock_sh_cp,
mock_sh_find,
mock_walk,
mock_makedirs,
mock_chdir,
mock_copyfile,
mock_copystat,
):
fake_compile_dir = '/fake/compile/dir'
simulated_walk_result = [
["/fake_dir", ["__pycache__", "Lib"], ["README", "setup.py"]],
["/fake_dir/Lib", ["ctypes"], ["abc.pyc", "abc.py"]],
["/fake_dir/Lib/ctypes", [], ["util.pyc", "util.py"]],
]
mock_walk.return_value = simulated_walk_result
self.recipe.create_python_bundle(fake_compile_dir, self.arch)
recipe_build_dir = self.recipe.get_build_dir(self.arch.arch)
modules_build_dir = join(
recipe_build_dir,
'android-build',
'build',
'lib.linux{}-{}-{}'.format(
'2' if self.recipe.version[0] == '2' else '',
self.arch.command_prefix.split('-')[0],
self.recipe.major_minor_version_string
))
expected_sp_paths = [
modules_build_dir,
join(recipe_build_dir, 'Lib'),
self.ctx.get_python_install_dir(self.arch.arch),
]
for n, (sp_call, kw) in enumerate(mock_subprocess.call_args_list):
self.assertEqual(sp_call[0][-1], expected_sp_paths[n])
# we expect two calls to `walk_valid_filens`
self.assertEqual(len(mock_walk.call_args_list), 2)
mock_sh_zip.assert_called()
mock_sh_cp.assert_called()
mock_sh_find.assert_called()
mock_makedirs.assert_called()
mock_chdir.assert_called()
mock_copyfile.assert_called()
mock_copystat.assert_called()<|fim_middle|>test_create_python_bundle<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(value: object) -> str:
"""Convert *value* into a string; join its elements if it's an iterable."""
if isinstance(value, str) or not isinstance(value, Iterable):
return repr(value)
return ' '.join(repr(i) for i in value)<|fim_middle|>sanitize_value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# Test maintaining downstream notifier
class ChildObserver(DummyObserver):
def iter_observables(self, object):
yield object
instance = ClassWithSet()
instance.values = set()
notifier = DummyNotifier()
child_observer = ChildObserver(notifier=notifier)
graph = create_graph(
create_observer(notify=False, optional=False),
child_observer,
)
handler = mock.Mock()
call_add_or_remove_notifiers(
object=instance.values,
graph=graph,
handler=handler,
)
# when
observable = DummyObservable()
instance.values.add(observable)
# then
self.assertEqual(observable.notifiers, [notifier])
# when
instance.values.remove(observable)
# then
self.assertEqual(observable.notifiers, [])<|fim_middle|>test_maintain_notifier<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(calendar_public_id):
"""Handle events update for given calendar."""
with global_session_scope() as db_session:
try:
calendar = (
db_session.query(Calendar)
.filter(Calendar.public_id == calendar_public_id)
.with_for_update()
.one()
)
except NoResultFound:
return f"Couldn't find calendar '{calendar_public_id}'", 404
change_notifications: List[MsGraphChangeNotification] = cast(
MsGraphChangeNotificationCollection, request.json
)["value"]
handle_event_deletions(calendar, change_notifications)
calendar.handle_webhook_notification()
db_session.commit()
return "", 200<|fim_middle|>event_update<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_double_dash1<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
interactive_loader = InteractivesLoader(
base_path=self.BASE_PATH,
content_path="",
structure_filename="is-interactive-false.yaml"
)
interactive_loader.load()
interactive = Interactive.objects.get(slug="is-interactive-false")
self.assertEqual(
False,
interactive.is_interactive
)<|fim_middle|>test_interactives_is_interactive_false<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, port_num):
# Check for invalid port_num
if port_num < self._port_start or port_num > self._port_end:
return False
path = "/sys/bus/i2c/devices/{0}-0050/sfp_is_present"
port_ps = path.format(self._port_to_i2c_mapping[port_num])
reg_value = '0'
try:
reg_file = open(port_ps)
reg_value = reg_file.readline().rstrip()
reg_file.close()
except IOError as e:
print("Error: unable to access file: %s" % str(e))
return False
if reg_value == '1':
return True
return False<|fim_middle|>get_presence<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.run(sql10)
self.run(sql11)
self.run(sql12)
self.run(sql13)
self.run(sql14, lambda res: {"class":1, "data":[self.node, self.positionAsText],
"text": T_("lanes in {0}(-{1}+{2}), lanes out {3}(-{4}+{5})", res[2], res[3] or 0, res[4] or 0, res[5], res[6] or 0, res[7] or 0) })<|fim_middle|>analyser_osmosis_common<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
src: Tuple[str, ...], exclude: Optional[Pattern], extend_exclude: Optional[Pattern], skip_gitignore: bool
):
root = find_project_root(src)
if skip_gitignore:
gitignore = None
else:
gitignore = get_gitignore(root)
sources = set()
for s in src:
if s == "-":
sources.add("-")
continue
path = Path(s).resolve()
if not should_parse_path(path, exclude, extend_exclude, gitignore):
continue
if path.is_file():
sources.add(path)
elif path.is_dir():
sources.update(iterate_dir((path,), exclude, extend_exclude, gitignore))
elif s == "-":
sources.add(path)
return sources<|fim_middle|>get_paths<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
indicators_or_incidents: Iterator[Union[IncidentType, IndicatorType]]
) -> Dict[str, List[str]]:
"""
Iterates through incident/indicator types in the pack, builds a
dict of {layoutID: [List of incident/indicator type IDs]}.
where the list of the incident/indicator type IDs are the list of all the incident/indicator
types whom layout field has corresponding ID to the layoutID field.
Returns:
(Dict[str, List[str]): Dict of {layoutID: [List of incident/indicator type IDs]}.
"""
result: Dict[str, List[str]] = dict()
for incident_or_indicator in indicators_or_incidents:
layout_id = incident_or_indicator.get("layout")
id_ = incident_or_indicator.get("id")
if not layout_id or not id_:
continue
result[layout_id] = result.get(layout_id, []) + [id_]
return result<|fim_middle|>layout_to_indicators_or_incidents_dict<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Allows to connect mesh input to the operator.
Skin mesh region expected
Parameters
----------
my_mesh : MeshedRegion
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mapping.solid_to_skin()
>>> op.inputs.mesh.connect(my_mesh)
>>> # or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh<|fim_middle|>mesh<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
_tearDown_
Tear things down and go home
"""
self.testInit.clearDatabase()
self.testInit.tearDownCouch()
return<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(state: Any, path: str, **kwargs):
"""Like torch.save but can save to other locations (e.g. s3:// , gs://).
Args:
state: State object to save
path: Any path or url supported by fsspec.
**kwargs: Keyword arguments forwarded to torch.save.
"""
with fsspec.open(path, "wb") as f:
torch.save(state, f, **kwargs)<|fim_middle|>save_fsspec<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls):
cls.test_db_name = f"{POSTGRES_DB}_testdb".lower()
# Connect with database
# NOTE: Not using context manager because it will start a transaction.
# https://stackoverflow.com/a/68112827/3436502
con = psycopg2.connect(
database=POSTGRES_DB,
host=POSTGRES_HOST,
password=POSTGRES_PASSWORD,
port=POSTGRES_PORT,
user=POSTGRES_USER,
)
# Create a new db. Drop if already exists.
# TODO: Don't drop here, just clear the data.
# and DROP after all TestClass are done.
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with con.cursor() as cursor:
cursor.execute(f"DROP DATABASE IF EXISTS {cls.test_db_name};")
cursor.execute(f"CREATE DATABASE {cls.test_db_name};")
con.commit()
con.close()
# Connect to newly created DB
cls.db_con = psycopg2.connect(
database=cls.test_db_name,
host=POSTGRES_HOST,
password=POSTGRES_PASSWORD,
port=POSTGRES_PORT,
user=POSTGRES_USER,
)
# Setup newly created DB
with open(os.path.join(BASE_DIR, "set_up_db.sql")) as fp:
cls.__db_query(fp.read())
# XXX: Force overwrite main postgres DB helper methods.
# (To use newly creatd test db)
cls.original_auth_db_connection = auth.postgresDB._db_connection
cls.original_auth__del__ = auth.postgresDB.__del__
auth.postgresDB._db_connection = cls.db_con
auth.postgresDB.__del__ = cls.no_op
cls.db = auth.postgresDB()<|fim_middle|>create_new_test_db<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(report_type, report_slug, user_id, domain, request_data):
config = ReportConfig()
# see ReportConfig.query_string()
object.__setattr__(config, '_id', 'dummy')
config.name = _("Emailed report")
config.report_type = report_type
config.report_slug = report_slug
config.owner_id = user_id
config.domain = domain
GET = dict(six.iterlists(request_data['GET']))
exclude = ['startdate', 'enddate', 'subject', 'send_to_owner', 'notes', 'recipient_emails']
filters = {}
for field in GET:
if field == 'params':
params = unquote(GET.get(field)[0])
params = params.split('&')
for param in params:
key, value = tuple(param.split('=', 1))
if key in filters:
filters[key] = filters[key] + [value] if isinstance(filters[key], list) \
else [filters[key]] + [value]
else:
filters[key] = value
if field not in exclude:
filters[field] = GET.get(field) or filters[field]
config.filters = filters
if 'startdate' in config.filters and report_slug != 'project_health':
config.start_date = datetime.strptime(config.filters['startdate'], '%Y-%m-%d').date()
if 'enddate' in config.filters:
config.date_range = 'range'
config.end_date = datetime.strptime(config.filters['enddate'], '%Y-%m-%d').date()
else:
config.date_range = 'since'
return config<|fim_middle|>create_config_for_email<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)<|fim_middle|>source<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
"""
The value specified as the prerelease is used in version comparisons.
"""
va = Version("whatever", 1, 0, 0, prerelease=1)
vb = Version("whatever", 1, 0, 0, prerelease=2)
self.assertTrue(va < vb)
self.assertTrue(vb > va)
self.assertTrue(va <= vb)
self.assertTrue(vb >= va)
self.assertTrue(va != vb)
self.assertTrue(vb == Version("whatever", 1, 0, 0, prerelease=2))
self.assertTrue(va == va)<|fim_middle|>test_comparing_prereleases<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(parameters):
"""
Creates a Resilient Backpropagation optimizer from the PyTorch `torch.optim` module using the input parameters.
Args:
parameters (dict): A dictionary containing the input parameters for the optimizer.
Returns:
optimizer (torch.optim.Rprop): A Resilient Backpropagation optimizer.
"""
# Create the optimizer using the input parameters
return Rprop(
parameters["model_parameters"],
lr=parameters.get("learning_rate"),
etas=parameters["optimizer"].get("etas", (0.5, 1.2)),
step_sizes=parameters["optimizer"].get("step_sizes", (1e-7, 50)),
)<|fim_middle|>rprop<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(conn: SAConnection, file_id: SimcoreS3FileID) -> bool:
return bool(
await conn.scalar(
sa.select(sa.func.count())
.select_from(file_meta_data)
.where(file_meta_data.c.file_id == file_id)
)
== 1
)<|fim_middle|>exists<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Accessor for thread_id.
Returns:
The thread_id
"""
return self._thread_id<|fim_middle|>thread_id<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if six.PY3:
sys.stdout.buffer.write(self._pkcs12(self.options.password)) # pylint: disable=no-member
else:
print(self._pkcs12(self.options.password))<|fim_middle|>pkcs12<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, node: ast.Subscript) -> str:
def is_simple_tuple(value: ast.expr) -> bool:
return (
isinstance(value, ast.Tuple)
and bool(value.elts)
and not any(isinstance(elt, ast.Starred) for elt in value.elts)
)
if is_simple_tuple(node.slice):
elts = ", ".join(self.visit(e)
for e in node.slice.elts) # type: ignore[attr-defined]
return f"{self.visit(node.value)}[{elts}]"
return f"{self.visit(node.value)}[{self.visit(node.slice)}]"<|fim_middle|>visit_subscript<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
ctx,
vstudio_root=None,
omnibus_base_dir="c:\\omnibus-ruby",
arch="x64",
major_version='7',
debug=False,
rebuild=False,
):
if os.getenv("OMNIBUS_BASE_DIR"):
omnibus_base_dir = os.getenv("OMNIBUS_BASE_DIR")
if rebuild:
clean(ctx, arch, debug)
build(ctx, vstudio_root, arch, major_version, debug)
for file in glob.glob(BIN_PATH + "\\customaction*"):
shutil.copy2(
file,
f"{omnibus_base_dir}\\src\\datadog-agent\\src\\github.com\\DataDog\\datadog-agent\\bin\\agent\\{os.path.basename(file)}",
)
cmd = "omnibus\\resources\\agent\\msi\\localbuild\\rebuild.bat"
res = ctx.run(cmd, warn=True)
if res.exited is None or res.exited > 0:
print(
color_message(
f"Failed to run \"{cmd}\"",
"orange",
)
)<|fim_middle|>package<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
data = _form_data()
form = SetUpDemoOrgDetails(data=data)
previous_step = "identity"
current_step = "demo.org_details"
next_step = "demo.primary_purpose"
if not _validate_fields_present(current_step, data):
return redirect(url_for(".contact"))
if request.method == "POST" and form.validate_on_submit():
session[SESSION_FORM_KEY] = form.data
return redirect(url_for(".demo_primary_purpose"))
return render_template(
"views/contact/demo-org-details.html",
form=form,
url=url_for(".demo_organization_details"),
current_step=current_step,
next_step=next_step,
previous_step=previous_step,
step_hint=1,
total_steps_hint=2,
**_labels(previous_step, current_step, form.support_type.data),
)<|fim_middle|>demo_organization_details<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
args = build_argparser().parse_args()
core = Core()
model = core.read_model(args.model)
input_tensor_name = 'input_1:0'
output_candidates = [node.get_any_name() for node in model.outputs if node.shape[3] == 1]
if len(output_candidates) != 1:
raise RuntimeError("The model expects output tensor with 1 channel")
output_tensor_name = output_candidates[0]
compiled_model = core.compile_model(model, args.device)
infer_request = compiled_model.create_infer_request()
# Hybrid-CS-Model-MRI/Data/stats_fs_unet_norm_20.npy
stats = np.array([2.20295299e-01, 1.11048916e+03], dtype=np.float32)
# Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy
var_sampling_mask = np.load(args.pattern)
logger.info(f'Sampling ratio: {1.0 - var_sampling_mask.sum() / var_sampling_mask.size}')
data = np.load(args.input)
num_slices, height, width = data.shape[0], data.shape[1], data.shape[2]
pred = np.zeros((num_slices, height, width), dtype=np.uint8)
data /= np.sqrt(height * width)
logger.info('Compute...')
start = time.time()
for slice_id, kspace in enumerate(data):
kspace = kspace.copy()
# Apply sampling
kspace[var_sampling_mask] = 0
kspace = (kspace - stats[0]) / stats[1]
# Forward through network
input = np.expand_dims(kspace.transpose(2, 0, 1), axis=0)
infer_request.infer(inputs={input_tensor_name: input})
output = infer_request.get_tensor(output_tensor_name).data[:]
output = output.reshape(height, width)
# Save predictions
pred[slice_id] = cv.normalize(output, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
logger.info('Elapsed time: %.1f seconds' % (time.time() - start))
WIN_NAME = 'MRI reconstruction with OpenVINO'
def callback(slice_id):
kspace = data[slice_id]
img = kspace_to_image(kspace)
kspace[var_sampling_mask] = 0
masked = kspace_to_image(kspace)
rec = pred[slice_id]
# Add a header
border_size = 20
render = cv.hconcat((img, masked, rec))
render = cv.copyMakeBorder(render, border_size, 0, 0, 0, cv.BORDER_CONSTANT, value=255)
cv.putText(render, 'Original', (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
cv.putText(render, 'Sampled (PSNR %.1f)' % cv.PSNR(img, masked), (width, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
cv.putText(render, 'Reconstructed (PSNR %.1f)' % cv.PSNR(img, rec), (width*2, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
cv.imshow(WIN_NAME, render)
cv.waitKey(1)
if not args.no_show:
cv.namedWindow(WIN_NAME, cv.WINDOW_AUTOSIZE)
cv.createTrackbar('Slice', WIN_NAME, num_slices // 2, num_slices - 1, callback)
callback(num_slices // 2) # Trigger initial visualization
cv.waitKey()<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(argv):
output = []
for part in argv:
if len(part) > 1 and part[0] == '-' and part[1] != '-':
for c in part[1:]:
output.append('-' + c)
else:
output.append(part)
return output<|fim_middle|>split_joined_options<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(expr_text, valid_variables):
"""parses the simple criteria expression syntax used in
dependency specifications.
Returns an ExprNode instance that can be evaluated like this:
```
expr = parse_expr("os=windows")
ok = expr.eval({
"os": "windows"
})
```
Whitespace is allowed between tokens. The following terms
are recognized:
KEY = VALUE # Evaluates to True if ctx[KEY] == VALUE
not(EXPR) # Evaluates to True if EXPR evaluates to False
# and vice versa
all(EXPR1, EXPR2, ...) # Evaluates True if all of the supplied
# EXPR's also evaluate True
any(EXPR1, EXPR2, ...) # Evaluates True if any of the supplied
# EXPR's also evaluate True, False if
# none of them evaluated true.
"""
p = Parser(expr_text, valid_variables)
return p.parse()<|fim_middle|>parse_expr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
return pulumi.get(self, "api_key_id")<|fim_middle|>api_key_id<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(value):
"""
Convert a module name or string with underscores and periods to camel case.
:param value: the string to convert
:type value: str
:returns: the value converted to camel case.
"""
return ''.join(x.capitalize() if x else '_' for x in
re.split('[._]+', value))<|fim_middle|>camelcase<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, data):
"""Remove a byte array from the set."""
data_hash = hashlib.sha256(data).digest()
self.denominator = (self.denominator * data_to_num3072(data_hash)) % self.MODULUS<|fim_middle|>remove<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(monkeypatch):
"""Mock the RecoveryDialog in the editor plugin."""
mock = MagicMock()
monkeypatch.setattr('spyder.plugins.editor.utils.autosave.RecoveryDialog',
mock)
return mock<|fim_middle|>mock_recovery_dialog<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
best = get_best_language("fr-FR, es;q=0.8")
self.assertEqual("fr", best)<|fim_middle|>test_fr_fr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The Elastic deployment status.
"""
return pulumi.get(self, "status")<|fim_middle|>status<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return getattr(self.dataset, "supports_prefetch", False)<|fim_middle|>supports_prefetch<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(spark_session, tmp_path):
df = pd.DataFrame([[1, 2, 3], [1, 2, 3]], columns=["a", "b", "c"])
df_spark = spark_session.createDataFrame(df)
df_spark.write.format("delta").mode("overwrite").saveAsTable(
"default.temp_delta_too_many_inputs", path=tmp_path
)
with pytest.raises(MlflowException, match='Must specify exactly one of "path" or "table_name"'):
DeltaDatasetSource(path=tmp_path, delta_table_name="temp_delta_too_many_inputs")<|fim_middle|>test_delta_dataset_source_too_many_inputs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request):
"""
Unset read flag for a list of feedback items
:param request: Object representing the user call
:type request: ~django.http.HttpRequest
:return: A redirection to the admin feedback list
:rtype: ~django.http.HttpResponseRedirect
"""
selected_ids = request.POST.getlist("selected_ids[]")
selected_feedback = Feedback.objects.filter(id__in=selected_ids, is_technical=True)
for feedback in selected_feedback:
invalidate_obj(feedback)
if hasattr(feedback, "feedback_ptr"):
invalidate_obj(feedback.feedback_ptr)
selected_feedback.update(read_by=None)
logger.debug(
"Feedback objects %r marked as unread by %r", selected_ids, request.user
)
messages.success(request, _("Feedback was successfully marked as unread"))
return redirect("admin_feedback")<|fim_middle|>mark_admin_feedback_as_unread<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls):
# TODO: Use TestCase.setUpTestData() instead in Django 1.8+.
super().METHOD_NAME()
cls.calendar = Calendar.objects.create(url=EVENTS_CALENDAR_URL, slug='python-events')<|fim_middle|>set_up_class<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(view_id, name, args): ...<|fim_middle|>on_text_command<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, data):
image = data.get("image")
media_url = data.get("media_url")
if not image and not media_url:
raise ValidationError(
{
"input": ValidationError(
"Image or external URL is required.",
code=ProductErrorCode.REQUIRED.value,
)
}
)
if image and media_url:
raise ValidationError(
{
"input": ValidationError(
"Either image or external URL is required.",
code=ProductErrorCode.DUPLICATED_INPUT_ITEM.value,
)
}
)<|fim_middle|>validate_input<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, requestParameters):
return None<|fim_middle|>get_authentication_method_claims<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(**options):
with fs.cd(options["project_dir"]):
platform = None
project_options = {}
try:
project_options = get_project_options(options["environment"])
if "platform" in project_options:
platform = PlatformFactory.new(project_options["platform"])
except NotPlatformIOProjectError:
pass
options = apply_project_monitor_options(options, project_options)
register_filters(platform=platform, options=options)
options["port"] = SerialPortFinder(
board_config=platform.board_config(project_options.get("board"))
if platform and project_options.get("board")
else None,
upload_protocol=project_options.get("upload_protocol"),
ensure_ready=True,
).find(initial_port=options["port"])
if options["menu_char"] == options["exit_char"]:
raise exception.UserSideException(
"--exit-char can not be the same as --menu-char"
)
# check for unknown filters
if options["filters"]:
known_filters = set(get_available_filters())
unknown_filters = set(options["filters"]) - known_filters
if unknown_filters:
options["filters"] = list(known_filters & set(options["filters"]))
click.secho(
("Warning! Skipping unknown filters `%s`. Known filters are `%s`")
% (", ".join(unknown_filters), ", ".join(sorted(known_filters))),
fg="yellow",
)
start_terminal(options)<|fim_middle|>device_monitor_cmd<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(logits):
"""Performs the sparsemax operation when axis=-1."""
shape_op = tf.shape(logits)
obs = tf.math.reduce_prod(shape_op[:-1])
dims = shape_op[-1]
# In the paper, they call the logits z.
# The mean(logits) can be substracted from logits to make the algorithm
# more numerically stable. the instability in this algorithm comes mostly
# from the z_cumsum. Substacting the mean will cause z_cumsum to be close
# to zero. However, in practise the numerical instability issues are very
# minor and substacting the mean causes extra issues with inf and nan
# input.
# Reshape to [obs, dims] as it is almost free and means the remanining
# code doesn't need to worry about the rank.
z = tf.reshape(logits, [obs, dims])
# sort z
z_sorted, _ = tf.nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = tf.math.cumsum(z_sorted, axis=-1)
k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
# because the z_check vector is always [1,1,...1,0,0,...0] finding the
# (index + 1) of the last `1` is the same as just summing the number of 1.
k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)
# calculate tau(z)
# If there are inf values or all values are -inf, the k_z will be zero,
# this is mathematically invalid and will also cause the gather_nd to fail.
# Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then
# fixed later (see p_safe) by returning p = nan. This results in the same
# behavior as softmax.
k_z_safe = tf.math.maximum(k_z, 1)
indices = tf.stack([tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)
tau_sum = tf.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)
# calculate p
p = tf.math.maximum(tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))
# If k_z = 0 or if z = nan, then the input is invalid
p_safe = tf.where(
tf.expand_dims(
tf.math.logical_or(tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
axis=-1,
),
tf.fill([obs, dims], tf.cast(float("nan"), logits.dtype)),
p,
)
# Reshape back to original size
p_safe = tf.reshape(p_safe, shape_op)
return p_safe<|fim_middle|>compute_2d_sparsemax<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
self.assertTrue(env.db_home is None)
env.open(self.homeDir, db.DB_CREATE)
if sys.version_info[0] < 3 :
self.assertEqual(self.homeDir, env.db_home)
else :
self.assertEqual(bytes(self.homeDir, "ascii"), env.db_home)<|fim_middle|>test02_db_home<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
chassis_id=None,
community=None,
contact=None,
location=None,
test=False,
commit=True,
**kwargs
): # pylint: disable=unused-argument
"""
Updates the SNMP configuration.
:param chassis_id: (optional) Chassis ID
:param community: (optional) A dictionary having the following optional keys:
- acl (if any policy / ACL need to be set)
- mode: rw or ro. Default: ro
:param contact: Contact details
:param location: Location
:param test: Dry run? If set as True, will apply the config, discard and return the changes. Default: False
:param commit: Commit? (default: True) Sometimes it is not needed to commit the config immediately
after loading the changes. E.g.: a state loads a couple of parts (add / remove / update)
and would not be optimal to commit after each operation.
Also, from the CLI when the user needs to apply the similar changes before committing,
can specify commit=False and will not discard the config.
:raise MergeConfigException: If there is an error on the configuration sent.
:return a dictionary having the following keys:
- result (bool): if the config was applied successfully. It is `False` only
in case of failure. In case there are no changes to be applied and
successfully performs all operations it is still `True` and so will be
the `already_configured` flag (example below)
- comment (str): a message for the user
- already_configured (bool): flag to check if there were no changes applied
- diff (str): returns the config changes applied
CLI Example:
.. code-block:: bash
salt 'edge01.lon01' snmp.update_config location="Greenwich, UK" test=True
Output example (for the CLI example above):
.. code-block:: yaml
edge01.lon01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit snmp]
- location "London, UK";
+ location "Greenwich, UK";
result:
True
"""
dic = {"template_name": "snmp_config", "test": test, "commit": commit}
if chassis_id:
dic["chassis_id"] = chassis_id
if community:
dic["community"] = community
if contact:
dic["contact"] = contact
if location:
dic["location"] = location
dic["inherit_napalm_device"] = napalm_device # pylint: disable=undefined-variable
return __salt__["net.load_template"](**dic)<|fim_middle|>update_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, path):
pass<|fim_middle|>load_checkpoint<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# SAME TEST AS OTHER SPLITTERS
X = pd.DataFrame(np.random.uniform(low=0.0, high=100, size=(25, 10)))
y = pd.Series(np.random.uniform(low=0.0, high=100, size=(25,)))
splitter = LeaveOutTwinCV(threshold=0, auto_threshold=True, parallel_run=parallel_run)
model = SklearnModel(model='LinearRegression')
splitter.evaluate(X=X, y=y, models=[model], groups=None, savepath=os.getcwd(), plots=list())
for d in splitter.splitdirs:
self.assertTrue(os.path.exists(d))
shutil.rmtree(d)
# Setup to check exact twins
splitter = LeaveOutTwinCV(threshold=0, auto_threshold=True)
model = SklearnModel(model='LinearRegression')
n_datapoints = 25
n_features = 5
# CASE 1: every datapoint is an exact twin, twins in both X and y
X = pd.DataFrame(np.random.choice(range(-n_features*n_datapoints, n_features*n_datapoints), size=(n_datapoints, n_features), replace=False)) # This generates random numbers without repetitions
# Pull out last row to be y
y = X[n_features-1]
X.drop(columns=n_features-1, inplace=True)
# Create duplicates
X = X.append(X.copy(), ignore_index=True)
y = y.append(y.copy(), ignore_index=True)
ret = splitter.split(X, y)
numTwins = n_datapoints * 2
for r in ret:
self.assertTrue(len(r[0]) == numTwins or len(r[1]) == numTwins) # check that everything was counted as a twin in each split
# CASE 2: half datapoint is an exact twin, twins in both X and y
X = pd.DataFrame(np.random.choice(range(-n_features*n_datapoints, n_features*n_datapoints), size=(n_datapoints, n_features), replace=False))
y = X[n_features-1]
X.drop(columns=n_features-1, inplace=True)
X = X.append(X[0:int(n_datapoints/2)].copy(), ignore_index=True)
y = y.append(y[0:int(n_datapoints/2)].copy(), ignore_index=True)
ret = splitter.split(X, y)
numTwins = int(n_datapoints/2)*2
for r in ret:
self.assertTrue(len(r[0]) == numTwins or len(r[1]) == numTwins) # check correct number of twins
# Setup to check twins generated by sci kit learn blobs
splitter = LeaveOutTwinCV(threshold=2, debug=False, ceiling=1, auto_threshold=True)
model = SklearnModel(model='LinearRegression')
n_datapoints = 25
n_features = 5
cen = []
for i in range(3):
for j in range(3):
cen.append((i*10, j*10))
v = sk.make_blobs(n_samples=100, n_features=2, centers=cen, cluster_std=.1, shuffle=True, random_state=None)
X = pd.DataFrame(v[0])
y = pd.DataFrame(np.random.choice(range(-500, 500), size=(100, 1), replace=False))
ret = splitter.split(X, y)
numTwins = 100
for r in ret:
self.assertTrue(len(r[0]) == numTwins or len(r[1]) == numTwins)
# plt.scatter(v[0][:, 0], v[0][:, 1], c=v[1])
# plt.savefig("./test.png")
# self.assertTrue(False, msg="In Progress")
return<|fim_middle|>test_leaveouttwins<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.spec = {"modules": {"PlanetPhysicalModel": "PlanetPhysicalModel"}}
self.kscrit = 0.01
self.nsamp = 10000<|fim_middle|>set_up<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Returns text detected in the photo (macOS 13+ / Photos 8+ only)"""
if self._photo._db._photos_ver < 8:
return []
return self._get_text_for_category(self._categories.DETECTED_TEXT)<|fim_middle|>detected_text<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request<|fim_middle|>prepare_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, images):
out_cls_shape = (images.shape[0], self.num_classes * self.num_anchors) + images.shape[-self.spatial_dims :]
out_box_reg_shape = (images.shape[0], 2 * self.spatial_dims * self.num_anchors) + images.shape[
-self.spatial_dims :
]
return {self.cls_key: [torch.randn(out_cls_shape)], self.box_reg_key: [torch.randn(out_box_reg_shape)]}<|fim_middle|>forward<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(salt_ssh_cli, grains_filter_by_default):
"""
test grains.filter_by during template rendering with salt-ssh and default parameter
"""
ret = salt_ssh_cli.run("state.show_sls", "grains_filter_by_default")
assert ret.returncode == 0
assert ret.data
rendered = ret.data["grains-filter-by"]["file"][1]["context"]
assert "has_common" in rendered
assert "merged" in rendered
assert "defaulted" in rendered<|fim_middle|>test_grains_filter_by_default_jinja<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(node: Node, params: QueryStateDict, result: Result) -> None:
result.second_num = int(parse_num(node, result._canonical))<|fim_middle|>q_counting_second_number<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
opts = self.parser.parse_main_args(['-h'])
self.assertTrue(opts.help)<|fim_middle|>test_help_option_set<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(root: Path, file_list: Iterable[str]) -> List[ElfFile]:
"""Return a list of ELF files from file_list prepended with root.
:param str root: the root directory from where the file_list is generated.
:param file_list: a list of file in root.
:returns: a list of ElfFile objects.
"""
elf_files: Set[ElfFile] = set()
for part_file in file_list:
# Filter out object (*.o) files-- we only care about binaries.
if part_file.endswith(".o"):
continue
# No need to crawl links-- the original should be here, too.
path = Path(root, part_file)
if os.path.islink(path):
emit.debug(f"Skipped link {path!r} while finding dependencies")
continue
# Ignore if file does not have ELF header.
if not ElfFile.is_elf(path):
continue
try:
elf_file = ElfFile(path=path)
except ELFError:
# Ignore invalid ELF files.
continue
except errors.CorruptedElfFile as exception:
# Log if the ELF file seems corrupted
emit.message(str(exception))
continue
# If ELF has dynamic symbols, add it.
if elf_file.needed:
elf_files.add(elf_file)
return sorted(elf_files, key=lambda x: x.path)<|fim_middle|>get_elf_files_from_list<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_sdf_with_sdf_initiated_checkpointing<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
sess = fixture_session()
pa = with_polymorphic(Person, [Engineer])
pa_alias = with_polymorphic(Person, [Engineer], aliased=True)
eq_(
[
row
for row in sess.query(
pa.name,
pa.Engineer.primary_language,
pa_alias.name,
pa_alias.Engineer.primary_language,
)
.join(
pa_alias,
or_(
pa.Engineer.primary_language
== pa_alias.Engineer.primary_language,
and_(
pa.Engineer.primary_language == None, # noqa
pa_alias.Engineer.primary_language == None,
pa.person_id > pa_alias.person_id,
),
),
)
.order_by(pa.name, pa_alias.name)
],
[
("dilbert", "java", "dilbert", "java"),
("dogbert", None, "pointy haired boss", None),
("vlad", "cobol", "vlad", "cobol"),
("wally", "c++", "wally", "c++"),
],
)<|fim_middle|>test_join_to_join_columns<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Serialize to a JSON string
"""
return json.dumps(self._values, cls=_MetricsJSONEncoder)<|fim_middle|>to_json<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, ip, port, timeout=0.001):
"""
Sets up the send socket for the actor.
"""
self.send_socket = self.context.socket(PUB)
# bind to the socket according to the ip and port
self.address = "tcp://{}:{}".format(ip, port)
self.send_socket.bind(self.address)
time.sleep(timeout)<|fim_middle|>set_send_socket<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, text_name, data):
texts = bpy.data.texts.items()
exists = False
for t in texts:
if bpy.data.texts[t[0]].name == text_name:
exists = True
break
if not exists:
bpy.data.texts.new(text_name)
bpy.data.texts[text_name].clear()
bpy.data.texts[text_name].write(data)<|fim_middle|>write_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, translations, delays, scores):
if self.output_files["text"] is not None:
with open(self.output_files["text"], "w") as f:
for line in translations:
f.write(line + "\n")
if self.output_files["delay"] is not None:
with open(self.output_files["delay"], "w") as f:
for i, delay in enumerate(delays):
f.write(
json.dumps({"src_len": self.src_lengths()[i], "delays": delay})
+ "\n"
)
with open(self.output_files["scores"], "w") as f:
for key, value in scores.items():
f.write(f"{key}, {value}\n")<|fim_middle|>write_results_to_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Accessor for message type."""
return DIDCommPrefix.qualify_current(V20CredPreview.Meta.message_type)<|fim_middle|>type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>( module=None ):
comm = MPI.COMM_WORLD
fmod = epyccel( pmod, comm=comm )
if module:
module.comm = comm
module.fmod = fmod
else:
globals().update( locals() )<|fim_middle|>setup_module<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# collect commands output only if the openstack-heat-api service
# is running
in_container = self.container_exists('.*heat_api')
if self.is_service_running(self.service_name) or in_container:
heat_config = ""
# if containerized we need to pass the config to the cont.
if in_container:
heat_config = "--config-dir " + self.var_puppet_gen + \
"_api/etc/heat/"
self.add_cmd_output(
"heat-manage " + heat_config + " db_version",
suggest_filename="heat_db_version"
)
vars_all = [p in os.environ for p in [
'OS_USERNAME', 'OS_PASSWORD']]
vars_any = [p in os.environ for p in [
'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
if not (all(vars_all) and any(vars_any)):
self.soslog.warning("Not all environment variables set. "
"Source the environment file for the user "
"intended to connect to the OpenStack "
"environment.")
else:
self.add_cmd_output("openstack stack list")
if self.get_option("all_logs"):
self.add_copy_spec([
"/var/log/heat/",
])
else:
self.add_copy_spec([
"/var/log/heat/*.log",
])
self.add_copy_spec([
"/etc/heat/",
self.var_puppet_gen + "/etc/heat/",
self.var_puppet_gen + "/etc/my.cnf.d/tripleo.cnf",
self.var_puppet_gen + "_api/etc/heat/",
self.var_puppet_gen + "_api/etc/httpd/conf/",
self.var_puppet_gen + "_api/etc/httpd/conf.d/",
self.var_puppet_gen + "_api/etc/httpd/conf.modules.d/*.conf",
self.var_puppet_gen + "_api/var/spool/cron/heat",
self.var_puppet_gen + "_api_cfn/etc/heat/",
self.var_puppet_gen + "_api_cfn/etc/httpd/conf/",
self.var_puppet_gen + "_api_cfn/etc/httpd/conf.d/",
self.var_puppet_gen + "_api_cfn/etc/httpd/conf.modules.d/*.conf",
self.var_puppet_gen + "_api_cfn/var/spool/cron/heat",
])
self.add_file_tags({
"/var/log/heat/heat-engine.log": "heat_engine_log"
})<|fim_middle|>setup<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")<|fim_middle|>type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(text):
"""
>>> get_desc_from_manual_page_line("r.example - This is a description<br>")
'This is a description'
"""
# this matches the dash at the beginning
text = text.split(" - ", 1)[1]
# this matches a tag at the end
# (supposing no tags in the description and < represented as &lt;
text = text.split("<", 1)[0]
return text<|fim_middle|>get_desc_from_manual_page_line<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, model_instances):
# Include records needed by the initial value of the field plus any added
# via the extra_prefetch property.
prefetch_set = set(model_instances) if model_instances else set()
prefetch_set = prefetch_set.union(set(self.extra_prefetch)) # eliminate duplicates
return sorted(
select2_id_name(list(prefetch_set)),
key=lambda item: unidecode.unidecode(item['text']),
)<|fim_middle|>make_select2_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, grant, oauth_request):
grant.validate_token_request(oauth_request)<|fim_middle|>test_does_not_raise_for_valid_input<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(location):
return (filetype.is_file(location)
and fileutils.file_name(location).lower() == '+compact_manifest')<|fim_middle|>is_freebsd_manifest<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(string_items):
env2 = Environment(autoescape=True, enable_async=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render(items=string_items) == "<foo><span>foo</span>"<|fim_middle|>test_join_string_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
file = BytesIO()
image = Image.new('RGBA', size=(50, 50), color=(155, 0, 0))
image.save(file, 'png')
file.name = 'test.png'
file.seek(0)
return file<|fim_middle|>create_test_image<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(vector, p=2):
"""
Get p-norm of this vector
Parameters
----------
vector : numpy array, Input vector
p: int, p-norm
"""
if p < 1:
raise ValueError('p should larger or equal to 1 in p-norm')
if type(vector).__name__ != 'ndarray':
vector = np.array(vector)
return np.linalg.METHOD_NAME(vector, p)<|fim_middle|>norm<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, message_type):<|fim_middle|>on_health_check<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self,) -> List[Dict]:
'''
Lists the available policies.
Returns:
List[Dict]:
List of policy objects.
Example:
>>> for policy in nessus.policies.list():
... print(policy)
'''
return self._get()['policies']<|fim_middle|>list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(what):
return "\n".join(sorted(what.split("\n")))<|fim_middle|>sort<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
rd = fixed.SchemaDecoder(['comment', 'start', 'length', 'column'])
(column, start, length) = rd(['This is a comment', '0', '1', 'column_name'])
self.assertEqual(False, rd.one_based)
self.assertEqual('column_name', column)
self.assertEqual(0, start)
self.assertEqual(1, length)
(column, start, length) = rd(['This is another comment', '1', '5', 'column_name2'])
self.assertEqual(False, rd.one_based)
self.assertEqual('column_name2', column)
self.assertEqual(1, start)
self.assertEqual(5, length)
(column, start, length) = rd(['yet another comment', '9', '14', 'column_name3'])
self.assertEqual(False, rd.one_based)
self.assertEqual('column_name3', column)
self.assertEqual(9, start)
self.assertEqual(14, length)<|fim_middle|>test_schema_decoder_in_action<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
return pulumi.get(self, "zone")<|fim_middle|>zone<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Clean up the test environment"""<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(row, state, tap_stream_id, replication_key_name):
replication_key_value = row.get(replication_key_name)
if replication_key_value:
replication_key_type = replication_key_value.__class__.__name__
replication_key_value_bookmark = common.class_to_string(replication_key_value,
replication_key_type)
state = singer.write_bookmark(state,
tap_stream_id,
'replication_key_value',
replication_key_value_bookmark)
state = singer.write_bookmark(state,
tap_stream_id,
'replication_key_type',
replication_key_type)<|fim_middle|>update_bookmark<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls):
"""
Decorator for TestCase classes which copies data from Postgres into an
in-memory MatrixStore instance. This allows us to re-use database fixtures,
and the tests designed to work with those fixtures, to test
MatrixStore-powered code.
"""
# These methods have been decorated with `@classmethod` so we need to use
# `__func__` to get a reference to the original, undecorated method
decorated_setUpClass = cls.setUpClass.__func__
decorated_tearDownClass = cls.tearDownClass.__func__
def setUpClass(inner_cls):
decorated_setUpClass(inner_cls)
matrixstore = matrixstore_from_postgres()
stop_patching = patch_global_matrixstore(matrixstore)
# Have to wrap this in a staticmethod decorator otherwise Python thinks
# we're trying to create a new class method
inner_cls._stop_patching = staticmethod(stop_patching)
new_settings = override_settings(
CACHES={
"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}
}
)
new_settings.enable()
inner_cls._new_settings = new_settings
def tearDownClass(inner_cls):
inner_cls._stop_patching()
inner_cls._new_settings.disable()
decorated_tearDownClass(inner_cls)
cls.setUpClass = classmethod(setUpClass)
cls.tearDownClass = classmethod(tearDownClass)
return cls<|fim_middle|>copy_fixtures_to_matrixstore<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, key, person, advisor, created, delete_old_file=False, offset=0):
"""
get_or_create for this usage
"""
created = created + datetime.timedelta(minutes=offset)
# look for previously-imported version of this note, so we're roughly idempotent
oldnotes = AdvisorNote.objects.filter(student=person, advisor=advisor, created_at=created, unit=self.unit)
oldnotes = [n for n in oldnotes if 'import_key' in n.config and n.config['import_key'] == key]
if oldnotes:
note = oldnotes[0]
if delete_old_file and note.file_attachment and os.path.isfile(note.file_attachment.path):
# let file be recreated below
os.remove(note.file_attachment.path)
note.file_attachment = None
note.file_mediatype = None
else:
note = AdvisorNote(student=person, advisor=advisor, created_at=created, unit=self.unit)
note.config['import_key'] = key
note.config['src'] = 'crim_import'
return note, bool(oldnotes)<|fim_middle|>get_advisornote<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [10, 9, 8, 7, 6]:
scheduler.enter(x, 1, fun, (x,))
scheduler.run(blocking=False)
self.assertEqual(l, [])<|fim_middle|>test_run_non_blocking<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(create_files_in_directory):
class TestClass(spm.SPMCommand):
_jobtype = "jobtype"
_jobname = "jobname"
input_spec = spm.SPMCommandInputSpec
dc = TestClass() # dc = derived_class
filelist, outdir = create_files_in_directory
contents = {"contents": [1, 2, 3, 4]}
script = dc._make_matlab_command([contents])
assert "jobs{1}.spm.jobtype.jobname.contents(3) = 3;" in script
dc.inputs.use_v8struct = False
script = dc._make_matlab_command([contents])
assert "jobs{1}.jobtype{1}.jobname{1}.contents(3) = 3;" in script<|fim_middle|>test_make_matlab_command<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, fl_ctx: FLContext):
self._phase = "init"
self.shareable_gen = self._engine.get_component(self.shareable_generator_id)
if not isinstance(self.shareable_gen, ShareableGenerator):
self.system_panic("shareable_gen should be an instance of ShareableGenerator.", fl_ctx)
self.persistor = self._engine.get_component(self.persistor_id)
if not isinstance(self.persistor, LearnablePersistor):
self.system_panic("persistor should be an instance of LearnablePersistor.", fl_ctx)
self._global_model = self.persistor.load(fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_model, private=True, sticky=True)<|fim_middle|>start_controller<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> None:
app_iter = self._app(
environ, t.cast("StartResponse", catching_start_response)
)
response_body.extend(app_iter)
if hasattr(app_iter, "close"):
app_iter.close()<|fim_middle|>runapp<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Sequence['outputs.DeploymentResponse']:
"""
The Deployment items on this page
"""
return pulumi.get(self, "value")<|fim_middle|>value<|file_separator|> |