text (string, lengths 67 to 7.88k) |
---|
<|fim_prefix|>def <|fim_suffix|>(set_configdir):
"""Compares internal actor representation to what was saved in the file."""
cfg = config("good_config.yaml")
cfg.createConfig()
cfg.saveActors()
with open("good_config_actors.yaml") as savedConfig:
data = yaml.safe_load(savedConfig)
savedKeys = len(data.keys())
originalKeys = len(cfg.actors.keys())
assert savedKeys == originalKeys<|fim_middle|>test_save_actors_clean<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(client):
admin_login_url = reverse("admin:login")
response = client.get(admin_login_url)
# Admin login page redirects to account login page
assert response.status_code == 302
assert reverse("account-login") in response["Location"]<|fim_middle|>test_admin_login_url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, privilege, grant_target_name, user_name, node=None):
"""Check that user is only able to execute `SHOW CREATE USER` when they have the necessary privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with Scenario("SHOW CREATE USER without privilege"):
target_user_name = f"target_user_{getuid()}"
with user(node, target_user_name):
with When("I grant the user NONE privilege"):
node.query(f"GRANT NONE TO {grant_target_name}")
with And("I grant the user USAGE privilege"):
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
with Then("I check the user can't use SHOW CREATE USER"):
node.query(f"SHOW CREATE USER {target_user_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("SHOW CREATE USER with privilege"):
target_user_name = f"target_user_{getuid()}"
with user(node, target_user_name):
with When(f"I grant {privilege}"):
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
with Then("I check the user can use SHOW CREATE USER"):
node.query(f"SHOW CREATE USER {target_user_name}", settings = [("user", f"{user_name}")])
with Scenario("SHOW CREATE USER with revoked privilege"):
target_user_name = f"target_user_{getuid()}"
with user(node, target_user_name):
with When(f"I grant {privilege}"):
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
with And(f"I revoke {privilege}"):
node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")
with Then("I check the user cannot use SHOW CREATE USER"):
node.query(f"SHOW CREATE USER {target_user_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)<|fim_middle|>show_create<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(value):
"""cirq.testing.assert_equivalent_repr with cirq_google.workflow imported."""
return cirq.testing.assert_equivalent_repr(value, global_vals={'cirq_google': cg})<|fim_middle|>cg_assert_equivalent_repr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
try:
with self.assertWarns(DeprecationWarning):
from super_gradients.training.models import make_divisible # noqa
assert make_divisible(1, 1) == 1
except ImportError:
self.fail("ImportError raised unexpectedly for make_divisible")<|fim_middle|>test_deprecated_make_divisible<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Test MessageCache.get_message"""
cache = ForwardMsgCache()
session = _create_mock_session()
msg = _create_dataframe_msg([1, 2, 3])
msg_hash = populate_hash_if_needed(msg)
cache.add_message(msg, session, 0)
self.assertEqual(msg, cache.get_message(msg_hash))<|fim_middle|>test_get_message<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, move=None):
a = self.angle
ar = math.radians(a)
h = self.h
t = self.thickness
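# tw/th estimate the part's total width and height, used by move() below for placement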
sh = math.sin(ar)*6*t + math.cos(ar)*h
tw = self.edges["a"].margin() + math.sin(ar)*h + math.cos(ar)*6*t
th = sh + 6
if self.move(tw, th, move, True):
return
self.moveTo(self.edges["a"].margin())
self.polyline(math.sin(ar)*h, a, 4*t)
self.fingerHolesAt(-3.5*t, 0, h/2, 90)
self.edgeCorner("e", "h")
self.edges["h"](h)
self.polyline(0, 90-a, math.cos(ar)*6*t, 90)
self.edges["a"](sh)
self.corner(90)
self.move(tw, th, move)<|fim_middle|>side_wall<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(seek_data):
seek_flag, seek_dest = seek_data
ctx = ucx_api.UCXContext({})
mem = ucx_api.UCXMemoryHandle.alloc(ctx, 1024)
msg_size = mem.length
packed_rkey = mem.pack_rkey()
worker = ucx_api.UCXWorker(ctx)
ep = ucx_api.UCXEndpoint.create_from_worker_address(
worker,
worker.get_address(),
endpoint_error_handling=True,
)
rkey = ep.unpack_rkey(packed_rkey)
uio = ucx_api.UCXIO(mem.address, msg_size, rkey)
send_msg = bytes(os.urandom(msg_size))
uio.write(send_msg)
uio.seek(seek_dest, seek_flag)
recv_msg = uio.read(4)
assert recv_msg == send_msg[seek_dest : seek_dest + 4]<|fim_middle|>test_ucxio_seek_good<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, get_stats=False):
"""Get the schema structure (prefixes, graphs)."""
schema = dict()
schema["1"] = {
"name": "-> Common Prefixes <-",
"columns": self._get_common_prefixes_schema(),
}
schema["2"] = {"name": "-> Graphs <-", "columns": self._get_graphs_schema()}
# schema.update(self._get_query_schema())
logger.info(f"Getting Schema Values: {schema.values()}")
return schema.values()<|fim_middle|>get_schema<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
output = json.loads(r2.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
expected = {
"192.168.255.1": {
"lastNotificationReason": "Hold Timer Expired",
}
}
return topotest.json_cmp(output, expected)<|fim_middle|>bgp_check_hold_timer_expire_reason<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_exact_match_score_with_weights<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, variable: ASTVariable) -> str:
"""
Converts the buffer name corresponding to the handed-over symbol to a NEST-processable format.
:param variable: a single variable symbol.
:return: the corresponding representation as a string
"""
variable_symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE)
if variable_symbol.is_spike_input_port():
var_name = variable_symbol.get_symbol_name().upper()
if variable_symbol.get_vector_parameter() is not None:
vector_parameter = ASTUtils.get_numeric_vector_size(variable_symbol)
var_name = var_name + "_" + str(vector_parameter)
return "spike_inputs_grid_sum_[node." + var_name + " - node.MIN_SPIKE_RECEPTOR]"
return variable_symbol.get_symbol_name() + '_grid_sum_'<|fim_middle|>print_buffer_value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(io):
raise Exception()<|fim_middle|>definition<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return 'ROLE'<|fim_middle|>get_type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path):
"""Get the library path of a virtualenv."""
subdir = 'Scripts' if os.name == 'nt' else 'bin'
executable = os.path.join(path, subdir, 'python')
return run_py(executable,
'from sysconfig import get_path',
'print(get_path("platlib"))')<|fim_middle|>get_venv_lib_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(A, basis_element):
"""Hilbert schmidt product where the basis elements are hermitian"""
return jnp.trace(basis_element @ A)<|fim_middle|>hilbert_schmidt<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, s):
return self.ffi.new("wchar_t []", s)<|fim_middle|>s2f<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, xtest, info):
data = self.prepare_data(info)
pred_on_test_set = self.best_model.predict(data)
return pred_on_test_set<|fim_middle|>query<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Datasplit:
"""
:return: the data subset used to score the model.
"""
pass<|fim_middle|>test<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, module_file):
content = textwrap.dedent("""\
set(GEOTIFF_FOUND ${GeoTIFF_FOUND})
if(DEFINED GeoTIFF_INCLUDE_DIR)
set(GEOTIFF_INCLUDE_DIR ${GeoTIFF_INCLUDE_DIR})
endif()
if(DEFINED GeoTIFF_LIBRARIES)
set(GEOTIFF_LIBRARIES ${GeoTIFF_LIBRARIES})
endif()
""")
save(self, module_file, content)<|fim_middle|>create_cmake_module_variables<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
import traceback
tb = traceback.extract_stack()
# strip itself and the caller from the output
tb = tb[:-2]
result = []
for back in tb:
# (filename, line number, function name, text)
key = back[:3]
result.append('%s:%d(%s)' % func_shorten(key))
return result<|fim_middle|>caller_stack<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, *args, **options):
muis_url = 'https://www.muis.ee/OAIService/OAIService'
all_person_album_ids_set = set()
photos = Photo.objects.filter(source_url__contains='muis.ee')
for photo in photos:
try:
parser = ET.XMLParser(encoding="utf-8")
list_identifiers_url = f'{muis_url}?verb=GetRecord&identifier={photo.external_id}&metadataPrefix=lido'
url_response = urllib.request.urlopen(list_identifiers_url)
tree = ET.fromstring(url_response.read(), parser=parser)
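# XML namespace map used for the OAI-PMH / LIDO element lookups below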
ns = {'d': 'http://www.openarchives.org/OAI/2.0/', 'lido': 'http://www.lido-schema.org'}
rec = tree.find('d:GetRecord/d:record', ns)
record = 'd:metadata/lido:lidoWrap/lido:lido/'
object_identification_wrap = f'{record}lido:descriptiveMetadata/lido:objectIdentificationWrap/'
object_description_wraps = \
f'{object_identification_wrap}lido:objectDescriptionWrap/lido:objectDescriptionSet'
title_wrap = f'{object_identification_wrap}lido:titleWrap/'
event_wrap = f'{record}lido:descriptiveMetadata/lido:eventWrap/'
actor_wrap = f'{event_wrap}lido:eventSet/lido:event/lido:eventActor/'
person_album_ids = []
title_find = rec.find(f'{title_wrap}lido:titleSet/lido:appellationValue', ns)
title = title_find.text \
if title_find is not None \
else None
photo = reset_modeltranslated_field(photo, 'title', title)
photo.light_save()
dating = None
photo, dating = set_text_fields_from_muis(photo, dating, rec, object_description_wraps, ns)
photo.light_save()
creation_date_earliest = None
creation_date_latest = None
date_prefix_earliest = None
date_prefix_latest = None
date_earliest_has_suffix = False
date_latest_has_suffix = False
location = []
events = rec.findall(f'{event_wrap}lido:eventSet/lido:event', ns)
existing_dating = Dating.objects.filter(photo=photo, profile=None).first()
if events is not None and len(events) > 0:
location, \
creation_date_earliest, \
creation_date_latest, \
date_prefix_earliest, \
date_prefix_latest, \
date_earliest_has_suffix, \
date_latest_has_suffix, \
= extract_dating_from_event(
events,
location,
creation_date_earliest,
creation_date_latest,
dating is not None and existing_dating is None,
ns
)
if dating is not None and existing_dating is None:
creation_date_earliest, date_prefix_earliest, date_earliest_has_suffix = \
get_muis_date_and_prefix(dating, False)
creation_date_latest, date_prefix_latest, date_latest_has_suffix = \
get_muis_date_and_prefix(dating, True)
actors = rec.findall(f'{actor_wrap}lido:actorInRole', ns)
person_album_ids, author = add_person_albums(actors, person_album_ids, ns)
if author is not None:
photo.author = author
if location != []:
photo = add_geotag_from_address_to_photo(photo, location)
photo = add_dating_to_photo(
photo,
creation_date_earliest,
creation_date_latest,
date_prefix_earliest,
date_prefix_latest,
Dating,
date_earliest_has_suffix,
date_latest_has_suffix
)
dt = datetime.utcnow().replace(tzinfo=timezone.utc)
photo.muis_update_time = dt.isoformat()
photo.light_save()
person_albums = Album.objects.filter(id__in=person_album_ids)
if person_albums is not None:
for album in person_albums:
if not album.cover_photo:
album.cover_photo = photo
ap = AlbumPhoto(photo=photo, album=album, type=AlbumPhoto.FACE_TAGGED)
ap.save()
all_person_album_ids_set.add(album.id)
photo.set_calculated_fields()
except Exception as e:
exception = ApplicationException(exception=e, photo=photo)
exception.save()
all_person_album_ids = list(all_person_album_ids_set)
all_person_albums = Album.objects.filter(id__in=all_person_album_ids)
if all_person_albums is not None:
for person_album in all_person_albums:
person_album.set_calculated_fields()
person_album.save()<|fim_middle|>handle<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
pass<|fim_middle|>pre_operations<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(key, where=lambda e: True):
return [e[key] for e in events if where(e)]<|fim_middle|>select<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_list_pop<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[str]:
"""
The credentials are stored for this upstream or login server.
"""
return pulumi.get(self, "login_server")<|fim_middle|>login_server<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(track, groups):
"""
Given an array of groups, sets them on a track
Returns true if successful, false if there was an error
"""
grouping = ' '.join(sorted('_'.join(group.split()) for group in groups))
track.set_tag_raw(get_tagname(), grouping)
if not track.write_tags():
dialogs.error(
None,
"Error writing tags to %s"
% GObject.markup_escape_text(track.get_loc_for_io()),
)
return False
return True<|fim_middle|>set_track_groups<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
A = num.array(3)
B = mk_0to1_array(num, (2, 2))
C = mk_0to1_array(num, (2, 2))
arrays = [A, B, C]
with pytest.raises(ValueError):
num.linalg.multi_dot(arrays)<|fim_middle|>test_invalid_array_dim_zero<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, message):
# The node will send us invs for other blocks. Ignore them.
pass<|fim_middle|>on_inv<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(MatchList):
RegList = []
for regexp in MatchList:
a = re.compile(regexp)
RegList.append(a)
return RegList<|fim_middle|>set_up_filter<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(builder, string):
builder.string(string)
return builder<|fim_middle|>add_a_string<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
dirpath: str,
name: str,
url: str,
process_fn: Callable[[Dict[str, Any]], Dict[str, Any]],
) -> Dict[str, Any]:
"""
This fetch-and-cache helper allows sharing results between different processes.
"""
path = os.path.join(dirpath, name)
print(f"Downloading {url} to {path}")
def is_cached_file_valid() -> bool:
# Check if the file is new enough (see: FILE_CACHE_LIFESPAN_SECONDS). A real check
# could make a HEAD request and check/store the file's ETag
fname = pathlib.Path(path)
now = datetime.datetime.now()
mtime = datetime.datetime.fromtimestamp(fname.stat().st_mtime)
diff = now - mtime
return diff.total_seconds() < FILE_CACHE_LIFESPAN_SECONDS
if os.path.exists(path) and is_cached_file_valid():
# Another test process already downloaded the file, so don't re-do it
with open(path) as f:
return cast(Dict[str, Any], json.load(f))
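# Retry the download a few times before giving up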
for _ in range(3):
try:
contents = urlopen(url, timeout=5).read().decode("utf-8")
processed_contents = process_fn(json.loads(contents))
with open(path, "w") as f:
f.write(json.dumps(processed_contents))
return processed_contents
except Exception as e:
print(f"Could not download {url} because: {e}.")
print(f"All retries exhausted, downloading {url} failed.")
return {}<|fim_middle|>fetch_and_cache<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""
Get a list of python plugins that were registered through
kwiver.python_plugin_registration
:return: A list of zero or more python modules containing registration
functions
"""
py_modules = []
try:
for entry_point in iter_entry_points(PYTHON_PLUGIN_ENTRYPOINT):
try:
py_modules.append(entry_point.load())
except ImportError:
logger.warn("Failed to load entry point: {0}".format(entry_point))
except DistributionNotFound:
pass
return py_modules<|fim_middle|>get_python_plugins_from_entrypoint<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Initializes a connection to a Vertica database.
Uses the vertica_python Python package.
"""
try:
# Build the connection parameters (vertica_python.connect takes keyword arguments)
conn_info = {
"host": self.host,
"port": self.port,
"user": self.username,
"password": self.password,
"database": self.dbname,
}
logger.info("Connecting to the Vertica database...")
# Connect to Vertica
conn = vertica_python.connect(**conn_info)
return conn
except Exception as error:
logger.error("Connection Has Failed... %s", str(error))
sys.exit(1)<|fim_middle|>connect_vertica_conn<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, x, y):
self.x = x
self.y = y
self.update()<|fim_middle|>set_pos<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(config, capsys):
# Get the merged list and ensure we get our defaults
with capsys.disabled():
output = _bootstrap("list")
assert "github-actions" in output
# Ask for a specific scope and check that the list of sources is empty
with capsys.disabled():
output = _bootstrap("list", "--scope", "user")
assert "No method available" in output<|fim_middle|>test_list_sources<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(title, filename, fix=None):
"""
Creates a formatted (title, text) description of an issue.
TODO use this function and issue_str_line for all issues, so the format
can be easily changed (extra text, colors, etc.)
"""
return (title, filename, fix)<|fim_middle|>issue_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr<|fim_middle|>set_lr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(file_path):
file_type = filetype.guess(file_path)
if file_type:
return file_type.mime
else:
return "text/html"<|fim_middle|>get_filetype<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self._result = 1
self.close()<|fim_middle|>on_take_control<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Returns the Parser object required to take inputs to data_prep.py"""
parser = argparse.ArgumentParser(
description="LRS-3 Data Preparation steps",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--train_val_path", type=str, help="Path to the Train/ Validation files"
)
parser.add_argument("--test_path", type=str, help="Path to the Test files")
return parser<|fim_middle|>get_parser<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(model):
metric.reset()
session = ort.InferenceSession(model.SerializeToString(),
providers=ort.get_available_providers())
ort_inputs = {}
len_inputs = len(session.get_inputs())
inputs_names = [session.get_inputs()[i].name for i in range(len_inputs)]
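# Build the ONNX Runtime input feed: map each batch to the graph input names (dicts pass through as-is)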
for idx, (inputs, labels) in enumerate(dataloader):
if not isinstance(labels, list):
labels = [labels]
if len_inputs == 1:
ort_inputs.update(
inputs if isinstance(inputs, dict) else {inputs_names[0]: inputs}
)
else:
assert len_inputs == len(inputs), 'number of input tensors must align with graph inputs'
if isinstance(inputs, dict):
ort_inputs.update(inputs)
else:
for i in range(len_inputs):
if not isinstance(inputs[i], np.ndarray):
ort_inputs.update({inputs_names[i]: np.array(inputs[i])})
else:
ort_inputs.update({inputs_names[i]: inputs[i]})
predictions = session.run(None, ort_inputs)
predictions, labels = postprocess((predictions, labels))
metric.update(predictions, labels)
return metric.result()<|fim_middle|>eval_func<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(processor, test_trace, data_source, peer_service_config):
processor._set_defaults_enabled = False
span = test_trace[0]
span.set_tag(data_source, "test_value")
processor.process_trace(test_trace)
assert span.get_tag(peer_service_config.tag_name) is None
assert span.get_tag(peer_service_config.source_tag_name) is None<|fim_middle|>test_disabled_peer_service<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return [
RadialCTFFilter(5, 200, defocus=d, Cs=2.0, alpha=0.1)
for d in np.linspace(1.5e4, 2.5e4, 7)
]<|fim_middle|>filters<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
p = (packages.PackageRestriction("one", values.AlwaysTrue),)
p2 = (packages.PackageRestriction("one", values.AlwaysFalse),)
v = values.AlwaysTrue
v2 = values.AlwaysFalse
assert packages.Conditional("use", v, p) == packages.Conditional("use", v, p)
assert packages.Conditional("use", v2, p) != packages.Conditional("use", v, p)
assert packages.Conditional("use", v, p) != packages.Conditional("use", v, p2)
assert packages.Conditional("use1", v, p) != packages.Conditional("use", v, p)<|fim_middle|>test_conditional<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
multisig_tx = MultisigTransactionFactory(trusted=False)
self.assertFalse(
is_relevant_notification(multisig_tx.__class__, multisig_tx, created=False)
)
multisig_tx.trusted = True
self.assertTrue(
is_relevant_notification(multisig_tx.__class__, multisig_tx, created=False)
)
multisig_tx.created -= timedelta(minutes=75)
self.assertTrue(
is_relevant_notification(multisig_tx.__class__, multisig_tx, created=False)
)
multisig_tx.modified -= timedelta(minutes=75)
self.assertFalse(
is_relevant_notification(multisig_tx.__class__, multisig_tx, created=False)
)<|fim_middle|>test_is_relevant_notification_multisig_transaction<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
cls,
pex_version, # type: str
style, # type: LockStyle.Value
requires_python, # type: Iterable[str]
target_systems, # type: Iterable[TargetSystem.Value]
requirements, # type: Iterable[Union[Requirement, ParsedRequirement]]
constraints, # type: Iterable[Requirement]
allow_prereleases, # type: bool
allow_wheels, # type: bool
allow_builds, # type: bool
prefer_older_binary, # type: bool
use_pep517, # type: Optional[bool]
build_isolation, # type: bool
transitive, # type: bool
locked_resolves, # type: Iterable[LockedResolve]
source=None, # type: Optional[str]
pip_version=None, # type: Optional[PipVersionValue]
resolver_version=None, # type: Optional[ResolverVersion.Value]
):
# type: (...) -> Lockfile
pin_by_local_project_directory = {} # type: Dict[str, Pin]
requirement_by_local_project_directory = {} # type: Dict[str, Requirement]
for locked_resolve in locked_resolves:
for locked_requirement in locked_resolve.locked_requirements:
if isinstance(locked_requirement.artifact, LocalProjectArtifact):
local_directory = locked_requirement.artifact.directory
local_pin = locked_requirement.pin
pin_by_local_project_directory[local_directory] = local_pin
requirement_by_local_project_directory[
local_directory
] = local_pin.as_requirement()
def extract_requirement(req):
# type: (Union[Requirement, ParsedRequirement]) -> Requirement
if isinstance(req, Requirement):
return req
if isinstance(req, LocalProjectRequirement):
local_project_directory = os.path.abspath(req.path)
pin = pin_by_local_project_directory[local_project_directory]
requirement = Requirement.parse(
"{project_name}{extras}=={version}{marker}".format(
project_name=pin.project_name,
extras="[{extras}]".format(extras=",".join(req.extras))
if req.extras
else "",
version=pin.version,
marker="; {marker}".format(marker=req.marker) if req.marker else "",
)
)
# N.B.: We've already mapped all available local projects above, but the user may
# have supplied the local project requirement with more specific constraints (
# extras and / or marker restrictions) and we need to honor those; so we over-write.
requirement_by_local_project_directory[local_project_directory] = requirement
return requirement
return req.requirement
resolve_requirements = OrderedSet(extract_requirement(req) for req in requirements)
pip_ver = pip_version or PipVersion.DEFAULT
return cls(
pex_version=pex_version,
style=style,
requires_python=SortedTuple(requires_python),
target_systems=SortedTuple(target_systems),
pip_version=pip_ver,
resolver_version=resolver_version or ResolverVersion.default(pip_ver),
requirements=SortedTuple(resolve_requirements, key=str),
constraints=SortedTuple(constraints, key=str),
allow_prereleases=allow_prereleases,
allow_wheels=allow_wheels,
allow_builds=allow_builds,
prefer_older_binary=prefer_older_binary,
use_pep517=use_pep517,
build_isolation=build_isolation,
transitive=transitive,
locked_resolves=SortedTuple(locked_resolves),
local_project_requirement_mapping=requirement_by_local_project_directory,
source=source,
)<|fim_middle|>create<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.input_message = IP_REPORT
self.run_bot()
self.assertMessageEqual(0, IP_EVENTS)<|fim_middle|>test_ip<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, message, message_queue=None):
topic = message.topic.name
system_properties = message.system_properties
message_id = system_properties.message_id
body_digest = system_properties.body_digest
check_sum = body_digest.checksum
raw = message.body
corrupted = False
digest_type = body_digest.type
# Digest Type check
if digest_type == ProtoDigestType.CRC32:
expected_check_sum = format(binascii.crc32(raw) & 0xFFFFFFFF, '08X')
if not expected_check_sum == check_sum:
corrupted = True
elif digest_type == ProtoDigestType.MD5:
expected_check_sum = hashlib.md5(raw).hexdigest()
if not expected_check_sum == check_sum:
corrupted = True
elif digest_type == ProtoDigestType.SHA1:
expected_check_sum = hashlib.sha1(raw).hexdigest()
if not expected_check_sum == check_sum:
corrupted = True
elif digest_type in [ProtoDigestType.unspecified, None]:
print(f"Unsupported message body digest algorithm, digestType={digest_type}, topic={topic}, messageId={message_id}")
# Body Encoding check
body_encoding = system_properties.body_encoding
body = raw
if body_encoding == ProtoEncoding.GZIP:
body = gzip.decompress(message.body)
elif body_encoding in [ProtoEncoding.IDENTITY, None]:
pass
else:
print(f"Unsupported message encoding algorithm, topic={topic}, messageId={message_id}, bodyEncoding={body_encoding}")
tag = system_properties.tag
message_group = system_properties.message_group
delivery_time = system_properties.delivery_timestamp
keys = list(system_properties.keys)
born_host = system_properties.born_host
born_time = system_properties.born_timestamp
delivery_attempt = system_properties.delivery_attempt
queue_offset = system_properties.queue_offset
properties = {key: value for key, value in message.user_properties.items()}
receipt_handle = system_properties.receipt_handle
return cls(message_id, topic, body, tag, message_group, delivery_time, keys, properties, born_host,
born_time, delivery_attempt, message_queue, receipt_handle, queue_offset, corrupted)<|fim_middle|>from_protobuf<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, obj=None):
"""Render self.parameters as a Jinja2 template with the given object as context."""
try:
return {key: render_jinja2(value, {"obj": obj}) for key, value in self.parameters.items()}
except (TemplateSyntaxError, UndefinedError) as exc:
raise SecretParametersError(self, registry["secrets_providers"].get(self.provider), str(exc)) from exc<|fim_middle|>rendered_parameters<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(
default_processor: MessageProcessor,
) -> DialogueStateTracker:
reminders = [
ReminderScheduled("greet", datetime.now(), kill_on_user_message=False),
ReminderScheduled(
intent="greet",
entities=[{"entity": "name", "value": "Jane Doe"}],
trigger_date_time=datetime.now(),
kill_on_user_message=False,
),
ReminderScheduled(
intent="default",
entities=[{"entity": "name", "value": "Jane Doe"}],
trigger_date_time=datetime.now(),
kill_on_user_message=False,
),
ReminderScheduled(
intent="greet",
entities=[{"entity": "name", "value": "Bruce Wayne"}],
trigger_date_time=datetime.now(),
kill_on_user_message=False,
),
ReminderScheduled("default", datetime.now(), kill_on_user_message=False),
ReminderScheduled(
"default", datetime.now(), kill_on_user_message=False, name="special"
),
]
sender_id = uuid.uuid4().hex
tracker = await default_processor.tracker_store.get_or_create_tracker(sender_id)
for reminder in reminders:
tracker.update(UserUttered("test"))
tracker.update(ActionExecuted("action_reminder_reminder"))
tracker.update(reminder)
await default_processor.tracker_store.save(tracker)
return tracker<|fim_middle|>tracker_with_six_scheduled_reminders<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
# define the functor for evaluating the norm of the L1 error vector
ref_table = "input/vlct/MHD_shock_tube/rj2a_shock_tube_t0.2_res256.csv"
l1_func= CalcTableL1Norm(["density","velocity_x","velocity_y","velocity_z",
"pressure","bfield_x","bfield_y","bfield_z"],
default_ref_table = ref_table)
err_compare = partial(analyze_shock,
target_template = "method_vlct-1-{:s}_rj2a_N256_0.2",
name_template = "{}-axis rj2a shock tube N=256",
l1_functor = l1_func)
r = []
# check the average L1-Norm along the active axis (averaged over multiple
# slices along the active axis)
# check that the standard deviation of the L1-Norms computed along the
# active axis is zero (there should definitely be no differences if we only
# use 1 block - if we use more than one block, it's unclear to me if it's
# ok to have round-off errors)
r.append(err_compare(0.012523489882320429, "x"))
r.append(err_compare(0.0, "x", std_dev=True))
r.append(err_compare(0.012523489882320308, "y"))
r.append(err_compare(0.0, "y", std_dev=True))
r.append(err_compare(0.012523489882320315, "z"))
r.append(err_compare(0.0, "z", std_dev=True))
n_passed = np.sum(r)
n_tests = len(r)
print("{:d} Tests passed out of {:d} Tests.".format(n_passed,n_tests))
return n_passed == n_tests<|fim_middle|>analyze_tests<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_ncdhw_to_ndhw_c2_d<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
timings = {"model_creation": datetime.datetime.now()}
logger = logging.getLogger("calliope.testlogger")
# TODO: capture logging output and check that comment is in string
log_time(logger, timings, "test", comment="test_comment", level="info")
assert isinstance(timings["test"], datetime.datetime)
log_time(logger, timings, "test2", comment=None, level="info")
assert isinstance(timings["test2"], datetime.datetime)
# TODO: capture logging output and check that time_since_solve_start is in the string
log_time(
logger,
timings,
"test",
comment=None,
level="info",
time_since_solve_start=True,
)<|fim_middle|>test_timing_log<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
limit = self.cleaned_data['limit']
return 20 if limit is None else limit<|fim_middle|>clean_limit<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(pipeline_response):
deserialized = self._deserialize("UsageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem)<|fim_middle|>extract_data<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(in_str: str):
"""Base-64 encode non-printable characters in test names so we can handle that obnoxious names module"""
if in_str.isprintable() and not any((c in in_str) for c in {'"', "'", ";"}):
return f'"{in_str}"'
else:
return f"str(b64decode(\"{b64encode(in_str.encode('utf-8')).decode('utf-8')}\"), 'utf-8')"<|fim_middle|>escape_null<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
now = timezone.now()
org = self.create_organization(owner=self.user)
user2 = self.create_user()
self.create_member(user=user2, organization=self.organization)
entry1 = AuditLogEntry.objects.create(
organization_id=org.id,
event=audit_log.get_event_id("ORG_EDIT"),
actor=self.user,
datetime=now,
)
AuditLogEntry.objects.create(
organization_id=org.id,
event=audit_log.get_event_id("ORG_EDIT"),
actor=user2,
datetime=now,
)
response = self.get_success_response(org.slug, qs_params={"actor": self.user.id})
assert len(response.data["rows"]) == 1
assert response.data["rows"][0]["id"] == str(entry1.id)<|fim_middle|>test_filter_by_user<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(a: T.handle, b: T.handle):
m = T.int64()
A = T.match_buffer(a, (m * 2,))
B = T.match_buffer(b, (m, 2))
for i, j in T.grid(m, 2):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi * 2 + vj]<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, var0, var1, update_op):<|fim_middle|>assert_dense_correct<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, tpus):<|fim_middle|>test_scale_global_to_worker_tpu<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> bool:
USE_MIGRATIONS = "use-migrations"
CREATE_TABLES = "create-tables"
migrations_env = os.environ.get("QUETZ_TEST_DBINIT", CREATE_TABLES)
if migrations_env.lower() == CREATE_TABLES:
return False
elif migrations_env.lower() == USE_MIGRATIONS:
return True
else:
raise ValueError(
f"QUETZ_TEST_DBINIT should be either {CREATE_TABLES} or {USE_MIGRATIONS}"
)<|fim_middle|>use_migrations<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# print(f"\n{self.__class__.__name__}: {self.case}")
self.prepare_inputs()<|fim_middle|>set_up<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
The EngagementFabric account
Azure REST API version: 2018-09-01-preview.
:param str account_name: Account Name
:param str resource_group_name: Resource Group Name
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:engagementfabric:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
sku=pulumi.get(__ret__, 'sku'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))<|fim_middle|>get_account<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, request, project_id):
"""查询业务实例拓扑"""
try:
lang = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, settings.LANGUAGE_CODE)
project = Project.get_project(request.user.token.access_token, project_id)
topo_info = cc.BizTopoQueryService(request.user.username, project["cc_app_id"], lang).fetch()
except CompParseBkCommonResponseError as e:
raise error_codes.ComponentError(_('Failed to query business topology info: {}').format(e))
except BaseCompError as e:
logger.error('Failed to query business topology info: %s', e)
raise error_codes.ComponentError(_('An unknown error occurred, please try again later'))
return Response(data=topo_info)<|fim_middle|>biz_inst_topo<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Set with `self.shop_tab.set(main=self, left={index})`
- index
1: Monthly shops
2: General supply shops
"""
grids = ButtonGrid(
origin=(340, 93), delta=(189, 0),
button_shape=(188, 54), grid_shape=(2, 1),
name='SHOP_TAB')
return Navbar(
grids=grids,
# Yellow bottom dash
active_color=(255, 219, 83), active_threshold=221, active_count=100,
# Black bottom dash
inactive_color=(181, 178, 181), inactive_threshold=221, inactive_count=100,
)<|fim_middle|>shop_tab<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, dts: Set[date], multiple: bool = False) -> None:
"""
When multiple is True, each holiday from a given date has its own observed date.
"""
for dt in sorted(dts):
if not self._is_observed(dt):
continue
if multiple:
for name in self.get_list(dt):
self._add_observed(dt, name)
else:
self._add_observed(dt)<|fim_middle|>populate_observed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, mcmt_class):
"""Test that the MCMT can act as normal control gate."""
qc = QuantumCircuit(2)
mcmt = mcmt_class(gate=CHGate(), num_ctrl_qubits=1, num_target_qubits=1)
qc = qc.compose(mcmt, [0, 1])
ref = QuantumCircuit(2)
ref.ch(0, 1)
self.assertEqual(qc, ref)<|fim_middle|>test_mcmt_as_normal_control<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> tf.compat.v1.ConfigProto:
return tf.compat.v1.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
)<|fim_middle|>session_config<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(monkeypatch, transport, rule):
channel = await transport.check_open('test')
os.kill(rule.peer.transport._process.pid, 9)
await transport.assert_msg('', command='close', channel=channel, problem='terminated')<|fim_middle|>test_killed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(num_channel_groups):
return {"channel_groups": num_channel_groups,
"filter_groups": num_channel_groups}<|fim_middle|>channelwise_parallel_strategy<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(api_client, checkout):
# given
checkout.metadata_storage.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
checkout.metadata_storage.save(update_fields=["metadata"])
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_clear_public_metadata_for_item(
api_client, None, checkout_id, "Checkout", key="Not-exits"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["deleteMetadata"]["item"],
checkout.metadata_storage,
checkout_id,
)<|fim_middle|>test_delete_public_metadata_for_not_exist<|file_separator|> |
<|fim_prefix|>ync def <|fim_suffix|>(self):<|fim_middle|>test_getinfo_config_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
io_loop, channel_minion_id, channel, server, received, timeout=60
):
log.info("TEST - BEFORE CHANNEL CONNECT")
yield channel.connect()
log.info("TEST - AFTER CHANNEL CONNECT")
def cb(payload):
log.info("TEST - PUB SERVER MSG %r", payload)
received.append(payload)
io_loop.stop()
channel.on_recv(cb)
server.publish({"tgt_type": "glob", "tgt": [channel_minion_id], "WTF": "SON"})
start = time.time()
while time.time() - start < timeout:
yield salt.ext.tornado.gen.sleep(1)
io_loop.stop()<|fim_middle|>connect_and_publish<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> RouteStringGenerator:
return RouteStringGenerator()<|fim_middle|>route_generator<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> bool:
"""``True`` if ``max_deadline`` is not over yet, or if no deadline is
set."""
if self.deadline is None:
return True
return self.max_deadline >= now() if self.max_deadline else True<|fim_middle|>is_open<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
row = GroupAssigneeRow.from_bulk(
{
"project_id": "2",
"group_id": "1359",
"date_added": "2019-09-19 00:17:55+00",
"user_id": "1",
"team_id": "",
}
)
write_processed_messages(
self.storage, [InsertBatch([row.to_clickhouse()], None)]
)
ret = (
self.storage.get_cluster()
.get_query_connection(ClickhouseClientSettings.QUERY)
.execute("SELECT * FROM groupassignee_local;")
.results
)
assert ret[0] == (
0, # offset
0, # deleted
2, # project_id
1359, # group_id
datetime(2019, 9, 19, 0, 17, 55),
1, # user_id
None, # team_id
)<|fim_middle|>test_bulk_load<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(feature: float, background: float) -> float:
"""The Weber contrast. Used for patterns with a small feature within a large background. Ranges from 0 to infinity.
For backwards compatibility with previous versions, the absolute difference is used, making the range 0 to infinity vs -1 to infinity.
.. seealso::
https://en.wikipedia.org/wiki/Contrast_(vision)#Weber_contrast
.. danger::
The default definition does not use the absolute value. We only use it here for backwards compatibility.
"""
return abs(feature - background) / background<|fim_middle|>weber<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(generator_helper, data_helper):
# Given
input_data = data_helper.generate_multi_feature_full()
generator_1 = DatetimeFeatureGenerator()
generator_2 = DatetimeFeatureGenerator(features=["hour"])
expected_feature_metadata_in_full = {
("datetime", ()): ["datetime"],
("object", ("datetime_as_object",)): ["datetime_as_object"],
}
expected_feature_metadata_full_1 = {
("int", ("datetime_as_int",)): [
"datetime",
"datetime.year",
"datetime.month",
"datetime.day",
"datetime.dayofweek",
"datetime_as_object",
"datetime_as_object.year",
"datetime_as_object.month",
"datetime_as_object.day",
"datetime_as_object.dayofweek",
]
}
expected_feature_metadata_full_2 = {
("int", ("datetime_as_int",)): [
"datetime",
"datetime.hour",
"datetime_as_object",
"datetime_as_object.hour",
]
}
expected_output_data_feat_datetime = [
1533140820000000000,
1301322000000000000,
1301322000000000000,
1524238620000000000,
1524238620000000000,
-5364662400000000000,
7289654340000000000,
1301322000000000000,
1301322000000000000,
]
expected_output_data_feat_datetime_year = [
2018,
2011, # blank and nan values are set to the mean of good values = 2011
2011,
2018,
2018,
1800,
2200,
2011, # 2700 and 1000 are out of range for a pandas datetime so they are set to the mean
2011, # see limits at https://pandas.pydata.org/docs/reference/api/pandas.Timestamp.max.html
]
expected_output_data_feat_datetime_hour = [16, 14, 14, 15, 15, 0, 23, 14, 14]
# When
output_data_1 = generator_helper.fit_transform_assert(
input_data=input_data,
generator=generator_1,
expected_feature_metadata_in_full=expected_feature_metadata_in_full,
expected_feature_metadata_full=expected_feature_metadata_full_1,
)
assert list(output_data_1["datetime"].values) == list(output_data_1["datetime_as_object"].values)
assert expected_output_data_feat_datetime == list(output_data_1["datetime"].values)
assert expected_output_data_feat_datetime_year == list(output_data_1["datetime.year"].values)
output_data_2 = generator_helper.fit_transform_assert(
input_data=input_data,
generator=generator_2,
expected_feature_metadata_in_full=expected_feature_metadata_in_full,
expected_feature_metadata_full=expected_feature_metadata_full_2,
)
assert list(output_data_2["datetime"].values) == list(output_data_2["datetime_as_object"].values)
assert expected_output_data_feat_datetime == list(output_data_2["datetime"].values)
assert expected_output_data_feat_datetime_hour == list(output_data_2["datetime.hour"].values)<|fim_middle|>test_datetime_feature_generator<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Test Inkbird parser for Inkbird iBBQ with 4 probes."""
data_string = "043e27020100001e6771c1e2a81b0201060302f0ff13ff00000000a8e2c171671e0000000000000000c2"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_raw_data(data)
assert sensor_msg["firmware"] == "Inkbird"
assert sensor_msg["type"] == "iBBQ-4"
assert sensor_msg["mac"] == "A8E2C171671E"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature probe 1"] == 0
assert sensor_msg["temperature probe 2"] == 0
assert sensor_msg["temperature probe 3"] == 0
assert sensor_msg["temperature probe 4"] == 0
assert sensor_msg["rssi"] == -62<|fim_middle|>test_inkbird_i_bb_q_4_probes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position<|fim_middle|>get_position<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(account_name: Optional[pulumi.Input[str]] = None,
filter: Optional[pulumi.Input[Optional[str]]] = None,
orderby: Optional[pulumi.Input[Optional[str]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_name: Optional[pulumi.Input[str]] = None,
skip_token: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListShareSynchronizationsResult]:
"""
List synchronizations of a share
Azure REST API version: 2021-08-01.
:param str account_name: The name of the share account.
:param str filter: Filters the results using OData syntax.
:param str orderby: Sorts the results using OData syntax.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
:param str skip_token: Continuation token
"""
...<|fim_middle|>list_share_synchronizations_output<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, conf):
"""
Render config as dictionary structure and delete keys
with null values
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = {}
location = {}
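# Split the raw config lines by location type (civic-based / elin / coordinate-based) before parsing each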
civic_conf = "\n".join(filter(lambda x: ("civic-based" in x), conf))
elin_conf = "\n".join(filter(lambda x: ("elin" in x), conf))
coordinate_conf = "\n".join(
filter(lambda x: ("coordinate-based" in x), conf)
)
disable = "\n".join(filter(lambda x: ("disable" in x), conf))
coordinate_based_conf = self.parse_attribs(
["altitude", "datum", "longitude", "latitude"], coordinate_conf
)
elin_based_conf = self.parse_lldp_elin_based(elin_conf)
civic_based_conf = self.parse_lldp_civic_based(civic_conf)
if disable:
config["enable"] = False
if coordinate_conf:
location["coordinate_based"] = coordinate_based_conf
config["location"] = location
elif civic_based_conf:
location["civic_based"] = civic_based_conf
config["location"] = location
elif elin_conf:
location["elin"] = elin_based_conf
config["location"] = location
return utils.remove_empties(config)<|fim_middle|>render_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(signal, frame):
print()<|fim_middle|>signal_ignore<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return GLiteJobFileFactory<|fim_middle|>glite_job_file_factory_cls<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(size1, size2):
""" Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == "scalable":
return 0.0
# Size values should already be numeric (convertible to float)
try:
sizeval1 = float(size1)
except ValueError:
return 1.0
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0<|fim_middle|>score_size<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, ver_and_role: str):
repo = self.server.repo
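# Requests come in as "<version>.<rolename>"; the version prefix may be omitted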
ver_str, sep, role = ver_and_role.rpartition(".")
if sep == "":
# 0 will lead to list lookup with -1, meaning latest version
ver = 0
else:
ver = int(ver_str)
if role not in repo.role_cache or ver > len(repo.role_cache[role]):
return None
# return metadata
return repo.role_cache[role][ver - 1].to_bytes(JSONSerializer())<|fim_middle|>get_metadata<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(E):
if np.sum(np.abs(E)) != 0: # check E != 0 for both floats and arrays
try:
m = _magn(E)
assert ((1 <= m) & (m <= 5)).all()
except AssertionError:
print(("Warning. Input values may not be in cm-1", E, "cm-1?"))<|fim_middle|>assertcm<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, *args):<|fim_middle|>data_metadata_updated<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(tmp_path):
test_dir_a = tmp_path / 'a'
test_dir_b = tmp_path / 'b'
os.mkdir(test_dir_a)
os.mkdir(test_dir_b)
test_a = 'unittests/resources/checks/emptycheck.py'
test_b = 'unittests/resources/checks/hellocheck.py'
shutil.copyfile(test_a, test_dir_a / 'test.py')
shutil.copyfile(test_b, test_dir_b / 'test.py')
return RegressionCheckLoader(
[test_dir_a.as_posix(), test_dir_b.as_posix()]
)<|fim_middle|>loader_with_path_tmpdir<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIotHubResourceResult]:
"""
Get the non-security related metadata of an IoT hub.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
...<|fim_middle|>get_iot_hub_resource_output<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self, app, auth_client_header, third_party_user, group_payload
):
group_payload["groupid"] = "group:23456@thirdparty.com"
headers = auth_client_header
headers["X-Forwarded-User"] = third_party_user.userid
app.post_json("/api/groups", group_payload, headers=headers)
res = app.post_json(
"/api/groups", group_payload, headers=headers, expect_errors=True
)
assert res.status_code == 409<|fim_middle|>test_it_returns_htt_p_conflict_if<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(empty_study: FileStudy, command_context: CommandContext):
study_path = empty_study.config.study_path
area1 = "Area1"
area1_id = transform_name_to_id(area1)
CreateArea.parse_obj(
{
"area_name": area1,
"command_context": command_context,
}
).apply(empty_study)
update_settings_command = UpdateConfig(
target="settings/generaldata/optimization/simplex-range",
data="day",
command_context=command_context,
)
output = update_settings_command.apply(empty_study)
assert output.status
generaldata = MultipleSameKeysIniReader().read(study_path / "settings/generaldata.ini")
assert generaldata["optimization"]["simplex-range"] == "day"
assert generaldata["optimization"]["transmission-capacities"]
update_settings_command = UpdateConfig(
target=f"input/areas/{area1_id}/optimization/nodal optimization/other-dispatchable-power",
data=False,
command_context=command_context,
)
output = update_settings_command.apply(empty_study)
assert output.status
area_config = MultipleSameKeysIniReader().read(study_path / f"input/areas/{area1_id}/optimization.ini")
assert not area_config["nodal optimization"]["other-dispatchable-power"]
# test UpdateConfig with byte object which is necessary with the API PUT /v1/studies/{uuid}/raw
data = json.dumps({"first_layer": {"0": "Nothing"}}).encode("utf-8")
command = UpdateConfig(
target="layers/layers",
data=data,
command_context=command_context,
)
command.apply(empty_study)
layers = MultipleSameKeysIniReader().read(study_path / "layers/layers.ini")
assert layers == {"first_layer": {"0": "Nothing"}}
new_data = json.dumps({"1": False}).encode("utf-8")
command = UpdateConfig(
target="layers/layers/first_layer",
data=new_data,
command_context=command_context,
)
command.apply(empty_study)
layers = MultipleSameKeysIniReader().read(study_path / "layers/layers.ini")
assert layers == {"first_layer": {"1": False}}<|fim_middle|>test_update_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.import_taxonomies: gapic_v1.method.wrap_method(
self.import_taxonomies,
default_timeout=None,
client_info=client_info,
),
self.export_taxonomies: gapic_v1.method.wrap_method(
self.export_taxonomies,
default_timeout=None,
client_info=client_info,
),
}<|fim_middle|>prep_wrapped_messages<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> np.ndarray:
"""
Calculate the updraught from CAPE data
Calculation is 0.25 * sqrt(2 * cape)
Returns zero where CAPE < 10 J kg-1
"""
updraught = 0.25 * (2 * self.cape.data) ** 0.5
updraught[self.cape.data < self._minimum_cape] = 0.0
return updraught.astype(np.float32)<|fim_middle|>updraught_from_cape<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>validate_batch<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> MatchProperties | None:
"""Return match properties if any."""
return self._match<|fim_middle|>match<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Read email from stdin
"""
data = sys.stdin.readlines()
for line in data:
if not line.strip():
continue
if line.startswith("To: "):
self.email_to = line[3:].strip()
elif line.startswith("Subject: "):
self.email_subject = line[8:].strip()
else:
self.email_body.append(line.strip())<|fim_middle|>read_mail<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>( ver1, ver2 ):
intVer1 = cmssw_version_to_int(ver1)
intVer2 = cmssw_version_to_int(ver2)
if intVer1<intVer2:
return -1
elif intVer1>intVer2:
return 1
else:
return 0<|fim_middle|>cmp_cmssw_version<|file_separator|> |