<|fim_prefix|>def <|fim_suffix|>(self): """To fetch the participating clients from the main parent process Returns: clients """ pass<|fim_middle|>sync_clients_from_main_process<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, request, *args, **kwargs): partial = kwargs.pop("partial", False) instance = self.get_object_or_none() serializer = self.get_serializer(instance, data=request.data, partial=partial) serializer.is_valid(raise_exception=True) if instance is None: lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field lookup_value = self.kwargs[lookup_url_kwarg] extra_kwargs = {self.lookup_field: lookup_value} serializer.save(**extra_kwargs) return Response(serializer.data, status=status.HTTP_201_CREATED) serializer.save() return Response(serializer.data)<|fim_middle|>update<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cfg_dict, version): if version == 1: cfg_dict['type_hint'] = 'classification_image_data' return cfg_dict<|fim_middle|>clf_data_config_upgrader<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): parser = argparse.ArgumentParser("generic-image-rec train script") parser.add_argument( '-c', '--config', type=str, default='configs/config.yaml', help='config file path') parser.add_argument( '-o', '--override', action='append', default=[], help='config options to be overridden') parser.add_argument( '-p', '--profiler_options', type=str, default=None, help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' ) args = parser.METHOD_NAME() return args<|fim_middle|>parse_args<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): return "ODataV4Format"<|fim_middle|>error_format<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( nb_name: str, kernel_name: str = "wandb_python", notebook_type: PythonType = "jupyter", save_code: bool = True, **kwargs: Any, ): nb_path = assets_path(pathlib.Path("notebooks") / nb_name) shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path))) with open(nb_path) as f: nb = nbformat.read(f, as_version=4) # set up extra env vars and do monkey-patching. # in particular, we import and patch wandb. # this goes in the first cell of the notebook. setup_cell = io.StringIO() # env vars, particularly to point the sdk at the live test server (local_testcontainer): if not save_code: wandb_env["WANDB_SAVE_CODE"] = "false" wandb_env["WANDB_NOTEBOOK_NAME"] = "" else: wandb_env["WANDB_SAVE_CODE"] = "true" wandb_env["WANDB_NOTEBOOK_NAME"] = nb_name setup_cell.write("import os\n") for k, v in wandb_env.items(): setup_cell.write(f"os.environ['{k}'] = '{v}'\n") # make wandb think we're in a specific type of notebook: setup_cell.write( "import pytest\n" "mp = pytest.MonkeyPatch()\n" "import wandb\n" f"mp.setattr(wandb.sdk.wandb_settings, '_get_python_type', lambda: '{notebook_type}')" ) # inject: nb.cells.insert(0, nbformat.v4.new_code_cell(setup_cell.getvalue())) client = WandbNotebookClient(nb, kernel_name=kernel_name) try: with client.setup_kernel(**kwargs): yield client finally: pass # with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f: # f.write(client.all_output_text()) # wandb.termlog("Find debug logs at: %s" % os.getcwd()) # wandb.termlog(client.all_output_text())<|fim_middle|>notebook_loader<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(key, subkey, arch, value_name, tags_to_apply, get_configuration, configure_environment, restart_syscheckd, wait_for_fim_start): ''' description: Check if the 'wazuh-syscheckd' daemon detects events and creates 'diff' files from monitored keys when the 'report_changes' option is enabled. For this purpose, the test will monitor a key, add a testing value and modify it. Finally, the test will verify that the 'diff' file has been created, and the FIM event generated includes the 'content_changes' field. wazuh_min_version: 4.2.0 tier: 1 parameters: - key: type: str brief: Path of the registry root key (HKEY_* constants). - subkey: type: str brief: The registry key being monitored by syscheck. - arch: type: str brief: Architecture of the registry. - value_name: type: str brief: Name of the testing value that will be created - tags_to_apply: type: set brief: Run test if matches with a configuration identifier, skip otherwise. - get_configuration: type: fixture brief: Get configurations from the module. - configure_environment: type: fixture brief: Configure a custom environment for testing. - restart_syscheckd: type: fixture brief: Clear the 'ossec.log' file and start a new monitor. - wait_for_fim_start: type: fixture brief: Wait for realtime start, whodata start, or end of initial FIM scan. assertions: - Verify that FIM events are generated for the changes made on the testing values. - Verify that a 'diff' file is created for each monitored value. - Verify that FIM events include the 'content_changes' field. input_description: A test case (test_report_changes) is contained in external YAML file (wazuh_registry_report_changes.yaml) which includes configuration settings for the 'wazuh-syscheckd' daemon. That is combined with the testing registry keys to be monitored defined in this module. expected_output: - r'.*Sending FIM event: (.+)$' ('added', 'modified', and 'deleted' events) tags: - scheduled - time_travel ''' check_apply_test(tags_to_apply, get_configuration['tags']) values = {value_name: "some content"} def report_changes_validator(event): """Validate content_changes attribute exists in the event""" for value in values: _, diff_file = calculate_registry_diff_paths(key, subkey, arch, value) assert os.path.exists(diff_file), '{diff_file} does not exist' assert event['data'].get('content_changes') is not None, 'content_changes is empty' registry_value_cud(key, subkey, wazuh_log_monitor, arch=arch, value_list=values, time_travel=get_configuration['metadata']['fim_mode'] == 'scheduled', min_timeout=global_parameters.default_timeout, triggers_event=True, validators_after_update=[report_changes_validator])<|fim_middle|>test_report_changes<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, commit): self._run_git("checkout", commit)<|fim_middle|>checkout<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tmp_dir, dvc): tmp_dir.dvc_gen({"dir": {"foo": "foo", "bar": "bar", "subdir": {"data": "data"}}}) fs = DataFileSystem(index=dvc.index.data["repo"]) subdir = "dir/subdir" assert fs.info(subdir).get("md5") is None _, _, obj = build(dvc.cache.local, subdir, fs, "md5", dry_run=True) assert obj.hash_info == HashInfo("md5", "af314506f1622d107e0ed3f14ec1a3b5.dir") data = posixpath.join(subdir, "data") assert fs.info(data)["md5"] == "8d777f385d3dfec8815d20f7496026dc" _, _, obj = build(dvc.cache.local, data, fs, "md5", dry_run=True) assert obj.hash_info == HashInfo("md5", "8d777f385d3dfec8815d20f7496026dc")<|fim_middle|>test_get_hash_granular<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(working_directory: str, target_folder: str): command_array = [ sys.executable, generate_mgmt_script, "-p", target_folder, "-o", working_directory, "--verbose", ] try: logging.info("Command to generate management sphinx sources: {}".format(command_array)) check_call( command_array ) except CalledProcessError as e: logging.error( "script failed for path {} exited with error {}".format( args.working_directory, e.returncode ) ) exit(1)<|fim_middle|>mgmt_apidoc<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(self): signed = await sign( document=TEST_VC_DOCUMENT, suite=Ed25519Signature2018( key_pair=self.sign_key_pair, verification_method=self.verification_method, ), document_loader=custom_document_loader, purpose=AssertionProofPurpose(), ) assert signed<|fim_middle|>test_sign_vc<|file_separator|>
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>test_full_width_exclmation_point<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(path): path_elements = path.split('/') bucket_name = path_elements[2] prefix = "/".join(path_elements[3:]) return bucket_name, prefix<|fim_middle|>tokenize_s3_path<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): tools.get(**self.conan_data["sources"][self.version]) extracted_dir = self.name + "-" + self.version os.rename(extracted_dir, self._source_subfolder)<|fim_middle|>source<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(argv): path_to_json_file = '' try: opts, args = getopt.getopt(argv, "hi:", ["ifile="]) except getopt.GetoptError: print("Usage: plot_pamm_results.py -i path/to/json/file") sys.exit(2) for opt, arg in opts: if opt == '-h' or opt == '': print("Usage: plot_pamm_results.py -i path/to/json/file") sys.exit() elif opt in ("-i", "--ifile"): path_to_json_file = arg with open(path_to_json_file) as file: data = json.load(file) pprint.pprint(data) fig = plt.figure(figsize=(12, 12)) # TIMER DATAFRAME timer = pandas.DataFrame(list(data['Timer'].items()), columns=[ 'Timer', 'Duration']) # convert ms to sec timer['Duration'] = timer['Duration'].apply( lambda x: np.around(x/1000, decimals=2)) timer['DFA'] = timer['Timer'].apply( lambda x: True if 'DFA' in x and x != 'DFA Runtime' else False) timer['Timer'] = timer['Timer'].apply( lambda x: x[4:] if 'DFA' in x and x != 'DFA Runtime' else x) pprint.pprint(timer) ax = plt.subplot2grid((2, 2), (0, 0)) drawTimer(timer.loc[timer['DFA'] == True], ax, 'Timer', 'Duration', plt, 'Phases') ax = plt.subplot2grid((2, 2), (0, 1)) drawTimer(timer.loc[timer['DFA'] == False], ax, 'Timer', 'Duration', plt, 'Analysis Steps') # COUNTER DATAFRAME ax = plt.subplot2grid((2, 1), (1, 0)) stats_df = pandas.DataFrame( list(data['Counter'].items()), columns=['Statistic', 'Count']) stats_df['Statistic'] = stats_df['Statistic'].apply(lambda x: x[3:]) drawCounter(stats_df, ax, 'Statistic', 'Count', plt, 'General Statistics') # HISTOGRAM DATAFRAME # Gather all histogram data # maping: histo type -> {value -> #occurence } histo_map = {} for prop, values in data.items(): if "Histogram" in prop: histo_map[prop] = values # dfacts_df = pandas.DataFrame(list(data['Histogram']['Data-flow facts'].items()), columns=['Value', '#Occurrences']) # pprint.pprint(dfacts_df) # dfToNumeric(dfacts_df) # maxValue = dfacts_df.loc[dfacts_df['Value'].idxmax()]['Value'] # bins = np.arange(0, maxValue+10, 10) # # pprint.pprint(bins) # xrange = np.arange(10, maxValue+10, 10) # # pprint.pprint(xrange) # g = dfacts_df.groupby(pandas.cut(dfacts_df['Value'], bins)).sum() # # pprint.pprint(g) # g.plot.bar(y='#Occurrences', x='Value', color=['tab:green', 'tab:red'], alpha=0.8, width=1, legend=True, fontsize=9) # ax = plt.subplot2grid((3, 3), (2, 1)) # drawHistogram(g, ax, xrange, '#Occurrences', 'Data-flow facts Dist.') plt.tight_layout(pad=0.9, w_pad=0.15, h_pad=1.0) plt.show()<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(args): prefix_hash_set = set() train_file = path.join(args.notation_data_dir, "train.txt") with open(train_file) as f: for line in f: moves = line.strip().split() for i in range(len(moves)): prefix_hash_set.add(hash(tuple(moves[0: i + 1]))) print(f"Unique train prefixes: {len(prefix_hash_set)}") return prefix_hash_set<|fim_middle|>load_train_prefixes<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(x, y, adjoint_a, adjoint_b): def body(t, prev): with ops.control_dependencies([prev]): return (t + 1, math_ops.matmul( x, y, transpose_a=adjoint_a, transpose_b=adjoint_b, a_is_sparse=True, b_is_sparse=False)) t0 = constant_op.constant(0) v0 = constant_op.constant(0.0) def _timeit(iterations, _): (_, final) = control_flow_ops.while_loop( lambda t, _: t < iterations, body, (t0, v0), parallel_iterations=1, back_prop=False, shape_invariants=(tensor_shape.TensorShape(()), tensor_shape.TensorShape(None))) return [final] return _timeit<|fim_middle|>sparse_tensor_dense_vs_dense_matmul_benchmark<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.assertEqual(45, self.harvester_class.total_time())<|fim_middle|>test_total_time<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(request, slug=None): schedule = fetch_schedule(slug) if not schedule.published and not request.user.is_staff: raise Http404() presentations = Presentation.objects.filter(section=schedule.section) presentations = presentations.exclude(cancelled=True).order_by("id") response = HttpResponse(content_type="text/csv") if slug: file_slug = slug else: file_slug = "presentations" response["Content-Disposition"] = ( 'attachment; filename="%s.csv"' % file_slug ) response.write( loader.get_template("symposion/schedule/schedule_list.csv").render( {"presentations": presentations} ) ) return response<|fim_middle|>schedule_list_csv<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, data, tags, members): return self.node(data, tags)<|fim_middle|>relation<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): args_dicts = self.args_dict() if asyncio.iscoroutinefunction(self.fn): asyncio.METHOD_NAME(self.fn(**args_dicts)) else: self.fn(**args_dicts)<|fim_middle|>run<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): return self._y<|fim_middle|>y<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, graph): return self._apply_optimization(graph, self._optimize_at_current_graph_level)<|fim_middle|>optimize<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(server=None): """Returns the default config of the operator. This config can then be changed to the user needs and be used to instantiate the operator. The Configuration allows to customize how the operation will be processed by the operator. Parameters ---------- server : server.DPFServer, optional Server with channel connected to the remote or local instance. When ``None``, attempts to use the global server. """ return Operator.METHOD_NAME(name="component_wise_divide", server=server)<|fim_middle|>default_config<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(app): with app.app_context(): db = get_db() populate_entries(db) with db.scoped_session() as session: test_fn(session)<|fim_middle|>wrapper<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cmd, driver_id): """ Load driver :param cmd: Driver load cmd :param driver_id: Driver id in windows guest """ nic_index = len(vm.virtnet) - 1 session = vm.wait_for_login(nic_index=nic_index) if params["os_type"] == "windows": cmd = cmd.replace("DRIVER_ID", driver_id) status, output = session.cmd_status_output(cmd) session.close() if status != 0: test.fail("failed to load driver, %s" % output)<|fim_middle|>load_driver<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(s): try: return json.loads(s) except ValueError: return s<|fim_middle|>parse_arg<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): return { "workspace": { "name": "coreos", "token": { "path": "some/path", "field": "bot_token", }, "api_client": create_api_config(), "managedUsergroups": ["foo"], } }<|fim_middle|>permissions_workspace<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(config: schema.Main): keycloak_server_url = os.environ.get( "KEYCLOAK_SERVER_URL", f"https://{config.domain}/auth/" ) keycloak_username = os.environ.get("KEYCLOAK_ADMIN_USERNAME", "root") keycloak_password = os.environ.get( "KEYCLOAK_ADMIN_PASSWORD", config.security.keycloak.initial_root_password ) should_verify_tls = config.certificate.type != CertificateEnum.selfsigned try: keycloak_admin = keycloak.KeycloakAdmin( server_url=keycloak_server_url, username=keycloak_username, password=keycloak_password, realm_name=os.environ.get("KEYCLOAK_REALM", "nebari"), user_realm_name="master", auto_refresh_token=("get", "put", "post", "delete"), verify=should_verify_tls, ) except ( keycloak.exceptions.KeycloakConnectionError, keycloak.exceptions.KeycloakAuthenticationError, ) as e: raise ValueError(f"Failed to connect to Keycloak server: {e}") return keycloak_admin<|fim_middle|>get_keycloak_admin_from_config<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(same_function_name_arn: str, same_function_name_fn: str): # GIVEN service = data_builder.build_service_name() method_name_todos = "same_function_name.Todos.get_all" method_subsegment_todos = f"## {method_name_todos}" method_metadata_key_todos = f"{method_name_todos} response" method_name_comments = "same_function_name.Comments.get_all" method_subsegment_comments = f"## {method_name_comments}" method_metadata_key_comments = f"{method_name_comments} response" trace_query = data_builder.build_trace_default_query(function_name=same_function_name_fn) # WHEN event = json.dumps({"service": service}) _, execution_time = data_fetcher.get_lambda_response(lambda_arn=same_function_name_arn, payload=event) # THEN trace = data_fetcher.get_traces(start_date=execution_time, filter_expression=trace_query) assert len(trace.get_metadata(key=method_metadata_key_todos, namespace=service)) == 1 assert len(trace.get_metadata(key=method_metadata_key_comments, namespace=service)) == 1 assert len(trace.get_subsegment(name=method_subsegment_todos)) == 1 assert len(trace.get_subsegment(name=method_subsegment_comments)) == 1<|fim_middle|>test_lambda_handler_trace_multiple_functions_same<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(df: pd.DataFrame, labels_series: pd.Series, category_id: int, labeled_status: LabeledStatus, label_types: Set[LabelType] = None): """ :param df: :param labels_series: series of the label dictionary corresponding to each element in the dataframe :param category_id: :param labeled_status: unlabeled, labeled or all :param label_types: set of applicable label types if filtering for labeled elements (LabelStatus.LABELED) :return: """ if labeled_status in [LabeledStatus.UNLABELED, LabeledStatus.ALL] and label_types is not None: raise Exception(f"Label type is inapplicable when fetching {labeled_status} elements") if labeled_status == LabeledStatus.LABELED and label_types is None: raise Exception(f"label_types must be provided when filtering labeled elements") if labeled_status == LabeledStatus.UNLABELED: return df[labels_series.apply(lambda x: category_id not in x)] elif labeled_status == LabeledStatus.LABELED: return df[labels_series.apply(lambda x: category_id in x and x[category_id].label_type in label_types)] return df<|fim_middle|>filter_by_labeled_status<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(alt_list): bytes = bytearray() for entry in alt_list: location, alt, primary = entry room, scene_setup, flag = alt alt_override = (room << 8) + (scene_setup << 14) + flag room, scene_setup, flag = primary primary_override = (room << 8) + (scene_setup << 14) + flag bytes.append(location.scene) bytes.append(0x06) bytes.append((alt_override & 0xFF00) >> 8) bytes.append(alt_override & 0xFF) bytes.append(location.scene) bytes.append(0x06) bytes.append((primary_override & 0xFF00) >> 8) bytes.append(primary_override & 0xFF) return bytes<|fim_middle|>get_alt_list_bytes<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(pipeline_response): deserialized = self._deserialize("OperationListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, AsyncList(list_of_elem)<|fim_middle|>extract_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): parameters = { **self.serialize_header_param( "Accept", "application/json", ), } return parameters<|fim_middle|>header_parameters<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(caller, **kwargs): text = ( """ Your name is |g%s|n! click |lclook|lthere|le to trigger a look command under MXP. This node's option has no explicit key (nor the "_default" key set), and so gets assigned a number automatically. You can infact -always- use numbers (1...N) to refer to listed options also if you don't see a string option key (try it!). """ % caller.key ) if kwargs.get("executed_from_dynamic_node", False): # we are calling this node as a exec, skip return values caller.msg("|gCalled from dynamic node:|n \n {}".format(text)) return else: options = {"desc": "back to main", "goto": "test_start_node"} return text, options<|fim_middle|>test_view_node<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, index): if not index.isValid(): return QModelIndex() key = index.internalPointer() if key is None: return QModelIndex() row = self._keys.index(key) return self.createIndex(row, 0, None)<|fim_middle|>parent<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(ctx, baseline_ref=None, report_file=None): if check_uncommitted_changes(ctx): raise Exit( color_message( "There are uncomitted changes in your repository. Please commit or stash them before trying again.", "red", ), code=1, ) current_branch = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() commit_sha = os.getenv("CI_COMMIT_SHA") if commit_sha is None: commit_sha = ctx.run("git rev-parse HEAD", hide=True).stdout.strip() if not baseline_ref: base_branch = _get_release_json_value("base_branch") baseline_ref = ctx.run(f"git merge-base {commit_sha} origin/{base_branch}", hide=True).stdout.strip() # platforms are the agent task recognized os/platform and arch values, not Go-specific values binaries = { "agent": { "entrypoint": "cmd/agent", "platforms": ["linux/x64", "linux/arm64", "win32/x64", "win32/x86", "darwin/x64", "darwin/arm64"], }, "iot-agent": { "build": "agent", "entrypoint": "cmd/agent", "flavor": AgentFlavor.iot, "platforms": ["linux/x64", "linux/arm64"], }, "heroku-agent": { "build": "agent", "entrypoint": "cmd/agent", "flavor": AgentFlavor.heroku, "platforms": ["linux/x64"], }, "cluster-agent": {"entrypoint": "cmd/cluster-agent", "platforms": ["linux/x64", "linux/arm64"]}, "cluster-agent-cloudfoundry": { "entrypoint": "cmd/cluster-agent-cloudfoundry", "platforms": ["linux/x64", "linux/arm64"], }, "dogstatsd": {"entrypoint": "cmd/dogstatsd", "platforms": ["linux/x64", "linux/arm64"]}, "process-agent": { "entrypoint": "cmd/process-agent", "platforms": ["linux/x64", "linux/arm64", "win32/x64", "darwin/x64", "darwin/arm64"], }, "heroku-process-agent": { "build": "process-agent", "entrypoint": "cmd/process-agent", "flavor": AgentFlavor.heroku, "platforms": ["linux/x64"], }, "security-agent": { "entrypoint": "cmd/security-agent", "platforms": ["linux/x64", "linux/arm64"], }, "serverless": {"entrypoint": "cmd/serverless", "platforms": ["linux/x64", "linux/arm64"]}, "system-probe": {"entrypoint": "cmd/system-probe", "platforms": ["linux/x64", "linux/arm64", "win32/x64"]}, "trace-agent": { "entrypoint": "cmd/trace-agent", "platforms": ["linux/x64", "linux/arm64", "win32/x64", "win32/x86", "darwin/x64", "darwin/arm64"], }, "heroku-trace-agent": { "build": "trace-agent", "entrypoint": "cmd/trace-agent", "flavor": AgentFlavor.heroku, "platforms": ["linux/x64"], }, } diffs = {} dep_cmd = "go list -f '{{ range .Deps }}{{ printf \"%s\\n\" . }}{{end}}'" with tempfile.TemporaryDirectory() as tmpdir: try: # generate list of imports for each target+branch combo branches = {"current": None, "main": baseline_ref} for branch_name, branch_ref in branches.items(): if branch_ref: ctx.run(f"git checkout -q {branch_ref}") for binary, details in binaries.items(): with ctx.cd(details.get("entrypoint")): for combo in details.get("platforms"): platform, arch = combo.split("/") goos, goarch = GOOS_MAPPING.get(platform), GOARCH_MAPPING.get(arch) target = f"{binary}-{goos}-{goarch}" depsfile = os.path.join(tmpdir, f"{target}-{branch_name}") flavor = details.get("flavor", AgentFlavor.base) build = details.get("build", binary) build_tags = get_default_build_tags( build=build, arch=arch, platform=platform, flavor=flavor ) env = {"GOOS": goos, "GOARCH": goarch} ctx.run(f"{dep_cmd} -tags \"{' '.join(build_tags)}\" > {depsfile}", env=env) finally: ctx.run(f"git checkout -q {current_branch}") # compute diffs for each target for binary, details in binaries.items(): for combo in details.get("platforms"): platform, arch = combo.split("/") goos, goarch = GOOS_MAPPING.get(platform), GOARCH_MAPPING.get(arch) target = f"{binary}-{goos}-{goarch}" prdeps = os.path.join(tmpdir, f"{target}-current") maindeps = os.path.join(tmpdir, f"{target}-main") res = ctx.run( f"diff -u0 {maindeps} {prdeps} | grep -v '^@@' | grep -v '^[+-][+-]'", hide=True, warn=True ) if len(res.stdout) > 0: diffs[target] = res.stdout.strip() # output, also to file if requested if len(diffs) > 0: pr_comment = [ f"Baseline: {baseline_ref}", f"Comparison: {commit_sha}\n", ] for binary, details in binaries.items(): for combo in details.get("platforms"): platform, arch = combo.split("/") goos, goarch = GOOS_MAPPING.get(platform), GOARCH_MAPPING.get(arch) target = f"{binary}-{goos}-{goarch}" prettytarget = f"{binary} {goos}/{goarch}" if target in diffs: targetdiffs = diffs[target] add, remove = patch_summary(targetdiffs) color_add = color_message(f"+{add}", "green") color_remove = color_message(f"-{remove}", "red") print(f"== {prettytarget} {color_add}, {color_remove} ==") print(f"{color_patch(targetdiffs)}\n") summary = f"<summary>{prettytarget} +{add}, -{remove}</summary>" diff_block = f"\n```diff\n{targetdiffs}\n```\n" pr_comment.append(f"<details>{summary}\n{diff_block}</details>\n") else: print(f"== {prettytarget} ==\nno changes\n") if report_file: with open(report_file, 'w') as f: f.write("\n".join(pr_comment)) else: print("no changes for all binaries") if report_file: # touch file open(report_file, 'w').close()<|fim_middle|>go_deps<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, lib, opts, args): if not self.url: raise ui.UserError( 'This plugin is deprecated since AcousticBrainz no longer ' 'accepts new submissions. See the base_url configuration ' 'option.' ) else: # Get items from arguments items = lib.items(ui.decargs(args)) self.opts = opts util.par_map(self.analyze_submit, items)<|fim_middle|>command<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(wrap, lt=None, is_argsort=False, is_list=False, is_np_array=False): intp = types.intp zero = intp(0) # Two subroutines to make the core algorithm generic wrt. argsort # or normal sorting. Note the genericity may make basic sort() # slightly slower (~5%) if is_argsort: if is_list: @wrap def make_res(A): return [x for x in range(len(A))] else: @wrap def make_res(A): return np.arange(A.size) @wrap def GET(A, idx_or_val): return A[idx_or_val] else: @wrap def make_res(A): return A @wrap def GET(A, idx_or_val): return idx_or_val def default_lt(a, b): """ Trivial comparison function between two keys. """ return a < b LT = wrap(lt if lt is not None else default_lt) @wrap def insertion_sort(A, R, low, high): """ Insertion sort A[low:high + 1]. Note the inclusive bounds. """ assert low >= 0 if high <= low: return for i in range(low + 1, high + 1): k = R[i] v = GET(A, k) # Insert v into A[low:i] j = i while j > low and LT(v, GET(A, R[j - 1])): # Make place for moving A[i] downwards R[j] = R[j - 1] j -= 1 R[j] = k @wrap def partition(A, R, low, high): """ Partition A[low:high + 1] around a chosen pivot. The pivot's index is returned. """ assert low >= 0 assert high > low mid = (low + high) >> 1 # NOTE: the pattern of swaps below for the pivot choice and the # partitioning gives good results (i.e. regular O(n log n)) # on sorted, reverse-sorted, and uniform arrays. Subtle changes # risk breaking this property. # median of three {low, middle, high} if LT(GET(A, R[mid]), GET(A, R[low])): R[low], R[mid] = R[mid], R[low] if LT(GET(A, R[high]), GET(A, R[mid])): R[high], R[mid] = R[mid], R[high] if LT(GET(A, R[mid]), GET(A, R[low])): R[low], R[mid] = R[mid], R[low] pivot = GET(A, R[mid]) # Temporarily stash the pivot at the end R[high], R[mid] = R[mid], R[high] i = low j = high - 1 while True: while i < high and LT(GET(A, R[i]), pivot): i += 1 while j >= low and LT(pivot, GET(A, R[j])): j -= 1 if i >= j: break R[i], R[j] = R[j], R[i] i += 1 j -= 1 # Put the pivot back in its final place (all items before `i` # are smaller than the pivot, all items at/after `i` are larger) R[i], R[high] = R[high], R[i] return i @wrap def partition3(A, low, high): """ Three-way partition [low, high) around a chosen pivot. A tuple (lt, gt) is returned such that: - all elements in [low, lt) are < pivot - all elements in [lt, gt] are == pivot - all elements in (gt, high] are > pivot """ mid = (low + high) >> 1 # median of three {low, middle, high} if LT(A[mid], A[low]): A[low], A[mid] = A[mid], A[low] if LT(A[high], A[mid]): A[high], A[mid] = A[mid], A[high] if LT(A[mid], A[low]): A[low], A[mid] = A[mid], A[low] pivot = A[mid] A[low], A[mid] = A[mid], A[low] lt = low gt = high i = low + 1 while i <= gt: if LT(A[i], pivot): A[lt], A[i] = A[i], A[lt] lt += 1 i += 1 elif LT(pivot, A[i]): A[gt], A[i] = A[i], A[gt] gt -= 1 else: i += 1 return lt, gt @wrap def run_quicksort1(A): R = make_res(A) if len(A) < 2: return R stack = [Partition(zero, zero)] * MAX_STACK stack[0] = Partition(zero, len(A) - 1) n = 1 while n > 0: n -= 1 low, high = stack[n] # Partition until it becomes more efficient to do an insertion sort while high - low >= SMALL_QUICKSORT: assert n < MAX_STACK i = partition(A, R, low, high) # Push largest partition on the stack if high - i > i - low: # Right is larger if high > i: stack[n] = Partition(i + 1, high) n += 1 high = i - 1 else: if i > low: stack[n] = Partition(low, i - 1) n += 1 low = i + 1 insertion_sort(A, R, low, high) return R if is_np_array: @wrap def run_quicksort(A): if A.ndim == 1: return run_quicksort1(A) else: for idx in np.ndindex(A.shape[:-1]): run_quicksort1(A[idx]) return A else: @wrap def run_quicksort(A): return run_quicksort1(A) # Unused quicksort implementation based on 3-way partitioning; the # partitioning scheme turns out exhibiting bad behaviour on sorted arrays. @wrap def _run_quicksort(A): stack = [Partition(zero, zero)] * 100 stack[0] = Partition(zero, len(A) - 1) n = 1 while n > 0: n -= 1 low, high = stack[n] # Partition until it becomes more efficient to do an insertion sort while high - low >= SMALL_QUICKSORT: assert n < MAX_STACK l, r = partition3(A, low, high) # One trivial (empty) partition => iterate on the other if r == high: high = l - 1 elif l == low: low = r + 1 # Push largest partition on the stack elif high - r > l - low: # Right is larger stack[n] = Partition(r + 1, high) n += 1 high = l - 1 else: stack[n] = Partition(low, l - 1) n += 1 low = r + 1 insertion_sort(A, low, high) return QuicksortImplementation(wrap, partition, partition3, insertion_sort, run_quicksort)<|fim_middle|>make_quicksort_impl<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): return {lvs: {}}<|fim_middle|>configure_loader_modules<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())<|fim_middle|>to_str<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(request: Request, provider: str) -> RpcIntegration: return get_integration_from_jwt(request.GET.get("jwt"), request.path, provider, request.GET)<|fim_middle|>get_integration_from_request<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(conn, connection_record): conn.execute('PRAGMA trusted_schema=OFF') conn.execute('PRAGMA temp_store=MEMORY') if foreign_keys: conn.execute('PRAGMA foreign_keys=ON') if orm_lockdown: conn.set_authorizer(authorizer_callback)<|fim_middle|>do_connect<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.setUpForConfig("sd123_hpr01_presence.yaml", SD123_PRESENCE_PAYLOAD) self.setUpBasicBinarySensor( PRESENCE_DPS, self.entities.get("binary_sensor_occupancy"), device_class=BinarySensorDeviceClass.OCCUPANCY, testdata=("presence", "none"), ) self.setUpBasicLight( LIGHT_DPS, self.entities.get("light_led"), testdata=("normal", "slient"), ) self.setUpBasicSwitch(SWITCH_DPS, self.entities.get("switch")) self.setUpMultiNumber( [ { "name": "number_trigger_power", "dps": TRIGPOW_DPS, "max": 5000, "step": 100, }, { "name": "number_maintain_power", "dps": MAINTPOW_DPS, "max": 5000, "step": 100, }, { "name": "number_trigger_frames", "dps": TRIGFRAME_DPS, "max": 20, }, { "name": "number_interrupt_frames", "dps": INTFRAME_DPS, "max": 20, }, { "name": "number_trigger_points", "dps": TRIGPOINT_DPS, "max": 10, }, { "name": "number_maintain_points", "dps": MAINTPOINT_DPS, "max": 10, }, ], ) self.setUpMultiSelect( [ { "name": "select_safe_range", "dps": SAFERANGE_DPS, "options": { "0_meters": "0m", "1_meters": "1m", "2_meters": "2m", "3_meters": "3m", "4_meters": "4m", "5_meters": "5m", "6_meters": "6m", }, }, { "name": "select_max_range", "dps": MAXRANGE_DPS, "options": { "0_meters": "0m", "1_meters": "1m", "2_meters": "2m", "3_meters": "3m", "4_meters": "4m", "5_meters": "5m", "6_meters": "6m", "7_meters": "7m", }, }, { "name": "select_delay", "dps": DELAY_DPS, "options": { "case_0": "10s", "case_1": "30s", "case_2": "1m", "case_3": "2m", "case_4": "5m", "case_5": "10m", "case_6": "30m", }, }, { "name": "select_configuration", "dps": MODE_DPS, "options": { "case_0": "Sleep/Micro motion", "case_1": "Meeting/Office", "case_2": "Classroom/Corridor", "case_3": "Custom", }, }, ], ) self.mark_secondary( [ "light_led", "number_interrupt_frames", "number_maintain_points", "number_maintain_power", "number_trigger_frames", "number_trigger_points", "number_trigger_power", "select_configuration", "select_delay", "select_max_range", "select_safe_range", "switch", ] )<|fim_middle|>set_up<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ The type of the resource group. """ return pulumi.get(self, "type")<|fim_middle|>type<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ (Required only by `compute.RegionDiskIamPolicy`) The policy data generated by a `organizations_get_iam_policy` data source. """ return pulumi.get(self, "policy_data")<|fim_middle|>policy_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(target: Union[Path, str], only_if_empty: bool = False): target = Path(target).expanduser() assert target.is_dir() for p in sorted(target.glob("**/*"), reverse=True): if not p.exists(): continue p.chmod(0o666) if p.is_dir(): p.rmdir() else: if only_if_empty: raise RuntimeError(f"{p.parent} is not empty!") p.unlink() target.rmdir()<|fim_middle|>del_dir<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> Dict[str, Any]: responses = [message for (method, message) in self.transcript if method == "send_reply"] self.ensure_unique_response(responses) return responses[0]<|fim_middle|>unique_reply<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, number): self.verify_ordinal(number) outwords = self.to_cardinal(number).split(" ") lastword = outwords[-1].lower() try: if len(outwords) > 1: if outwords[-2] in self.ords_feminine: outwords[-2] = self.ords_feminine.get( outwords[-2], outwords[-2]) elif outwords[-2] == 'десять': outwords[-2] = outwords[-2][:-1] + 'и' if len(outwords) == 3: if outwords[-3] in ['один', 'одна']: outwords[-3] = '' lastword = self.ords[lastword] except KeyError: if lastword[:-3] in self.ords_feminine: lastword = self.ords_feminine.get( lastword[:-3], lastword) + "сотый" elif lastword[-1] == "ь" or lastword[-2] == "т": lastword = lastword[:-1] + "ый" elif lastword[-1] == "к": lastword = lastword + "овой" elif lastword[-5:] == "десят": lastword = lastword.replace('ь', 'и') + 'ый' elif lastword[-2] == "ч" or lastword[-1] == "ч": if lastword[-2] == "ч": lastword = lastword[:-1] + "ный" if lastword[-1] == "ч": lastword = lastword + "ный" elif lastword[-1] == "н" or lastword[-2] == "н": lastword = lastword[:lastword.rfind('н') + 1] + "ный" elif lastword[-1] == "д" or lastword[-2] == "д": lastword = lastword[:lastword.rfind('д') + 1] + "ный" outwords[-1] = self.title(lastword) return " ".join(outwords).strip()<|fim_middle|>to_ordinal<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tested_str: str, str_or_list_to_match_to: Union[List[str], str]) -> bool: """ Return True if tested_str matches at least one element in str_or_list_to_match_to. :param tested_str: One of the supported entity types to be matched - currently possible to pass either NNCFNodeName (to refer to the original model operations) or QuantizerId (to refer to specific quantizers). :param str_or_list_to_match_to: A list of strings specifying for the serializable_id. Entries of the strings may be prefixed with `{re}` to enable regex matching. :return: A boolean value specifying whether a tested_str should matches at least one element in str_or_list_to_match_to. """ if str_or_list_to_match_to is None: return False str_list = [str_or_list_to_match_to] if isinstance(str_or_list_to_match_to, str) else str_or_list_to_match_to for item in str_list: if "{re}" in item: regex = item.replace("{re}", "") if re.search(regex, tested_str): return True else: if tested_str == item: return True return False<|fim_middle|>matches_any<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( phase_factory, maptopic_factory, user, admin, user_factory, group_factory ): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_archived=True, ) anonymous, moderator, initiator = setup_users(project) creator = item.creator ( project, group_member_in_org, group_member_in_pro, group_member_out, ) = setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, group_member_in_org, item) assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item)<|fim_middle|>test_post_phase_project_archived<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): params_to_scope = PARAMS_FOR_PRESENT.copy() params_to_scope['data']['scopeUris'] = ['test'] self.mock_ansible_module.params = params_to_scope resource_data = NETWORK_SET.copy() resource_data['scopeUris'] = ['fake'] resource_data['uri'] = 'rest/network-sets/fake' self.resource.get_by.return_value = [resource_data] patch_return = resource_data.copy() patch_return['scopeUris'] = ['test'] self.resource.patch.return_value = patch_return NetworkSetModule().run() self.resource.patch.assert_called_once_with('rest/network-sets/fake', operation='replace', path='/scopeUris', value=['test']) self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, ansible_facts=dict(network_set=patch_return), msg=NetworkSetModule.MSG_UPDATED )<|fim_middle|>test_update_scopes_when_different<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): _res = {} for ic in ignore_columns: if "." in ic: k, v = ic.split(".") if k not in _res: _res[k] = [v] else: _res[k].append(v) return _res<|fim_middle|>get_nested_columns<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(username='flexget', session=None): user = session.query(User).filter(User.name == username).first() if not user: user = User() user.name = username session.add(user) return user<|fim_middle|>get_user<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): mod = self.make_module(""" #include <Python.h> HPyDef_METH(f, "f", HPyFunc_NOARGS) static HPy f_impl(HPyContext *ctx, HPy self) { return HPyUnicode_FromString(ctx, HPY_ABI); } @EXPORT(f) @INIT """) hpy_abi = mod.f() expected = self.compiler.hpy_abi if expected in ('hybrid+debug', 'hybrid+trace'): expected = 'hybrid' assert hpy_abi == expected<|fim_middle|>test_abi<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(db, client, admin_jwt): invoice = get_invoice(db, UserFactory()) response = client.get( f'/v1/event-invoices/{invoice.identifier}', content_type='application/vnd.api+json', headers=admin_jwt, ) assert response.status_code == 200 response_dict = json.loads(response.data) assert response_dict['data']['attributes']['identifier'] == invoice.identifier<|fim_middle|>test_invoice_identifier_admin<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, provider): """Test passing an entry point string that doesn't exist.""" with pytest.raises(EntryPointError, match=r'got string `.*` but could not load corresponding entry point'): provider.get_version_info('aiida.calculations:core.non_existing')<|fim_middle|>test_entry_point_string_non_existant<|file_separator|>
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_c_ublox_startup<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tabledef): return type(tabledef.name, (object,), { 'tableid': tabledef.tableId, '_id': tabledef.idFieldName })<|fim_middle|>make_class<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, period_length_prior): return CosineKernel(period_length_prior=period_length_prior)<|fim_middle|>create_kernel_with_prior<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(hotkey: _ParseableHotkey) -> None: ...<|fim_middle|>release<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.logical_id = "MQEvent" self.mq_event_source = MQ(self.logical_id) self.mq_event_source.relative_id = "EventId"<|fim_middle|>set_up<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(request): channel = request.GET['channel'] if channel is None: return _internal_server_error({ "error": "Channel missing" }) try: pubnub.unsubscribe().channels(channel).execute() return _ok({ "subscribed_channels": pubnub.get_subscribed_channels() }) except PubNubException as e: return _internal_server_error({ "message": str(e) })<|fim_middle|>remove_channel_handler<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(lst): for i in range(0, len(lst), 2): yield lst[i:i+2]<|fim_middle|>pairs<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, name_id, entities=None, check_not_on_or_after=True): """Get all the identity information that has been received and are still valid about the subject. :param name_id: The subject identifier, a NameID instance :param entities: The identifiers of the entities whoes assertions are interesting. If the list is empty all entities are interesting. :return: A 2-tuple consisting of the identity information (a dictionary of attributes and values) and the list of entities whoes information has timed out. """ if not entities: try: cni = code(name_id) entities = self._db[cni].keys() except KeyError: return {}, [] res = {} oldees = [] for entity_id in entities: try: info = self.get(name_id, entity_id, check_not_on_or_after) except TooOld: oldees.append(entity_id) continue if not info: oldees.append(entity_id) continue for key, vals in info["ava"].items(): try: tmp = set(res[key]).union(set(vals)) res[key] = list(tmp) except KeyError: res[key] = vals return res, oldees<|fim_middle|>get_identity<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): p = generate_probable_safe_prime(exact_bits=161) self.assertEqual(p.size_in_bits(), 161)<|fim_middle|>test_generate_safe_prime<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(appointer: Participant): appointer = Participant.objects.select_for_update().get(pk=appointer.pk) assert appointer.credit > 0, '信用分不足,本月无法发起预约!'<|fim_middle|>check_credit<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, inputs): self._validate_inputs(inputs) images = inputs.get("images", None) labels = inputs.get("labels", None) bounding_boxes = inputs.get("bounding_boxes", None) segmentation_masks = inputs.get("segmentation_masks", None) images, lambda_sample, permutation_order = self._mixup(images) if labels is not None: labels = self._update_labels( tf.cast(labels, dtype=self.compute_dtype), lambda_sample, permutation_order, ) inputs["labels"] = labels if bounding_boxes is not None: bounding_boxes = self._update_bounding_boxes( bounding_boxes, permutation_order ) inputs["bounding_boxes"] = bounding_boxes inputs["images"] = images if segmentation_masks is not None: segmentation_masks = self._update_segmentation_masks( segmentation_masks, lambda_sample, permutation_order ) inputs["segmentation_masks"] = segmentation_masks return inputs<|fim_middle|>batch_augment<|file_separator|>
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_incompatible_shapes<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, args): options = [] for k in args: options.append("-"+k) return tuple(options)<|fim_middle|>get<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): """ Retrieves the model number (or part number) of the device Returns: string: Model/part number of device """ default = Common.NULL_VAL if self._is_psu_fan: return default return self._api_common.get_output(self._fan_index, self._config['get_model'], default)<|fim_middle|>get_model<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, METHOD_NAME: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult: """Query index for top k most similar nodes. Args: query_embedding (List[float]): query embedding similarity_top_k (int): top k most similar nodes """ if METHOD_NAME.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify metadata filters via both query and kwargs. " "Use kwargs only for chroma specific items that are " "not supported via the generic query interface." ) where = _to_chroma_filter(METHOD_NAME.filters) else: where = kwargs.pop("where", {}) results = self._collection.METHOD_NAME( query_embeddings=METHOD_NAME.query_embedding, n_results=METHOD_NAME.similarity_top_k, where=where, **kwargs, ) logger.debug(f"> Top {len(results['documents'])} nodes:") nodes = [] similarities = [] ids = [] for node_id, text, metadata, distance in zip( results["ids"][0], results["documents"][0], results["metadatas"][0], results["distances"][0], ): try: node = metadata_dict_to_node(metadata) node.set_content(text) except Exception: # NOTE: deprecated legacy logic for backward compatibility metadata, node_info, relationships = legacy_metadata_dict_to_node( metadata ) node = TextNode( text=text, id_=node_id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships=relationships, ) nodes.append(node) similarity_score = 1.0 - math.exp(-distance) similarities.append(similarity_score) logger.debug( f"> [Node {node_id}] [Similarity score: {similarity_score}] " f"{truncate_text(str(text), 100)}" ) ids.append(node_id) return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)<|fim_middle|>query<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, path, media_id): return self._download_json( self._API_BASE + path + media_id, media_id, headers=self._HEADERS)<|fim_middle|>call_api<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(classes, file=sys.stdout) -> None: for classname in string_to_classes(classes): file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))<|fim_middle|>count_logged_instances<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(tree): tree = tree.copy() for identifier in tree.find_all(exp.Identifier): identifier.args["quoted"] = False return tree<|fim_middle|>remove_quotes_from_identifiers<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(key, data, errors, context): value = data.get(key) if isinstance(value, string_types): return list_of_strings_or_lists(key, data, errors, context)<|fim_middle|>list_of_strings_or_string<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(uidb64, token): """ Checks that the user token is correct. :param uidb: :param token: :return: True on success, False in all other situations """ if uidb64 is not None and token is not None: try: uid = int(urlsafe_base64_decode(uidb64)) except ValueError as e: logger.info("Could not decode UID: {0}".format(e)) return False try: user = User.objects.get(pk=uid) if user is not None and default_token_generator.METHOD_NAME(user, token): return True except User.DoesNotExist: return False return False<|fim_middle|>check_token<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(src, flag, compiler): return f"{src}-{compiler}-O{flag}"<|fim_middle|>flag_and_compiler_to_filename<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, key: str, type_: type, typestr: str) -> None: assert key in self._raw if not isinstance(self._raw[key], type_): raise TypeError( f"'{self._name}' member '{key}' must be a {typestr}" )<|fim_middle|>check_value<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(pipeline_response): deserialized = self._deserialize("OperationList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, AsyncList(list_of_elem)<|fim_middle|>extract_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(status, output): """ ratio <5% is acceptance.""" if status != 0: test.fail("Ping failed, staus:%s, output:%s" % (status, output)) # if status != 0 the ping process seams hit issue. ratio = utils_test.get_loss_ratio(output) if ratio == -1: test.fail("The ratio is %s, and status is %s, " "output is %s" % (ratio, status, output)) elif ratio > int(params["failed_ratio"]): test.fail("The loss raito is %s, test failed" % ratio) test.log.info("ping pass with loss raito:%s, that less than %s", ratio, params["failed_ratio"])<|fim_middle|>check_ping<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(path: str) -> str: """Expand both environment variables and user home in the given path.""" path = os.path.expandvars(path) path = os.path.expanduser(path) return path<|fim_middle|>expand_path<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( lint_module_fixture: Callable[[str], tuple[Path, Path, LintModuleOutputUpdate]] ) -> None: """The file is updated following a successful tests with wrong output.""" filename, expected_output_file, lmou = lint_module_fixture("fine_name") expected_output_file.write_text("", encoding="utf8") filename.write_text("", encoding="utf8") lmou.runTest() assert not expected_output_file.exists()<|fim_middle|>test_lint_module_output_update_remove_useless<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, waveform: str): rawdata, channel, wavename = self.wavemeta[waveform] ntwk = re.sub('([^.]+)(.*)','\\1', wavename) sttn = re.sub('([^.]+\.)([^.]+)(.*)','\\2', wavename) outwave = rawdata.get_waveforms(network=ntwk, station=sttn, location=channel.location_code, \ channel=channel.code, starttime=channel.start_date, endtime=channel.end_date, tag="raw_recording") return [str(w) for w in outwave]<|fim_middle|>getsegments<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(pixel_layer): assert pixel_layer.is_visible()<|fim_middle|>test_layer_is_visible<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): G = nx.barbell_graph(8, 4) nx.add_path(G, [7, 20, 21, 22]) nx.add_cycle(G, [22, 23, 24, 25]) pts = set(nx.articulation_points(G)) assert pts == {7, 8, 9, 10, 11, 12, 20, 21, 22} answer = [ {12, 13, 14, 15, 16, 17, 18, 19}, {0, 1, 2, 3, 4, 5, 6, 7}, {22, 23, 24, 25}, {11, 12}, {10, 11}, {9, 10}, {8, 9}, {7, 8}, {21, 22}, {20, 21}, {7, 20}, ] assert_components_equal(list(nx.biconnected_components(G)), answer) G.add_edge(2, 17) pts = set(nx.articulation_points(G)) assert pts == {7, 20, 21, 22}<|fim_middle|>test_barbell<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(inputs, edge_type=_DataNode): """ Group inputs into three groups: * edges of type ``edge_type`` - those are actual inputs like DataNode, * integer constants, * real constants. Generate `categories_idxs` mapping, that is a list that for an input `i` contains a tuple: (category of ith input, index of ith input in appropriate category) Parameters ---------- inputs : All arguments that were passed to the arithmetic operators edge_type : What should be considered an input, _DataNode or a TensorList (used for debug and eager modes), by default _DataNode Returns ------- (`categories_idxs`, input edge category, integer constants category, real constants category) Mapping of inputs into the categories and the three possible categories. """ categories_idxs = [] edges = [] integers = [] reals = [] for input in inputs: if not isinstance(input, (edge_type, _ScalarConstant, int, float)): input = _Constant(input) if isinstance(input, edge_type): categories_idxs.append(("edge", len(edges))) edges.append(input) elif _is_integer_like(input): categories_idxs.append(("integer", len(integers))) integers.append(input) elif _is_real_like(input): categories_idxs.append(("real", len(reals))) reals.append(input) else: raise TypeError(f"Argument to arithmetic operation not supported." f"Got {str(type(input))}, expected a return value from other" f"DALI Operator or a constant value of type 'bool', 'int', " f"'float' or 'nvidia.dali.types.Constant'.") if len(integers) == 0: integers = None if len(reals) == 0: reals = None return (categories_idxs, edges, integers, reals)<|fim_middle|>group_inputs<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(msg): """ Convert a PointStamped message to a stamped PyKDL Vector. :param msg: The PointStamped message to convert. :type msg: geometry_msgs.msg.PointStamped :return: The timestamped converted PyKDL vector. :rtype: PyKDL.Vector """ vector = PyKDL.Vector(msg.point.x, msg.point.y, msg.point.z) return tf2_ros.Stamped(vector, msg.header.stamp, msg.header.frame_id)<|fim_middle|>from_msg_vector<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): """ Test valid course with and without user specified. Test skipped if git global config override environment variable GIT_CONFIG is set. """ git_export_utils.export_to_git( self.course.id, f'file://{self.bare_repo_dir}', 'enigma' ) expect_string = '{}|{}\n'.format( git_export_utils.GIT_EXPORT_DEFAULT_IDENT['name'], git_export_utils.GIT_EXPORT_DEFAULT_IDENT['email'] ) cwd = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR / 'test_bare') git_log = subprocess.check_output(['git', 'log', '-1', '--format=%an|%ae'], cwd=cwd).decode('utf-8') self.assertEqual(expect_string, git_log) # Make changes to course so there is something to commit self.populate_course() git_export_utils.export_to_git( self.course.id, f'file://{self.bare_repo_dir}', self.user.username ) expect_string = '{}|{}\n'.format( self.user.username, self.user.email, ) git_log = subprocess.check_output( ['git', 'log', '-1', '--format=%an|%ae'], cwd=cwd).decode('utf-8') self.assertEqual(expect_string, git_log)<|fim_middle|>test_git_ident<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.query_device('fw') self._position += 1<|fim_middle|>move_forward<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, p): return 0.0<|fim_middle|>stress<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): return self.data_file.read()<|fim_middle|>data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(instance): """Last modifiying user """ last_snapshot = get_last_snapshot(instance) return get_fullname(last_snapshot)<|fim_middle|>fullname<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, vlan_oid): """ Remove vlan and its members. Args: vlan_ports: vlan member ports index Returns: dict: vlan_list[vlan_id][vlan_members] """ print("Remove vlan {} and its members ...".format(vlan_oid)) sai_thrift_remove_vlan(self.client, vlan_oid) self.test_obj.assertEqual(self.test_obj.status(), SAI_STATUS_SUCCESS)<|fim_middle|>remove_vlan<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): sys.unraisablehook, prev = sys.__unraisablehook__, sys.unraisablehook try: yield finally: sys.unraisablehook = prev<|fim_middle|>restore_unraisablehook<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cls): return sa.Column( sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False, index=True, doc="ID of the Annotation author's User instance.", )<|fim_middle|>author_id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(referenceable, methodName, *args, **kw): """ A utility method that will call a remote method which expects a PageCollector as the first argument. """ d = defer.Deferred() referenceable.callRemote(methodName, CallbackPageCollector(d.callback), *args, **kw) return d<|fim_middle|>get_all_pages<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(cls): cls.repeater.delete() cls.connx.delete() cls.mobile_user.delete(None, None) cls.teardown_subscriptions() cls.domain_obj.delete() clear_plan_version_cache() super().METHOD_NAME()<|fim_middle|>tear_down_class<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, boxlist1, boxlist2, scope=None): """Computes matrix of pairwise similarity between BoxLists. This op (to be overriden) computes a measure of pairwise similarity between the boxes in the given BoxLists. Higher values indicate more similarity. Note that this method simply measures similarity and does not explicitly perform a matching. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. scope: Op scope name. Defaults to 'Compare' if None. Returns: a (float32) tensor of shape [N, M] with pairwise similarity score. """ if not scope: scope = "Compare" with tf.name_scope(scope): return self._compare(boxlist1, boxlist2)<|fim_middle|>compare<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, linesource): for line in linesource: rec = int(line.strip())/1000000000.0 self.data.append(rec)<|fim_middle|>rlines<|file_separator|>