Schema (column, type, min, max):

column           type            min      max
ast_errors       stringlengths   0        3.2k
d_id             int64           44       121k
id               int64           70       338k
n_whitespaces    int64           3        14k
path             stringlengths   8        134
n_words          int64           4        4.82k
n_identifiers    int64           1        131
random_cut       stringlengths   16       15.8k
commit_message   stringlengths   2        15.3k
fun_name         stringlengths   1        84
commit_id        stringlengths   40       40
repo             stringlengths   3        28
file_name        stringlengths   5        79
ast_levels       int64           6        31
nloc             int64           1        548
url              stringlengths   31       59
complexity       int64           1        66
token_counts     int64           6        2.13k
n_ast_errors     int64           0        28
vocab_size       int64           4        1.11k
n_ast_nodes      int64           15       19.2k
language         stringclasses   1 value
documentation    dict
code             stringlengths   101      62.2k
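Taken together, these fields form the per-record schema for the code-commit records listed below. As a minimal inspection sketch — assuming the records are exported as a JSON-lines file named functions.jsonl, a hypothetical name not given here — the columns can be checked and queried with pandas:

import pandas as pd

# "functions.jsonl" is a hypothetical export of the records below,
# one JSON object per line using the columns listed above.
df = pd.read_json("functions.jsonl", lines=True)

# Confirm the expected columns are present.
expected = {"d_id", "id", "path", "fun_name", "commit_id", "repo",
            "nloc", "complexity", "token_counts", "vocab_size", "code"}
print(expected - set(df.columns))  # empty set if the schema matches

# Example query: the single most complex function per repository.
top = df.sort_values("complexity", ascending=False).groupby("repo").head(1)
print(top[["repo", "fun_name", "complexity", "nloc"]])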

d_id: 82,113 | id: 277,639 | n_whitespaces: 441 | path: keras/layers/reshaping/reshape.py | n_words: 105 | n_identifiers: 18
random_cut:
def _fix_unknown_dimension(self, input_shape, output_shape): output_shape = list(output_shape) msg = ( "total size of new array must be unchanged, " "input_shape = {}, output_shape = {}".format( input_shape, output_shape ) ) known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError( f"There must be at most one unknown dimension in " f"output_shape. Received: output_shape={output_shape}." ) else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_
reduce layers line-too-long
fun_name: _fix_unknown_dimension | commit_id: 8401e08334d4b1f102a6ee9479738bacfee0600c | repo: keras | file_name: reshape.py
ast_levels: 17 | nloc: 28 | url: https://github.com/keras-team/keras.git
complexity: 8 | token_counts: 128 | n_ast_errors: 0 | vocab_size: 65 | n_ast_nodes: 212 | language: Python
{ "docstring": "Find and replace a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n Args:\n input_shape: Shape of array being reshaped\n output_shape: Desired shape of the array with at most a single -1\n which indicates a dimension that should be derived from the input\n shape.\n\n Returns:\n The new output shape with a -1 replaced with its computed value.\n\n Raises:\n ValueError: If the total array size of the output_shape is\n different than the input_shape, or more than one unknown dimension\n is specified.\n ", "language": "en", "n_whitespaces": 209, "n_words": 91, "vocab_size": 65 }
def _fix_unknown_dimension(self, input_shape, output_shape): output_shape = list(output_shape) msg = ( "total size of new array must be unchanged, " "input_shape = {}, output_shape = {}".format( input_shape, output_shape ) ) known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError( f"There must be at most one unknown dimension in " f"output_shape. Received: output_shape={output_shape}." ) else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape
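The record above is Keras' port of NumPy's unknown-dimension fix-up. A standalone sketch of the same arithmetic follows; the helper name infer_unknown_dim is invented for illustration and is not the Keras method:

import numpy as np

def infer_unknown_dim(input_shape, output_shape):
    # Sketch of the -1 inference described above; full error handling omitted.
    output_shape = list(output_shape)
    unknown = [i for i, d in enumerate(output_shape) if d < 0]
    assert len(unknown) <= 1, "at most one -1 dimension"
    known = int(np.prod([d for d in output_shape if d >= 0]))
    total = int(np.prod(input_shape))
    if unknown:
        output_shape[unknown[0]] = total // known  # the -1 absorbs the remaining factor
    return output_shape

print(infer_unknown_dim((3, 4), (2, -1)))  # [2, 6], matching np.reshape semantics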

d_id: 38,437 | id: 159,898 | n_whitespaces: 52 | path: numpy/lib/tests/test_loadtxt.py | n_words: 23 | n_identifiers: 15
random_cut:
def test_converter_with_unicode_dtype(): txt = StringIO('abc,def\nrst,xyz') conv = bytes.upper res = np.loadtxt( txt, dtype=np.dtype("U3"), converters=conv, delimiter=",") expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) assert_equal(res, expec
TST,STY: Add small additional tests for converters/usecols Also fix style a bit to silence linter (hopefully), removes some black style, but I am not too opinionated about that :)
fun_name: test_converter_with_unicode_dtype | commit_id: 1e6b72b42292e62c1c86e4f77e30324e43aaa218 | repo: numpy | file_name: test_loadtxt.py
ast_levels: 12 | nloc: 7 | url: https://github.com/numpy/numpy.git
complexity: 1 | token_counts: 67 | n_ast_errors: 0 | vocab_size: 20 | n_ast_nodes: 118 | language: Python
{ "docstring": "\n With the default 'bytes' encoding, tokens are encoded prior to being\n passed to the converter. This means that the output of the converter may\n be bytes instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n prior to parsing by `read_rows`.\n ", "language": "en", "n_whitespaces": 69, "n_words": 50, "vocab_size": 37 }
def test_converter_with_unicode_dtype(): txt = StringIO('abc,def\nrst,xyz') conv = bytes.upper res = np.loadtxt( txt, dtype=np.dtype("U3"), converters=conv, delimiter=",") expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) assert_equal(res, expected)

d_id: 10,767 | id: 53,272 | n_whitespaces: 23 | path: src/prefect/orion/database/alembic_commands.py | n_words: 11 | n_identifiers: 6
random_cut:
def alembic_stamp(revision): # lazy import for performance import alembic.command alemb
code review revisions pt3
fun_name: alembic_stamp | commit_id: 36e7e0838aeaffc9492b330297e4905f3ab4b11f | repo: prefect | file_name: alembic_commands.py
ast_levels: 9 | nloc: 3 | url: https://github.com/PrefectHQ/prefect.git
complexity: 1 | token_counts: 24 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 42 | language: Python
{ "docstring": "\n Stamp the revision table with the given revision; don’t run any migrations\n\n Args:\n revision: The revision passed to `alembic stamp`.\n ", "language": "en", "n_whitespaces": 37, "n_words": 20, "vocab_size": 18 }
def alembic_stamp(revision): # lazy import for performance import alembic.command alembic.command.stamp(alembic_config(), revision=revision)

d_id: 27,261 | id: 122,886 | n_whitespaces: 182 | path: jax/_src/pjit.py | n_words: 98 | n_identifiers: 17
random_cut:
def unflatten_superdims(assignment): def check(cond): if cond: return raise NotImplementedError("Failed to convert OpSharding into a ShardingSpec. " "Please open a bug report!") flat_assignment = np.asarray(assignment, dtype=np.int64) check(flat_assignment[0] == 0) dims = [] while flat_assignment.size > 1: stride = flat_assignment[1] for i in range(len(flat_assignment)): if flat_assignment[i] != i * stride: break else: # After this loop i should point to an "element after the sequence", so # we have t
Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs PiperOrigin-RevId: 495944279
fun_name: unflatten_superdims | commit_id: 4b587fa1f0049db5366fd04812ab940d80a71a22 | repo: jax | file_name: pjit.py
ast_levels: 11 | nloc: 16 | url: https://github.com/google/jax.git
complexity: 4 | token_counts: 101 | n_ast_errors: 0 | vocab_size: 74 | n_ast_nodes: 192 | language: Python
{ "docstring": "Unflatten a list of dimension sizes and their strides that generates assignment.\n\n If this function succeeds for a given ``assignment``, then the following property\n should be satisfied::\n\n dims_with_strides = unflatten_superdims(assignment)\n base_array = np.arange(map(fst, sorted(dims_with_strides, key=snd, reverse=True)))\n assignment == base_array.transpose(argsort(dims_with_strides, key=snd, reverse=True)).flatten()\n\n That is, the returned dimensions list all sizes of the base array (with strides\n indicating their initial order). The order of dimensions in the list corresponds\n to the permutation that applied to the base array generates the assignment.\n ", "language": "en", "n_whitespaces": 94, "n_words": 79, "vocab_size": 56 }
def unflatten_superdims(assignment): def check(cond): if cond: return raise NotImplementedError("Failed to convert OpSharding into a ShardingSpec. " "Please open a bug report!") flat_assignment = np.asarray(assignment, dtype=np.int64) check(flat_assignment[0] == 0) dims = [] while flat_assignment.size > 1: stride = flat_assignment[1] for i in range(len(flat_assignment)): if flat_assignment[i] != i * stride: break else: # After this loop i should point to an "element after the sequence", so # we have to increment it if the whole array is a strided sequence. i += 1 size = i dims.append((size, stride)) assert size > 1 # Ensure progress flat_assignment = flat_assignment[::size] return dims

d_id: 18,080 | id: 86,128 | n_whitespaces: 262 | path: tests/sentry/event_manager/test_event_manager.py | n_words: 50 | n_identifiers: 21
random_cut:
def test_perf_issue_no_associate_error_event(self): self.project.update_option("sentry:performance_issue_creation_rate", 1.0) with mock.patch("sentry_sdk.tracing.Span.containing_transaction"), self.feature( { "projects:performance-suspect-spans-ingestion": True, "organizations:performance-issues-ingest": True, } ): manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 1 # sneakily make the group type wrong group = event.groups[0] group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value group.save() manager = EventManager(make_event()) manager.normalize() event = manager.
chore(perf issues): Check group type before adding event (#39171) Ensure the group type matches the kind of event before association, e.g. don't put an error event on a performance issue and vice versa.
fun_name: test_perf_issue_no_associate_error_event | commit_id: bbd7137b3d379744265f46564d5878490046dd3b | repo: sentry | file_name: test_event_manager.py
ast_levels: 12 | nloc: 19 | url: https://github.com/getsentry/sentry.git
complexity: 1 | token_counts: 132 | n_ast_errors: 0 | vocab_size: 35 | n_ast_nodes: 222 | language: Python
{ "docstring": "Test that you can't associate an error event with a performance issue", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_perf_issue_no_associate_error_event(self): self.project.update_option("sentry:performance_issue_creation_rate", 1.0) with mock.patch("sentry_sdk.tracing.Span.containing_transaction"), self.feature( { "projects:performance-suspect-spans-ingestion": True, "organizations:performance-issues-ingest": True, } ): manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 1 # sneakily make the group type wrong group = event.groups[0] group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value group.save() manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 0

d_id: 29,495 | id: 131,286 | n_whitespaces: 476 | path: python/ray/tests/test_autoscaler.py | n_words: 134 | n_identifiers: 45
random_cut:
def testNodeTerminatedDuringUpdate(self): cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG) cluster_config["available_node_types"]["ray.worker.default"]["min_workers"] = 2 cluster_config["worker_start_ray_commands"] = ["ray_start_cmd"] # Don't need the extra node type or a docker config. cluster_config["head_node_type"] = ["ray.worker.default"] del cluster_config["available_node_types"]["ray.head.default"] del cluster_config["docker"] config_path = self.write_config(cluster_config) self.provider = MockProvider() runner = MockProcessRunner() lm = LoadMetrics() mock_metrics = Mock(spec=AutoscalerPrometheusMetrics()) autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, prom_metrics=mock_metrics, ) # Scale up to two up-to-date workers autoscaler.update() self.waitForNodes(2) self.provider.finish_starting_nodes() autoscaler.update() self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE}) # Mark both nodes as unhealthy for _ in range(5): if autoscaler.updaters: time.sleep(0.05) autoscaler.update() lm.last_heartbeat_time_by_ip["172.0.0.0"] = 0 lm.last_hear
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
fun_name: testNodeTerminatedDuringUpdate | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | file_name: test_autoscaler.py
ast_levels: 11 | nloc: 89 | url: https://github.com/ray-project/ray.git
complexity: 3 | token_counts: 545 | n_ast_errors: 0 | vocab_size: 99 | n_ast_nodes: 392 | language: Python
{ "docstring": "\n Tests autoscaler handling a node getting terminated during an update\n triggered by the node missing a heartbeat.\n\n Extension of testRecoverUnhealthyWorkers.\n\n In this test, two nodes miss a heartbeat.\n One of them (node 0) is terminated during its recovery update.\n The other (node 1) just fails its update.\n\n When processing completed updates, the autoscaler terminates node 1\n but does not try to terminate node 0 again.\n ", "language": "en", "n_whitespaces": 129, "n_words": 65, "vocab_size": 51 }
def testNodeTerminatedDuringUpdate(self): cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG) cluster_config["available_node_types"]["ray.worker.default"]["min_workers"] = 2 cluster_config["worker_start_ray_commands"] = ["ray_start_cmd"] # Don't need the extra node type or a docker config. cluster_config["head_node_type"] = ["ray.worker.default"] del cluster_config["available_node_types"]["ray.head.default"] del cluster_config["docker"] config_path = self.write_config(cluster_config) self.provider = MockProvider() runner = MockProcessRunner() lm = LoadMetrics() mock_metrics = Mock(spec=AutoscalerPrometheusMetrics()) autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, prom_metrics=mock_metrics, ) # Scale up to two up-to-date workers autoscaler.update() self.waitForNodes(2) self.provider.finish_starting_nodes() autoscaler.update() self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE}) # Mark both nodes as unhealthy for _ in range(5): if autoscaler.updaters: time.sleep(0.05) autoscaler.update() lm.last_heartbeat_time_by_ip["172.0.0.0"] = 0 lm.last_heartbeat_time_by_ip["172.0.0.1"] = 0 # Expect both updates to be successful, no nodes in updating state assert mock_metrics.successful_updates.inc.call_count == 2 assert mock_metrics.worker_update_time.observe.call_count == 2 mock_metrics.updating_nodes.set.assert_called_with(0) assert not autoscaler.updaters # Set up process runner to terminate worker 0 during missed heartbeat # recovery and also cause the updater to fail.

d_id: 84,626 | id: 284,059 | n_whitespaces: 529 | path: openbb_terminal/cryptocurrency/overview/overview_controller.py | n_words: 66 | n_identifiers: 35
random_cut:
def call_cr(self, other_args): parser = argparse.ArgumentParser( prog="cr", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-t", "--type", dest="type", type=str, help="Select interest rate type", default="supply", choices=["borrow", "supply"], ) parser.add_argument( "-c", "--cryptocurrrencies", dest="cryptos", type=loanscan_model.check_valid_coin, help=f, default="BTC,ETH,USDT,USDC", ) parser.add_argument( "-p", "--platforms", dest="platforms", type=loanscan_model.check_valid_platform, help=f, default="BlockFi,Ledn,SwissBorg,Youhodler", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-t") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ON
Replaces coingecko deprecated commands (#1650) * removes cgproducts and cgplatforms and replaces with cr * add ignore word * added .openbb script * reverted crypto change * doc * failing tests * trying chart and fixed minh issues * Create barh * Fix ticker labels * fix test * loanscan mock * defi test * defi test * Fix defi test Co-authored-by: Minh Hoang <nminh.hoang1023@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>
fun_name: call_cr | commit_id: 670402396e7e25e95bd6497affb143565d9bd4ea | repo: OpenBBTerminal | file_name: overview_controller.py
ast_levels: 13 | nloc: 49 | url: https://github.com/OpenBB-finance/OpenBBTerminal.git
complexity: 4 | token_counts: 196 | n_ast_errors: 0 | vocab_size: 56 | n_ast_nodes: 346 | language: Python
{ "docstring": "Process cr commandDisplays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms.\n You can select rate type with --type {borrow,supply}\n You can display only N number of platforms with --limit parameter.Cryptocurrencies to search interest rates for separated by comma.\n Default: BTC,ETH,USDT,USDC. Options: {\",\".join(loanscan_model.CRYPTOS)}Platforms to search interest rates in separated by comma.\n Default: BlockFi,Ledn,SwissBorg,Youhodler. Options: {\",\".join(loanscan_model.PLATFORMS)}", "language": "en", "n_whitespaces": 106, "n_words": 55, "vocab_size": 39 }
def call_cr(self, other_args): parser = argparse.ArgumentParser( prog="cr", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-t", "--type", dest="type", type=str, help="Select interest rate type", default="supply", choices=["borrow", "supply"], ) parser.add_argument( "-c", "--cryptocurrrencies", dest="cryptos", type=loanscan_model.check_valid_coin, help=f, default="BTC,ETH,USDT,USDC", ) parser.add_argument( "-p", "--platforms", dest="platforms", type=loanscan_model.check_valid_platform, help=f, default="BlockFi,Ledn,SwissBorg,Youhodler", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-t") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10 ) if ns_parser: loanscan_view.display_crypto_rates( rate_type=ns_parser.type, cryptos=ns_parser.cryptos, platforms=ns_parser.platforms, limit=ns_parser.limit, export=ns_parser.export, )

d_id: 24,479 | id: 111,750 | n_whitespaces: 74 | path: nni/retiarii/oneshot/pytorch/base_lightning.py | n_words: 20 | n_identifiers: 6
random_cut:
def export(self): result = {} for na
Lightning implementation for retiarii oneshot nas (#4479)
fun_name: export | commit_id: 8b2eb425274cdb4537fbce4a315aec12a378d6db | repo: nni | file_name: base_lightning.py
ast_levels: 12 | nloc: 6 | url: https://github.com/microsoft/nni.git
complexity: 3 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 61 | language: Python
{ "docstring": "\n Export the NAS result, ideally the best choice of each nas_modules.\n You may implement an ``export`` method for your customized nas_module.\n\n Returns\n --------\n result : Dict[str, int]\n Keys are names of nas_modules, and values are the choice indices of them.\n ", "language": "en", "n_whitespaces": 94, "n_words": 40, "vocab_size": 34 }
def export(self): result = {} for name, module in self.nas_modules: if name not in result: result[name] = module.export() return result

d_id: 43,614 | id: 181,840 | n_whitespaces: 162 | path: tpot/base.py | n_words: 70 | n_identifiers: 13
random_cut:
def clean_pipeline_string(self, individual): dirty_string = str(individual) # There are many parameter prefixes in the pipeline strings, used solely for # making the terminal name unique, eg. LinearSVC__. parameter_prefixes = [ (m.start(), m.end()) for m in re.finditer(", [\w]+__", dirty_string) ] # We handle them in reverse so we do not mess up indices pretty = dirty_string for (start, end) in reversed(parameter_prefixes
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
fun_name: clean_pipeline_string | commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541 | repo: tpot | file_name: base.py
ast_levels: 12 | nloc: 9 | url: https://github.com/EpistasisLab/tpot.git
complexity: 3 | token_counts: 74 | n_ast_errors: 0 | vocab_size: 55 | n_ast_nodes: 120 | language: Python
{ "docstring": "Provide a string of the individual without the parameter prefixes.\n\n Parameters\n ----------\n individual: individual\n Individual which should be represented by a pretty string\n\n Returns\n -------\n A string like str(individual), but with parameter prefixes removed.\n\n ", "language": "en", "n_whitespaces": 94, "n_words": 34, "vocab_size": 28 }
def clean_pipeline_string(self, individual): dirty_string = str(individual) # There are many parameter prefixes in the pipeline strings, used solely for # making the terminal name unique, eg. LinearSVC__. parameter_prefixes = [ (m.start(), m.end()) for m in re.finditer(", [\w]+__", dirty_string) ] # We handle them in reverse so we do not mess up indices pretty = dirty_string for (start, end) in reversed(parameter_prefixes): pretty = pretty[: start + 2] + pretty[end:] return pretty
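The cleanup in the record above hinges on one regular expression; a small demonstration of the same substitution on a made-up pipeline string (the sample string is illustrative, not actual TPOT output):

import re

# Reproduces the ", <TerminalName>__" prefix removal done by clean_pipeline_string.
dirty = ("KNeighborsClassifier(input_matrix, "
         "KNeighborsClassifier__n_neighbors=10, KNeighborsClassifier__weights=uniform)")
spans = [(m.start(), m.end()) for m in re.finditer(r", [\w]+__", dirty)]
pretty = dirty
for start, end in reversed(spans):
    pretty = pretty[: start + 2] + pretty[end:]  # keep ", ", drop the prefix
print(pretty)  # KNeighborsClassifier(input_matrix, n_neighbors=10, weights=uniform)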

d_id: 49,151 | id: 199,118 | n_whitespaces: 448 | path: sympy/polys/matrices/linsolve.py | n_words: 129 | n_identifiers: 33
random_cut:
def _lin_eq2dict(a, symset): if a in symset: return S.Zero, {a: S.One} elif a.is_Add: terms_list = defaultdict(list) coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) coeff_list.append(ci) for mij, cij in ti.items(): terms_list[mij].append(cij) coeff = Add(*coeff_list) terms = {sym: Add(*coeffs) for sym, coeffs in t
Revert "solve changes"
fun_name: _lin_eq2dict | commit_id: 5534ff6796b8d515192576f771af8488a838775c | repo: sympy | file_name: linsolve.py
ast_levels: 16 | nloc: 38 | url: https://github.com/sympy/sympy.git
complexity: 14 | token_counts: 252 | n_ast_errors: 0 | vocab_size: 62 | n_ast_nodes: 402 | language: Python
{ "docstring": "Efficiently convert a linear equation to a dict of coefficients", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def _lin_eq2dict(a, symset): if a in symset: return S.Zero, {a: S.One} elif a.is_Add: terms_list = defaultdict(list) coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) coeff_list.append(ci) for mij, cij in ti.items(): terms_list[mij].append(cij) coeff = Add(*coeff_list) terms = {sym: Add(*coeffs) for sym, coeffs in terms_list.items()} return coeff, terms elif a.is_Mul: terms = terms_coeff = None coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) if not ti: coeff_list.append(ci) elif terms is None: terms = ti terms_coeff = ci else: raise PolyNonlinearError coeff = Mul(*coeff_list) if terms is None: return coeff, {} else: terms = {sym: coeff * c for sym, c in terms.items()} return coeff * terms_coeff, terms elif a.is_Equality: return _lin_eq2dict(a.lhs - a.rhs, symset) elif not a.has_free(*symset): return a, {} else: raise PolyNonlinearError
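The docstring above promises an efficient conversion of a linear equation into a constant plus a coefficient dict; a hedged usage sketch follows, assuming the private helper is importable from the module path listed in this record (being private, it may move between SymPy versions):

from sympy import Eq, symbols
from sympy.polys.matrices.linsolve import _lin_eq2dict  # private helper from the record above

x, y = symbols("x y")
const, coeffs = _lin_eq2dict(Eq(x + 2*y + 3, 7), {x, y})
print(const, coeffs)  # expected per the code above: -4 {x: 1, y: 2}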

d_id: 29,231 | id: 130,325 | n_whitespaces: 62 | path: python/ray/autoscaler/_private/_azure/node_provider.py | n_words: 12 | n_identifiers: 6
random_cut:
def internal_ip(self, node_id): ip = ( self._get_cached_node(node_id=node_id)["internal_ip"] or self._get_node(node_id=node_id)["internal_ip"] ) return ip
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
fun_name: internal_ip | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | file_name: node_provider.py
ast_levels: 12 | nloc: 6 | url: https://github.com/ray-project/ray.git
complexity: 2 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 63 | language: Python
{ "docstring": "Returns the internal ip (Ray ip) of the given node.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def internal_ip(self, node_id): ip = ( self._get_cached_node(node_id=node_id)["internal_ip"] or self._get_node(node_id=node_id)["internal_ip"] ) return ip

d_id: 77,374 | id: 262,793 | n_whitespaces: 261 | path: PyInstaller/archive/writers.py | n_words: 49 | n_identifiers: 30
random_cut:
def _write_file(self, source, dest, type, compress=False): start = self.lib.tell() length = os.stat(source).st_size with open(source, 'rb') as f:
Fix file handle leaks. This is mostly a refactoring of CArchiveWriter().add() which has gotten somewhat tangled trying to apply various file modifications whilst simultaneously juggling file streaming and optional zip compression. Since the modifications are all done on Python source/byte code files which are small, split the archive packing into two helpers, one which streams big files and a more malleable one which writes small files from memory without streaming, both of which handle the bookkeeping job of updating the TOC. This fixes a file handle leak. Additionally take advantage of Python's builtin, but suppressed by default, file handle leakage detection; any such leakages under a pytest run will now fail the test. This requires a few other leakage fixes throughout the test suite to make it pass.
fun_name: _write_file | commit_id: 9541ad638f73c1442c35ea870ad9c6e4f8cd9b62 | repo: pyinstaller | file_name: writers.py
ast_levels: 17 | nloc: 16 | url: https://github.com/pyinstaller/pyinstaller.git
complexity: 4 | token_counts: 152 | n_ast_errors: 0 | vocab_size: 42 | n_ast_nodes: 243 | language: Python
{ "docstring": "\n Stream copy a large file into the archive and update the table of contents.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
def _write_file(self, source, dest, type, compress=False): start = self.lib.tell() length = os.stat(source).st_size with open(source, 'rb') as f: if compress: buffer = bytearray(16 * 1024) compressor = zlib.compressobj(self.LEVEL) while 1: read = f.readinto(buffer) if not read: break self.lib.write(compressor.compress(buffer[:read])) self.lib.write(compressor.flush()) else: shutil.copyfileobj(f, self.lib) self.toc.add(start, self.lib.tell() - start, length, int(compress), type, dest)

d_id: 41,755 | id: 176,189 | n_whitespaces: 517 | path: networkx/linalg/graphmatrix.py | n_words: 164 | n_identifiers: 38
random_cut:
def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None): import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) if edgelist is None: if G.is_multigraph(): edgelist = list(G.edges(keys=True)) else: edgelist = list(G.edges()) A = sp.sparse.lil_array((len(nodelist), len(edgelist))) node_index = {node: i for i, node in enumerate(nodelist)} for ei, e in enumerate(edgelist): (u, v) = e[:2] if u == v: continue # self loops give zero column try: ui = node_index[u] vi = node_index[v] except KeyError as err: raise nx.NetworkXError( f"node {u} or {v} in edgelist but not in nodelist" ) from err if weight is None: wt = 1 else: if G.is_multigraph(): ekey = e[2] wt = G[u][v][ekey].get(weight, 1) else: wt = G[u][v].get(weight, 1) if oriented: A[ui, ei] = -wt A[vi, ei] = wt else: A[ui, ei] = wt A[vi, ei] = wt import warnings warnings.warn( "incidence
Use scipy.sparse array datastructure (#5139) * Step 1: use sparse arrays in nx.to_scipy_sparse_matrix. Seems like a reasonable place to start. nx.to_scipy_sparse_matrix is one of the primary interfaces to scipy.sparse from within NetworkX. * 1: Use np.outer instead of mult col/row vectors Fix two instances in modularitymatrix where a new 2D array was being created via an outer product of two \"vectors\". In the matrix case, this was a row vector \* a column vector. In the array case this can be disambiguated by being explicit with np.outer. * Update _transition_matrix in laplacianmatrix module - A few instances of matrix multiplication operator - Add np.newaxis + transpose to get shape right for broadcasting - Explicitly convert e.g. sp.sparse.spdiags to a csr_array. * Update directed_combinitorial_laplacian w/ sparse array. - Wrap spdiags in csr_array and update matmul operators. * Rm matrix-specific code from lgc and hmn modules - Replace .A call with appropriate array semantics - wrap sparse.diags in csr_array. * Change hits to use sparse array semantics. - Replace * with @ - Remove superfluous calls to flatten. * Update sparse matrix usage in layout module. - Simplify lil.getrowview call - Wrap spdiags in csr_array. * lil_matrix -> lil_array in graphmatrix.py. * WIP: Start working on algebraic connectivity module. * Incorporate auth mat varname feedback. * Revert 1D slice and comment for 1D sparse future. * Add TODOs: rm csr_array wrapper around spdiags etc. * WIP: cleanup algebraicconn: tracemin_fiedler. * Typo. * Finish reviewing algebraicconnectivity. * Convert bethe_hessian matrix to use sparse arrays. * WIP: update laplacian. Update undirected laplacian functions. * WIP: laplacian - add comment about _transition_matrix return types. * Finish laplacianmatrix review. * Update attrmatrix. * Switch to official laplacian function. * Update pagerank to use sparse array. * Switch bipartite matrix to sparse arrays. * Check from_scipy_sparse_matrix works with arrays. Modifies test suite. * Apply changes from review. * Fix failing docstring tests. * Fix missing axis for in-place multiplication. * Use scipy==1.8rc2 * Use matrix multiplication * Fix PyPy CI * [MRG] Create plot_subgraphs.py example (#5165) * Create plot_subgraphs.py https://github.com/networkx/networkx/issues/4220 * Update plot_subgraphs.py black * Update plot_subgraphs.py lint plus font_size * Update plot_subgraphs.py added more plots * Update plot_subgraphs.py removed plots from the unit test and added comments * Update plot_subgraphs.py lint * Update plot_subgraphs.py typos fixed * Update plot_subgraphs.py added nodes to the plot of the edges removed that was commented out for whatever reason * Update plot_subgraphs.py revert the latest commit - the line was commented out for a reason - it's broken * Update plot_subgraphs.py fixed node color issue * Update plot_subgraphs.py format fix * Update plot_subgraphs.py forgot to draw the nodes... now fixed * Fix sphinx warnings about heading length. * Update examples/algorithms/plot_subgraphs.py * Update examples/algorithms/plot_subgraphs.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> * Add traveling salesman problem to example gallery (#4874) Adds an example of the using Christofides to solve the TSP problem to the example galery. 
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037) * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() * Resolved Requested Changes * Revert changes to degree docstrings. * Update comments in example. * Apply wording to edges method in all graph classes. Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Compatibility updates from testing with numpy/scipy/pytest rc's (#5226) * Rm deprecated scipy subpkg access. * Use recwarn fixture in place of deprecated pytest pattern. * Rm unnecessary try/except from tests. * Replace internal `close` fn with `math.isclose`. (#5224) * Replace internal close fn with math.isclose. * Fix lines in docstring examples. * Fix Python 3.10 deprecation warning w/ int div. (#5231) * Touchups and suggestions for subgraph gallery example (#5225) * Simplify construction of G with edges rm'd * Rm unused graph attribute. * Shorten categorization by node type. * Simplify node coloring. * Simplify isomorphism check. * Rm unit test. * Rm redundant plotting of each subgraph. * Use new package name (#5234) * Allowing None edges in weight function of bidirectional Dijkstra (#5232) * added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None. * changed syntax for better readability and code duplicate avoidance Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de> * Add an FAQ about assigning issues. (#5182) * Add FAQ about assigning issues. * Add note about linking issues from new PRs. * Update dev deps (#5243) * Update minor doc issues with tex notation (#5244) * Add FutureWarnings to fns that return sparse matrices - biadjacency_matrix. - bethe_hessian_matrix. - incidence_matrix. - laplacian functions. - modularity_matrix functions. - adjacency_matrix. * Add to_scipy_sparse_array and use it everywhere. Add a new conversion function to preserve array semantics internally while not altering behavior for users. Also adds FutureWarning to to_scipy_sparse_matrix. * Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix. * Handle deprecations in separate PR. * Fix docstring examples. Co-authored-by: Mridul Seth <mail@mriduls.com> Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com> Co-authored-by: Andrew Knyazev <andrew.knyazev@ucdenver.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: eskountis <56514439+eskountis@users.noreply.github.com> Co-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com> Co-authored-by: NikHoh <nikhoh@web.de> Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de> Co-authored-by: Sultan Orazbayev <contact@econpoint.com> Co-authored-by: Mridul Seth <mail@mriduls.com>
fun_name: incidence_matrix | commit_id: 5dfd57af2a141a013ae3753e160180b82bec9469 | repo: networkx | file_name: graphmatrix.py
ast_levels: 18 | nloc: 44 | url: https://github.com/networkx/networkx.git
complexity: 11 | token_counts: 290 | n_ast_errors: 0 | vocab_size: 103 | n_ast_nodes: 463 | language: Python
{ "docstring": "Returns incidence matrix of G.\n\n The incidence matrix assigns each row to a node and each column to an edge.\n For a standard incidence matrix a 1 appears wherever a row's node is\n incident on the column's edge. For an oriented incidence matrix each\n edge is assigned an orientation (arbitrarily for undirected and aligning to\n direction for directed). A -1 appears for the source (tail) of an edge and\n 1 for the destination (head) of the edge. The elements are zero otherwise.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional (default= all nodes in G)\n The rows are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n edgelist : list, optional (default= all edges in G)\n The columns are ordered according to the edges in edgelist.\n If edgelist is None, then the ordering is produced by G.edges().\n\n oriented: bool, optional (default=False)\n If True, matrix elements are +1 or -1 for the head or tail node\n respectively of each edge. If False, +1 occurs at both nodes.\n\n weight : string or None, optional (default=None)\n The edge data key used to provide each value in the matrix.\n If None, then each edge has weight 1. Edge weights, if used,\n should be positive so that the orientation can provide the sign.\n\n Returns\n -------\n A : SciPy sparse matrix\n The incidence matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges in edgelist should be\n (u,v,key) 3-tuples.\n\n \"Networks are the best discrete model for so many problems in\n applied mathematics\" [1]_.\n\n References\n ----------\n .. [1] Gil Strang, Network applications: A = incidence matrix,\n http://videolectures.net/mit18085f07_strang_lec03/\n ", "language": "en", "n_whitespaces": 428, "n_words": 272, "vocab_size": 140 }
def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None): import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) if edgelist is None: if G.is_multigraph(): edgelist = list(G.edges(keys=True)) else: edgelist = list(G.edges()) A = sp.sparse.lil_array((len(nodelist), len(edgelist))) node_index = {node: i for i, node in enumerate(nodelist)} for ei, e in enumerate(edgelist): (u, v) = e[:2] if u == v: continue # self loops give zero column try: ui = node_index[u] vi = node_index[v] except KeyError as err: raise nx.NetworkXError( f"node {u} or {v} in edgelist but not in nodelist" ) from err if weight is None: wt = 1 else: if G.is_multigraph(): ekey = e[2] wt = G[u][v][ekey].get(weight, 1) else: wt = G[u][v].get(weight, 1) if oriented: A[ui, ei] = -wt A[vi, ei] = wt else: A[ui, ei] = wt A[vi, ei] = wt import warnings warnings.warn( "incidence_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.", FutureWarning, stacklevel=2, ) # TODO: Rm sp.sparse.csc_matrix in Networkx 3.0 return A.asformat("csc")
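Since incidence_matrix is public NetworkX API, a brief usage sketch of the oriented convention described in the docstring above (output shown as a comment; on this code path a FutureWarning about the sparse return type is also emitted):

import networkx as nx

G = nx.path_graph(3)  # edges (0, 1) and (1, 2)
A = nx.incidence_matrix(G, oriented=True)  # -1 at the tail, +1 at the head of each edge
print(A.toarray())
# [[-1.  0.]
#  [ 1. -1.]
#  [ 0.  1.]]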

d_id: 82,375 | id: 278,117 | n_whitespaces: 677 | path: keras/feature_column/sequence_feature_column.py | n_words: 98 | n_identifiers: 31
random_cut:
def call(self, features, training=None): if not isinstance(features, dict): raise ValueError( "We expected a dictionary here. Instead we got: ", features ) if training is None: training = backend.learning_phase() transformation_cache = ( tf.__internal__.feature_column.FeatureTransformationCache(features) ) output_tensors = [] sequence_lengths = [] for column in self._feature_columns: with backend.name_scope(column.name): try: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager, training=training, ) except TypeError: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager ) # Flattens the final dimension to produce a 3D Tensor. output_tensors.append( self._process_dense_tensor(column, dense_tensor) ) sequence_lengths.append(sequence_length) # Check and process sequence lengths. kfc._verify
resolve line-too-long in feature_column
fun_name: call | commit_id: 6fafb567af4e4d9f42974d0b6c55b18bc03e17eb | repo: keras | file_name: sequence_feature_column.py
ast_levels: 16 | nloc: 39 | url: https://github.com/keras-team/keras.git
complexity: 5 | token_counts: 167 | n_ast_errors: 0 | vocab_size: 73 | n_ast_nodes: 264 | language: Python
{ "docstring": "Returns sequence input corresponding to the `feature_columns`.\n\n Args:\n features: A dict mapping keys to tensors.\n training: Python boolean or None, indicating whether to the layer is\n being run in training mode. This argument is passed to the call\n method of any `FeatureColumn` that takes a `training` argument. For\n example, if a `FeatureColumn` performed dropout, the column could\n expose a `training` argument to control whether the dropout should\n be applied. If `None`, defaults to\n `tf.keras.backend.learning_phase()`.\n\n\n Returns:\n An `(input_layer, sequence_length)` tuple where:\n - input_layer: A float `Tensor` of shape `[batch_size, T, D]`.\n `T` is the maximum sequence length for this batch, which could\n differ from batch to batch. `D` is the sum of `num_elements` for\n all `feature_columns`.\n - sequence_length: An int `Tensor` of shape `[batch_size]`. The\n sequence length for each example.\n\n Raises:\n ValueError: If features are not a dictionary.\n ", "language": "en", "n_whitespaces": 335, "n_words": 137, "vocab_size": 99 }
def call(self, features, training=None): if not isinstance(features, dict): raise ValueError( "We expected a dictionary here. Instead we got: ", features ) if training is None: training = backend.learning_phase() transformation_cache = ( tf.__internal__.feature_column.FeatureTransformationCache(features) ) output_tensors = [] sequence_lengths = [] for column in self._feature_columns: with backend.name_scope(column.name): try: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager, training=training, ) except TypeError: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager ) # Flattens the final dimension to produce a 3D Tensor. output_tensors.append( self._process_dense_tensor(column, dense_tensor) ) sequence_lengths.append(sequence_length) # Check and process sequence lengths. kfc._verify_static_batch_size_equality( sequence_lengths, self._feature_columns ) sequence_length = _assert_all_equal_and_return(sequence_lengths) return self._verify_and_concat_tensors(output_tensors), sequence_length

d_id: 56,094 | id: 220,706 | n_whitespaces: 43 | path: python3.10.4/Lib/asyncio/sslproto.py | n_words: 15 | n_identifiers: 7
random_cut:
def feed_eof(self): self._incoming.write
add python 3.10.4 for windows
fun_name: feed_eof | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: sslproto.py
ast_levels: 9 | nloc: 4 | url: https://github.com/XX-net/XX-Net.git
complexity: 2 | token_counts: 36 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 62 | language: Python
{ "docstring": "Send a potentially \"ragged\" EOF.\n\n This method will raise an SSL_ERROR_EOF exception if the EOF is\n unexpected.\n ", "language": "en", "n_whitespaces": 38, "n_words": 17, "vocab_size": 17 }
def feed_eof(self): self._incoming.write_eof() ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b'']

d_id: 44,714 | id: 184,614 | n_whitespaces: 59 | path: src/textual/app.py | n_words: 16 | n_identifiers: 6
random_cut:
def screen(self) -> Screen: try: return
lots of docstrings
fun_name: screen | commit_id: b22436933acc0d7440ec300f971a249bd6105a5b | repo: textual | file_name: app.py
ast_levels: 11 | nloc: 13 | url: https://github.com/Textualize/textual.git
complexity: 2 | token_counts: 28 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 49 | language: Python
{ "docstring": "Get the current screen.\n\n Raises:\n ScreenStackError: If there are no screens on the stack.\n\n Returns:\n Screen: The currently active screen.\n ", "language": "en", "n_whitespaces": 63, "n_words": 20, "vocab_size": 18 }
def screen(self) -> Screen: try: return self._screen_stack[-1] except IndexError: raise ScreenStackError("No screens on stack") from None

d_id: 49,872 | id: 201,102 | n_whitespaces: 31 | path: tests/apps/tests.py | n_words: 6 | n_identifiers: 7
random_cut:
def test_empty_dunder_path_no_dunder_file(self): with self.assertRaises(ImproperlyConfigured): AppConfig("label", Stub(__path__=[]
Refs #33476 -- Reformatted code with Black.
fun_name: test_empty_dunder_path_no_dunder_file | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: tests.py
ast_levels: 13 | nloc: 3 | url: https://github.com/django/django.git
complexity: 1 | token_counts: 26 | n_ast_errors: 0 | vocab_size: 6 | n_ast_nodes: 49 | language: Python
{ "docstring": "If the __path__ attr is empty and there is no __file__, raise.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
def test_empty_dunder_path_no_dunder_file(self): with self.assertRaises(ImproperlyConfigured): AppConfig("label", Stub(__path__=[]))

d_id: 37,440 | id: 158,287 | n_whitespaces: 352 | path: d2l/tensorflow.py | n_words: 134 | n_identifiers: 33
random_cut:
def train_epoch_ch3(net, train_iter, loss, updater): # Sum of training loss, sum of training accuracy, no. of examples metric = Accumulator(3) for X, y
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> * 重复语句 (#1188) Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> Co-authored-by: 
zhou201505013 <39976863+zhou201505013@users.noreply.github.com> Co-authored-by: Xinwei Liu <xinzone@outlook.com> Co-authored-by: Anirudh Dagar <anirudhdagar6@gmail.com> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com> Co-authored-by: gyro永不抽风 <1247006353@qq.com> Co-authored-by: CanChengZheng <zcc550169544@163.com> Co-authored-by: linlin <jajupmochi@gmail.com> Co-authored-by: iuk <liukun0104@gmail.com> Co-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com> Co-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com> Co-authored-by: Chiyuan Fu <fuchiyuan2019@outlook.com> Co-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com> Co-authored-by: Haiker Sun <haizhou.uestc2011@gmail.com> Co-authored-by: Ming Liu <akira.liu@njnu.edu.cn> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: silenceZheng66 <13754430639@163.com> Co-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com> Co-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com> Co-authored-by: Krahets <krahets@163.com> Co-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com> Co-authored-by: Jameson <miraclecome@gmail.com> Co-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com> Co-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com> Co-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com> Co-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com> Co-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com> Co-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com> Co-authored-by: VigourJiang <jiangfuqiang154@163.com> Co-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com> Co-authored-by: LYF <27893441+liyufan@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> Co-authored-by: xiaotinghe <xiaotih@amazon.com> Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com> Co-authored-by: HinGwenWoong <peterhuang0323@qq.com> Co-authored-by: Shuai Zhang <cheungdaven@gmail.com>
fun_name: train_epoch_ch3 | commit_id: b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | repo: d2l-zh | file_name: tensorflow.py
ast_levels: 15 | nloc: 19 | url: https://github.com/d2l-ai/d2l-zh.git
complexity: 5 | token_counts: 207 | n_ast_errors: 0 | vocab_size: 98 | n_ast_nodes: 324 | language: Python
{ "docstring": "The training loop defined in Chapter 3.\n\n Defined in :numref:`sec_softmax_scratch`", "language": "en", "n_whitespaces": 12, "n_words": 10, "vocab_size": 9 }
def train_epoch_ch3(net, train_iter, loss, updater): # Sum of training loss, sum of training accuracy, no. of examples metric = Accumulator(3) for X, y in train_iter: # Compute gradients and update parameters with tf.GradientTape() as tape: y_hat = net(X) # Keras implementations for loss takes (labels, predictions) # instead of (predictions, labels) that users might implement # in this book, e.g. `cross_entropy` that we implemented above if isinstance(loss, tf.keras.losses.Loss): l = loss(y, y_hat) else: l = loss(y_hat, y) if isinstance(updater, tf.keras.optimizers.Optimizer): params = net.trainable_variables grads = tape.gradient(l, params) updater.apply_gradients(zip(grads, params)) else: updater(X.shape[0], tape.gradient(l, updater.params)) # Keras loss by default returns the average loss in a batch l_sum = l * float(tf.size(y)) if isinstance( loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l_sum, accuracy(y_hat, y), tf.size(y)) # Return training loss and training accuracy return metric[0] / metric[2], metric[1] / metric[2]

d_id: 76,674 | id: 261,169 | n_whitespaces: 82 | path: sklearn/utils/discovery.py | n_words: 29 | n_identifiers: 11
random_cut:
def all_estimators(type_filter=None): # lazy import to avoid circular imports from sklearn.base from . import IS_PYPY from ._testing import ignore_warnings
MNT numpydoc validation for Displays (#21469) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
fun_name: all_estimators | commit_id: b22f7fa552c03aa7f6b9b4d661470d0173f8db5d | repo: scikit-learn | file_name: discovery.py
ast_levels: 7 | nloc: 67 | url: https://github.com/scikit-learn/scikit-learn.git
complexity: 23 | token_counts: 361 | n_ast_errors: 0 | vocab_size: 23 | n_ast_nodes: 62 | language: Python
{ "docstring": "Get a list of all estimators from `sklearn`.\n\n This function crawls the module and gets all classes that inherit\n from BaseEstimator. Classes that are defined in test-modules are not\n included.\n\n Parameters\n ----------\n type_filter : {\"classifier\", \"regressor\", \"cluster\", \"transformer\"} \\\n or list of such str, default=None\n Which kind of estimators should be returned. If None, no filter is\n applied and all estimators are returned. Possible values are\n 'classifier', 'regressor', 'cluster' and 'transformer' to get\n estimators only of these specific types, or a list of these to\n get the estimators that fit at least one of the types.\n\n Returns\n -------\n estimators : list of tuples\n List of (name, class), where ``name`` is the class name as string\n and ``class`` is the actual type of the class.\n ", "language": "en", "n_whitespaces": 215, "n_words": 124, "vocab_size": 83 }
def all_estimators(type_filter=None): # lazy import to avoid circular imports from sklearn.base from . import IS_PYPY from ._testing import ignore_warnings from ..base import ( BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, )

ast_errors: @image_comparison(['legend_various_labels'], remove_text=True)
d_id: 24,225 | id: 110,587 | n_whitespaces: 319 | path: lib/matplotlib/tests/test_legend.py | n_words: 109 | n_identifiers: 39
random_cut:
def test_legend_auto5(): fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8)) leg_bboxes = [] for ax, loc in zip(axs.flat, ("center", "best")): # An Ellipse patch at the top, a U-shaped Polygon patch at the # bottom and a ring-like Wedge patch: the correct placement of # the legend should be in the center. for _patch in [ mpatches.Ellipse( xy=(0.5, 0.9), width=0.8, height=0.2, fc="C1"), mpatches.Polygon(np.array([ [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0], [0.9, 0.1], [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc="C1"), mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc="C0") ]: ax.add_patch(_patch) ax.plot([0.1, 0.9], [0.9, 0.9], label="A segment") # sthg to label leg = ax.legend(loc=loc) fig.canvas.draw() leg_bboxes.append( leg.get_window_extent().transformed(ax.transAxes.inverted())) assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds) @image_comparison(['legend_various_labels'], remove_t
ENH: rely on non-rectangular patch paths rather than bboxes for legend auto-placing (fix #9580) (#9598) * use path rather than bbox for non rectangular patches * Add tests * Add a short breadcrumb note in api_changes
fun_name: test_legend_auto5 | commit_id: d8bb1a52316c38434e526412c27d9c4b01960084 | repo: matplotlib | file_name: test_legend.py
ast_levels: 14 | nloc: 19 | url: https://github.com/matplotlib/matplotlib.git
complexity: 3 | token_counts: 300 | n_ast_errors: 1 | vocab_size: 82 | n_ast_nodes: 390 | language: Python
{ "docstring": "\n Check that the automatic placement handle a rather complex\n case with non rectangular patch. Related to issue #9580.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 18 }
def test_legend_auto5(): fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8)) leg_bboxes = [] for ax, loc in zip(axs.flat, ("center", "best")): # An Ellipse patch at the top, a U-shaped Polygon patch at the # bottom and a ring-like Wedge patch: the correct placement of # the legend should be in the center. for _patch in [ mpatches.Ellipse( xy=(0.5, 0.9), width=0.8, height=0.2, fc="C1"), mpatches.Polygon(np.array([ [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0], [0.9, 0.1], [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc="C1"), mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc="C0") ]: ax.add_patch(_patch) ax.plot([0.1, 0.9], [0.9, 0.9], label="A segment") # sthg to label leg = ax.legend(loc=loc) fig.canvas.draw() leg_bboxes.append( leg.get_window_extent().transformed(ax.transAxes.inverted())) assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds) @image_comparison(['legend_various_labels'], remove_text=True)

d_id: 44,195 | id: 183,454 | n_whitespaces: 49 | path: src/textual/widgets/text_input.py | n_words: 13 | n_identifiers: 8
random_cut:
def _toggle_cursor_visible(self): if time.monotonic() - self._last_keypress_time > self.cursor
Conditional blinking
fun_name: _toggle_cursor_visible | commit_id: d8179c70dc06e06b2f445fdfb47fb7012d4cb2ed | repo: textual | file_name: text_input.py
ast_levels: 10 | nloc: 4 | url: https://github.com/Textualize/textual.git
complexity: 2 | token_counts: 34 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 59 | language: Python
{ "docstring": "Manages the blinking of the cursor - ensuring blinking only starts when the\n user hasn't pressed a key in some time", "language": "en", "n_whitespaces": 27, "n_words": 21, "vocab_size": 18 }
def _toggle_cursor_visible(self): if time.monotonic() - self._last_keypress_time > self.cursor_blink_period: self._cursor_blink_visible = not self._cursor_blink_visible self.refresh()

d_id: 19,571 | id: 98,450 | n_whitespaces: 651 | path: src/sentry/search/events/filter.py | n_words: 191 | n_identifiers: 55
random_cut:
def parse_semver(version, operator) -> Optional[SemverFilter]: (operator, negated) = handle_operator_negation(operator) try: operator = OPERATOR_TO_DJANGO[operator] except KeyError: raise InvalidSearchQuery("Invalid operation 'IN' for semantic version filter.") version = version if "@" in version else f"{SEMVER_FAKE_PACKAGE}@{version}" parsed = parse_release_relay(version) parsed_version = parsed.get("version_parsed") if parsed_version: # Convert `pre` to always be a string prerelease = parsed_version["pre"] if parsed_version["pre"] else "" semver_filter = SemverFilter( operator, [ parsed_version["major"], parsed_version["minor"], parsed_version["patch"], parsed_version["revision"], 0 if prerelease else 1, prerelease, ], negated=negated, ) if parsed["package"] and parsed["package"] != SEMVER_FAKE_PACKAGE: semver_filter.package = parsed["package"] return semver_filter else: # Try to parse as a wildcard match package, version = version.split("@", 1) version_parts = [] if version: for part in version.split(".", 3): if part in SEMVER_WILDCARDS: break try: # We assume all ints for a wildcard match - not handling prerelease as # part of these version_parts.append(int(part)) except ValueError: raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE) package = package if package and package != SEMVER_FAKE_PACKAGE else None return SemverFilter("exact", version_parts, package, negated) key_convers
fix(events-search): Return helpful error message on semver filter (#33785) 'IN' type queries currently raise an unhandled KeyError, raising an InvalidSearchQuery instead.
fun_name: parse_semver | commit_id: 4ffb52489e662029a08169351cd997d525977e88 | repo: sentry | file_name: filter.py
ast_levels: 18 | nloc: 50 | url: https://github.com/getsentry/sentry.git
complexity: 14 | token_counts: 224 | n_ast_errors: 0 | vocab_size: 132 | n_ast_nodes: 498 | language: Python
{ "docstring": "\n Attempts to parse a release version using our semver syntax. version should be in\n format `<package_name>@<version>` or `<version>`, where package_name is a string and\n version is a version string matching semver format (https://semver.org/). We've\n slightly extended this format to allow up to 4 integers. EG\n - sentry@1.2.3.4\n - sentry@1.2.3.4-alpha\n - 1.2.3.4\n - 1.2.3.4-alpha\n - 1.*\n ", "language": "en", "n_whitespaces": 91, "n_words": 55, "vocab_size": 39 }
def parse_semver(version, operator) -> Optional[SemverFilter]: (operator, negated) = handle_operator_negation(operator) try: operator = OPERATOR_TO_DJANGO[operator] except KeyError: raise InvalidSearchQuery("Invalid operation 'IN' for semantic version filter.") version = version if "@" in version else f"{SEMVER_FAKE_PACKAGE}@{version}" parsed = parse_release_relay(version) parsed_version = parsed.get("version_parsed") if parsed_version: # Convert `pre` to always be a string prerelease = parsed_version["pre"] if parsed_version["pre"] else "" semver_filter = SemverFilter( operator, [ parsed_version["major"], parsed_version["minor"], parsed_version["patch"], parsed_version["revision"], 0 if prerelease else 1, prerelease, ], negated=negated, ) if parsed["package"] and parsed["package"] != SEMVER_FAKE_PACKAGE: semver_filter.package = parsed["package"] return semver_filter else: # Try to parse as a wildcard match package, version = version.split("@", 1) version_parts = [] if version: for part in version.split(".", 3): if part in SEMVER_WILDCARDS: break try: # We assume all ints for a wildcard match - not handling prerelease as # part of these version_parts.append(int(part)) except ValueError: raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE) package = package if package and package != SEMVER_FAKE_PACKAGE else None return SemverFilter("exact", version_parts, package, negated) key_conversion_map: Mapping[ str, Callable[[SearchFilter, str, Mapping[str, Union[int, str, datetime]]], Optional[Sequence[any]]], ] = { "environment": _environment_filter_converter, "message": _message_filter_converter, TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter, "issue.id": _issue_id_filter_converter, USER_DISPLAY_ALIAS: _user_display_filter_converter, ERROR_UNHANDLED_ALIAS: _error_unhandled_filter_converter, "error.handled": _error_handled_filter_converter, TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter, RELEASE_STAGE_ALIAS: _release_stage_filter_converter, SEMVER_ALIAS: _semver_filter_converter, SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter, SEMVER_BUILD_ALIAS: _semver_build_filter_converter, }
15,757
71,810
79
wagtail/admin/tests/test_account_management.py
26
9
def test_not_logged_in_gives_403_to_ajax_requests(self): # Get dashboard response = self.client.get( reverse("wagtailadmin_home"), HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) # AJAX requests should be given a 403 error instead of being redirected self.assertEqual(response.status_code, 403)
Reformat with black
test_not_logged_in_gives_403_to_ajax_requests
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_account_management.py
11
5
https://github.com/wagtail/wagtail.git
1
33
0
25
60
Python
{ "docstring": "\n This tests that a not logged in user is given a 403 error on AJAX requests\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
def test_not_logged_in_gives_403_to_ajax_requests(self): # Get dashboard response = self.client.get( reverse("wagtailadmin_home"), HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) # AJAX requests should be given a 403 error instead of being redirected self.assertEqual(response.status_code, 403)
47,077
194,784
207
parlai/utils/bpe.py
62
18
def bytes_to_unicode(self) -> Dict[int, str]: bs: List[int] = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs: List[int] = bs[:]
autoformat (#4378)
bytes_to_unicode
81f722d29045a7a5841d0931a082ded1d1f13863
ParlAI
bpe.py
17
25
https://github.com/facebookresearch/ParlAI.git
4
151
0
43
247
Python
{ "docstring": "\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n\n The reversible bpe codes work on unicode strings. This means you need a large #\n of unicode characters in your vocab if you want to avoid UNKs. When you're at\n something like a 10B token dataset you end up needing around 5K for decent\n coverage. This is a significant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n ", "language": "en", "n_whitespaces": 150, "n_words": 93, "vocab_size": 69 }
def bytes_to_unicode(self) -> Dict[int, str]: bs: List[int] = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs: List[int] = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 str_cs: List[str] = [chr(n) for n in cs] return dict(zip(bs, str_cs))
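The mapping built above is a bijection from all 256 byte values onto printable unicode code points: the printable ASCII/Latin-1 ranges keep their own code point and every other byte is shifted up past 255. A standalone re-creation of the same construction (independent of the ParlAI class) that checks the table really is reversible:

```python
# Standalone sketch of the byte <-> unicode table used by byte-level BPE.
def bytes_to_unicode_table() -> dict:
    bs = (list(range(ord("!"), ord("~") + 1))          # printable ASCII
          + list(range(ord("\xa1"), ord("\xac") + 1))  # Latin-1 block, part 1
          + list(range(ord("\xae"), ord("\xff") + 1))) # Latin-1 block, part 2
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:          # remaining bytes get fresh code points above 255
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    return dict(zip(bs, (chr(c) for c in cs)))

table = bytes_to_unicode_table()
assert len(table) == 256                     # every byte value is covered
assert len(set(table.values())) == 256       # and the mapping is invertible
decoder = {v: k for k, v in table.items()}
encoded = "".join(table[b] for b in "héllo".encode("utf-8"))
assert bytes(decoder[ch] for ch in encoded).decode("utf-8") == "héllo"
```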
90,913
291,809
200
tests/components/caldav/test_calendar.py
64
10
async def test_get_events_custom_calendars(hass, calendar, get_api_events): config = dict(CALDAV_CONFIG) config["custom_calendars"] = [ {"name": "Private", "calendar": "Private", "search": "This is a normal event"} ] assert await async_setup_component(hass, "calendar", {"calend
Local calendar integration (#79601)
test_get_events_custom_calendars
532ab12a48b6832180599088250fc23446a45d1e
core
test_calendar.py
12
20
https://github.com/home-assistant/core.git
1
110
0
48
212
Python
{ "docstring": "Test that only searched events are returned on API.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def test_get_events_custom_calendars(hass, calendar, get_api_events): config = dict(CALDAV_CONFIG) config["custom_calendars"] = [ {"name": "Private", "calendar": "Private", "search": "This is a normal event"} ] assert await async_setup_component(hass, "calendar", {"calendar": config}) await hass.async_block_till_done() events = await get_api_events("calendar.private_private") assert events == [ { "end": {"dateTime": "2017-11-27T10:00:00-08:00"}, "start": {"dateTime": "2017-11-27T09:00:00-08:00"}, "summary": "This is a normal event", "location": "Hamburg", "description": "Surprisingly rainy", "uid": None, "recurrence_id": None, "rrule": None, } ]
24,940
113,538
54
nni/mutable/symbol.py
14
8
def leaf_symbols(self) -> Iterable[Symbol]: for arg in self.arguments: if isinstanc
Mutable V3 (Stage 2) - Symbolic execution engine (#5195)
leaf_symbols
8f454f3bf29e2c3cd0d359231a46edd8ee768d42
nni
symbol.py
12
10
https://github.com/microsoft/nni.git
3
33
0
14
54
Python
{ "docstring": "\n Return a generator of all leaf symbols.\n\n Useful for when you want to inspect when the symbols come from.\n No deduplication even if the symbols has duplicates.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 24 }
def leaf_symbols(self) -> Iterable[Symbol]: for arg in self.arguments: if isinstance(arg, SymbolicExpression): yield from arg.leaf_symbols()
@eval_app.command()
31,241
137,777
175
rllib/evaluate.py
35
16
def append_step(self, obs, action, next_obs, reward, terminated, truncated, info): if self._outfile: if self._save_info: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated, info] ) else: self._current_rollout.append( [obs, action,
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
append_step
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
ray
evaluate.py
13
11
https://github.com/ray-project/ray.git
3
79
1
22
120
Python
{ "docstring": "Add a step to the current rollout, if we are saving them", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def append_step(self, obs, action, next_obs, reward, terminated, truncated, info): if self._outfile: if self._save_info: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated, info] ) else: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated] ) self._total_steps += 1 @eval_app.command()
50,188
202,956
122
tests/prefetch_related/tests.py
21
17
def test_nested_prefetch_is_not_overwritten_by_related_object(self): queryset = House.objects.only('name').prefetch_related( Prefetch('rooms', queryset=Room.objects.prefetch_related( Prefetch('house', queryset=House.objects.only('address')), )), ) with self.assertNumQueries(3): house = queryset.first() self.assertIs(
Fixed #32511 -- Corrected handling prefetched nested reverse relationships. When prefetching a set of child objects related to a set of parent objects, we usually want to populate the relationship back from the child to the parent to avoid a query when accessing that relationship attribute. However, there's an edge case where the child queryset itself specifies a prefetch back to the parent. In that case, we want to use the prefetched relationship rather than populating the reverse relationship from the parent.
test_nested_prefetch_is_not_overwritten_by_related_object
f5233dce309543c826224be9dfa9c9f4f855f73c
django
tests.py
19
11
https://github.com/django/django.git
1
102
0
19
175
Python
{ "docstring": "\n The prefetched relationship is used rather than populating the reverse\n relationship from the parent, when prefetching a set of child objects\n related to a set of parent objects and the child queryset itself\n specifies a prefetch back to the parent.\n ", "language": "en", "n_whitespaces": 76, "n_words": 40, "vocab_size": 29 }
def test_nested_prefetch_is_not_overwritten_by_related_object(self): queryset = House.objects.only('name').prefetch_related( Prefetch('rooms', queryset=Room.objects.prefetch_related( Prefetch('house', queryset=House.objects.only('address')), )), ) with self.assertNumQueries(3): house = queryset.first() self.assertIs(Room.house.is_cached(self.room), True) with self.assertNumQueries(0): house.rooms.first().house.address
49,612
200,382
147
sympy/combinatorics/permutations.py
51
13
def apply(self, i): r i = _sympify(i) if i.is_integer is False: raise NotImplementedError("{} should be an integer.".format(i)) n = self.size if (i < 0) == True or (i >= n) == True: raise NotImplementedError( "{} should be an integer between 0 and {}".format(i, n-1)) if i.is_Integer: return Integer(self._array_form[i]) retur
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
apply
24f1e7730119fe958cc8e28411f790c9a5ec04eb
sympy
permutations.py
12
46
https://github.com/sympy/sympy.git
5
90
0
41
144
Python
{ "docstring": "Apply the permutation to an expression.\n\n Parameters\n ==========\n\n i : Expr\n It should be an integer between $0$ and $n-1$ where $n$\n is the size of the permutation.\n\n If it is a symbol or a symbolic expression that can\n have integer values, an ``AppliedPermutation`` object\n will be returned which can represent an unevaluated\n function.\n\n Notes\n =====\n\n Any permutation can be defined as a bijective function\n $\\sigma : \\{ 0, 1, \\dots, n-1 \\} \\rightarrow \\{ 0, 1, \\dots, n-1 \\}$\n where $n$ denotes the size of the permutation.\n\n The definition may even be extended for any set with distinctive\n elements, such that the permutation can even be applied for\n real numbers or such, however, it is not implemented for now for\n computational reasons and the integrity with the group theory\n module.\n\n This function is similar to the ``__call__`` magic, however,\n ``__call__`` magic already has some other applications like\n permuting an array or attaching new cycles, which would\n not always be mathematically consistent.\n\n This also guarantees that the return type is a SymPy integer,\n which guarantees the safety to use assumptions.\n ", "language": "en", "n_whitespaces": 386, "n_words": 180, "vocab_size": 116 }
def apply(self, i): r i = _sympify(i) if i.is_integer is False: raise NotImplementedError("{} should be an integer.".format(i)) n = self.size if (i < 0) == True or (i >= n) == True: raise NotImplementedError( "{} should be an integer between 0 and {}".format(i, n-1)) if i.is_Integer: return Integer(self._array_form[i]) return AppliedPermutation(self, i)
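Assuming a SymPy version that ships `Permutation.apply` (the commit recorded here only fixes typos around it), the behaviour described in the docstring can be seen directly:

```python
from sympy import Symbol
from sympy.combinatorics import Permutation

p = Permutation([2, 0, 1])     # array form: 0 -> 2, 1 -> 0, 2 -> 1
print(p.apply(0))              # 2, returned as a SymPy Integer rather than a plain int

i = Symbol("i", integer=True)
print(p.apply(i))              # stays symbolic as an unevaluated AppliedPermutation
```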
23,670
109,611
355
lib/matplotlib/collections.py
112
27
def _convert_mesh_to_triangles(self, coordinates): if isinstance(coordinates, np.ma.MaskedArray): p = coordinates.data else: p = coordinates p_a = p[:-1, :-1] p_b = p[:-1, 1:] p_c = p[1:, 1:] p_d = p[1:, :-1] p_center = (p_a + p_
Deprecate draw_gouraud_triangle (#23824) * Deprecate draw_gouraud_triangle * DOC: minor rewording Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com> Co-authored-by: Thomas A Caswell <tcaswell@gmail.com> Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com>
_convert_mesh_to_triangles
4a5d09cba5f4a20e14553cebd8f70c1f34d20d35
matplotlib
collections.py
12
29
https://github.com/matplotlib/matplotlib.git
2
273
0
56
390
Python
{ "docstring": "\n Convert a given mesh into a sequence of triangles, each point\n with its own color. The result can be used to construct a call to\n `~.RendererBase.draw_gouraud_triangles`.\n ", "language": "en", "n_whitespaces": 56, "n_words": 26, "vocab_size": 23 }
def _convert_mesh_to_triangles(self, coordinates): if isinstance(coordinates, np.ma.MaskedArray): p = coordinates.data else: p = coordinates p_a = p[:-1, :-1] p_b = p[:-1, 1:] p_c = p[1:, 1:] p_d = p[1:, :-1] p_center = (p_a + p_b + p_c + p_d) / 4.0 triangles = np.concatenate([ p_a, p_b, p_center, p_b, p_c, p_center, p_c, p_d, p_center, p_d, p_a, p_center, ], axis=2).reshape((-1, 3, 2)) c = self.get_facecolor().reshape((*coordinates.shape[:2], 4)) c_a = c[:-1, :-1] c_b = c[:-1, 1:] c_c = c[1:, 1:] c_d = c[1:, :-1] c_center = (c_a + c_b + c_c + c_d) / 4.0 colors = np.concatenate([ c_a, c_b, c_center, c_b, c_c, c_center, c_c, c_d, c_center, c_d, c_a, c_center, ], axis=2).reshape((-1, 3, 4)) return triangles, colors
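The core trick above is purely geometric: each quad of the coordinate grid is split into four triangles that share the quad's centre, and the same four-corner averaging is applied to the colours. A small NumPy sketch of just the coordinate part, outside Matplotlib:

```python
import numpy as np

# A 2x2 grid of quad corners -> one quad, which becomes four triangles.
p = np.array([[[0.0, 0.0], [1.0, 0.0]],
              [[0.0, 1.0], [1.0, 1.0]]])   # shape (2, 2, 2): rows, cols, xy

p_a, p_b = p[:-1, :-1], p[:-1, 1:]
p_c, p_d = p[1:, 1:], p[1:, :-1]
p_center = (p_a + p_b + p_c + p_d) / 4.0

triangles = np.concatenate([
    p_a, p_b, p_center,
    p_b, p_c, p_center,
    p_c, p_d, p_center,
    p_d, p_a, p_center,
], axis=2).reshape((-1, 3, 2))

print(triangles.shape)   # (4, 3, 2): four triangles of three 2-D vertices each
print(p_center[0, 0])    # [0.5 0.5], the quad centre shared by all four
```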
39,415
163,270
419
pandas/core/series.py
126
40
def count(self, level=None): if level is None: return notna(self._values).sum().astype("int64") else: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. ser.count(level=1) should use ser.groupby(level=1).count().", FutureWarning, stacklevel=find_stack_level(), ) if not isinstance(self.index, MultiIndex): raise ValueError("Series.count level is only valid with a MultiIndex") index = self.index assert isinstance(index, MultiIndex) # for mypy if isinstance(level, str): level = index._get_level_number(level) lev = index.levels[level] level_codes = np.array(index.codes[level], subok=False, copy=True) mask = level_codes == -1 if mask.any(): level_codes[mask] = cnt = len(lev) lev = lev.insert(cnt, lev._na_value) obs = level_codes[notna(self._values)]
TYP: Ignore numpy related issues (#45244)
count
d603d43df2057ecdf74010d9dadc735e37f8f7b5
pandas
series.py
14
28
https://github.com/pandas-dev/pandas.git
6
211
0
98
343
Python
{ "docstring": "\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n Returns\n -------\n int or Series (if level specified)\n Number of non-null values in the Series.\n\n See Also\n --------\n DataFrame.count : Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> s = pd.Series([0.0, 1.0, np.nan])\n >>> s.count()\n 2\n ", "language": "en", "n_whitespaces": 220, "n_words": 74, "vocab_size": 58 }
def count(self, level=None): if level is None: return notna(self._values).sum().astype("int64") else: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. ser.count(level=1) should use ser.groupby(level=1).count().", FutureWarning, stacklevel=find_stack_level(), ) if not isinstance(self.index, MultiIndex): raise ValueError("Series.count level is only valid with a MultiIndex") index = self.index assert isinstance(index, MultiIndex) # for mypy if isinstance(level, str): level = index._get_level_number(level) lev = index.levels[level] level_codes = np.array(index.codes[level], subok=False, copy=True) mask = level_codes == -1 if mask.any(): level_codes[mask] = cnt = len(lev) lev = lev.insert(cnt, lev._na_value) obs = level_codes[notna(self._values)] # Argument "minlength" to "bincount" has incompatible type "Optional[int]"; # expected "SupportsIndex" [arg-type] out = np.bincount(obs, minlength=len(lev) or None) # type: ignore[arg-type] return self._constructor(out, index=lev, dtype="int64").__finalize__( self, method="count" )
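The deprecation message above points at groupby as the replacement for `count(level=...)`. A small pandas example of the suggested spelling (any reasonably recent pandas should behave the same way):

```python
import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=["outer", "inner"]
)
s = pd.Series([1.0, np.nan, 3.0, 4.0], index=idx)

# Equivalent of the deprecated s.count(level="outer"): non-NA counts per level value.
print(s.groupby(level="outer").count())
# outer
# a    1
# b    2
```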
55,704
219,678
951
python3.10.4/Lib/_pydecimal.py
350
31
def __format__(self, specifier, context=None, _localeconv=None): # Note: PEP 3101 says that if the type is not present then # there should be at least one digit after the decimal point. # We take the liberty of ignoring this requirement for # Decimal---it's presumably there to make sure that # format(float, '') behaves similarly to str(float). if context is None: context = getcontext() spec = _parse_format_specifier(specifier, _localeconv=_localeconv) # special values don't care about the type or precision if self._is_special: sign = _format_sign(self._sign, spec) body = str(self.copy_abs()) if spec['type'] == '%': body += '%' return _format_align(sign, body, spec) # a type of None defaults to 'g' or 'G', depending on context if spec['type'] is None: spec['type'] = ['g', 'G'][context.capitals] # if type is '%', adjust exponent of self accordingly if spec['type'] == '%': self = _dec_from_triple(self._sign, self._int, self._exp+2) # round if necessary, taking rounding mode from the context rounding = context.rounding precision = spec['precision'] if precision is not None: if spec['type'] in 'eE': self = self._round(precision+1, rounding) elif spec['type'] in 'fF%': self = self._rescale(-precision, rounding) elif spec['type'] in 'gG' and len(self._int) > precision: self = self._round(precision, rounding) # special case: zeros with a positive exponent can't be # represented in fixed point; rescale them to 0e0. if not self and self._exp > 0 and spec['type'] in 'fF%': self = self._rescale(0, rounding) # figure out placement of the decimal point leftdigits = self._exp + len(self._int) if spec['type'] in 'eE': if not self and precision is not None: dotplace = 1 - precision else: dotplace = 1 elif spec['type'] in 'fF%': dotplace = leftdigits elif spec['type'] in 'gG': if self._exp <= 0 and leftdigits > -6: dotplace = leftdigits else: dotplace =
add python 3.10.4 for windows
__format__
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_pydecimal.py
16
49
https://github.com/XX-net/XX-Net.git
24
411
0
171
704
Python
{ "docstring": "Format a Decimal instance according to the given specifier.\n\n The specifier should be a standard format specifier, with the\n form described in PEP 3101. Formatting types 'e', 'E', 'f',\n 'F', 'g', 'G', 'n' and '%' are supported. If the formatting\n type is omitted it defaults to 'g' or 'G', depending on the\n value of context.capitals.\n ", "language": "en", "n_whitespaces": 99, "n_words": 55, "vocab_size": 49 }
def __format__(self, specifier, context=None, _localeconv=None): # Note: PEP 3101 says that if the type is not present then # there should be at least one digit after the decimal point. # We take the liberty of ignoring this requirement for # Decimal---it's presumably there to make sure that # format(float, '') behaves similarly to str(float). if context is None: context = getcontext() spec = _parse_format_specifier(specifier, _localeconv=_localeconv) # special values don't care about the type or precision if self._is_special: sign = _format_sign(self._sign, spec) body = str(self.copy_abs()) if spec['type'] == '%': body += '%' return _format_align(sign, body, spec) # a type of None defaults to 'g' or 'G', depending on context if spec['type'] is None: spec['type'] = ['g', 'G'][context.capitals] # if type is '%', adjust exponent of self accordingly if spec['type'] == '%': self = _dec_from_triple(self._sign, self._int, self._exp+2) # round if necessary, taking rounding mode from the context rounding = context.rounding precision = spec['precision'] if precision is not None: if spec['type'] in 'eE': self = self._round(precision+1, rounding) elif spec['type'] in 'fF%': self = self._rescale(-precision, rounding) elif spec['type'] in 'gG' and len(self._int) > precision: self = self._round(precision, rounding) # special case: zeros with a positive exponent can't be # represented in fixed point; rescale them to 0e0. if not self and self._exp > 0 and spec['type'] in 'fF%': self = self._rescale(0, rounding) # figure out placement of the decimal point leftdigits = self._exp + len(self._int) if spec['type'] in 'eE': if not self and precision is not None: dotplace = 1 - precision else: dotplace = 1 elif spec['type'] in 'fF%': dotplace = leftdigits elif spec['type'] in 'gG': if self._exp <= 0 and leftdigits > -6: dotplace = leftdigits else: dotplace = 1 # find digits before and after decimal point, and get exponent if dotplace < 0: intpart = '0' fracpart = '0'*(-dotplace) + self._int elif dotplace > len(self._int): intpart = self._int + '0'*(dotplace-len(self._int)) fracpart = '' else: intpart = self._int[:dotplace] or '0' fracpart = self._int[dotplace:] exp = leftdigits-dotplace # done with the decimal-specific stuff; hand over the rest # of the formatting to the _format_number function return _format_number(self._sign, intpart, fracpart, exp, spec)
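Since this is the standard-library `decimal` module, the branches above map directly onto observable formatting behaviour; a few illustrative calls (expected output shown in comments):

```python
from decimal import Decimal

print(format(Decimal("1.2345"), ".2f"))   # 1.23   -- fixed point, rounded per the context
print(format(Decimal("123456"), ".2e"))   # 1.23e+5
print(format(Decimal("0.25"), ".1%"))     # 25.0%  -- the '%' branch bumps the exponent by 2
print(format(Decimal("-0"), ""))          # -0     -- no type defaults to 'g'/'G'
```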
75,368
258,694
30
sklearn/tests/test_base.py
15
13
def test_feature_names_in(): pd = pytest.importorskip("pandas") iris = datasets.load_iris() X_np = iris.data df = pd.DataFrame(X_np, columns=iris.feature_names)
TST Better info when checking for no warnings in tests (#22362)
test_feature_names_in
9f85c9d44965b764f40169ef2917e5f7a798684f
scikit-learn
test_base.py
10
47
https://github.com/scikit-learn/scikit-learn.git
4
339
0
12
68
Python
{ "docstring": "Check that feature_name_in are recorded by `_validate_data`", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def test_feature_names_in(): pd = pytest.importorskip("pandas") iris = datasets.load_iris() X_np = iris.data df = pd.DataFrame(X_np, columns=iris.feature_names)
69,642
241,658
200
pytorch_lightning/trainer/connectors/accelerator_connector.py
67
12
def check_interactive_compatibility(self): from pytorch_lightning.utilities import _IS_INTERACTIVE if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible(): raise MisconfigurationException( f"`Trainer(strategy={self._strategy_type.value!r})` or" f" `Trainer(accelerator={self._strategy_type.value!r})` is not compatible with an interactive" " environment. Run your code as a script, or choose one of the compatible backends:" f" {', '.join(_StrategyType.interactive_compatible_types())}." " In case you are spaw
Rename `_distrib_type` to `_strategy_type` (#11328) Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
check_interactive_compatibility
e15579a4f32ee3c08318a466583f4a0a8517d654
lightning
accelerator_connector.py
16
11
https://github.com/Lightning-AI/lightning.git
4
44
0
56
121
Python
{ "docstring": "Raises a `MisconfigurationException` if the accelerator and/or plugin is not compatible with an\n interactive environment.", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 15 }
def check_interactive_compatibility(self): from pytorch_lightning.utilities import _IS_INTERACTIVE if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible(): raise MisconfigurationException( f"`Trainer(strategy={self._strategy_type.value!r})` or" f" `Trainer(accelerator={self._strategy_type.value!r})` is not compatible with an interactive" " environment. Run your code as a script, or choose one of the compatible backends:" f" {', '.join(_StrategyType.interactive_compatible_types())}." " In case you are spawning processes yourself, make sure to include the Trainer" " creation inside the worker function." )
53,522
212,929
256
PySimpleGUI.py
76
10
def theme_global(new_theme=None): if new_theme is not None: if new_theme not in theme_list(): popup_error_with_traceback('Cannot use custom themes with theme_global call', 'Your request to use theme {} cannot be performed.'.format(new_theme), 'The PySimpleGUI Global User Se
Better error checking/reporting in theme_global. NEW THEME DarkGrey15
theme_global
dfad2e3b7671b7128895c8a0e29fff38d7efe6e9
PySimpleGUI
PySimpleGUI.py
14
13
https://github.com/PySimpleGUI/PySimpleGUI.git
3
71
0
59
125
Python
{ "docstring": "\n Sets / Gets the global PySimpleGUI Theme. If none is specified then returns the global theme from user settings.\n Note the theme must be a standard, built-in PySimpleGUI theme... not a user-created theme.\n\n :param new_theme: the new theme name to use\n :type new_theme: (str)\n :return: the currently selected theme\n :rtype: (str)\n ", "language": "en", "n_whitespaces": 94, "n_words": 51, "vocab_size": 39 }
def theme_global(new_theme=None): if new_theme is not None: if new_theme not in theme_list(): popup_error_with_traceback('Cannot use custom themes with theme_global call', 'Your request to use theme {} cannot be performed.'.format(new_theme), 'The PySimpleGUI Global User Settings are meant for PySimpleGUI standard items, not user config items', 'You can use any of the many built-in themes instead or use your own UserSettings file to store your custom theme') return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL) pysimplegui_user_settings.set('-theme-', new_theme) theme(new_theme) return new_theme else: return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL)
57,796
226,116
141
packages/python/chart-studio/chart_studio/plotly/chunked_requests/chunked_request.py
45
11
def close(self):
switch to black .22
close
43e3a4011080911901176aab919c0ecf5046ddd3
plotly.py
chunked_request.py
12
8
https://github.com/plotly/plotly.py.git
2
46
0
41
95
Python
{ "docstring": "Close the connection to server.\n\n If available, return a http_client.HTTPResponse object.\n\n Closing the connection involves sending the\n Transfer-Encoding terminating bytes.\n ", "language": "en", "n_whitespaces": 48, "n_words": 20, "vocab_size": 17 }
def close(self): self._reset_retries() self._closed = True # Chunked-encoded posts are terminated with '0\r\n\r\n' # For some reason, either Python or node.js seems to # require an extra \r\n. try: self._conn.send("\r\n0\r\n\r\n".encode("utf-8")) except http_client.socket.error: # In case the socket has already been closed return "" return self._getresponse()
54,636
216,555
381
salt/modules/runit.py
164
25
def _get_svc_path(name="*", status=None): # This is the core routine to work with services, called by many # other functions of this module. # # The name of a service is the "apparent" folder's name that contains its # "run" script. If its "folder" is a symlink, the service is an "alias" of # the targeted service. if not SERVICE_DIR: raise CommandExecutionError("Could not find service directory.") # path list of enabled services as /AVAIL_SVR_DIRS/$service, # taking care of any service aliases (do not use os.path.realpath()).
fix runit module failing on non-symlinked service
_get_svc_path
5bf2904e7ac79d438ce03a673aa9a5c99f4e8e0f
salt
runit.py
15
24
https://github.com/saltstack/salt.git
10
175
0
95
304
Python
{ "docstring": "\n Return a list of paths to services with ``name`` that have the specified ``status``\n\n name\n a glob for service name. default is '*'\n\n status\n None : all services (no filter, default choice)\n 'DISABLED' : available service(s) that is not enabled\n 'ENABLED' : enabled service (whether started on boot or not)\n ", "language": "en", "n_whitespaces": 98, "n_words": 50, "vocab_size": 41 }
def _get_svc_path(name="*", status=None): # This is the core routine to work with services, called by many # other functions of this module. # # The name of a service is the "apparent" folder's name that contains its # "run" script. If its "folder" is a symlink, the service is an "alias" of # the targeted service. if not SERVICE_DIR: raise CommandExecutionError("Could not find service directory.") # path list of enabled services as /AVAIL_SVR_DIRS/$service, # taking care of any service aliases (do not use os.path.realpath()). ena = set() for el in glob.glob(os.path.join(SERVICE_DIR, name)): if _is_svc(el): if os.path.islink(el): ena.add(os.readlink(el)) else: ena.add(el) log.trace("found enabled service path: %s", el) if status == "ENABLED": return sorted(ena) # path list of available services as /AVAIL_SVR_DIRS/$service ava = set() for d in AVAIL_SVR_DIRS: for el in glob.glob(os.path.join(d, name)): if _is_svc(el): ava.add(el) log.trace("found available service path: %s", el) if status == "DISABLED": # service available but not enabled ret = ava.difference(ena) else: # default: return available services ret = ava.union(ena) return sorted(ret)
@RunIf(min_gpus=2)
69,688
241,761
70
tests/checkpointing/test_torch_saving.py
34
22
def test_model_torch_save_ddp_cpu(tmpdir): model = BoringModel() num_epochs = 1 trainer = Trainer( default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False ) temp_path = os.path.jo
Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408) Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
test_model_torch_save_ddp_cpu
d2d284fd6e3e8f53e9a44ab233771850af1e4dab
lightning
test_torch_saving.py
10
10
https://github.com/Lightning-AI/lightning.git
1
78
1
30
139
Python
{ "docstring": "Test to ensure torch save does not fail for model and trainer using cpu ddp.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
def test_model_torch_save_ddp_cpu(tmpdir): model = BoringModel() num_epochs = 1 trainer = Trainer( default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False ) temp_path = os.path.join(tmpdir, "temp.pt") trainer.fit(model) # Ensure these do not fail torch.save(trainer.model, temp_path) torch.save(trainer, temp_path) @RunIf(min_gpus=2)
6,562
36,020
38
src/transformers/onnx/config.py
17
5
def default_batch_size(self) -> int: # Using 2 avoid ONNX making assumption about single sample batch return On
Add ONNX export for ViT (#15658) * Add ONNX support for ViT * Refactor to use generic preprocessor * Add vision dep to tests * Extend ONNX slow tests to ViT * Add dummy image generator * Use model_type to determine modality * Add deprecation warnings for tokenizer argument * Add warning when overwriting the preprocessor * Add optional args to docstrings * Add minimum PyTorch version to OnnxConfig * Refactor OnnxConfig class variables from CONSTANT_NAME to snake_case * Add reasonable value for default atol Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
default_batch_size
50dd314d939a86f3a81e19af01459f449fbaeeca
transformers
config.py
6
8
https://github.com/huggingface/transformers.git
1
12
0
17
23
Python
{ "docstring": "\n The default batch size to use if no other indication\n\n Returns:\n Integer > 0\n ", "language": "en", "n_whitespaces": 47, "n_words": 14, "vocab_size": 14 }
def default_batch_size(self) -> int: # Using 2 avoid ONNX making assumption about single sample batch return OnnxConfig.default_fixed_batch
42,870
178,942
156
nuitka/utils/Signing.py
32
16
def addMacOSCodeSignature(filenames): # Weak signing. identity = getMacOSSigningIdentity() command = [ "codesign", "-s", identity, "--force", "--deep", "--preserve-metadata=entitlements", ] assert type(filenames) is not str command.extend(filenam
macOS: Add support for specifying signing identity and access to protected resources.
addMacOSCodeSignature
51ca460bd8c382cc165cbb1325e7cb65895d1a0b
Nuitka
Signing.py
10
19
https://github.com/Nuitka/Nuitka.git
1
66
0
31
112
Python
{ "docstring": "Remove the code signature from a filename.\n\n Args:\n filenames - The files to be signed.\n\n Returns:\n None\n\n Notes:\n This is macOS specific.\n ", "language": "en", "n_whitespaces": 55, "n_words": 22, "vocab_size": 22 }
def addMacOSCodeSignature(filenames): # Weak signing. identity = getMacOSSigningIdentity() command = [ "codesign", "-s", identity, "--force", "--deep", "--preserve-metadata=entitlements", ] assert type(filenames) is not str command.extend(filenames) with withMadeWritableFileMode(filenames): executeToolChecked( logger=postprocessing_logger, command=command, absence_message=macos_codesign_usage, stderr_filter=_filterSigntoolErrorOutput, )
9,566
48,680
178
rest_framework/views.py
56
22
def exception_handler(exc, context): if isinstance(exc, Http
Preserve exception messages for wrapped Django exceptions (#8051) * Preserve messages for wrapped Django exceptions * Fix the test * Update test_generics.py * Update test_generics.py Co-authored-by: Tom Christie <tom@tomchristie.com>
exception_handler
56946fac8f29aa44ce84391f138d63c4c8a2a285
django-rest-framework
views.py
14
18
https://github.com/encode/django-rest-framework.git
7
152
0
39
246
Python
{ "docstring": "\n Returns the response that should be used for any given exception.\n\n By default we handle the REST framework `APIException`, and also\n Django's built-in `Http404` and `PermissionDenied` exceptions.\n\n Any unhandled exceptions may return `None`, which will cause a 500 error\n to be raised.\n ", "language": "en", "n_whitespaces": 61, "n_words": 42, "vocab_size": 39 }
def exception_handler(exc, context): if isinstance(exc, Http404): exc = exceptions.NotFound(*(exc.args)) elif isinstance(exc, PermissionDenied): exc = exceptions.PermissionDenied(*(exc.args)) if isinstance(exc, exceptions.APIException): headers = {} if getattr(exc, 'auth_header', None): headers['WWW-Authenticate'] = exc.auth_header if getattr(exc, 'wait', None): headers['Retry-After'] = '%d' % exc.wait if isinstance(exc.detail, (list, dict)): data = exc.detail else: data = {'detail': exc.detail} set_rollback() return Response(data, status=exc.status_code, headers=headers) return None
7,156
39,239
107
recommenders/models/sar/sar_singlenode.py
25
22
def compute_cooccurrence_matrix(self, df): u
Remove drop_duplicates() from SAR method fix #1464 (#1588) * Remove drop_duplicates() from SAR method fix #1464 * flake is complaining * Typos * Define self.unity_user_affinity inside __init__() * Remove drop_duplicates() from SAR method * Remove duplicates in testing data * Remove duplicates in test data for recommend_k_items * Allow duplicates in score data Co-authored-by: miguelgfierro <miguelgfierro@users.noreply.github.com> Co-authored-by: Andreas Argyriou <anargyri@users.noreply.github.com> Co-authored-by: Simon Zhao <43029286+simonzhaoms@users.noreply.github.com>
compute_cooccurrence_matrix
96b5053fa688bec79a729f9ea238e5f916bced01
recommenders
sar_singlenode.py
15
10
https://github.com/microsoft/recommenders.git
1
101
0
21
153
Python
{ "docstring": "Co-occurrence matrix.\n\n The co-occurrence matrix is defined as :math:`C = U^T * U`\n\n where U is the user_affinity matrix with 1's as values (instead of ratings).\n\n Args:\n df (pandas.DataFrame): DataFrame of users and items\n\n Returns:\n numpy.ndarray: Co-occurrence matrix\n ", "language": "en", "n_whitespaces": 95, "n_words": 38, "vocab_size": 32 }
def compute_cooccurrence_matrix(self, df): user_item_hits = sparse.coo_matrix( (np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])), shape=(self.n_users, self.n_items), ).tocsr() item_cooccurrence = user_item_hits.transpose().dot(user_item_hits) item_cooccurrence = item_cooccurrence.multiply( item_cooccurrence >= self.threshold ) return item_cooccurrence.astype(df[self.col_rating].dtype)
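The docstring's identity C = U^T * U is easy to reproduce on a toy interaction matrix; a self-contained SciPy sketch (the thresholding and dtype handling from the SAR code are left out):

```python
import numpy as np
from scipy import sparse

# Rows = users, columns = items, 1 = "user interacted with item".
hits = sparse.csr_matrix(np.array([
    [1, 1, 0],
    [1, 0, 1],
    [0, 1, 1],
]))

cooccurrence = hits.T.dot(hits)      # C = U^T * U
print(cooccurrence.toarray())
# [[2 1 1]
#  [1 2 1]
#  [1 1 2]]
```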
3,452
20,609
615
pipenv/patched/notpip/_vendor/pyparsing/helpers.py
164
48
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): if isinstance(tagStr, str_type): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas, alphanums + "_-:") if xml: tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) openTag = ( suppress_LT + tagStr("tag") + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + Opt("/", default=[False])("empty").set_parse_action( lambda s, l, t: t[0] == "/" ) + suppress_GT ) else: tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( printables, exclude_chars=">" ) openTag = ( suppress_LT + tagStr("tag") + Dict( ZeroOrMore( Group( tagAttrName.set_parse_action(lambda t: t[0].lower()) + Opt(Suppress("=") + tagAttrValue) ) ) ) + Opt("/", default=[False])("empty").set_parse_action( lambda s, l, t: t[0] == "/" ) + suppress_GT ) closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False) openTag.set_name("<%s>" % resname) # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels openTag.add_parse_action( lambda t: t.__setitem__( "start" + "".join(resname.replace(":", " ").title().split()), t.copy() )
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_makeTags
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
helpers.py
26
53
https://github.com/pypa/pipenv.git
3
365
0
96
627
Python
{ "docstring": "Internal helper to construct opening and closing tag expressions, given a tag name", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): if isinstance(tagStr, str_type): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas, alphanums + "_-:") if xml: tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) openTag = ( suppress_LT + tagStr("tag") + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + Opt("/", default=[False])("empty").set_parse_action( lambda s, l, t: t[0] == "/" ) + suppress_GT ) else: tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( printables, exclude_chars=">" ) openTag = ( suppress_LT + tagStr("tag") + Dict( ZeroOrMore( Group( tagAttrName.set_parse_action(lambda t: t[0].lower()) + Opt(Suppress("=") + tagAttrValue) ) ) ) + Opt("/", default=[False])("empty").set_parse_action( lambda s, l, t: t[0] == "/" ) + suppress_GT ) closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False) openTag.set_name("<%s>" % resname) # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels openTag.add_parse_action( lambda t: t.__setitem__( "start" + "".join(resname.replace(":", " ").title().split()), t.copy() ) ) closeTag = closeTag( "end" + "".join(resname.replace(":", " ").title().split()) ).set_name("</%s>" % resname) openTag.tag = resname closeTag.tag = resname openTag.tag_body = SkipTo(closeTag()) return openTag, closeTag
80,398
270,135
208
keras/datasets/cifar100.py
88
24
def load_data(label_mode="fine"): if label_mode not in ["fine", "coarse"]: raise ValueError( '`label_mode` must be one of `"fine"`, `"coarse"`. ' f"Received: label_mode=
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
load_data
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
cifar100.py
12
24
https://github.com/keras-team/keras.git
3
185
0
64
303
Python
{ "docstring": "Loads the CIFAR100 dataset.\n\n This is a dataset of 50,000 32x32 color training images and\n 10,000 test images, labeled over 100 fine-grained classes that are\n grouped into 20 coarse-grained classes. See more info at the\n [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).\n\n Args:\n label_mode: one of \"fine\", \"coarse\". If it is \"fine\" the category labels\n are the fine-grained labels, if it is \"coarse\" the output labels are the\n coarse-grained superclasses.\n\n Returns:\n Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n **x_train**: uint8 NumPy array of grayscale image data with shapes\n `(50000, 32, 32, 3)`, containing the training data. Pixel values range\n from 0 to 255.\n\n **y_train**: uint8 NumPy array of labels (integers in range 0-99)\n with shape `(50000, 1)` for the training data.\n\n **x_test**: uint8 NumPy array of grayscale image data with shapes\n `(10000, 32, 32, 3)`, containing the test data. Pixel values range\n from 0 to 255.\n\n **y_test**: uint8 NumPy array of labels (integers in range 0-99)\n with shape `(10000, 1)` for the test data.\n\n Example:\n\n ```python\n (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()\n assert x_train.shape == (50000, 32, 32, 3)\n assert x_test.shape == (10000, 32, 32, 3)\n assert y_train.shape == (50000, 1)\n assert y_test.shape == (10000, 1)\n ```\n ", "language": "en", "n_whitespaces": 304, "n_words": 193, "vocab_size": 106 }
def load_data(label_mode="fine"): if label_mode not in ["fine", "coarse"]: raise ValueError( '`label_mode` must be one of `"fine"`, `"coarse"`. ' f"Received: label_mode={label_mode}." ) dirname = "cifar-100-python" origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" path = get_file( dirname, origin=origin, untar=True, file_hash="85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7", ) fpath = os.path.join(path, "train") x_train, y_train = load_batch(fpath, label_key=label_mode + "_labels") fpath = os.path.join(path, "test") x_test, y_test = load_batch(fpath, label_key=label_mode + "_labels") y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) if backend.image_data_format() == "channels_last": x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test)
8,993
46,789
194
dev/breeze/src/airflow_breeze/utils/path_utils.py
114
27
def find_airflow_sources_root() -> Path: default_airflow_sources_root = Path.cwd() # Try to find airflow sources in current working dir airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd()) if not airflow_sources_root: # Or if it fails, find it in parents of the directory where the ./breeze.py is. airflow_sources_root = search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent) if airflow_sources_root: os.chdir(airflow_sources_root) return Path(airflow_sources_root) else: console.print( f"\n[bright_yellow]Could not find Airflow sources location. " f"Assuming {default_airflow_sources_root}" ) os.chdir(default_airflow_sources_root) return Path(default_airflow_sources_root) AIRFLOW_SOURCES_ROOT = find_airflow_sources_root() BUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build' FILES_DIR = AIRFLOW
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contribugors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match "new" life of Breeze rather than old one. Some improvements are made to the autocomplete and click help messages printed. Full list of choices is always displayed, parameters are groups according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default similarly as the original Breeze. All "shortcut" parameters have been standardized - i.e common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcute. The "stop" and "cleanup" command have been added as they are necessary for average user to complete the regular usage cycle. Documentation for all the important methods have been updated.
find_airflow_sources_root
4ffd4f09532fceb67675fce4c1f5cd383eff992e
airflow
path_utils.py
15
26
https://github.com/apache/airflow.git
3
79
0
71
267
Python
{ "docstring": "\n Find the root of airflow sources. When Breeze is run from sources, it is easy, but this one also\n has to handle the case when Breeze is installed via `pipx` so it searches upwards of the current\n directory to find the right root of airflow directory.\n\n If not found, current directory is returned (this handles the case when Breeze is run from the local\n directory.\n\n :return: Path for the found sources.\n\n ", "language": "en", "n_whitespaces": 93, "n_words": 71, "vocab_size": 45 }
def find_airflow_sources_root() -> Path: default_airflow_sources_root = Path.cwd() # Try to find airflow sources in current working dir airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd()) if not airflow_sources_root: # Or if it fails, find it in parents of the directory where the ./breeze.py is. airflow_sources_root = search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent) if airflow_sources_root: os.chdir(airflow_sources_root) return Path(airflow_sources_root) else: console.print( f"\n[bright_yellow]Could not find Airflow sources location. " f"Assuming {default_airflow_sources_root}" ) os.chdir(default_airflow_sources_root) return Path(default_airflow_sources_root) AIRFLOW_SOURCES_ROOT = find_airflow_sources_root() BUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build' FILES_DIR = AIRFLOW_SOURCES_ROOT / 'files' MSSQL_DATA_VOLUME = AIRFLOW_SOURCES_ROOT / 'tmp_mssql_volume' MYPY_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.mypy_cache' LOGS_DIR = AIRFLOW_SOURCES_ROOT / 'logs' DIST_DIR = AIRFLOW_SOURCES_ROOT / 'dist' SCRIPTS_CI_DIR = AIRFLOW_SOURCES_ROOT / 'scripts' / 'ci' DOCKER_CONTEXT_DIR = AIRFLOW_SOURCES_ROOT / 'docker-context-files' CACHE_TMP_FILE_DIR = tempfile.TemporaryDirectory() OUTPUT_LOG = Path(CACHE_TMP_FILE_DIR.name, 'out.log')
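`search_upwards_for_airflow_sources_root` itself is not included in this record; the generic "walk up until a marker is found" pattern it presumably implements looks something like the sketch below (the marker file name is an arbitrary placeholder, not necessarily what Breeze checks for):

```python
from pathlib import Path
from typing import Optional

def search_upwards_for_marker(start: Path, marker: str = "pyproject.toml") -> Optional[Path]:
    """Return the first directory at or above *start* that contains *marker*, else None."""
    for candidate in [start, *start.parents]:
        if (candidate / marker).exists():
            return candidate
    return None

print(search_upwards_for_marker(Path.cwd()))
```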
117,527
321,097
346
qutebrowser/browser/network/pac.py
94
19
def _parse_proxy_entry(proxy_str): config = [c.strip() for c in proxy_str.split(' ') if c] if not config: raise ParseProxyError("Empty proxy entry") if config[0] == "DIRECT": if len(config) != 1: raise ParseProxyError("Invalid number of parameters for " + "DIRECT") return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy) elif config[0] == "PROXY": if len(config) != 2: raise ParseProxyError("Invalid number of parameters for PROXY") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port) elif config[0] in ["SOCKS", "SOCKS5"]: if len(config) != 2: raise ParseProxyError("Invalid number of parameters for SOCKS") host, port = PACResol
Run scripts/dev/rewrite_enums.py
_parse_proxy_entry
0877fb0d78635692e481c8bde224fac5ad0dd430
qutebrowser
pac.py
13
22
https://github.com/qutebrowser/qutebrowser.git
10
183
0
52
307
Python
{ "docstring": "Parse one proxy string entry, as described in PAC specification.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def _parse_proxy_entry(proxy_str): config = [c.strip() for c in proxy_str.split(' ') if c] if not config: raise ParseProxyError("Empty proxy entry") if config[0] == "DIRECT": if len(config) != 1: raise ParseProxyError("Invalid number of parameters for " + "DIRECT") return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy) elif config[0] == "PROXY": if len(config) != 2: raise ParseProxyError("Invalid number of parameters for PROXY") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port) elif config[0] in ["SOCKS", "SOCKS5"]: if len(config) != 2: raise ParseProxyError("Invalid number of parameters for SOCKS") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.Socks5Proxy, host, port) else: err = "Unknown proxy type: {}" raise ParseProxyError(err.format(config[0]))
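Stripped of the Qt proxy objects, `_parse_proxy_entry` is just whitespace-splitting a PAC return value and dispatching on the first token. A plain-Python sketch of that shape (names and return type are illustrative, not qutebrowser's API):

```python
def parse_pac_entry(entry):
    """Return (kind, host, port) for one PAC proxy entry such as 'PROXY host:3128'."""
    parts = [p.strip() for p in entry.split(" ") if p.strip()]
    if not parts:
        raise ValueError("Empty proxy entry")
    kind = parts[0]
    if kind == "DIRECT":
        if len(parts) != 1:
            raise ValueError("DIRECT takes no parameters")
        return ("DIRECT", None, None)
    if kind in ("PROXY", "SOCKS", "SOCKS5"):
        if len(parts) != 2:
            raise ValueError(f"{kind} needs exactly one host:port parameter")
        host, _, port = parts[1].partition(":")
        return (kind, host, int(port) if port else None)
    raise ValueError(f"Unknown proxy type: {kind}")

print(parse_pac_entry("PROXY proxy.example.com:3128"))  # ('PROXY', 'proxy.example.com', 3128)
print(parse_pac_entry("DIRECT"))                       # ('DIRECT', None, None)
```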
38,505
160,133
81
numpy/f2py/tests/test_f2py2e.py
37
19
def test_gen_pyf(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) opath = Path(hello_world_f90).stem + ".pyf" monkeypatch.setattr(sys,
TST: Initialize f2py2e tests of the F2PY CLI (#20668) Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff. More importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.
test_gen_pyf
729ad4f92420231e2a7009b3223c6c7620b8b808
numpy
test_f2py2e.py
13
9
https://github.com/numpy/numpy.git
1
77
0
34
147
Python
{ "docstring": "Ensures that a signature file is generated via the CLI\n CLI :: -h\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
def test_gen_pyf(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) opath = Path(hello_world_f90).stem + ".pyf" monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) with util.switchdir(ipath.parent): f2pycli() # Generate wrappers out, _ = capfd.readouterr() assert "Saving signatures to file" in out assert Path(f'{opath}').exists()
12,237
60,671
100
.venv/lib/python3.8/site-packages/pip/_internal/configuration.py
34
7
def _dictionary(self): # type: () -> Dict[str, Any] # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} for variant in OVERRIDE_ORDER:
upd; format
_dictionary
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
configuration.py
11
5
https://github.com/jindongwang/transferlearning.git
2
28
0
28
50
Python
{ "docstring": "A dictionary representing the loaded configuration.\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
def _dictionary(self): # type: () -> Dict[str, Any] # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} for variant in OVERRIDE_ORDER: retval.update(self._config[variant]) return retval
51,204
205,770
99
django/db/models/query.py
12
10
def using(self, alias): return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, )
Refs #33476 -- Reformatted code with Black.
using
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
query.py
11
9
https://github.com/django/django.git
1
51
0
12
75
Python
{ "docstring": "Select the database this RawQuerySet should execute against.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def using(self, alias): return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, )
81,394
275,492
39
keras/optimizers/optimizer_v2/optimizer_v2.py
15
6
def get_weights(self): params = self.we
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
get_weights
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
optimizer_v2.py
7
3
https://github.com/keras-team/keras.git
1
18
0
15
33
Python
{ "docstring": "Returns the current weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function returns the weight values associated with this\n optimizer as a list of Numpy arrays. The first value is always the\n iterations count of the optimizer, followed by the optimizer's state\n variables in the order they were created. The returned list can in turn\n be used to load state into similarly parameterized optimizers.\n\n For example, the RMSprop optimizer for this simple model returns a list of\n three values-- the iteration count, followed by the root-mean-square value\n of the kernel and bias of the single Dense layer:\n\n >>> opt = tf.keras.optimizers.RMSprop()\n >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n >>> m.compile(opt, loss='mse')\n >>> data = np.arange(100).reshape(5, 20)\n >>> labels = np.zeros(5)\n >>> results = m.fit(data, labels) # Training.\n >>> len(opt.get_weights())\n 3\n\n Returns:\n Weights values as a list of numpy arrays.\n ", "language": "en", "n_whitespaces": 288, "n_words": 143, "vocab_size": 94 }
def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer.
70,202
244,030
108
mmdet/core/bbox/match_costs/match_cost.py
44
14
def _focal_loss_cost(self, cls_pred, gt_labels): cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + sel
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
_focal_loss_cost
cac356380d505bf15587f07c0529218cc36b9652
mmdetection
match_cost.py
14
8
https://github.com/open-mmlab/mmdetection.git
1
102
0
27
161
Python
{ "docstring": "\n Args:\n cls_pred (Tensor): Predicted classification logits, shape\n (num_query, num_class).\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n Returns:\n torch.Tensor: cls_cost value with weight\n ", "language": "en", "n_whitespaces": 92, "n_words": 22, "vocab_size": 20 }
def _focal_loss_cost(self, cls_pred, gt_labels): cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight
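The cost above is the standard focal-loss decomposition evaluated for every (prediction, class) pair and then gathered at the ground-truth labels. A NumPy transcription of the same arithmetic (PyTorch swapped out only to keep the sketch dependency-light; the alpha/gamma/eps/weight defaults are assumed for illustration, not read from the mmdet config):

```python
import numpy as np

def focal_loss_cost(cls_logits, gt_labels, alpha=0.25, gamma=2.0, eps=1e-12, weight=1.0):
    """cls_logits: (num_query, num_class) raw logits; gt_labels: (num_gt,) class indices."""
    p = 1.0 / (1.0 + np.exp(-cls_logits))                          # sigmoid
    neg_cost = -np.log(1.0 - p + eps) * (1.0 - alpha) * p ** gamma
    pos_cost = -np.log(p + eps) * alpha * (1.0 - p) ** gamma
    return (pos_cost[:, gt_labels] - neg_cost[:, gt_labels]) * weight

cost = focal_loss_cost(np.array([[2.0, -1.0], [-0.5, 1.5]]), np.array([0, 1]))
print(cost.shape)   # (2, 2): one cost per (prediction, ground-truth) pair
```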
50,958
204,887
47
django/db/backends/base/operations.py
15
5
def date_extract_sql(self, lookup_type, field_name): raise NotImplementedError( "subclasses of BaseDatabaseOperations may require a date_extract_sq
Refs #33476 -- Reformatted code with Black.
date_extract_sql
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
operations.py
8
4
https://github.com/django/django.git
1
15
0
15
27
Python
{ "docstring": "\n Given a lookup_type of 'year', 'month', or 'day', return the SQL that\n extracts a value from the given date field field_name.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
def date_extract_sql(self, lookup_type, field_name): raise NotImplementedError( "subclasses of BaseDatabaseOperations may require a date_extract_sql() method" )
72,349
248,557
714
tests/rest/client/test_rooms.py
227
36
def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), ) # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it # allow everything for now. # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. mock = Mock( return_value=make_awaitable(synapse.module_api.NOT_SPAM), spec=lambda *x: None, ) self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) # Send a 3PID invite into the room and check that it succeeded. email_to_invite = "teresa@example.com" channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", content={ "id_server": "example.com", "id_access_token": "sometoken", "medium": "email", "address":
Uniformize spam-checker API, part 4: port other spam-checker callbacks to return `Union[Allow, Codes]`. (#12857) Co-authored-by: Brendan Abolivier <babolivier@matrix.org>
test_threepid_invite_spamcheck
a164a46038b0e51142781619db0e6dec8e0c2aaa
synapse
test_rooms.py
13
44
https://github.com/matrix-org/synapse.git
1
243
0
131
424
Python
{ "docstring": "\n Test allowing/blocking threepid invites with a spam-check module.\n\n In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal[\"NOT_SPAM\"]]`.", "language": "en", "n_whitespaces": 38, "n_words": 24, "vocab_size": 23 }
def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), ) # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it # allow everything for now. # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. mock = Mock( return_value=make_awaitable(synapse.module_api.NOT_SPAM), spec=lambda *x: None, ) self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) # Send a 3PID invite into the room and check that it succeeded. email_to_invite = "teresa@example.com" channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", content={ "id_server": "example.com", "id_access_token": "sometoken", "medium": "email", "address": email_to_invite, }, access_token=self.tok, ) self.assertEqual(channel.code, 200) # Check that the callback was called with the right params. mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) # Check that the call to send the invite was made. make_invite_mock.assert_called_once() # Now change the return value of the callback to deny any invite and test that # we can't send the invite. mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", content={ "id_server": "example.com", "id_access_token": "sometoken", "medium": "email", "address": email_to_invite, }, access_token=self.tok, ) self.assertEqual(channel.code, 403) # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once()
@pytest.mark.parametrize( "constraint", [ _ArrayLikes, _Callables, _InstancesOf, _NoneConstraint, _RandomStates, _SparseMatrices, ], )
76,095
260,155
79
sklearn/utils/tests/test_param_validation.py
17
14
def test_generate_invalid_param_val_all_valid(constraints): with pytest.raises(NotImplementedError): generate_invalid_param_val(constraints[0], constraints=constraints) @pytest.mark.parametrize( "constraint", [ _ArrayLikes, _Callables, _InstancesOf, _NoneConstraint, _RandomStates,
FIX Param validation: fix generating invalid param when 2 interval constraints (#23513) Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
test_generate_invalid_param_val_all_valid
02cbe01e67165d7d38e5e441cfccd6b57b2207b6
scikit-learn
test_param_validation.py
10
3
https://github.com/scikit-learn/scikit-learn.git
1
25
1
17
78
Python
{ "docstring": "Check that the function raises NotImplementedError when there's no invalid value\n for the constraint.\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 13 }
def test_generate_invalid_param_val_all_valid(constraints): with pytest.raises(NotImplementedError): generate_invalid_param_val(constraints[0], constraints=constraints) @pytest.mark.parametrize( "constraint", [ _ArrayLikes, _Callables, _InstancesOf, _NoneConstraint, _RandomStates, _SparseMatrices, ], )
34,607
149,953
163
freqtrade/freqai/data_drawer.py
47
16
def load_drawer_from_disk(self): exists = Path(se
rehaul of backend data management - increasing performance by holding history in memory, reducing load on the ratelimit by only pinging exchange once per candle. Improve code readability.
load_drawer_from_disk
16b4a5b71ff140f5de31e5d5572f1f193457cf6b
freqtrade
data_drawer.py
16
11
https://github.com/freqtrade/freqtrade.git
3
81
0
41
156
Python
{ "docstring": "\n Locate and load a previously saved data drawer full of all pair model metadata in\n present model folder.\n :returns:\n exists: bool = whether or not the drawer was located\n ", "language": "en", "n_whitespaces": 65, "n_words": 29, "vocab_size": 27 }
def load_drawer_from_disk(self): exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists() if exists: with open(self.full_path / str('pair_dictionary.json'), "r") as fp: self.pair_dict = json.load(fp) elif not self.follow_mode: logger.info("Could not find existing datadrawer, starting from scratch") else: logger.warning(f'Follower could not find pair_dictionary at {self.full_path} ' 'sending null values back to strategy') return exists
55,356
218,510
658
python3.10.4/Lib/ipaddress.py
157
16
def address_exclude(self, other): if not self._version == other._version: raise TypeError("%s and %s are not of the same version" % ( self, other)) if not isinstance(other, _BaseNetwork):
add python 3.10.4 for windows
address_exclude
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
ipaddress.py
15
32
https://github.com/XX-net/XX-Net.git
11
191
0
77
324
Python
{ "docstring": "Remove an address from a larger block.\n\n For example:\n\n addr1 = ip_network('192.0.2.0/28')\n addr2 = ip_network('192.0.2.1/32')\n list(addr1.address_exclude(addr2)) =\n [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),\n IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]\n\n or IPv6:\n\n addr1 = ip_network('2001:db8::1/32')\n addr2 = ip_network('2001:db8::1/128')\n list(addr1.address_exclude(addr2)) =\n [ip_network('2001:db8::1/128'),\n ip_network('2001:db8::2/127'),\n ip_network('2001:db8::4/126'),\n ip_network('2001:db8::8/125'),\n ...\n ip_network('2001:db8:8000::/33')]\n\n Args:\n other: An IPv4Network or IPv6Network object of the same type.\n\n Returns:\n An iterator of the IPv(4|6)Network objects which is self\n minus other.\n\n Raises:\n TypeError: If self and other are of differing address\n versions, or if other is not a network object.\n ValueError: If other is not completely contained by self.\n\n ", "language": "en", "n_whitespaces": 390, "n_words": 88, "vocab_size": 65 }
def address_exclude(self, other): if not self._version == other._version: raise TypeError("%s and %s are not of the same version" % ( self, other)) if not isinstance(other, _BaseNetwork): raise TypeError("%s is not a network object" % other) if not other.subnet_of(self): raise ValueError('%s not contained in %s' % (other, self)) if other == self: return # Make sure we're comparing the network of other. other = other.__class__('%s/%s' % (other.network_address, other.prefixlen)) s1, s2 = self.subnets() while s1 != other and s2 != other: if other.subnet_of(s1): yield s2 s1, s2 = s1.subnets() elif other.subnet_of(s2): yield s1 s1, s2 = s2.subnets() else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) if s1 == other: yield s2 elif s2 == other: yield s1 else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other))
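A minimal usage sketch for the record above, using only the standard-library ipaddress module; the concrete networks are copied from the record's own docstring example rather than any outside source.

from ipaddress import ip_network

addr1 = ip_network("192.0.2.0/28")
addr2 = ip_network("192.0.2.1/32")
# Excluding the /32 leaves the surrounding blocks of the /28.
remaining = sorted(addr1.address_exclude(addr2))
print(remaining)
# Expected, per the docstring above:
# [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
#  IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]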
7,232
39,440
28
recommenders/utils/python_utils.py
12
11
def mutual_information(cooccurrence): with np.errstate(invalid="ignore", divide="ignore"): result = np.log2(cooccurrence.shape[0] * lift(cooccurrence)) return np.array(result)
Add new item similarity metrics for SAR (#1754) * Add mutual information similarity in SAR * Add lexicographers mutual information similarity for SAR * Add cosine similarity for SAR * Add inclusion index for SAR * Typos * Change SARSingleNode to SAR * Convert item similarity matrix to np.array * Update * Update SAR tests * Remove unused imports * Add explanations for new similarity metrics
mutual_information
1d7341e93d1f03387699fb3c6ae0b6c0e464296f
recommenders
python_utils.py
13
4
https://github.com/microsoft/recommenders.git
1
45
0
12
79
Python
{ "docstring": "Helper method to calculate the Mutual Information of a matrix of\n co-occurrences.\n\n Mutual information is a measurement of the amount of information\n explained by the i-th j-th item column vector.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of mutual information between any two items.\n\n ", "language": "en", "n_whitespaces": 83, "n_words": 51, "vocab_size": 35 }
def mutual_information(cooccurrence): with np.errstate(invalid="ignore", divide="ignore"): result = np.log2(cooccurrence.shape[0] * lift(cooccurrence)) return np.array(result)
3,612
20,908
72
pipenv/patched/notpip/_vendor/typing_extensions.py
49
5
def _is_dunder(name): return len(name) > 4 and name.startswith('__') and name.endswith('__
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_is_dunder
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
typing_extensions.py
10
2
https://github.com/pypa/pipenv.git
3
27
0
44
53
Python
{ "docstring": "Returns True if name is a __dunder_variable_name__.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def _is_dunder(name): return len(name) > 4 and name.startswith('__') and name.endswith('__') # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality # checks, argument expansion etc. are done on the _subs_tre. As a result we # can't provide a get_type_hints function that strips out annotations.
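A tiny self-contained check of the helper above; since _is_dunder is private to the vendored typing_extensions copy, the sketch re-declares it locally instead of importing it.

def _is_dunder(name):
    # True only for names shaped like __something__ (strictly longer than '____').
    return len(name) > 4 and name.startswith('__') and name.endswith('__')

assert _is_dunder('__qualname__') is True
assert _is_dunder('__x__') is True     # shortest accepted dunder, length 5
assert _is_dunder('____') is False     # length 4 is rejected by the len check
assert _is_dunder('_private') is False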
75,949
259,851
615
sklearn/neighbors/_kde.py
133
36
def fit(self, X, y=None, sample_weight=None): algorithm = self._choose_algorithm(self.algorithm, self.metric) if isinstance(self.bandwidth, str): methods_supported = ("scott", "silvermann") if self.bandwidth not in methods_supported: raise ValueError( "When `bandwidth` is a string, it should be one of: " f"{', '.join(methods_supported)}. Got {self.bandwidth!r} instead." ) if self.bandwidth == "scott": self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif self.bandwidth == "silvermann": self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** ( -1 / (X.shape[1] + 4) ) else: check_scalar( self.bandwidth, "bandwidth", target_type=numbers.Real, min_val=0, include_boundaries="neither", ) self.bandwidth_ = self.bandwidth if self.kernel not in VALID_KERNELS: raise ValueError("invalid kernel: '{0}'".format(self.kernel)) X = self._validate_data(X, order="C", dtype=DTYP
FEA Added Kernel Density bandwidth estimation and test (#22993) Co-authored-by: STOJANOVIC Jovan <jovan.stojanovic@inria.fr> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
fit
dedaa8f25f136e954941d15151bbbc88150789fc
scikit-learn
_kde.py
19
42
https://github.com/scikit-learn/scikit-learn.git
8
278
0
89
454
Python
{ "docstring": "Fit the Kernel Density model on the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n List of sample weights attached to the data X.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "language": "en", "n_whitespaces": 211, "n_words": 70, "vocab_size": 54 }
def fit(self, X, y=None, sample_weight=None): algorithm = self._choose_algorithm(self.algorithm, self.metric) if isinstance(self.bandwidth, str): methods_supported = ("scott", "silvermann") if self.bandwidth not in methods_supported: raise ValueError( "When `bandwidth` is a string, it should be one of: " f"{', '.join(methods_supported)}. Got {self.bandwidth!r} instead." ) if self.bandwidth == "scott": self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif self.bandwidth == "silvermann": self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** ( -1 / (X.shape[1] + 4) ) else: check_scalar( self.bandwidth, "bandwidth", target_type=numbers.Real, min_val=0, include_boundaries="neither", ) self.bandwidth_ = self.bandwidth if self.kernel not in VALID_KERNELS: raise ValueError("invalid kernel: '{0}'".format(self.kernel)) X = self._validate_data(X, order="C", dtype=DTYPE) if sample_weight is not None: sample_weight = _check_sample_weight( sample_weight, X, DTYPE, only_non_negative=True ) kwargs = self.metric_params if kwargs is None: kwargs = {} self.tree_ = TREE_DICT[algorithm]( X, metric=self.metric, leaf_size=self.leaf_size, sample_weight=sample_weight, **kwargs, ) return self
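A short usage sketch for the fit() record above, assuming a scikit-learn release that includes the string bandwidth options being added here; the data is a random placeholder.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
# "scott" triggers the data-driven rule n ** (-1 / (d + 4)) shown in the code above.
kde = KernelDensity(kernel="gaussian", bandwidth="scott").fit(X)
print(kde.bandwidth_)            # the resolved numeric bandwidth
print(kde.score_samples(X[:3]))  # log-density of the first few points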
70,117
243,767
61
src/PIL/ImageMorph.py
18
9
def get_on_pixels(self, image):
Improve exception traceback readability
get_on_pixels
2ae55ccbdad9c842929fb238ea1eb81d1f999024
Pillow
ImageMorph.py
9
5
https://github.com/python-pillow/Pillow.git
2
34
0
18
60
Python
{ "docstring": "Get a list of all turned on pixels in a binary image\n\n Returns a list of tuples of (x,y) coordinates\n of all matching pixels. See :ref:`coordinate-system`.", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 19 }
def get_on_pixels(self, image): if image.mode != "L": msg = "Image mode must be L" raise ValueError(msg) return _imagingmorph.get_on_pixels(image.im.id)
48,420
197,273
109
sympy/parsing/ast_parser.py
21
14
def visit_Num(self, node): if isinstance(node.n, int): return fix_missing_locations(Call(func=Name('Integer', Load(
Inserted the `visit_Num` function back in. This was required to keep SymPy compatible with Python 3.7.
visit_Num
e95d725680aab772037848628471a31f03a13901
sympy
ast_parser.py
17
8
https://github.com/sympy/sympy.git
3
86
0
15
136
Python
{ "docstring": "This function exists for backwards compatibility with Python 3.7.\n It should be removed when SymPy removes support for Python 3.7.", "language": "en", "n_whitespaces": 29, "n_words": 20, "vocab_size": 17 }
def visit_Num(self, node): if isinstance(node.n, int): return fix_missing_locations(Call(func=Name('Integer', Load()), args=[node], keywords=[])) elif isinstance(node.n, float): return fix_missing_locations(Call(func=Name('Float', Load()), args=[node], keywords=[])) return node
56,343
221,318
42
python3.10.4/Lib/cgitb.py
11
8
def enable(display=1, logdir=None, context=5, format="html"): sys.excepthook = Hook(display=display, logdir=logdir, context=context, format=format
add python 3.10.4 for windows
enable
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
cgitb.py
9
3
https://github.com/XX-net/XX-Net.git
1
42
0
11
64
Python
{ "docstring": "Install an exception handler that formats tracebacks as HTML.\n\n The optional argument 'display' can be set to 0 to suppress sending the\n traceback to the browser, and 'logdir' can be set to a directory to cause\n tracebacks to be written to files there.", "language": "en", "n_whitespaces": 51, "n_words": 43, "vocab_size": 31 }
def enable(display=1, logdir=None, context=5, format="html"): sys.excepthook = Hook(display=display, logdir=logdir, context=context, format=format)
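A sketch of installing the hook from the record above; cgitb ships with the Python 3.10 standard library shown in the path, and format="text" is chosen here only to keep tracebacks plain rather than HTML.

import cgitb

# Replaces sys.excepthook; with logdir set, each traceback would also be written to a file there.
cgitb.enable(display=1, logdir=None, context=5, format="text")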
13,455
63,660
63
.venv/lib/python3.8/site-packages/pip/_vendor/requests/utils.py
27
7
def parse_list_header(value): result = [] for item in
upd; format
parse_list_header
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
utils.py
15
7
https://github.com/jindongwang/transferlearning.git
3
54
0
23
91
Python
{ "docstring": "Parse lists as described by RFC 2068 Section 2.\n\n In particular, parse comma-separated lists where the elements of\n the list may include quoted-strings. A quoted-string could\n contain a comma. A non-quoted string could have quotes in the\n middle. Quotes are removed automatically after parsing.\n\n It basically works like :func:`parse_set_header` just that items\n may appear multiple times and case sensitivity is preserved.\n\n The return value is a standard :class:`list`:\n\n >>> parse_list_header('token, \"quoted value\"')\n ['token', 'quoted value']\n\n To create a header from the :class:`list` again, use the\n :func:`dump_header` function.\n\n :param value: a string with a list header.\n :return: :class:`list`\n :rtype: list\n ", "language": "en", "n_whitespaces": 147, "n_words": 99, "vocab_size": 82 }
def parse_list_header(value): result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission).
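The docstring above already carries a usage example; this sketch just replays it through the public requests.utils module, which is an assumption — the record itself is pip's vendored copy, whose import path differs.

from requests.utils import parse_list_header

# Quoted elements come back with their quotes stripped, as described above.
assert parse_list_header('token, "quoted value"') == ['token', 'quoted value']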
105,628
306,845
46
homeassistant/components/apple_tv/media_player.py
14
8
def media_series_title(self) -> str | None: if self._playing and self._is_feature_available(FeatureName.SeriesName): return self
Improve type hints in apple_tv media player (#77940)
media_series_title
5276d849ec497ccd0cecf3cb6a8dacae4fa6f845
core
media_player.py
9
5
https://github.com/home-assistant/core.git
3
32
0
13
53
Python
{ "docstring": "Title of series of current playing media, TV show only.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def media_series_title(self) -> str | None: if self._playing and self._is_feature_available(FeatureName.SeriesName): return self._playing.series_name return None
57,244
224,209
221
mkdocs/commands/build.py
116
18
def _build_template(name, template, files, config, nav): # Run `pre_template` plugin events. template = config['plugins'].run_event( 'pre_template', template, template_name=name, config=config ) if utils.is_error_template(name): # Force absolute URLs in the nav of error pages and account for the # possibility that the docs root might be different than the server root. # See https://github.com/mkdocs/mkdocs/issues/77. # However, if site_url is not set, assume the docs root and server root # are the same. See https://github.com/mkdocs/mkdocs/issues/1598. base_url = urlsplit(config['site_url'] or '/').path else: base_url = utils.get_relative_url('.', name) context = get_contex
Format code with `black -l100 --skip-string-normalization`
_build_template
dca7cbb43fcd6ea7c677c98ba585395b070d387b
mkdocs
build.py
14
15
https://github.com/mkdocs/mkdocs.git
3
134
0
73
221
Python
{ "docstring": "\n Return rendered output for given template as a string.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
def _build_template(name, template, files, config, nav): # Run `pre_template` plugin events. template = config['plugins'].run_event( 'pre_template', template, template_name=name, config=config ) if utils.is_error_template(name): # Force absolute URLs in the nav of error pages and account for the # possibility that the docs root might be different than the server root. # See https://github.com/mkdocs/mkdocs/issues/77. # However, if site_url is not set, assume the docs root and server root # are the same. See https://github.com/mkdocs/mkdocs/issues/1598. base_url = urlsplit(config['site_url'] or '/').path else: base_url = utils.get_relative_url('.', name) context = get_context(nav, files, config, base_url=base_url) # Run `template_context` plugin events. context = config['plugins'].run_event( 'template_context', context, template_name=name, config=config ) output = template.render(context) # Run `post_template` plugin events. output = config['plugins'].run_event('post_template', output, template_name=name, config=config) return output
48,951
198,467
375
sympy/core/basic.py
88
18
def matches(self, expr, repl_dict=None, old=False): expr = sympify(expr) if not isinstance(expr, self.__class__): return None if repl_dict is None: repl_dict = {} else: repl_dict = repl_dict.copy() if self == expr:
Code cleanup
matches
9d58006fc0a23afcba38f641c9472917c436428a
sympy
basic.py
16
26
https://github.com/sympy/sympy.git
10
164
0
52
260
Python
{ "docstring": "\n Helper method for match() that looks for a match between Wild symbols\n in self and expressions in expr.\n\n Examples\n ========\n\n >>> from sympy import symbols, Wild, Basic\n >>> a, b, c = symbols('a b c')\n >>> x = Wild('x')\n >>> Basic(a + x, x).matches(Basic(a + b, c)) is None\n True\n >>> Basic(a + x, x).matches(Basic(a + b + c, b + c))\n {x_: b + c}\n ", "language": "en", "n_whitespaces": 151, "n_words": 66, "vocab_size": 45 }
def matches(self, expr, repl_dict=None, old=False): expr = sympify(expr) if not isinstance(expr, self.__class__): return None if repl_dict is None: repl_dict = {} else: repl_dict = repl_dict.copy() if self == expr: return repl_dict if len(self.args) != len(expr.args): return None d = repl_dict # already a copy for arg, other_arg in zip(self.args, expr.args): if arg == other_arg: continue if arg.is_Relational: try: d = arg.xreplace(d).matches(other_arg, d, old=old) except TypeError: # Should be InvalidComparisonError when introduced d = None else: d = arg.xreplace(d).matches(other_arg, d, old=old) if d is None: return None return d
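A sketch replaying the Wild-matching example from the docstring above; it adds nothing beyond what that docstring already states.

from sympy import symbols, Wild, Basic

a, b, c = symbols('a b c')
x = Wild('x')

# No single substitution for x fits both argument slots, so matches() returns None.
assert Basic(a + x, x).matches(Basic(a + b, c)) is None
# Here x -> b + c satisfies both slots, so the replacement dict is returned.
assert Basic(a + x, x).matches(Basic(a + b + c, b + c)) == {x: b + c}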
30,006
133,393
376
python/ray/util/sgd/torch/worker_group.py
85
26
def _create_placement_group(self, num_workers): pg = get_current_placement_group() if pg is None: bundle = {"CPU": self._num_cpus_per_worker, "GPU": int(self._use_gpu)} bundles = [bundle] * num_workers pg = ray.util.placement_group(bundles, strategy="SPREAD") logger.debug("Waiting for placement group to start.") ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S) if ready: logger.debug("Placement group has started.")
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
_create_placement_group
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
worker_group.py
16
21
https://github.com/ray-project/ray.git
3
121
0
67
212
Python
{ "docstring": "Creates a placement group for the workers.\n\n If this worker is already in a placement group then a new one will\n not be created. This is primarily for when Tune is the upstream and\n will allocate resources for SGD workers.\n\n If this worker is not in a placement group, a new one will be created\n and set. The placement group will have a single bundle for each worker\n and use the SPREAD strategy for an even distribution.\n ", "language": "en", "n_whitespaces": 126, "n_words": 77, "vocab_size": 43 }
def _create_placement_group(self, num_workers): pg = get_current_placement_group() if pg is None: bundle = {"CPU": self._num_cpus_per_worker, "GPU": int(self._use_gpu)} bundles = [bundle] * num_workers pg = ray.util.placement_group(bundles, strategy="SPREAD") logger.debug("Waiting for placement group to start.") ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S) if ready: logger.debug("Placement group has started.") else: raise TimeoutError( "Placement group creation timed out. Make sure " "your cluster either has enough resources or use " "an autoscaling cluster. Current resources " "available: {}, resources requested by the " "placement group: {}".format( ray.available_resources(), pg.bundle_specs ) ) self._worker_placement_group = pg
14,679
67,953
30
erpnext/stock/report/warehouse_wise_item_balance_age_and_value/warehouse_wise_item_balance_age_and_value.py
48
16
def get_warehouse_list(filters): from frappe.core.doctype.user_permission.user_permission import get_permitted_documents condition = "" user_permitted_warehouse = get_permitted_documents("Warehouse") value = () if user_permitted_warehouse: condition = "and name in %s" value = set(
style: format code with black
get_warehouse_list
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
warehouse_wise_item_balance_age_and_value.py
12
20
https://github.com/frappe/erpnext.git
4
87
0
33
149
Python
{ "docstring": "select name\n\t\tfrom `tabWarehouse` where is_group = 0\n\t\t{condition}", "language": "en", "n_whitespaces": 6, "n_words": 9, "vocab_size": 9 }
def get_warehouse_list(filters): from frappe.core.doctype.user_permission.user_permission import get_permitted_documents condition = "" user_permitted_warehouse = get_permitted_documents("Warehouse") value = () if user_permitted_warehouse: condition = "and name in %s" value = set(user_permitted_warehouse) elif not user_permitted_warehouse and filters.get("warehouse"): condition = "and name = %s" value = filters.get("warehouse") return frappe.db.sql( .format( condition=condition ), value, as_dict=1, )
8,684
45,743
94
airflow/models/mappedoperator.py
30
10
def unmap(self) -> "BaseOperator": dag = self.dag if not dag:
More explicit mapped argument validation (#21933) * More explicit mapped argument validation Instead of always using MagicMock to validate mapped arguments, this implements a more sophisticated protocol that allows an operator to implement a 'validate_mapped_arguments' to provide custom validation logic. If an operator just wants to use __init__ for validation, however, they can set a flag 'mapped_arguments_validated_by_init' to get the behavior easily. (This does *not* use MagicMock, however, since any custom validation logic should be able to handle those on its own). The 'validate_mapped_arguments' flag is currently only set on PythonOperator. It can likely be used on a lot more operators down the road. * Add flag to distinguish a validation-only init There's just too much magic during a task's initialization that tries to add it into the dependency graph. This flag is needed to work around all that, I think.
unmap
b65e52205a7045eb08d471289b85abda587442b7
airflow
mappedoperator.py
10
9
https://github.com/apache/airflow.git
3
57
0
24
101
Python
{ "docstring": "Get the \"normal\" Operator after applying the current mapping.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def unmap(self) -> "BaseOperator": dag = self.dag if not dag: raise RuntimeError("Cannot unmap a task without a DAG") dag._remove_task(self.task_id) if isinstance(self.operator_class, str): raise RuntimeError("Cannot unmap a deserialized operator") return self.operator_class(**self._get_unmap_kwargs())
121,034
337,338
35
src/accelerate/test_utils/testing.py
12
5
def require_tensorflow(test_case): if not is_tensorflow_available(): return unittest.skip(
Add logging capabilities (#293) Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> - Added experiment tracking API, and support for Weights and Biases, TensorBoard, and CometML + Tests - Added `tensorflow` to a new dependency list to be used during tests - Added three new functions in `Accelerator` to interact with the API
require_tensorflow
5668270de74a09e5bff15891054f73ddbb1176ac
accelerate
testing.py
11
5
https://github.com/huggingface/accelerate.git
2
26
0
11
49
Python
{ "docstring": "\n Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n installed\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 15 }
def require_tensorflow(test_case): if not is_tensorflow_available(): return unittest.skip("test requires TensorFlow")(test_case) else: return test_case
56,433
221,571
595
python3.10.4/Lib/concurrent/futures/_base.py
125
36
def as_completed(fs, timeout=None): if timeout is not None: end_time = timeout + time.monotonic() fs = set(fs) total_futures = len(fs) with _AcquireFutures(fs): finished = set( f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = fs - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) finished = list(finished) try: yield from _yield_finished_futures(finished, waiter, ref_collect=(fs,)) while pending: if timeout is None: wait_timeout = None else: wait_timeout = end_time - time.monotonic() if wait_timeout < 0: raise TimeoutError( '%d (of %d) futures unfinished' % ( len(pending), total_futures)) waiter.event.wait(wait_timeout) with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] wai
add python 3.10.4 for windows
as_completed
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_base.py
20
36
https://github.com/XX-net/XX-Net.git
9
212
0
81
365
Python
{ "docstring": "An iterator over the given futures that yields each as it completes.\n\n Args:\n fs: The sequence of Futures (possibly created by different Executors) to\n iterate over.\n timeout: The maximum number of seconds to wait. If None, then there\n is no limit on the wait time.\n\n Returns:\n An iterator that yields the given Futures as they complete (finished or\n cancelled). If any given Futures are duplicated, they will be returned\n once.\n\n Raises:\n TimeoutError: If the entire result iterator could not be generated\n before the given timeout.\n ", "language": "en", "n_whitespaces": 172, "n_words": 85, "vocab_size": 63 }
def as_completed(fs, timeout=None): if timeout is not None: end_time = timeout + time.monotonic() fs = set(fs) total_futures = len(fs) with _AcquireFutures(fs): finished = set( f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = fs - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) finished = list(finished) try: yield from _yield_finished_futures(finished, waiter, ref_collect=(fs,)) while pending: if timeout is None: wait_timeout = None else: wait_timeout = end_time - time.monotonic() if wait_timeout < 0: raise TimeoutError( '%d (of %d) futures unfinished' % ( len(pending), total_futures)) waiter.event.wait(wait_timeout) with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] waiter.event.clear() # reverse to keep finishing order finished.reverse() yield from _yield_finished_futures(finished, waiter, ref_collect=(fs, pending)) finally: # Remove waiter from unfinished futures for f in fs: with f._condition: f._waiters.remove(waiter) DoneAndNotDoneFutures = collections.namedtuple( 'DoneAndNotDoneFutures', 'done not_done')
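A typical consumption pattern for the as_completed record above, using only the standard library; the squaring workload is a placeholder.

import concurrent.futures

def work(n):
    return n * n  # placeholder task

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(work, n) for n in range(8)]
    # Futures are yielded in completion order, not submission order.
    for fut in concurrent.futures.as_completed(futures, timeout=30):
        print(fut.result())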
35,150
151,842
139
freqtrade/freqai/RL/BaseReinforcementLearningModel.py
27
15
def pack_env_dict(self) -> Dict[str, Any]: env_info = {"window_size": self.CONV_WIDTH, "reward_kwargs": self.reward_params, "config": self.config, "live": self.live} if self.data_provider: env_info["fee"] = self.data_pr
use a dictionary to make code more readable
pack_env_dict
7b4abd5ef50f3c6f84c6604fc1f79ff4b92c2575
freqtrade
BaseReinforcementLearningModel.py
15
12
https://github.com/freqtrade/freqtrade.git
2
74
0
25
122
Python
{ "docstring": "\n Create dictionary of environment arguments\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
def pack_env_dict(self) -> Dict[str, Any]: env_info = {"window_size": self.CONV_WIDTH, "reward_kwargs": self.reward_params, "config": self.config, "live": self.live} if self.data_provider: env_info["fee"] = self.data_provider._exchange \ .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore return env_info
31,780
139,820
734
rllib/evaluation/rollout_worker.py
184
41
def sample(self) -> SampleBatchType: if self.fake_sampler and self.last_batch is not None: return self.last_batch elif self.input_reader is None: raise ValueError( "RolloutWorker has no `input_reader` object! " "Cannot call `sample()`. You can try setting " "`create_env_on_driver` to True." ) if log_once("sample_start"): logger.info( "Generating sample batch of size {}".format( self.rollout_fragment_length ) ) batches = [self.input_reader.next()] steps_so_far = ( batches[0].count if self.count_steps_by == "env_steps" else batches[0].agent_steps() ) # In truncate_episodes mode, never pull more than 1 batch per env. # This avoids over-running the target batch size. if self.batch_mode == "truncate_episodes": max_batches = self.num_envs else: max_batches = float("inf") while ( steps_so_far < self.rollout_fragment_length and len(batches) < max_batches ): batch = self.input_reader.next() steps_so_far += ( batch.count if self.count_steps_by == "env_steps" else batch.agent_steps() ) batches.append(batch) batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0] self.callbacks.on_sample_end(worker=self, samples=batch) # Always do writes prior to compression for consistency and to allow # for better compression inside the writer. self.output_writer.write(batch) # Do off-policy estimation, if needed. if self.reward_estimators: for sub_batch in batch.split_by_episode(): for estimator in self.reward_estimators: estimator.process(sub_batch) if log_once("sample_end"): logger.info("Completed sample batch:\n\n{}\n".format(summarize(batch))) if self.compress_observations: batch.compress(bulk=self.compress_observations == "bulk") if self.fake_sampler: self.last_batch = batch
[RLlib] Agents to algos: DQN w/o Apex and R2D2, DDPG/TD3, SAC, SlateQ, QMIX, PG, Bandits (#24896)
sample
3815e52a61b6afe44b883d7d745fa00b599f66ca
ray
rollout_worker.py
13
66
https://github.com/ray-project/ray.git
17
284
0
119
481
Python
{ "docstring": "Returns a batch of experience sampled from this worker.\n\n This method must be implemented by subclasses.\n\n Returns:\n A columnar batch of experiences (e.g., tensors).\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTFPolicy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... policy_spec=PGTFPolicy) # doctest: +SKIP\n >>> print(worker.sample()) # doctest: +SKIP\n SampleBatch({\"obs\": [...], \"action\": [...], ...})\n ", "language": "en", "n_whitespaces": 198, "n_words": 67, "vocab_size": 46 }
def sample(self) -> SampleBatchType: if self.fake_sampler and self.last_batch is not None: return self.last_batch elif self.input_reader is None: raise ValueError( "RolloutWorker has no `input_reader` object! " "Cannot call `sample()`. You can try setting " "`create_env_on_driver` to True." ) if log_once("sample_start"): logger.info( "Generating sample batch of size {}".format( self.rollout_fragment_length ) ) batches = [self.input_reader.next()] steps_so_far = ( batches[0].count if self.count_steps_by == "env_steps" else batches[0].agent_steps() ) # In truncate_episodes mode, never pull more than 1 batch per env. # This avoids over-running the target batch size. if self.batch_mode == "truncate_episodes": max_batches = self.num_envs else: max_batches = float("inf") while ( steps_so_far < self.rollout_fragment_length and len(batches) < max_batches ): batch = self.input_reader.next() steps_so_far += ( batch.count if self.count_steps_by == "env_steps" else batch.agent_steps() ) batches.append(batch) batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0] self.callbacks.on_sample_end(worker=self, samples=batch) # Always do writes prior to compression for consistency and to allow # for better compression inside the writer. self.output_writer.write(batch) # Do off-policy estimation, if needed. if self.reward_estimators: for sub_batch in batch.split_by_episode(): for estimator in self.reward_estimators: estimator.process(sub_batch) if log_once("sample_end"): logger.info("Completed sample batch:\n\n{}\n".format(summarize(batch))) if self.compress_observations: batch.compress(bulk=self.compress_observations == "bulk") if self.fake_sampler: self.last_batch = batch return batch
51,828
206,994
152
tests/admin_changelist/tests.py
47
26
def test_no_duplicates_for_non_unique_related_object_in_list_filter(self): parent = Parent.objects.create(name="Mary") # Two children with the same name Child.objects.create(parent=parent, name="Daniel") Child.objects.create(parent=parent, name="Daniel") m = ParentAdmin(Parent, custom_site)
Refs #33476 -- Reformatted code with Black.
test_no_duplicates_for_non_unique_related_object_in_list_filter
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
12
12
https://github.com/django/django.git
1
136
0
38
229
Python
{ "docstring": "\n Regressions tests for #15819: If a field listed in list_filters is a\n non-unique related object, results shouldn't appear more than once.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
def test_no_duplicates_for_non_unique_related_object_in_list_filter(self): parent = Parent.objects.create(name="Mary") # Two children with the same name Child.objects.create(parent=parent, name="Daniel") Child.objects.create(parent=parent, name="Daniel") m = ParentAdmin(Parent, custom_site) request = self.factory.get("/parent/", data={"child__name": "Daniel"}) request.user = self.superuser cl = m.get_changelist_instance(request) # Exists() is applied. self.assertEqual(cl.queryset.count(), 1) # Queryset must be deletable. self.assertIs(cl.queryset.query.distinct, False) cl.queryset.delete() self.assertEqual(cl.queryset.count(), 0)
5,158
28,140
48
saleor/plugins/webhook/utils.py
11
10
def get_current_tax_app() -> Optional[App]: return ( App.objects.order_by("pk") .for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES)
Add support for calculating taxes in Saleor Apps (#9526) * Squash previouse PR in taxes by Sync webhooks * Adjust incoming communication form tax app in order calculation * Change return type for base_checkout_total to Money * Fix cratign order lines for checkout lines * Remove not needed args * Fix order discount recalculation * Fix order discounts events amount calculation * Fix order calculation tests * Use base price in checkout line serializer * Use base shipping price in checkout tax payload * Use base total in checkout tax payload * Tax app interface should recive tax rate as decimal * Tax app interface should recive tax rate as decimal * Clear migrations * Add descriptions to webhook events enums * Update changelog * Drop not required changes from plugin interface * Fix review remarks
get_current_tax_app
3e06a6462559498c6ad09c0591e648a7943ac0c6
saleor
utils.py
15
8
https://github.com/saleor/saleor.git
1
39
0
11
67
Python
{ "docstring": "Return currently used tax app or None, if there aren't any.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def get_current_tax_app() -> Optional[App]: return ( App.objects.order_by("pk") .for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES) .for_event_type(WebhookEventSyncType.ORDER_CALCULATE_TAXES) .last() )
73,173
249,857
315
tests/util/caches/test_deferred_cache.py
140
20
def test_callbacks(self) -> None: cache: DeferredCache[str, int] = DeferredCache("test") callbacks = set() # start with an entry, with a callba
Add missing type hints to test.util.caches (#14529)
test_callbacks
4ae967cf6308e80b03da749f0cbaed36988e235e
synapse
test_deferred_cache.py
13
16
https://github.com/matrix-org/synapse.git
1
171
0
100
300
Python
{ "docstring": "Invalidation callbacks are called at the right time", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_callbacks(self) -> None: cache: DeferredCache[str, int] = DeferredCache("test") callbacks = set() # start with an entry, with a callback cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) # now replace that entry with a pending result origin_d: "defer.Deferred[int]" = defer.Deferred() set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) # ... and also make a get request get_d = cache.get("k1", callback=lambda: callbacks.add("get")) # we don't expect the invalidation callback for the original value to have # been called yet, even though get() will now return a different result. # I'm not sure if that is by design or not. self.assertEqual(callbacks, set()) # now fire off all the deferreds origin_d.callback(20) self.assertEqual(self.successResultOf(set_d), 20) self.assertEqual(self.successResultOf(get_d), 20) # now the original invalidation callback should have been called, but none of # the others self.assertEqual(callbacks, {"prefill"}) callbacks.clear() # another update should invalidate both the previous results cache.prefill("k1", 30) self.assertEqual(callbacks, {"set", "get"})
50,845
204,710
68
django/core/management/sql.py
18
11
def sql_flush(style, connection, reset_sequences=True, allow_cascade=False): tables = connection.in
Refs #33476 -- Reformatted code with Black.
sql_flush
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
sql.py
9
10
https://github.com/django/django.git
1
52
0
17
76
Python
{ "docstring": "\n Return a list of the SQL statements used to flush the database.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 11 }
def sql_flush(style, connection, reset_sequences=True, allow_cascade=False): tables = connection.introspection.django_table_names( only_existing=True, include_views=False ) return connection.ops.sql_flush( style, tables, reset_sequences=reset_sequences, allow_cascade=allow_cascade, )
69,920
242,777
316
src/PIL/ImageFilter.py
88
18
def generate(cls, size, callback, channels=3, target_mode=None): size_1d, size_2d, size_3d = cls._check_size(size) if channels not in (3, 4): raise ValueError("Only 3 or 4 output channels are supported") table = [0] * (size_1d * size_2d * size_3d * channels) idx_out = 0 for b in range(size_3d)
Variable in function should be snake_case
generate
d3c9a6504e84f87379554b6b671a1fb6c66a449e
Pillow
ImageFilter.py
17
20
https://github.com/python-pillow/Pillow.git
5
151
0
61
222
Python
{ "docstring": "Generates new LUT using provided callback.\n\n :param size: Size of the table. Passed to the constructor.\n :param callback: Function with three parameters which correspond\n three color channels. Will be called ``size**3``\n times with values from 0.0 to 1.0 and should return\n a tuple with ``channels`` elements.\n :param channels: The number of channels which should return callback.\n :param target_mode: Passed to the constructor of the resulting\n lookup table.\n ", "language": "en", "n_whitespaces": 201, "n_words": 67, "vocab_size": 48 }
def generate(cls, size, callback, channels=3, target_mode=None): size_1d, size_2d, size_3d = cls._check_size(size) if channels not in (3, 4): raise ValueError("Only 3 or 4 output channels are supported") table = [0] * (size_1d * size_2d * size_3d * channels) idx_out = 0 for b in range(size_3d): for g in range(size_2d): for r in range(size_1d): table[idx_out : idx_out + channels] = callback( r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) ) idx_out += channels return cls( (size_1d, size_2d, size_3d), table, channels=channels, target_mode=target_mode, _copy_table=False, )
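A sketch of invoking the classmethod above; the enclosing class name (ImageFilter.Color3DLUT) is inferred from the file path and is an assumption, since the record shows only the method body.

from PIL import ImageFilter

# Identity table: the callback receives normalized (r, g, b) in [0, 1] and returns them unchanged.
lut = ImageFilter.Color3DLUT.generate(17, lambda r, g, b: (r, g, b), channels=3)
# A hypothetical RGB image `im` could then apply it with im.filter(lut).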
50,951
204,878
46
django/db/backends/base/operations.py
14
4
def regex_lookup(self, lookup_type): raise NotImplementedError( "subclasses of BaseDatabaseOperations may require a regex_lookup() method"
Refs #33476 -- Reformatted code with Black.
regex_lookup
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
operations.py
8
4
https://github.com/django/django.git
1
13
0
14
25
Python
{ "docstring": "\n Return the string to use in a query when performing regular expression\n lookups (using \"regex\" or \"iregex\"). It should contain a '%s'\n placeholder for the column being searched against.\n\n If the feature is not supported (or part of it is not supported), raise\n NotImplementedError.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 39 }
def regex_lookup(self, lookup_type): raise NotImplementedError( "subclasses of BaseDatabaseOperations may require a regex_lookup() method" )
14,712
68,062
35
erpnext/telephony/doctype/call_log/call_log.py
55
32
def link_existing_conversations(doc, state): if doc.doctype != "Contact": return try: numbers = [d.phone for d in doc.phone_nos] for number in numbers: number = strip_number(number) if not number: continue logs = frappe.db.sql_list( , dict(phone_number="%{}".format(number), docname=doc.name, doctype=doc.doctype), ) for log in logs: call_log = frappe.get_doc("Call Log", log) call_log.add_link(link_
style: format code with black
link_existing_conversations
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
call_log.py
18
33
https://github.com/frappe/erpnext.git
7
142
0
46
232
Python
{ "docstring": "\n\tCalled from hooks on creation of Contact or Lead to link all the existing conversations.\n\t\n\t\t\t\tSELECT cl.name FROM `tabCall Log` cl\n\t\t\t\tLEFT JOIN `tabDynamic Link` dl\n\t\t\t\tON cl.name = dl.parent\n\t\t\t\tWHERE (cl.`from` like %(phone_number)s or cl.`to` like %(phone_number)s)\n\t\t\t\tGROUP BY cl.name\n\t\t\t\tHAVING SUM(\n\t\t\t\t\tCASE\n\t\t\t\t\t\tWHEN dl.link_doctype = %(doctype)s AND dl.link_name = %(docname)s\n\t\t\t\t\t\tTHEN 1\n\t\t\t\t\t\tELSE 0\n\t\t\t\t\tEND\n\t\t\t\t)=0\n\t\t\t", "language": "en", "n_whitespaces": 45, "n_words": 58, "vocab_size": 52 }
def link_existing_conversations(doc, state): if doc.doctype != "Contact": return try: numbers = [d.phone for d in doc.phone_nos] for number in numbers: number = strip_number(number) if not number: continue logs = frappe.db.sql_list( , dict(phone_number="%{}".format(number), docname=doc.name, doctype=doc.doctype), ) for log in logs: call_log = frappe.get_doc("Call Log", log) call_log.add_link(link_type=doc.doctype, link_name=doc.name) call_log.save(ignore_permissions=True) frappe.db.commit() except Exception: frappe.log_error(title=_("Error during caller information update"))
89,517
290,401
125
homeassistant/components/media_player/__init__.py
31
12
async def async_volume_up(self) -> None: if hasattr(self, "volume_up"): await
Update mypy to 0.990 (#81783) * Update mypy to 0.990 * Remove type ignore - overriding attr with property (13475) * Remove type ignores - hasattr (13544) * Adjust type ignore - assignment (13549) * New error code - type-abstract (13785) * Disable annotation-unchecked (13851)
async_volume_up
0c8eeaa6436b04ba6da46bccab8b11523f314d9b
core
__init__.py
14
14
https://github.com/home-assistant/core.git
5
70
0
26
112
Python
{ "docstring": "Turn volume up for media player.\n\n This method is a coroutine.\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
async def async_volume_up(self) -> None: if hasattr(self, "volume_up"): await self.hass.async_add_executor_job(self.volume_up) return if ( self.volume_level is not None and self.volume_level < 1 and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET ): await self.async_set_volume_level(min(1, self.volume_level + 0.1))
39,981
167,374
62
pandas/io/pytables.py
16
6
def infer_axes(self) -> bool: s = self.storable if s is None: return False self.get_attrs() return True
TYP: some return annotations in pytables.py (#47512)
infer_axes
7d2f9b8d59908fbf57c6453bc41891efbfe981a6
pandas
pytables.py
7
10
https://github.com/pandas-dev/pandas.git
2
27
0
14
47
Python
{ "docstring": "\n infer the axes of my storer\n return a boolean indicating if we have a valid storer or not\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
def infer_axes(self) -> bool: s = self.storable if s is None: return False self.get_attrs() return True
35,351
153,296
130
modin/core/dataframe/pandas/dataframe/dataframe.py
36
9
def _validate_set_axis(self, new_labels, old_labels): new_labels = ensure_index(new_labels) old_len = len(old_labels) new_len = len(new_labels) if old_len != new_len: raise ValueError( f"Length mismatch: Expected axis has {old_len} elements, " + "new values have {new_len} elements" ) return new_labels
REFACTOR-#3900: add flake8-no-implicit-concat plugin and refactor flake8 error codes (#3901) Signed-off-by: jeffreykennethli <jkli@ponder.io>
_validate_set_axis
e5e9634357e60925a5a70e56a1d4882d269f533a
modin
dataframe.py
12
10
https://github.com/modin-project/modin.git
2
43
0
32
77
Python
{ "docstring": "\n Validate the possibility of replacement of old labels with the new labels.\n\n Parameters\n ----------\n new_labels : list-like\n The labels to replace with.\n old_labels : list-like\n The labels to replace.\n\n Returns\n -------\n list-like\n The validated labels.\n ", "language": "en", "n_whitespaces": 132, "n_words": 35, "vocab_size": 24 }
def _validate_set_axis(self, new_labels, old_labels): new_labels = ensure_index(new_labels) old_len = len(old_labels) new_len = len(new_labels) if old_len != new_len: raise ValueError( f"Length mismatch: Expected axis has {old_len} elements, " + "new values have {new_len} elements" ) return new_labels
18,225
87,117
55
src/sentry/snuba/discover.py
32
8
def transform_data(result, translated_columns, query_builder) -> EventsResponse: final_result: EventsResponse = {"data": result["data"], "meta": result["meta"]} for col in final_result["meta"]: # Translate back column names that were converted to snuba format col["name"] = translated_columns.get(col["name"], col["name"])
feat(discover): Only transform when ordering project (#39468) - This updates the querybuilder with a orderby resolver so we can implement more custom orderbys(orderbies?) in the future - This changes the project field to just select the project_id only, which results in needing a new post-processing capability to the querybuilder - This is done via the `value_resolver_map` and the `meta_resolver_map` - Removed the snuba_filter param from transform_results since we no longer use it - Removes the old discover 1 query since it shouldn't be supported and no longer is being used - Needed to update ds code too since it relied on the old project behaviour but doesn't use `discover.query`
transform_data
bf416f7ad23d7537a84c9727cfe1c0a7effd27bb
sentry
discover.py
12
13
https://github.com/getsentry/sentry.git
3
80
0
31
102
Python
{ "docstring": "\n Transform internal names back to the public schema ones.\n\n When getting timeseries results via rollup, this function will\n zerofill the output results.\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 21 }
def transform_data(result, translated_columns, query_builder) -> EventsResponse: final_result: EventsResponse = {"data": result["data"], "meta": result["meta"]} for col in final_result["meta"]: # Translate back column names that were converted to snuba format col["name"] = translated_columns.get(col["name"], col["name"])
48,627
197,550
81
sympy/plotting/plot.py
43
17
def plot_contour(*args, show=True, **kwargs): args = list(map(sympify, args)) plot_expr = check_arguments(args, 1, 2) series = [ContourSeries(*arg) for arg
Improve documentation
plot_contour
eb20cbe9b89917786a10d50b785b4f21230f04be
sympy
plot.py
10
10
https://github.com/sympy/sympy.git
4
86
0
36
138
Python
{ "docstring": "\n Draws contour plot of a function\n\n Usage\n =====\n\n Single plot\n\n ``plot_contour(expr, range_x, range_y, **kwargs)``\n\n If the ranges are not specified, then a default range of (-10, 10) is used.\n\n Multiple plot with the same range.\n\n ``plot_contour(expr1, expr2, range_x, range_y, **kwargs)``\n\n If the ranges are not specified, then a default range of (-10, 10) is used.\n\n Multiple plots with different ranges.\n\n ``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``\n\n Ranges have to be specified for every expression.\n\n Default range may change in the future if a more advanced default range\n detection algorithm is implemented.\n\n Arguments\n =========\n\n expr : Expression representing the function along x.\n\n range_x : (:class:`Symbol`, float, float)\n A 3-tuple denoting the range of the x variable, e.g. (x, 0, 5).\n\n range_y : (:class:`Symbol`, float, float)\n A 3-tuple denoting the range of the y variable, e.g. (y, 0, 5).\n\n Keyword Arguments\n =================\n\n Arguments for ``ContourSeries`` class:\n\n nb_of_points_x : int\n The x range is sampled uniformly at ``nb_of_points_x`` of points.\n\n nb_of_points_y : int\n The y range is sampled uniformly at ``nb_of_points_y`` of points.\n\n Aesthetics:\n\n surface_color : Function which returns a float\n Specifies the color for the surface of the plot. See\n :class:`sympy.plotting.Plot` for more details.\n\n If there are multiple plots, then the same series arguments are applied to\n all the plots. If you want to set these options separately, you can index\n the returned ``Plot`` object and set it.\n\n Arguments for ``Plot`` class:\n\n title : str\n Title of the plot.\n\n size : (float, float), optional\n A tuple in the form (width, height) in inches to specify the size of\n the overall figure. The default value is set to ``None``, meaning\n the size will be set by the default backend.\n\n See Also\n ========\n\n Plot, ContourSeries\n\n ", "language": "en", "n_whitespaces": 462, "n_words": 283, "vocab_size": 155 }
def plot_contour(*args, show=True, **kwargs): args = list(map(sympify, args)) plot_expr = check_arguments(args, 1, 2) series = [ContourSeries(*arg) for arg in plot_expr] plot_contours = Plot(*series, **kwargs) if len(plot_expr[0].free_symbols) > 2: raise ValueError('Contour Plot cannot Plot for more than two variables.') if show: plot_contours.show() return plot_contours
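A sketch built from the call signature above; the expression and ranges are arbitrary placeholders, and show=False defers rendering so no interactive matplotlib backend is needed yet.

from sympy import symbols, sin, cos
from sympy.plotting.plot import plot_contour

x, y = symbols('x y')
# Both variables need explicit ranges for a two-variable expression.
p = plot_contour(cos(x) + sin(y), (x, -5, 5), (y, -5, 5), show=False)
# p.show()  # render later once a plotting backend is available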
56,263
221,193
73
python3.10.4/Lib/bz2.py
31
5
def peek(self, n=0): self._check_can_read() # Relies on the u
add python 3.10.4 for windows
peek
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bz2.py
8
3
https://github.com/XX-net/XX-Net.git
1
24
0
26
44
Python
{ "docstring": "Return buffered data without advancing the file position.\n\n Always returns at least one byte of data, unless at EOF.\n The exact number of bytes returned is unspecified.\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 25 }
def peek(self, n=0): self._check_can_read() # Relies on the undocumented fact that BufferedReader.peek() # always returns at least one byte (except at EOF), independent # of the value of n return self._buffer.peek(n)
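A small sketch of peek() on a compressed file opened for reading; the file name is a placeholder for any existing .bz2 archive.

import bz2

with bz2.open("archive.bz2", "rb") as f:
    head = f.peek()               # buffered bytes; the position does not advance
    assert f.read(1) == head[:1]  # the next read still starts at the same byte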
20,701
101,282
55
lib/training/cache.py
12
5
def cache_full(self) -> bool: if self._cache_
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
cache_full
2beceffad9b15c1fd78f06b9b272563321c5a41e
faceswap
cache.py
9
7
https://github.com/deepfakes/faceswap.git
2
35
0
10
64
Python
{ "docstring": "bool: ``True`` if the cache has been fully populated. ``False`` if there are items still\n to be cached. ", "language": "en", "n_whitespaces": 25, "n_words": 18, "vocab_size": 17 }
def cache_full(self) -> bool: if self._cache_info["cache_full"]: return self._cache_info["cache_full"] with self._lock: return self._cache_info["cache_full"]
78,245
265,914
102
netbox/netbox/views/generic/base.py
29
8
def get_queryset(self, request): if self.queryset is None: raise ImproperlyConfigured( f"{self.__class__.__name__} does not define a queryset. Set queryset on the class or " f"override its get_queryset() method."
Closes #10739: Introduce get_queryset() method on generic views
get_queryset
b2e2e3be35f3922ecee945b97279c50725c0b7fa
netbox
base.py
14
7
https://github.com/netbox-community/netbox.git
2
31
0
29
63
Python
{ "docstring": "\n Return the base queryset for the view. By default, this returns self.queryset.all().\n\n Args:\n request: The current request\n ", "language": "en", "n_whitespaces": 50, "n_words": 17, "vocab_size": 16 }
def get_queryset(self, request): if self.queryset is None: raise ImproperlyConfigured( f"{self.__class__.__name__} does not define a queryset. Set queryset on the class or " f"override its get_queryset() method." ) return self.queryset.all()
118,085
322,190
117
paddlenlp/taskflow/knowledge_mining.py
46
9
def _preprocess(self, inputs): inputs = self._check_input_text(inputs) self._max_cls_len = 5 num_workers = self.kwargs[ 'num_workers'] if 'num_workers' in self.kwargs else 0 lazy_load = self.kwargs[ 'lazy_load'] if 'lazy_load' in self.kwargs else False # Prompt template: input_text + "是" + "[MASK]" * cls_seq_length prompt_template = ["是"] + ["[MASK]"] * self._max_cls_len
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com> Co-authored-by: tianxin <tianxin04@baidu.com>
_preprocess
621357338437ee420eabbbf5ab19065bc85e73a5
PaddleNLP
knowledge_mining.py
10
26
https://github.com/PaddlePaddle/PaddleNLP.git
3
168
0
33
115
Python
{ "docstring": "\n Create the dataset and dataloader for the predict.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
def _preprocess(self, inputs): inputs = self._check_input_text(inputs) self._max_cls_len = 5 num_workers = self.kwargs[ 'num_workers'] if 'num_workers' in self.kwargs else 0 lazy_load = self.kwargs[ 'lazy_load'] if 'lazy_load' in self.kwargs else False # Prompt template: input_text + "是" + "[MASK]" * cls_seq_length prompt_template = ["是"] + ["[MASK]"] * self._max_cls_len
54,151
215,757
257
salt/modules/consul.py
86
16
def session_destroy(consul_url=None, token=None, session=None, **kwargs): ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error("No Consul URL found.") ret["me
[merge jam] Master port 49261 - consul modules (#58101) * add consul states and acl function present/absent * add consul to states doc index * refact/fix consul states * fix doc, fix states * fix name parameter for acl_changes * fixing pylint errors * small changes after review by @rallytime * fix header count * Update consul.py * fix acl_exists description, fix when both id and name are missing * Adding some tests for consul module and consul state module. Some additional fixes in the consul module. * Fixing tests. * Fixing failing tests on Windows. * Adding changelog. * Adding some tests for consul module and consul state module. Some additional fixes in the consul module. * moving tests to pytest. * manual black changes. * One more manual black change. * fixing formatting. Adding versionadded for state module. Co-authored-by: Rémi Jouannet <remi.jouannet@outscale.com> Co-authored-by: Mike Place <mp@saltstack.com> Co-authored-by: Daniel Wozniak <dwozniak@saltstack.com> Co-authored-by: Wayne Werner <wwerner@vmware.com>
session_destroy
fb825aa760fa0585a2c8fdafc6e62be8aec8cecf
salt
consul.py
12
29
https://github.com/saltstack/salt.git
6
160
0
56
283
Python
{ "docstring": "\n Destroy session\n\n :param consul_url: The Consul server URL.\n :param session: The ID of the session to destroy.\n :param dc: By default, the datacenter of the agent is queried;\n however, the dc can be provided using the \"dc\" parameter.\n :return: Boolean & message of success or failure.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'\n\n ", "language": "en", "n_whitespaces": 101, "n_words": 55, "vocab_size": 45 }
def session_destroy(consul_url=None, token=None, session=None, **kwargs): ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error("No Consul URL found.") ret["message"] = "No Consul URL found." ret["res"] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if "dc" in kwargs: query_params["dc"] = kwargs["dc"] function = "session/destroy/{}".format(session) res = _query( consul_url=consul_url, function=function, token=token, method="PUT", query_params=query_params, ) if res["res"]: ret["res"] = True ret["message"] = "Destroyed Session {}.".format(session) else: ret["res"] = False ret["message"] = "Unable to destroy session {}.".format(session) return ret
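For context, a hedged sketch of the raw Consul HTTP call that session_destroy wraps (PUT /v1/session/destroy/<session_id>); the URL, token header, and session id below are illustrative assumptions, and this uses requests directly rather than Salt's _query helper:

# Illustrative only: the underlying Consul HTTP API call, not Salt module code.
from typing import Optional
import requests

def destroy_session(consul_url: str, session_id: str,
                    token: Optional[str] = None, dc: Optional[str] = None) -> bool:
    headers = {"X-Consul-Token": token} if token else {}
    params = {"dc": dc} if dc else {}
    resp = requests.put(
        "{}/v1/session/destroy/{}".format(consul_url, session_id),
        headers=headers,
        params=params,
        timeout=10,
    )
    return resp.ok  # Consul answers "true" on success

# destroy_session("http://127.0.0.1:8500", "c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716")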
5,242
29,623
31
saleor/graphql/product/mutations/collection/collection_update.py
10
10
def post_save_action(cls, info, instance, cleaned_input): manager = load_plugin_manager(info.context
Split product types and mutations (#11259) * Split product types file * Split product/mutations/products.py file
post_save_action
74d1c8d8504dbdd339865ff97ca4ac9bd30a8faf
saleor
collection_update.py
9
3
https://github.com/saleor/saleor.git
1
30
0
10
47
Python
{ "docstring": "Override this method with `pass` to avoid triggering product webhook.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def post_save_action(cls, info, instance, cleaned_input): manager = load_plugin_manager(info.context) cls.call_event(manager.collection_updated, instance)
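Per the docstring above, overriding the hook with pass suppresses the plugin event; a generic, framework-free sketch of that pattern (class and event names here are illustrative, not Saleor's):

# Generic sketch of the hook-override pattern described in the docstring.
class BaseMutation:
    @classmethod
    def post_save_action(cls, info, instance, cleaned_input):
        print("collection_updated event fired")  # stand-in for manager.collection_updated

class QuietMutation(BaseMutation):
    @classmethod
    def post_save_action(cls, info, instance, cleaned_input):
        pass  # overriding with `pass` avoids triggering the webhook

QuietMutation.post_save_action(None, None, None)  # prints nothing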
81,600
276,242
91
keras/saving/saving_utils.py
47
6
def _deserialize_metric(metric_config): from keras import ( metrics as metrics_module, ) # pylint:disable=g-import-not-at-top if metric_config in ["accuracy", "acc", "crossentropy", "ce"]: # Do not deserialize accuracy and cross-entropy strings as we have special # case handling for these
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_deserialize_metric
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
saving_utils.py
8
7
https://github.com/keras-team/keras.git
2
37
0
41
68
Python
{ "docstring": "Deserialize metrics, leaving special strings untouched.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
def _deserialize_metric(metric_config): from keras import ( metrics as metrics_module, ) # pylint:disable=g-import-not-at-top if metric_config in ["accuracy", "acc", "crossentropy", "ce"]: # Do not deserialize accuracy and cross-entropy strings as we have special # case handling for these in compile, based on model output shape. return metric_config return metrics_module.deserialize(metric_config)
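A standalone sketch of the same pass-through idea — special metric strings are left for later shape-based resolution in compile, everything else goes to the deserializer; the fallback deserializer below is a placeholder, not Keras's:

# Sketch of the special-casing pattern; `deserializer` is a stand-in.
SPECIAL_METRIC_STRINGS = ("accuracy", "acc", "crossentropy", "ce")

def deserialize_metric(metric_config, deserializer=lambda cfg: "<metric:%s>" % cfg):
    if metric_config in SPECIAL_METRIC_STRINGS:
        # Resolved later based on the model output shape, so pass through unchanged.
        return metric_config
    return deserializer(metric_config)

print(deserialize_metric("acc"))  # 'acc' (untouched)
print(deserialize_metric("mse"))  # '<metric:mse>' (handed to the deserializer)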
@keras_export("keras.activations.swish") @tf.__internal__.dispatch.add_dispatch_support
80,026
269,312
10
keras/activations.py
6
8
def softsign(x): return tf.math.softsign(x) @keras_export("keras.activations.swish") @tf.__internal__.dispatch.add_dispatch_support
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
softsign
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
activations.py
8
2
https://github.com/keras-team/keras.git
1
15
1
6
50
Python
{ "docstring": "Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.\n\n Example Usage:\n\n >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)\n >>> b = tf.keras.activations.softsign(a)\n >>> b.numpy()\n array([-0.5, 0. , 0.5], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The softsign activation: `x / (abs(x) + 1)`.\n ", "language": "en", "n_whitespaces": 85, "n_words": 45, "vocab_size": 36 }
def softsign(x): return tf.math.softsign(x) @keras_export("keras.activations.swish") @tf.__internal__.dispatch.add_dispatch_support
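A NumPy check of the formula given in the docstring, softsign(x) = x / (|x| + 1); this sketch is for illustration and is not the TensorFlow kernel:

import numpy as np

def softsign_np(x):
    # Elementwise x / (|x| + 1), matching the documented definition.
    x = np.asarray(x, dtype=np.float32)
    return x / (np.abs(x) + 1.0)

print(softsign_np([-1.0, 0.0, 1.0]))  # [-0.5  0.   0.5]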
34,894
150,952
708
freqtrade/freqai/data_kitchen.py
145
59
def compute_inlier_metric(self) -> None: import scipy.stats as ss nmb_previous_points = self.data['InlierMetric_nmb_points'] weibull_percentile = self.data['InlierMetric_weib_perc'] train_ft_df = self.data_dictionary['train_features'] train_ft_df_reindexed = train_ft_df.reindex( index=np.flip(train_ft_df.index) ) pairwise = pd.DataFrame( np.triu( pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count) ), columns=train_ft_df_reindexed.index, index=train_ft_df_reindexed.index ) pairwise = pairwise.round(5) column_labels = [ '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1) ] distances = pd.DataFrame( columns=column_labels, index=train_ft_df.index ) for index in train_ft_df.index[nmb_previous_points]: current_row = pairwise.loc[[index]] current_row_no_zeros = current_row.loc[ :, (current_row!=0).any(axis=0) ] distances.loc[[index]] = current_row_no_zeros.iloc[ :, :nmb_previous_points ] distances = distances.replace([np.inf, -np.inf], np.nan) drop_index = pd.isnull(distances).any(1) distances = distances[drop_index==0] inliers = pd.DataFrame(index=distances.index) for key in distances.keys(): current_distances = distances[key].dropna() fit_params = ss.weibull_min.fit(current_distances) cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params) is_inlier = np.where( current_distances<=cutoff, 1, 0 ) df_inlier = pd.DataFrame( {key+'_IsInlier':is_inlier}, index=distances.index ) inliers = pd.concat( [inliers, df_inlier], axis=1 ) self.data_dictionary['train_features'] = pd.DataFrame( data=inliers.sum(axis=1)/nmb_previous_points, col
Add inlier metric computation
compute_inlier_metric
d3cb211283ced68d082cfdbdac12f3d2ab90d63b
freqtrade
data_kitchen.py
16
64
https://github.com/freqtrade/freqtrade.git
4
417
0
98
653
Python
{ "docstring": "\n \n Compute inlier metric from backwards distance distributions. \n This metric defines how well features from a timepoint fit \n into previous timepoints.\n ", "language": "en", "n_whitespaces": 59, "n_words": 20, "vocab_size": 18 }
def compute_inlier_metric(self) -> None: import scipy.stats as ss nmb_previous_points = self.data['InlierMetric_nmb_points'] weibull_percentile = self.data['InlierMetric_weib_perc'] train_ft_df = self.data_dictionary['train_features'] train_ft_df_reindexed = train_ft_df.reindex( index=np.flip(train_ft_df.index) ) pairwise = pd.DataFrame( np.triu( pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count) ), columns=train_ft_df_reindexed.index, index=train_ft_df_reindexed.index ) pairwise = pairwise.round(5) column_labels = [ '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1) ] distances = pd.DataFrame( columns=column_labels, index=train_ft_df.index ) for index in train_ft_df.index[nmb_previous_points:]: current_row = pairwise.loc[[index]] current_row_no_zeros = current_row.loc[ :, (current_row!=0).any(axis=0) ] distances.loc[[index]] = current_row_no_zeros.iloc[ :, :nmb_previous_points ] distances = distances.replace([np.inf, -np.inf], np.nan) drop_index = pd.isnull(distances).any(axis=1) distances = distances[drop_index==0] inliers = pd.DataFrame(index=distances.index) for key in distances.keys(): current_distances = distances[key].dropna() fit_params = ss.weibull_min.fit(current_distances) cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params) is_inlier = np.where( current_distances<=cutoff, 1, 0 ) df_inlier = pd.DataFrame( {key+'_IsInlier':is_inlier}, index=distances.index ) inliers = pd.concat( [inliers, df_inlier], axis=1 ) self.data_dictionary['train_features'] = pd.DataFrame( data=inliers.sum(axis=1)/nmb_previous_points, columns=['inlier_metric'], index=train_ft_df.index ) percent_outliers = np.round( 100*(1-self.data_dictionary['train_features']['inlier_metric'].sum()/ len(train_ft_df.index)), 2 ) logger.info(f'{percent_outliers}% of data points were identified as outliers') return None
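A condensed, self-contained sketch of the Weibull-cutoff step at the core of the method above — fit a Weibull to each backward-distance column, take the configured percentile as a cutoff, and average the per-column inlier flags; the demo data and the 0.9 percentile are assumptions for illustration:

import numpy as np
import pandas as pd
import scipy.stats as ss

def weibull_inlier_metric(distances: pd.DataFrame, weibull_percentile: float = 0.9) -> pd.Series:
    # Flag each point as inlier (1) or outlier (0) per distance column, then average.
    flags = pd.DataFrame(index=distances.index)
    for col in distances.columns:
        fit_params = ss.weibull_min.fit(distances[col].dropna())
        cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params)
        flags[col] = np.where(distances[col] <= cutoff, 1, 0)
    return flags.sum(axis=1) / len(distances.columns)

rng = np.random.default_rng(0)
demo = pd.DataFrame(rng.weibull(1.5, size=(200, 3)), columns=["d1", "d2", "d3"])
print(weibull_inlier_metric(demo).head())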
55,942
220,224
70
python3.10.4/Lib/ast.py
16
7
def items_view(self, traverser, items): if len(items) == 1: traverser(items[0]) self.write(",") else: self.interleave(lambda: self.write(", "), tra
add python 3.10.4 for windows
items_view
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
ast.py
14
6
https://github.com/XX-net/XX-Net.git
2
50
0
15
84
Python
{ "docstring": "Traverse and separate the given *items* with a comma and append it to\n the buffer. If *items* is a single item sequence, a trailing comma\n will be added.", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 22 }
def items_view(self, traverser, items): if len(items) == 1: traverser(items[0]) self.write(",") else: self.interleave(lambda: self.write(", "), traverser, items)
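The trailing-comma rule this helper enforces is easy to observe through ast.unparse (Python 3.9+), which relies on the same traversal:

import ast

# A single-element tuple keeps its trailing comma; longer ones get ", " separators.
print(ast.unparse(ast.parse("(1,)")))    # (1,)
print(ast.unparse(ast.parse("(1, 2)")))  # (1, 2)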
72,195
248,296
62
synapse/metrics/jemalloc.py
19
9
def refresh_stats(self) -> None: try: self._mallctl("epoch", read=False, write=1) except Exception as e: logger.warning("Failed to reload jemalloc stats: %s
Add config flags to allow for cache auto-tuning (#12701)
refresh_stats
cde8af9a495cbc7f3d0207e3f17c37eddaee34e1
synapse
jemalloc.py
11
9
https://github.com/matrix-org/synapse.git
2
37
0
19
65
Python
{ "docstring": "Request that jemalloc updates its internal statistics. This needs to\n be called before querying for stats, otherwise it will return stale\n values.\n ", "language": "en", "n_whitespaces": 43, "n_words": 22, "vocab_size": 22 }
def refresh_stats(self) -> None: try: self._mallctl("epoch", read=False, write=1) except Exception as e: logger.warning("Failed to reload jemalloc stats: %s", e)
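A hedged sketch of the refresh-before-read pattern the docstring describes; the _mallctl stub below stands in for the real ctypes binding and its return values are placeholders:

import logging

logger = logging.getLogger(__name__)

class JemallocStatsSketch:
    # Stub for illustration only; the real implementation resolves mallctl via ctypes.
    def _mallctl(self, name, read=True, write=None):
        if name == "epoch":
            return None  # writing any value to "epoch" makes jemalloc refresh its counters
        return 0  # placeholder for a counter such as "stats.allocated"

    def refresh_stats(self) -> None:
        try:
            self._mallctl("epoch", read=False, write=1)
        except Exception as e:
            logger.warning("Failed to reload jemalloc stats: %s", e)

    def allocated(self) -> int:
        self.refresh_stats()  # counters are stale until "epoch" is written
        return self._mallctl("stats.allocated")

print(JemallocStatsSketch().allocated())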
41,791
176,247
77
networkx/tests/test_convert_numpy.py
43
18
def test_to_numpy_array_multiweight_reduction(func, expected): G = nx.MultiDiGraph() weights = [-1, 2, 10.0] for w in weights: G.add_edge(0, 1, weight=w) A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float)
Refactor `to_numpy_array` with advanced indexing (#5250) * WIP: try approach based on advanced indexing. * WIP: Fix some tests and support multigraphs. * Rm test for limiting reductions to nanfunctions. * Catch edgeless graph cornercase. * Cleanups. * Update networkx/convert_matrix.py Comments from review Co-authored-by: Dan Schult <dschult@colgate.edu> * Only subgraph if necessary and copy if so, for performance reasons Co-authored-by: Dan Schult <dschult@colgate.edu> * Split multigraph and graph cases for performance. * Add tests for to_numpy_array with complex dtype. Co-authored-by: Andras Deak <deak.andris@gmail.com> * Add test for object weights. * Add test for more multiweight reduction functions. Include arbitrary functions beyond the original set of nanmin, nanmax, and nansum. * Update docstring. Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: Andras Deak <deak.andris@gmail.com>
test_to_numpy_array_multiweight_reduction
0cc70051fa0a979b1f1eab4af5b6587a6ebf8334
networkx
test_convert_numpy.py
10
9
https://github.com/networkx/networkx.git
2
122
0
32
175
Python
{ "docstring": "Test various functions for reducing multiedge weights.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def test_to_numpy_array_multiweight_reduction(func, expected): G = nx.MultiDiGraph() weights = [-1, 2, 10.0] for w in weights: G.add_edge(0, 1, weight=w) A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float) assert np.allclose(A, [[0, expected], [0, 0]]) # Undirected case A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float) assert np.allclose(A, [[0, expected], [expected, 0]])
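An interactive counterpart to the test above, showing how multigraph_weight reduces parallel-edge weights into one matrix cell; this assumes a NetworkX version that accepts arbitrary reduction callables, per the commit message:

import networkx as nx

G = nx.MultiDiGraph()
for w in (-1, 2, 10.0):
    G.add_edge(0, 1, weight=w)  # three parallel edges 0 -> 1

# The reducer collapses the parallel weights [-1, 2, 10] into a single value.
print(nx.to_numpy_array(G, multigraph_weight=sum))  # cell (0, 1) becomes 11.0
print(nx.to_numpy_array(G, multigraph_weight=min))  # cell (0, 1) becomes -1.0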
28,018
125,896
669
rllib/connectors/tests/test_agent.py
152
36
def test_vr_connector_causal_slice(self): view_rq_dict = { "state": ViewRequirement("obs"), # shift array should be [-2, -1, 0] "prev_states": ViewRequirement("obs", shift="-2:0"), # shift array should be [-4, -2, 0] "prev_strided_states_even": ViewRequirement("obs", shift="-4:0:2"), # shift array should be [-3, -1] "prev_strided_states_odd": ViewRequirement("obs", shift="-3:0:2"), } obs_arrs = np.arange(10)[:, None] + 1 config = PPOConfig().to_dict() ctx = ConnectorContext( view_requirements=view_rq_dict, config=config, is_policy_recurrent=True ) c = ViewRequirementAgentConnector(ctx) # keep a queue of observations obs_list = [] for t, obs in enumerate(obs_arrs): # t=0 is the next state of t=-1 data = AgentConnectorDataType( 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1} ) processed = c([data]) for_action = processed[0].data.for_action if t ==
[RLlib] Implemented ViewRequirementConnector (#26998)
test_vr_connector_causal_slice
8ddcf89096e5631c6b6e0d04dc094b458a15c9f9
ray
test_agent.py
15
38
https://github.com/ray-project/ray.git
4
300
0
105
491
Python
{ "docstring": "Test that the ViewRequirementConnector can handle slice shifts correctly.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_vr_connector_causal_slice(self): view_rq_dict = { "state": ViewRequirement("obs"), # shift array should be [-2, -1, 0] "prev_states": ViewRequirement("obs", shift="-2:0"), # shift array should be [-4, -2, 0] "prev_strided_states_even": ViewRequirement("obs", shift="-4:0:2"), # shift array should be [-3, -1] "prev_strided_states_odd": ViewRequirement("obs", shift="-3:0:2"), } obs_arrs = np.arange(10)[:, None] + 1 config = PPOConfig().to_dict() ctx = ConnectorContext( view_requirements=view_rq_dict, config=config, is_policy_recurrent=True ) c = ViewRequirementAgentConnector(ctx) # keep a queue of observations obs_list = [] for t, obs in enumerate(obs_arrs): # t=0 is the next state of t=-1 data = AgentConnectorDataType( 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1} ) processed = c([data]) for_action = processed[0].data.for_action if t == 0: obs_list.extend([obs for _ in range(5)]) else: # remove the first obs and add the current obs to the end obs_list.pop(0) obs_list.append(obs) # check state check(for_action["state"], obs[None]) # check prev_states check( for_action["prev_states"], np.stack(obs_list)[np.array([-3, -2, -1])][None], ) # check prev_strided_states_even check( for_action["prev_strided_states_even"], np.stack(obs_list)[np.array([-5, -3, -1])][None], ) check( for_action["prev_strided_states_odd"], np.stack(obs_list)[np.array([-4, -2])][None], )
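The shift strings exercised above ("-2:0", "-4:0:2", "-3:0:2") behave like inclusive strided slices over an observation history; a plain-NumPy sketch of that indexing (the buffer layout below is an assumption for illustration, not RLlib code):

import numpy as np

history = np.arange(10)[:, None] + 1  # ten fake observations, newest last

def causal_slice(buf, start, stop, step=1):
    # Offsets are relative to the newest element and include the stop offset.
    offsets = np.arange(start, stop + 1, step)
    return buf[offsets + len(buf) - 1]

print(causal_slice(history, -2, 0).ravel())     # [ 8  9 10] -> shifts [-2, -1, 0]
print(causal_slice(history, -4, 0, 2).ravel())  # [ 6  8 10] -> shifts [-4, -2, 0]
print(causal_slice(history, -3, 0, 2).ravel())  # [ 7  9]    -> shifts [-3, -1]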
44,269
183,607
68
examples/calculator.py
14
10
def render(self) -> RenderableType: return Padding( Align.right(FigletText(self.value), vertical="middle"), (0, 1), style="white o
more docs
render
6bfc26c1ec37262b9cd4bbab35d15907dc6742bf
textual
calculator.py
12
7
https://github.com/Textualize/textual.git
1
38
0
14
62
Python
{ "docstring": "Build a Rich renderable to render the calculator display.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def render(self) -> RenderableType: return Padding( Align.right(FigletText(self.value), vertical="middle"), (0, 1), style="white on rgb(51,51,51)", )
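A runnable Rich-only sketch of the same composition — right-aligned content wrapped in Padding with a background style — using plain Text in place of the app-specific FigletText helper:

from rich.align import Align
from rich.console import Console
from rich.padding import Padding
from rich.text import Text

value = "123.45"  # stands in for self.value
display = Padding(
    Align.right(Text(value), vertical="middle"),
    (0, 1),
    style="white on rgb(51,51,51)",
)
Console().print(display)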
@keras_export("keras.__internal__.models.clone_and_build_model", v1=[])
82,432
278,255
253
keras/models/cloning.py
101
18
def in_place_subclassed_model_state_restoration(model): assert not model._is_graph_network # Restore layers and build attributes if ( hasattr(model, "_original_attributes_cache") and model._original_attributes_cache is not None ): # Models have sticky attribute assignment, so we want to be careful to # add back the previous attributes and track Layers by their original # names without adding dependencies on "utility" attributes which Models # exempt when they're constructed. setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Res
resolve line-too-long in models
in_place_subclassed_model_state_restoration
f0fc6f798937a7a5fdab469c0f16bdde7cfc4ccd
keras
cloning.py
14
17
https://github.com/keras-team/keras.git
5
97
1
75
181
Python
{ "docstring": "Restores the original state of a model after it was \"reset\".\n\n This undoes this action of `_in_place_subclassed_model_reset`, which is\n called in `clone_and_build_model` if `in_place_reset` is set to True.\n\n Args:\n model: Instance of a Keras model created via subclassing, on which\n `_in_place_subclassed_model_reset` was previously called.\n ", "language": "en", "n_whitespaces": 68, "n_words": 44, "vocab_size": 37 }
def in_place_subclassed_model_state_restoration(model): assert not model._is_graph_network # Restore layers and build attributes if ( hasattr(model, "_original_attributes_cache") and model._original_attributes_cache is not None ): # Models have sticky attribute assignment, so we want to be careful to # add back the previous attributes and track Layers by their original # names without adding dependencies on "utility" attributes which Models # exempt when they're constructed. setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Restore to the state of a never-called model. _reset_build_compile_trackers(model) @keras_export("keras.__internal__.models.clone_and_build_model", v1=[])
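A framework-free sketch of the cache-and-restore idea in the function above — putting cached attributes back with setattr while re-tracking layer-like values; all class and attribute names below are illustrative stand-ins, not Keras internals:

# Generic sketch of restoring cached attributes onto an object; not Keras code.
class Layer:  # stand-in for a trackable sublayer
    pass

class SketchModel:
    def __init__(self):
        self._original_attributes_cache = {"dense": Layer(), "units": 32}
        self._self_tracked_trackables = []

    def restore(self):
        self._self_tracked_trackables = []
        for name, value in self._original_attributes_cache.items():
            setattr(self, name, value)  # add the attribute back under its original name
            if isinstance(value, Layer):
                self._self_tracked_trackables.append(value)  # re-track sublayers
        self._original_attributes_cache = None

m = SketchModel()
m.restore()
print(m.units, len(m._self_tracked_trackables))  # 32 1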