Dataset schema (column name, dtype, observed min/max):

ast_errors       string   lengths 0 to 3.2k
d_id             int64    44 to 121k
id               int64    70 to 338k
n_whitespaces    int64    3 to 14k
path             string   lengths 8 to 134
n_words          int64    4 to 4.82k
n_identifiers    int64    1 to 131
random_cut       string   lengths 16 to 15.8k
commit_message   string   lengths 2 to 15.3k
fun_name         string   lengths 1 to 84
commit_id        string   lengths 40 to 40
repo             string   lengths 3 to 28
file_name        string   lengths 5 to 79
ast_levels       int64    6 to 31
nloc             int64    1 to 548
url              string   lengths 31 to 59
complexity       int64    1 to 66
token_counts     int64    6 to 2.13k
n_ast_errors     int64    0 to 28
vocab_size       int64    4 to 1.11k
n_ast_nodes      int64    15 to 19.2k
language         string   1 class
documentation    dict
code             string   lengths 101 to 62.2k
70,593
244,941
110
tests/test_datasets/test_pipelines/utils.py
23
11
def check_result_same(results, pipeline_results, check_keys): for key in check_keys: if results.get(key, None) is None: continue if isinstance(results[key], (BitmapMasks, PolygonMasks)): assert_allclose(pipeline_results[key].to_ndarray(), results[key].to_ndarray()) else: assert_allclose(pipeline_
Refactor Autoaugment
check_result_same
6146a83cb898110ba0170f956903b74741a6ac37
mmdetection
utils.py
14
9
https://github.com/open-mmlab/mmdetection.git
4
77
0
22
117
Python
{ "docstring": "Check whether the ``pipeline_results`` is the same with the predefined\n ``results``.\n\n Args:\n results (dict): Predefined results which should be the standard\n output of the transform pipeline.\n pipeline_results (dict): Results processed by the transform\n pipeline.\n check_keys (tuple): Keys that need to be checked between\n results and pipeline_results.\n ", "language": "en", "n_whitespaces": 109, "n_words": 46, "vocab_size": 35 }
def check_result_same(results, pipeline_results, check_keys):
    for key in check_keys:
        if results.get(key, None) is None:
            continue
        if isinstance(results[key], (BitmapMasks, PolygonMasks)):
            assert_allclose(pipeline_results[key].to_ndarray(),
                            results[key].to_ndarray())
        else:
            assert_allclose(pipeline_results[key], results[key])
38,186
159,308
65
scripts/release.py
29
11
def get_rasa_sdk_version() -> Text: dependencies_filename = "pyproject.toml" toml_data = toml.load(project_root() / dependencies_filename) try:
add changelog for 3.0.6 release (#10771) * add changelog * update poetry.lock
get_rasa_sdk_version
40d5139b3ec136b82e28cdc80d99076b9e6b1e6a
rasa
release.py
13
9
https://github.com/RasaHQ/rasa.git
2
58
0
27
110
Python
{ "docstring": "Find out what the referenced version of the Rasa SDK is.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def get_rasa_sdk_version() -> Text:
    dependencies_filename = "pyproject.toml"
    toml_data = toml.load(project_root() / dependencies_filename)
    try:
        sdk_version = toml_data["tool"]["poetry"]["dependencies"]["rasa-sdk"]
        return sdk_version[1:].strip()
    except AttributeError:
        raise Exception(f"Failed to find Rasa SDK version in {dependencies_filename}")
73,365
250,287
464
tests/handlers/test_e2e_room_keys.py
68
12
def test_update_omitted_version(self) -> None: version = self.get_success( self.handler.create_version( self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }, ) ) self.assertEqual(version, "1") self.get_success( self.handler.update_version( self.local_user, version, { "algorithm": "m.megolm_backup.v1", "auth_data": "revised_first_version_auth_data", }, ) ) # check we can retrieve it as the current version res = self.get_success(self.handler.get_version_info(self.local_user)) del res["etag"] # etag is opaque, so don't test i
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
test_update_omitted_version
652d1669c5a103b1c20478770c4aaf18849c09a3
synapse
test_e2e_room_keys.py
13
33
https://github.com/matrix-org/synapse.git
1
122
0
47
214
Python
{ "docstring": "Check that the update succeeds if the version is missing from the body", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 11 }
def test_update_omitted_version(self) -> None: version = self.get_success( self.handler.create_version( self.local_user, { "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", }, ) ) self.assertEqual(version, "1") self.get_success( self.handler.update_version( self.local_user, version, { "algorithm": "m.megolm_backup.v1", "auth_data": "revised_first_version_auth_data", }, ) ) # check we can retrieve it as the current version res = self.get_success(self.handler.get_version_info(self.local_user)) del res["etag"] # etag is opaque, so don't test its contents self.assertDictEqual( res, { "algorithm": "m.megolm_backup.v1", "auth_data": "revised_first_version_auth_data", "version": version, "count": 0, }, )
47,773
196,273
285
sympy/geometry/plane.py
104
25
def parameter_value(self, other, u, v=None): from sympy.geometry.point import Point if not isinstance(other, GeometryEntity): other = Point(other, dim=self.ambient_dimension) if not isinstance(other, Point): raise ValueError("o
Updated import locations
parameter_value
498015021131af4dbb07eb110e5badaba8250c7b
sympy
plane.py
13
20
https://github.com/sympy/sympy.git
9
184
0
73
286
Python
{ "docstring": "Return the parameter(s) corresponding to the given point.\n\n Examples\n ========\n\n >>> from sympy import pi, Plane\n >>> from sympy.abc import t, u, v\n >>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0))\n\n By default, the parameter value returned defines a point\n that is a distance of 1 from the Plane's p1 value and\n in line with the given point:\n\n >>> on_circle = p.arbitrary_point(t).subs(t, pi/4)\n >>> on_circle.distance(p.p1)\n 1\n >>> p.parameter_value(on_circle, t)\n {t: pi/4}\n\n Moving the point twice as far from p1 does not change\n the parameter value:\n\n >>> off_circle = p.p1 + (on_circle - p.p1)*2\n >>> off_circle.distance(p.p1)\n 2\n >>> p.parameter_value(off_circle, t)\n {t: pi/4}\n\n If the 2-value parameter is desired, supply the two\n parameter symbols and a replacement dictionary will\n be returned:\n\n >>> p.parameter_value(on_circle, u, v)\n {u: sqrt(10)/10, v: sqrt(10)/30}\n >>> p.parameter_value(off_circle, u, v)\n {u: sqrt(10)/5, v: sqrt(10)/15}\n ", "language": "en", "n_whitespaces": 335, "n_words": 139, "vocab_size": 91 }
def parameter_value(self, other, u, v=None): from sympy.geometry.point import Point if not isinstance(other, GeometryEntity): other = Point(other, dim=self.ambient_dimension) if not isinstance(other, Point): raise ValueError("other must be a point") if other == self.p1: return other if isinstance(u, Symbol) and v is None: delta = self.arbitrary_point(u) - self.p1 eq = delta - (other - self.p1).unit sol = solve(eq, u, dict=True) elif isinstance(u, Symbol) and isinstance(v, Symbol): pt = self.arbitrary_point(u, v) sol = solve(pt - other, (u, v), dict=True) else: raise ValueError('expecting 1 or 2 symbols') if not sol: raise ValueError("Given point is not on %s" % func_name(self)) return sol[0] # {t: tval} or {u: uval, v: vval}
@register
53,089
211,415
48
ppdet/modeling/architectures/pose3d_metro.py
31
9
def orthographic_projection(X, camera): camera = camera.reshape((-1, 1, 3)) X_trans = X[:, :, :2] + camera[:, :, 1:] shape = paddle.shape(X_trans) X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape) return X_2d
pose3d metro modeling (#6612) * pose3d metro modeling * delete extra comments
orthographic_projection
d4e34fe165c09db65fd00113708be1b711ac957c
PaddleDetection
pose3d_metro.py
14
6
https://github.com/PaddlePaddle/PaddleDetection.git
1
86
1
25
137
Python
{ "docstring": "Perform orthographic projection of 3D points X using the camera parameters\n Args:\n X: size = [B, N, 3]\n camera: size = [B, 3]\n Returns:\n Projected 2D points -- size = [B, N, 2]\n ", "language": "en", "n_whitespaces": 63, "n_words": 33, "vocab_size": 24 }
def orthographic_projection(X, camera):
    camera = camera.reshape((-1, 1, 3))
    X_trans = X[:, :, :2] + camera[:, :, 1:]
    shape = paddle.shape(X_trans)
    X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape)
    return X_2d


@register
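The projection above computes X_2d = s * (X_xy + t) per batch, with camera packed as [s, t_x, t_y]. A minimal NumPy sketch of the same arithmetic, assuming only that layout (the function name and test values here are illustrative, not part of PaddleDetection):

import numpy as np

def orthographic_projection_np(X, camera):
    # camera: [B, 3] packed as scale s and translation (tx, ty); X: [B, N, 3]
    camera = camera.reshape((-1, 1, 3))
    X_trans = X[:, :, :2] + camera[:, :, 1:]  # translate the x/y coordinates
    return camera[:, :, 0:1] * X_trans        # scale them -> [B, N, 2]

X = np.array([[[1.0, 2.0, 5.0], [3.0, 4.0, 6.0]]])  # one batch of two 3D points
cam = np.array([[2.0, 0.5, -0.5]])                   # s=2, t=(0.5, -0.5)
print(orthographic_projection_np(X, cam))            # -> [[[3. 3.] [7. 7.]]]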
30,003
133,390
53
python/ray/util/sgd/torch/worker_group.py
14
8
def _load_state_id(self, state_id): remote_calls = [ worker.load_state_stream.remote(state_id) for worker in self.remote_workers ]
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
_load_state_id
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
worker_group.py
10
5
https://github.com/ray-project/ray.git
2
28
0
13
45
Python
{ "docstring": "Loads the object with id `state_id` to all workers.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _load_state_id(self, state_id):
    remote_calls = [
        worker.load_state_stream.remote(state_id) for worker in self.remote_workers
    ]
    return remote_calls
48,851
198,278
788
sympy/geometry/line.py
210
30
def __new__(cls, p1, pt=None, angle=None, **kwargs): p1 = Point(p1, dim=2) if pt is not None and angle is None: try: p2 = Point(pt, dim=2) except (NotImplementedError, TypeError, ValueError): raise ValueError(filldedent()) if p1 == p2: raise ValueError('A Ray requires two distinct points.') elif angle is not None and pt is None: # we need to know if the angle is an odd multiple of pi/2 angle = sympify(angle) c = _pi_coeff(angle) p2 = None if c is not None: if c.is_Rational: if c.q == 2: if c.p == 1: p2 = p1 + Point(0, 1) elif c.p == 3: p2 = p1 + Point(0, -1) elif c.q == 1: if c.p == 0: p2 = p1 + Point(1, 0) elif c.p == 1: p2 = p1 + Point(-1, 0) if p2 is None: c *= S.Pi else: c = angle % (2*S.Pi) if not p2: m = 2*c/S.Pi left = And(1 < m, m < 3) # is it in quadrant 2 or 3? x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True)) y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)),
Use sympify less
__new__
2a1afca9477eb781f16d5d6b63fa37abed7740a3
sympy
line.py
23
41
https://github.com/sympy/sympy.git
17
367
0
113
557
Python
{ "docstring": "\n The 2nd argument was not a valid Point; if\n it was meant to be an angle it should be\n given with keyword \"angle\".", "language": "en", "n_whitespaces": 80, "n_words": 23, "vocab_size": 20 }
def __new__(cls, p1, pt=None, angle=None, **kwargs): p1 = Point(p1, dim=2) if pt is not None and angle is None: try: p2 = Point(pt, dim=2) except (NotImplementedError, TypeError, ValueError): raise ValueError(filldedent()) if p1 == p2: raise ValueError('A Ray requires two distinct points.') elif angle is not None and pt is None: # we need to know if the angle is an odd multiple of pi/2 angle = sympify(angle) c = _pi_coeff(angle) p2 = None if c is not None: if c.is_Rational: if c.q == 2: if c.p == 1: p2 = p1 + Point(0, 1) elif c.p == 3: p2 = p1 + Point(0, -1) elif c.q == 1: if c.p == 0: p2 = p1 + Point(1, 0) elif c.p == 1: p2 = p1 + Point(-1, 0) if p2 is None: c *= S.Pi else: c = angle % (2*S.Pi) if not p2: m = 2*c/S.Pi left = And(1 < m, m < 3) # is it in quadrant 2 or 3? x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True)) y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True)) p2 = p1 + Point(x, y) else: raise ValueError('A 2nd point or keyword "angle" must be used.') return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
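A short usage sketch of the constructor shown above, assuming the public SymPy Ray API that this __new__ backs (either a second point or an angle keyword defines the ray):

from sympy import Point, Ray, pi

print(Ray(Point(0, 0), Point(2, 2)))  # Ray2D(Point2D(0, 0), Point2D(2, 2))
print(Ray((0, 0), angle=pi / 2))      # odd multiple of pi/2 -> Ray2D(Point2D(0, 0), Point2D(0, 1))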
78,575
266,772
770
test/lib/ansible_test/_internal/delegation.py
231
76
def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None con = host_state.controller_profile.get_origin_controller_connection() working_directory = host_state.controller_profile.get_working_directory() host_delegation = not isinstance(args.controller, OriginConfig) if host_delegation: if data_context().content.collection: content_root = os.path.join(working_directory, data_context().content.collection.directory) else: content_root = os.path.join(working_directory, 'ansible') ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payl
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
delegate_command
a06fa496d3f837cca3c437ab6e9858525633d147
ansible
delegation.py
17
57
https://github.com/ansible/ansible.git
16
487
0
154
803
Python
{ "docstring": "Delegate execution based on the provided host state.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None con = host_state.controller_profile.get_origin_controller_connection() working_directory = host_state.controller_profile.get_working_directory() host_delegation = not isinstance(args.controller, OriginConfig) if host_delegation: if data_context().content.collection: content_root = os.path.join(working_directory, data_context().content.collection.directory) else: content_root = os.path.join(working_directory, 'ansible') ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file: create_payload(args, payload_file.name) con.extract_archive(chdir=working_directory, src=payload_file) else: content_root = working_directory ansible_bin_path = ANSIBLE_BIN_PATH command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require) if isinstance(con, SshConnection): ssh = con.settings else: ssh = None options = [] if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets): if not args.allow_destructive: options.append('--allow-destructive') with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase] if containers: options.extend(['--containers', json.dumps(containers.to_dict())]) # Run unit tests unprivileged to prevent stray writes to the source tree. # Also disconnect from the network once requirements have been installed. if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection): pytest_user = 'pytest' writable_dirs = [ os.path.join(content_root, ResultType.JUNIT.relative_path), os.path.join(content_root, ResultType.COVERAGE.relative_path), ] con.run(['mkdir', '-p'] + writable_dirs) con.run(['chmod', '777'] + writable_dirs) con.run(['chmod', '755', working_directory]) con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)]) con.run(['useradd', pytest_user, '--create-home']) con.run(insert_options(command, options + ['--requirements-mode', 'only'])) container = con.inspect() networks = container.get_network_names() if networks is not None: for network in networks: con.disconnect_network(network) else: display.warning('Network disconnection is not supported (this is normal under podman). ' 'Tests will not be isolated from the network. Network-related tests may misbehave.') options.extend(['--requirements-mode', 'skip']) con.user = pytest_user success = False try: con.run(insert_options(command, options)) success = True finally: if host_delegation: download_results(args, con, content_root, success)
36,629
156,292
132
dask/dataframe/io/tests/test_parquet.py
75
19
def test_in_predicate_requires_an_iterable(tmp_path, engine, filter_value): path = tmp_path / "gh_8720_pandas.parquet" df = pd.DataFrame( {"A": [1, 2, 3, 4], "B": [1, 1, 2, 2]}, ) df.to_parquet(path, engine=engine) with pytest.raises(TypeError, match="Value of 'in' filter"): dd.read_parquet(path, engine=engine, filters=filter_value) # pandas to_parquet outputs a single file, dask outputs a folder with global # metadata that changes the filtering code path ddf = dd.from_pandas(df, npartitions=2) path = tmp_path / "gh_8720_dask.parquet" ddf.to_parquet(path, engine=engine) with pytest.raises(TypeError, match=
Check that values for the `in` predicate in `read_parquet` are correct (#8846) As reported in #8720, the _flatten_filters function required the value to be hashable. This implicitly required the value to be a tuple, although lists and sets would also be appropriate since they support the 'in' operation. _flatten_filters was only used to determine which columns should be filtered, so it can be refactored and removed. The 'in' predicate also requires that the value be iterable. Non-iterable values are now caught and an appropriate message is raised indicating what the user needs to change. This must be done in two places to support both fastparquet and pyarrow.
test_in_predicate_requires_an_iterable
9000abdd43772a82dcbf7999c5126b571d698d8a
dask
test_parquet.py
11
13
https://github.com/dask/dask.git
1
137
0
52
218
Python
{ "docstring": "Regression test for https://github.com/dask/dask/issues/8720", "language": "en", "n_whitespaces": 3, "n_words": 4, "vocab_size": 4 }
def test_in_predicate_requires_an_iterable(tmp_path, engine, filter_value): path = tmp_path / "gh_8720_pandas.parquet" df = pd.DataFrame( {"A": [1, 2, 3, 4], "B": [1, 1, 2, 2]}, ) df.to_parquet(path, engine=engine) with pytest.raises(TypeError, match="Value of 'in' filter"): dd.read_parquet(path, engine=engine, filters=filter_value) # pandas to_parquet outputs a single file, dask outputs a folder with global # metadata that changes the filtering code path ddf = dd.from_pandas(df, npartitions=2) path = tmp_path / "gh_8720_dask.parquet" ddf.to_parquet(path, engine=engine) with pytest.raises(TypeError, match="Value of 'in' filter"): dd.read_parquet(path, engine=engine, filters=filter_value)
78,571
266,768
65
test/lib/ansible_test/_internal/config.py
25
10
def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig] if not self.targets: raise Exception('There must be one or more targets.') assert type_guard(self.targets, targ
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
only_targets
a06fa496d3f837cca3c437ab6e9858525633d147
ansible
config.py
10
5
https://github.com/ansible/ansible.git
2
44
0
25
72
Python
{ "docstring": "\n Return a list of target host configurations.\n Requires that there are one or more targets, all the specified type.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
def only_targets(self, target_type):  # type: (t.Type[THostConfig]) -> t.List[THostConfig]
    if not self.targets:
        raise Exception('There must be one or more targets.')

    assert type_guard(self.targets, target_type)

    return t.cast(t.List[THostConfig], self.targets)
22,584
107,070
75
lib/matplotlib/widgets.py
19
11
def _get_animated_artists(self): return tuple([a for ax_ in self.ax.get_figure().get_axes() for a in ax_.g
Fix z_order
_get_animated_artists
334cc617b8ed3b6b4ec6cb64ff16a040ef454149
matplotlib
widgets.py
14
4
https://github.com/matplotlib/matplotlib.git
5
48
0
15
78
Python
{ "docstring": "\n Convenience method to get all animated artists of a figure, except\n those already present in self.artists. 'z_order' is ignored.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
def _get_animated_artists(self):
    return tuple([a for ax_ in self.ax.get_figure().get_axes()
                  for a in ax_.get_children() if a.get_animated()
                  and a not in self.artists])
23,558
109,376
131
lib/mpl_toolkits/axisartist/axislines.py
35
15
def new_gridlines(self, ax): g
Get rcParams from mpl
new_gridlines
438d30b227b1fef7e8733578f851e76a8e360f24
matplotlib
axislines.py
11
9
https://github.com/matplotlib/matplotlib.git
1
69
0
31
114
Python
{ "docstring": "\n Create and return a new GridlineCollection instance.\n\n *which* : \"major\" or \"minor\"\n *axis* : \"both\", \"x\" or \"y\"\n\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 16 }
def new_gridlines(self, ax):
    gridlines = GridlinesCollection(
        None, transform=ax.transData,
        colors=mpl.rcParams['grid.color'],
        linestyles=mpl.rcParams['grid.linestyle'],
        linewidths=mpl.rcParams['grid.linewidth'])
    ax._set_artist_props(gridlines)
    gridlines.set_grid_helper(self)
    ax.axes._set_artist_props(gridlines)
    # gridlines.set_clip_path(self.axes.patch)
    # set_clip_path need to be deferred after Axes.cla is completed.
    # It is done inside the cla.
    return gridlines
3,409
20,522
73
pipenv/patched/notpip/_vendor/pygments/util.py
26
9
def duplicates_removed(it, already_seen=()): lst = [] seen = set() for i in it: if i in seen or i in already_seen: continue lst.append(i) seen.
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
duplicates_removed
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
util.py
9
14
https://github.com/pypa/pipenv.git
4
49
0
19
82
Python
{ "docstring": "\n Returns a list with duplicates removed from the iterable `it`.\n\n Order is preserved.\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 13 }
def duplicates_removed(it, already_seen=()):
    lst = []
    seen = set()
    for i in it:
        if i in seen or i in already_seen:
            continue
        lst.append(i)
        seen.add(i)
    return lst
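A quick standalone check of the order-preserving de-duplication described in the record's docstring; the helper is re-declared verbatim so the snippet runs without the vendored pygments module:

def duplicates_removed(it, already_seen=()):
    lst = []
    seen = set()
    for i in it:
        if i in seen or i in already_seen:
            continue
        lst.append(i)
        seen.add(i)
    return lst

print(duplicates_removed([3, 1, 3, 2, 1]))               # [3, 1, 2]
print(duplicates_removed([3, 1, 2], already_seen=(1,)))  # [3, 2]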
38,490
160,110
40
numpy/ma/extras.py
11
11
def masked_all(shape, dtype=float): a = masked_array(np.empty(shape, dtype),
DOC: fix data type of parameter shape (#21251) `np.ma.masked_all` uses `np.empty` under the hood, so the parameter description for shape in `masked_all` should be updated to match that of `np.empty`. Relevant issue: #21203
masked_all
119bf865b15747bea815ec3ced10e2bbc1ba8de1
numpy
extras.py
13
4
https://github.com/numpy/numpy.git
1
39
0
10
61
Python
{ "docstring": "\n Empty masked array with all elements masked.\n\n Return an empty masked array of the given shape and dtype, where all the\n data are masked.\n\n Parameters\n ----------\n shape : int or tuple of ints\n Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``.\n dtype : dtype, optional\n Data type of the output.\n\n Returns\n -------\n a : MaskedArray\n A masked array with all data masked.\n\n See Also\n --------\n masked_all_like : Empty masked array modelled on an existing array.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> ma.masked_all((3, 3))\n masked_array(\n data=[[--, --, --],\n [--, --, --],\n [--, --, --]],\n mask=[[ True, True, True],\n [ True, True, True],\n [ True, True, True]],\n fill_value=1e+20,\n dtype=float64)\n\n The `dtype` parameter defines the underlying data type.\n\n >>> a = ma.masked_all((3, 3))\n >>> a.dtype\n dtype('float64')\n >>> a = ma.masked_all((3, 3), dtype=np.int32)\n >>> a.dtype\n dtype('int32')\n\n ", "language": "en", "n_whitespaces": 306, "n_words": 136, "vocab_size": 84 }
def masked_all(shape, dtype=float):
    a = masked_array(np.empty(shape, dtype),
                     mask=np.ones(shape, make_mask_descr(dtype)))
    return a
@not_implemented_for("directed")
42,034
176,676
333
networkx/algorithms/centrality/closeness.py
125
25
def closeness_centrality(G, u=None, distance=None, wf_improved=True): r if G.is_directed(): G = G.reverse() # create a reversed graph view if distance is not None: # use Dijkstra's algorithm with specified attribute as edge weight path_length = functools.partial( nx.single_source_dijkstra_path_length, weight=distance ) else: path_length = nx.single_source_shortest_path_length if u is None: nodes = G.nodes
added example to closeness.py (#5645) * added example on closeness * docstring improvement
closeness_centrality
58b63cb57cd1747c23611ee0b46991a5be2db751
networkx
closeness.py
17
118
https://github.com/networkx/networkx.git
9
186
1
79
294
Python
{ "docstring": "Compute closeness centrality for nodes.\n\n Closeness centrality [1]_ of a node `u` is the reciprocal of the\n average shortest path distance to `u` over all `n-1` reachable nodes.\n\n .. math::\n\n C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n\n where `d(v, u)` is the shortest-path distance between `v` and `u`,\n and `n-1` is the number of nodes reachable from `u`. Notice that the\n closeness distance function computes the incoming distance to `u`\n for directed graphs. To use outward distance, act on `G.reverse()`.\n\n Notice that higher values of closeness indicate higher centrality.\n\n Wasserman and Faust propose an improved formula for graphs with\n more than one connected component. The result is \"a ratio of the\n fraction of actors in the group who are reachable, to the average\n distance\" from the reachable actors [2]_. You might think this\n scale factor is inverted but it is not. As is, nodes from small\n components receive a smaller closeness value. Letting `N` denote\n the number of nodes in the graph,\n\n .. math::\n\n C_{WF}(u) = \\frac{n-1}{N-1} \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n u : node, optional\n Return only the value for node u\n\n distance : edge attribute key, optional (default=None)\n Use the specified edge attribute as the edge distance in shortest\n path calculations\n\n wf_improved : bool, optional (default=True)\n If True, scale by the fraction of nodes reachable. This gives the\n Wasserman and Faust improved formula. For single component graphs\n it is the same as the original formula.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with closeness centrality as the value.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> nx.closeness_centrality(G)\n {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}\n\n See Also\n --------\n betweenness_centrality, load_centrality, eigenvector_centrality,\n degree_centrality, incremental_closeness_centrality\n\n Notes\n -----\n The closeness centrality is normalized to `(n-1)/(|G|-1)` where\n `n` is the number of nodes in the connected part of graph\n containing the node. If the graph is not completely connected,\n this algorithm computes the closeness centrality for each\n connected part separately scaled by that parts size.\n\n If the 'distance' keyword is set to an edge attribute key then the\n shortest-path length will be computed using Dijkstra's algorithm with\n that edge attribute as the edge weight.\n\n The closeness centrality uses *inward* distance to a node, not outward.\n If you want to use outword distances apply the function to `G.reverse()`\n\n In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the\n outward distance rather than the inward distance. If you use a 'distance'\n keyword and a DiGraph, your results will change between v2.2 and v2.3.\n\n References\n ----------\n .. [1] Linton C. Freeman: Centrality in networks: I.\n Conceptual clarification. Social Networks 1:215-239, 1979.\n https://doi.org/10.1016/0378-8733(78)90021-7\n .. [2] pg. 201 of Wasserman, S. and Faust, K.,\n Social Network Analysis: Methods and Applications, 1994,\n Cambridge University Press.\n ", "language": "en", "n_whitespaces": 708, "n_words": 467, "vocab_size": 258 }
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
    if G.is_directed():
        G = G.reverse()  # create a reversed graph view

    if distance is not None:
        # use Dijkstra's algorithm with specified attribute as edge weight
        path_length = functools.partial(
            nx.single_source_dijkstra_path_length, weight=distance
        )
    else:
        path_length = nx.single_source_shortest_path_length

    if u is None:
        nodes = G.nodes
    else:
        nodes = [u]
    closeness_centrality = {}
    for n in nodes:
        sp = path_length(G, n)
        totsp = sum(sp.values())
        len_G = len(G)
        _closeness_centrality = 0.0
        if totsp > 0.0 and len_G > 1:
            _closeness_centrality = (len(sp) - 1.0) / totsp
            # normalize to number of nodes-1 in connected part
            if wf_improved:
                s = (len(sp) - 1.0) / (len_G - 1)
                _closeness_centrality *= s
        closeness_centrality[n] = _closeness_centrality
    if u is not None:
        return closeness_centrality[u]
    else:
        return closeness_centrality


@not_implemented_for("directed")
72,870
249,367
131
synapse/storage/databases/main/event_push_actions.py
38
11
async def _remove_old_push_actions_that_have_rotated(self) -> None: # We want to clear out anything that is older than a day that *has* already # been rotated. rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol( table="event_push_summary_stream_ordering", keyvalues={}, retcol="stream_ordering",
Clarifications for event push action processing. (#13485) * Clarifies comments. * Fixes an erroneous comment (about return type) added in #13455 (ec24813220f9d54108924dc04aecd24555277b99). * Clarifies the name of a variable. * Simplifies logic of pulling out the latest join for the requesting user.
_remove_old_push_actions_that_have_rotated
46bd7f4ed9020bbed459c03a11c26d7f7c3093b0
synapse
event_push_actions.py
11
18
https://github.com/matrix-org/synapse.git
3
64
0
34
72
Python
{ "docstring": "Clear out old push actions that have been summarised.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def _remove_old_push_actions_that_have_rotated(self) -> None: # We want to clear out anything that is older than a day that *has* already # been rotated. rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol( table="event_push_summary_stream_ordering", keyvalues={}, retcol="stream_ordering", ) max_stream_ordering_to_delete = min( rotated_upto_stream_ordering, self.stream_ordering_day_ago )
43,394
181,606
25
tests/export_tests.py
11
4
def test_indent(): multiline_string = indented_multiline_string =
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
test_indent
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
export_tests.py
8
10
https://github.com/EpistasisLab/tpot.git
1
20
0
9
39
Python
{ "docstring": "Assert that indenting a multiline string by 4 spaces prepends 4 spaces before each new line.test\ntest1\ntest2\ntest3 test\n test1\n test2\n test3", "language": "en", "n_whitespaces": 31, "n_words": 23, "vocab_size": 18 }
def test_indent(): multiline_string = indented_multiline_string = assert indented_multiline_string == _indent(multiline_string, 4)
74,702
254,598
27
onnx/tools/update_model_dims.py
19
6
def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto dim_param_set = set() # type: Set[Text]
six: remove all references (#3926) * six: remove all references ONNX has dropped python 2 support and does not need this anymore. Signed-off-by: Chris Hua <hua.christopher@gmail.com> * six: use bytes for binary_type I misread the changes; the correct migration is binary_type -> bytes in python3. Signed-off-by: Christopher Hua <chua@squareup.com> * remove additional checks for Python version no more need to check for Python 3 Signed-off-by: Christopher Hua <chua@squareup.com> * remove unused import Signed-off-by: Christopher Hua <chua@squareup.com> Co-authored-by: Ashwini Khade <askhade@microsoft.com> Co-authored-by: Chun-Wei Chen <jacky82226@gmail.com>
update_inputs_outputs_dims
2e70f6769ca9b9d0e859fbbd6854f3abc478897b
onnx
update_model_dims.py
8
19
https://github.com/onnx/onnx.git
5
139
0
16
28
Python
{ "docstring": "\n This function updates the dimension sizes of the model's inputs and outputs to the values\n provided in input_dims and output_dims. if the dim value provided is negative, a unique dim_param\n will be set for that dimension.\n\n Example. if we have the following shape for inputs and outputs:\n shape(input_1) = ('b', 3, 'w', 'h')\n shape(input_2) = ('b', 4)\n and shape(output) = ('b', 'd', 5)\n\n The parameters can be provided as:\n input_dims = {\n \"input_1\": ['b', 3, 'w', 'h'],\n \"input_2\": ['b', 4],\n }\n output_dims = {\n \"output\": ['b', -1, 5]\n }\n\n Putting it together:\n model = onnx.load('model.onnx')\n updated_model = update_inputs_outputs_dims(model, input_dims, output_dims)\n onnx.save(updated_model, 'model.onnx')\n ", "language": "en", "n_whitespaces": 364, "n_words": 102, "vocab_size": 74 }
def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto dim_param_set = set() # type: Set[Text]
25,277
114,801
118
mindsdb/integrations/lightwood_handler/lightwood_handler/utils.py
43
10
def get_aliased_columns(aliased_columns, model_alias, targets, mode=None): for col in targets: if mode == 'input': if str(col.parts[0]) != model_alias and col.alias is not None: aliased_columns[aliased_columns.index(col.parts[-1])] = str(col.alias) if mode == 'output': if str(col.parts[0]) == model_alias and col.alias is not None: aliased_columns[aliased_columns.index('prediction')] = str(col.alias) return aliased_columns
add utils file
get_aliased_columns
0c2fc2e6f9d32e8b6785890cdfd7a2bf320b4273
mindsdb
utils.py
17
9
https://github.com/mindsdb/mindsdb.git
8
109
0
28
173
Python
{ "docstring": " This method assumes mdb_sql will alert if there are two columns with the same alias ", "language": "en", "n_whitespaces": 16, "n_words": 15, "vocab_size": 15 }
def get_aliased_columns(aliased_columns, model_alias, targets, mode=None):
    for col in targets:
        if mode == 'input':
            if str(col.parts[0]) != model_alias and col.alias is not None:
                aliased_columns[aliased_columns.index(col.parts[-1])] = str(col.alias)
        if mode == 'output':
            if str(col.parts[0]) == model_alias and col.alias is not None:
                aliased_columns[aliased_columns.index('prediction')] = str(col.alias)
    return aliased_columns
78,195
265,786
172
netbox/utilities/urls.py
66
17
def get_model_urls(app_label, model_name): paths = [] # Retrieve reg
#9072: Implement a mechanism for dynamically registering model detail views
get_model_urls
0d7851ed9de2792ea6d9ed223c315c235290ddd7
netbox
urls.py
16
14
https://github.com/netbox-community/netbox.git
4
88
0
45
172
Python
{ "docstring": "\n Return a list of URL paths for detail views registered to the given model.\n\n Args:\n app_label: App/plugin name\n model_name: Model name\n ", "language": "en", "n_whitespaces": 45, "n_words": 21, "vocab_size": 20 }
def get_model_urls(app_label, model_name): paths = [] # Retrieve registered views for this model try: views = registry['views'][app_label][model_name] except KeyError: # No views have been registered for this model views = [] for view in views: # Import the view class or function callable = import_string(view['path']) if issubclass(callable, View): callable = callable.as_view() # Create a path to the view paths.append( path(f"{view['name']}/", callable, name=f"{model_name}_{view['name']}", kwargs=view['kwargs']) ) return paths
4,186
22,110
31
pipenv/patched/pip/_vendor/requests/sessions.py
11
6
def head(self, url, **kwargs): r kwargs.setdefault("allow_redirects", False) return self.request
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
head
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
sessions.py
8
9
https://github.com/pypa/pipenv.git
1
32
0
10
52
Python
{ "docstring": "Sends a HEAD request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :rtype: requests.Response\n ", "language": "en", "n_whitespaces": 52, "n_words": 24, "vocab_size": 22 }
def head(self, url, **kwargs):
    kwargs.setdefault("allow_redirects", False)
    return self.request("HEAD", url, **kwargs)
10,774
53,293
199
src/prefect/agent.py
43
24
async def get_and_submit_flow_runs(self) -> List[FlowRun]: if not self.started: raise RuntimeError("Agent is not started. Use `async with OrionAgent()...`") self.logger.debug("Checking for flow runs...") submittable_runs = await self.client.read_flow_runs( sort=FlowRunSort.NEXT_SCHEDULED_S
Improve `prefect orion start` output
get_and_submit_flow_runs
9efee44dbee3a326e3e754139f4ea0d721849561
prefect
agent.py
12
19
https://github.com/PrefectHQ/prefect.git
3
93
0
38
160
Python
{ "docstring": "\n Queries for scheduled flow runs and submits them for execution in parallel\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
async def get_and_submit_flow_runs(self) -> List[FlowRun]:
    if not self.started:
        raise RuntimeError("Agent is not started. Use `async with OrionAgent()...`")

    self.logger.debug("Checking for flow runs...")
    submittable_runs = await self.client.read_flow_runs(
        sort=FlowRunSort.NEXT_SCHEDULED_START_TIME_ASC,
        flow_run_filter=self.flow_run_query_filter(),
    )
    for flow_run in submittable_runs:
        self.logger.info(f"Submitting flow run '{flow_run.id}'")
        self.submitting_flow_run_ids.add(flow_run.id)
        self.task_group.start_soon(
            self.submit_run,
            flow_run,
        )
    return submittable_runs
56,962
223,543
84
python3.10.4/Lib/email/_header_value_parser.py
35
9
def get_quoted_string(value): quoted_string = QuotedString() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) token, value = get_bare_quoted_string(value) quoted_string.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) return quoted_string, value
add python 3.10.4 for windows
get_quoted_string
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_header_value_parser.py
10
11
https://github.com/XX-net/XX-Net.git
5
77
0
17
127
Python
{ "docstring": "quoted-string = [CFWS] <bare-quoted-string> [CFWS]\n\n 'bare-quoted-string' is an intermediate class defined by this\n parser and not by the RFC grammar. It is the quoted string\n without any attached CFWS.\n ", "language": "en", "n_whitespaces": 42, "n_words": 29, "vocab_size": 25 }
def get_quoted_string(value):
    quoted_string = QuotedString()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        quoted_string.append(token)
    token, value = get_bare_quoted_string(value)
    quoted_string.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        quoted_string.append(token)
    return quoted_string, value
491
3,584
96
airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_core.py
31
13
def test_defined_keyword_exist_in_schema(self, keyword, discovered_catalog): schemas_errors = [] for stream_name, stream in discovered_catalog.items(): check_result = find_keyword_schema(stream.json_schema, key=keyword) if check_result: schemas_errors
SAT: check for not allowed keywords `allOf`, `not` in connectors schema (#9851) * test_defined_keyword_exist_in_schema added Signed-off-by: Sergey Chvalyuk <grubberr@gmail.com>
test_defined_keyword_exist_in_schema
c8ee3f834120aa365acaa90b5eb583ac52c476ca
airbyte
test_core.py
11
7
https://github.com/airbytehq/airbyte.git
3
52
0
28
91
Python
{ "docstring": "Checking for the presence of not allowed keywords within each json schema", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_defined_keyword_exist_in_schema(self, keyword, discovered_catalog):
    schemas_errors = []
    for stream_name, stream in discovered_catalog.items():
        check_result = find_keyword_schema(stream.json_schema, key=keyword)
        if check_result:
            schemas_errors.append(stream_name)

    assert not schemas_errors, f"Found not allowed `{keyword}` keyword for selected streams: {schemas_errors}."
50,851
204,719
123
django/core/management/utils.py
39
15
def normalize_path_patterns(patterns): patterns = [os.path.normcase(p) for p in patterns] dir_suffixes = {"%s*" % path_sep for path_sep in {"/", os.sep}} norm_patterns = [] for pattern in patterns: for dir_suffix in dir_suffixes: if pattern.endswith(dir_suffix): norm_patterns.append(pattern[: -len(dir_suffix)]) break else: norm_patt
Refs #33476 -- Reformatted code with Black.
normalize_path_patterns
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
utils.py
18
12
https://github.com/django/django.git
6
86
0
29
141
Python
{ "docstring": "Normalize an iterable of glob style patterns based on OS.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def normalize_path_patterns(patterns):
    patterns = [os.path.normcase(p) for p in patterns]
    dir_suffixes = {"%s*" % path_sep for path_sep in {"/", os.sep}}
    norm_patterns = []
    for pattern in patterns:
        for dir_suffix in dir_suffixes:
            if pattern.endswith(dir_suffix):
                norm_patterns.append(pattern[: -len(dir_suffix)])
                break
        else:
            norm_patterns.append(pattern)
    return norm_patterns
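A small usage sketch of the normalization above; the function is re-declared so the example runs without Django (output shown for a POSIX os.sep):

import os

def normalize_path_patterns(patterns):
    patterns = [os.path.normcase(p) for p in patterns]
    dir_suffixes = {"%s*" % path_sep for path_sep in {"/", os.sep}}
    norm_patterns = []
    for pattern in patterns:
        for dir_suffix in dir_suffixes:
            if pattern.endswith(dir_suffix):
                norm_patterns.append(pattern[: -len(dir_suffix)])
                break
        else:
            norm_patterns.append(pattern)
    return norm_patterns

# A trailing "/*" is stripped so the pattern also matches the directory itself.
print(normalize_path_patterns(["docs/*", "*.py"]))  # ['docs', '*.py']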
186
1,383
20
packages/syft/src/syft/core/node/common/node_service/success_resp_message.py
6
3
def get_protobuf_schema() -> GeneratedProtocolMessageType: return ErrorResponseMessage_PB
ADD new Syft ErrorResponseMessage
get_protobuf_schema
14892f3e25065f85fcca953eac681f50880c0c48
PySyft
success_resp_message.py
6
14
https://github.com/OpenMined/PySyft.git
1
9
0
6
18
Python
{ "docstring": "Return the type of protobuf object which stores a class of this type\n As a part of serialization and deserialization, we need the ability to\n lookup the protobuf object type directly from the object type. This\n static method allows us to do this.\n Importantly, this method is also used to create the reverse lookup ability within\n the metaclass of Serializable. In the metaclass, it calls this method and then\n it takes whatever type is returned from this method and adds an attribute to it\n with the type of this class attached to it. See the MetaSerializable class for\n details.\n :return: the type of protobuf object which corresponds to this class.\n :rtype: GeneratedProtocolMessageType\n ", "language": "en", "n_whitespaces": 189, "n_words": 112, "vocab_size": 63 }
def get_protobuf_schema() -> GeneratedProtocolMessageType:
    return ErrorResponseMessage_PB
72,295
248,467
287
tests/rest/media/test_media_retention.py
59
17
def test_remote_media_cache_retention(self) -> None: # Advance 31 days (in seconds) self.reactor.advance(31 * 24 * 60 * 60) # Check that media has been correctly purged. # Local media should be unaffected. # Remote media accessed <30 days ago should still exist. self._assert_if_mxc_uris_purged( purged=[ (self.remote_server_name, self.remote_not_recently_accessed_media),
Add config options for media retention (#12732)
test_remote_media_cache_retention
2fc787c341ff540e5880932f116498ec0ed7a2c2
synapse
test_media_retention.py
14
20
https://github.com/matrix-org/synapse.git
1
106
0
47
158
Python
{ "docstring": "\n Tests that entries from the remote media cache that have not been accessed\n recently is purged, while local media is unaffected.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 18 }
def test_remote_media_cache_retention(self) -> None: # Advance 31 days (in seconds) self.reactor.advance(31 * 24 * 60 * 60) # Check that media has been correctly purged. # Local media should be unaffected. # Remote media accessed <30 days ago should still exist. self._assert_if_mxc_uris_purged( purged=[ (self.remote_server_name, self.remote_not_recently_accessed_media), ], not_purged=[ (self.remote_server_name, self.remote_recently_accessed_media), (self.hs.config.server.server_name, self.local_recently_accessed_media), ( self.hs.config.server.server_name, self.local_not_recently_accessed_media, ), (self.hs.config.server.server_name, self.local_never_accessed_media), ], )
52,781
209,791
232
scapy/arch/windows/__init__.py
54
11
def setmodulation(self, modu): # type: (int) -> bool # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501 self._check_npcap_requirement() _modus = { 0: "dsss", 1: "fhss", 2: "irbaseband", 3: "ofdm", 4: "hrdss", 5: "erp",
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <pierre@droids-corp.org>
setmodulation
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
scapy
__init__.py
10
17
https://github.com/secdev/scapy.git
2
92
0
50
159
Python
{ "docstring": "Set the interface modulation. It can be:\n - 0: dsss\n - 1: fhss\n - 2: irbaseband\n - 3: ofdm\n - 4: hrdss\n - 5: erp\n - 6: ht\n - 7: vht\n - 8: ihv\n - 9: mimo-ofdm\n - 10: mimo-ofdm\n - the value directly\n Only available with Npcap.", "language": "en", "n_whitespaces": 174, "n_words": 48, "vocab_size": 35 }
def setmodulation(self, modu): # type: (int) -> bool # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501 self._check_npcap_requirement() _modus = { 0: "dsss", 1: "fhss", 2: "irbaseband", 3: "ofdm", 4: "hrdss", 5: "erp", 6: "ht", 7: "vht", 8: "ihv", 9: "mimo-ofdm", 10: "mimo-ofdm", } m = _modus.get(modu, "unknown") if isinstance(modu, int) else modu return self._npcap_set("modu", str(m))
53,959
215,415
22
salt/transport/rabbitmq.py
8
7
def _on_connection_error(self, connection, exception): log.error("Failed to connect", exc_info=True)
Start to add base class defs
_on_connection_error
ab4803984bce4a4de7cc10910e7310c4babf557e
salt
rabbitmq.py
8
2
https://github.com/saltstack/salt.git
1
20
0
8
34
Python
{ "docstring": "\n Invoked by pika when connection on connection error\n :param connection:\n :param exception:\n :return:\n ", "language": "en", "n_whitespaces": 49, "n_words": 13, "vocab_size": 11 }
def _on_connection_error(self, connection, exception):
    log.error("Failed to connect", exc_info=True)
40,202
168,095
38
pandas/core/indexes/multi.py
17
4
def is_monotonic_decreasing(self) -> bool:
DOC: Add numpydoc SS06 validation (#47885)
is_monotonic_decreasing
62a69beddbedde349891378992c902c0b9341a9f
pandas
multi.py
9
5
https://github.com/pandas-dev/pandas.git
1
17
0
15
32
Python
{ "docstring": "\n Return a boolean if the values are equal or decreasing.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def is_monotonic_decreasing(self) -> bool:
    # monotonic decreasing if and only if reverse is monotonic increasing
    return self[::-1].is_monotonic_increasing
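Usage of the property above on a concrete MultiIndex (standard pandas API; the data is illustrative):

import pandas as pd

idx = pd.MultiIndex.from_arrays([[3, 2, 1], ["c", "b", "a"]])
print(idx.is_monotonic_decreasing)        # True
print(idx[::-1].is_monotonic_decreasing)  # False (that ordering is increasing)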
19,669
99,591
201
tests/sentry/notifications/test_notifications.py
70
28
def test_sends_assignment_notification(self): url = f"/api/0/issues/{self.group.id}/" with self.tasks(): response = self.client.put(url, format="json", data={"assignedTo": self.user.username}) assert response.status_code == 200, response.content msg = mail.outbox[0] # check the txt version assert f"assigned {self.s
fix(notifications): Use `metrics_key` (#34572)
test_sends_assignment_notification
1730c481f1a8a71446326fa1ff72e10663016385
sentry
test_notifications.py
15
15
https://github.com/getsentry/sentry.git
1
114
0
50
230
Python
{ "docstring": "\n Test that an email AND Slack notification are sent with\n the expected values when an issue is assigned.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
def test_sends_assignment_notification(self): url = f"/api/0/issues/{self.group.id}/" with self.tasks(): response = self.client.put(url, format="json", data={"assignedTo": self.user.username}) assert response.status_code == 200, response.content msg = mail.outbox[0] # check the txt version assert f"assigned {self.short_id} to themselves" in msg.body # check the html version assert f"{self.short_id}</a> to themselves</p>" in msg.alternatives[0][0] attachment, text = get_attachment() assert text == f"Issue assigned to {self.name} by themselves" assert attachment["title"] == self.group.title assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=assigned_activity-slack-user|Notification Settings>" )
50,164
202,903
473
django/db/migrations/questioner.py
119
18
def _ask_default(self, default=''): self.prompt_output.write('Please enter the default value as valid Python.') if default: self.prompt_output.write( f"Accept the default '{default}' by pressing 'Enter' or " f"provide another value." ) self.prompt_output.write( 'The datetime and django.utils.timezone modules are available, so ' 'it is possible to provide e.g. timezone.now as a valu
Refs #29026 -- Allowed customizing InteractiveMigrationQuestioner's prompt destination. Previously, the questioner did not obey the value of stdout provided to the command.
_ask_default
0ab58c120939093fea90822f376e1866fc714d1f
django
questioner.py
17
30
https://github.com/django/django.git
9
158
0
91
288
Python
{ "docstring": "\n Prompt for a default value.\n\n The ``default`` argument allows providing a custom default value (as a\n string) which will be shown to the user and used as the return value\n if the user doesn't provide any other input.\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 31 }
def _ask_default(self, default=''): self.prompt_output.write('Please enter the default value as valid Python.') if default: self.prompt_output.write( f"Accept the default '{default}' by pressing 'Enter' or " f"provide another value." ) self.prompt_output.write( 'The datetime and django.utils.timezone modules are available, so ' 'it is possible to provide e.g. timezone.now as a value.' ) self.prompt_output.write("Type 'exit' to exit this prompt") while True: if default: prompt = "[default: {}] >>> ".format(default) else: prompt = ">>> " self.prompt_output.write(prompt, ending='') code = input() if not code and default: code = default if not code: self.prompt_output.write("Please enter some code, or 'exit' (without quotes) to exit.") elif code == "exit": sys.exit(1) else: try: return eval(code, {}, {'datetime': datetime, 'timezone': timezone}) except (SyntaxError, NameError) as e: self.prompt_output.write('Invalid input: %s' % e)
45,596
186,689
37
certbot-apache/certbot_apache/_internal/parser.py
9
5
def reset_modules(self) -> None: self.modules = {} self.update_modules
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <ferrand.ad@gmail.com>
reset_modules
7d9e9a49005de7961e84d2a7c608db57dbab3046
certbot
parser.py
7
6
https://github.com/certbot/certbot.git
1
24
0
9
44
Python
{ "docstring": "Reset the loaded modules list. This is called from cleanup to clear\n temporarily loaded modules.", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
def reset_modules(self) -> None:
    self.modules = {}
    self.update_modules()
    self.parse_modules()
30,112
133,800
259
rllib/agents/ppo/tests/test_ddppo.py
56
28
def test_ddppo_compilation(self):
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
test_ddppo_compilation
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
test_ddppo.py
15
15
https://github.com/ray-project/ray.git
4
121
0
45
202
Python
{ "docstring": "Test whether a DDPPOTrainer can be built with both frameworks.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_ddppo_compilation(self): config = ppo.ddppo.DEFAULT_CONFIG.copy() config["num_gpus_per_worker"] = 0 num_iterations = 2 for _ in framework_iterator(config, frameworks="torch"): trainer = ppo.ddppo.DDPPOTrainer(config=config, env="CartPole-v0") for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) # Make sure, weights on all workers are the same (including # local one). weights = trainer.workers.foreach_worker(lambda w: w.get_weights()) for w in weights[1:]: check(w, weights[0]) check_compute_single_action(trainer) trainer.stop()
@keras_export('keras.metrics.categorical_crossentropy', 'keras.losses.categorical_crossentropy') @tf.__internal__.dispatch.add_dispatch_support
79,731
268,862
56
keras/losses.py
30
19
def log_cosh(y_true, y_pred): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) def _logcosh(x): return x + tf.math.softplus(-
Fix keras docstrings PiperOrigin-RevId: 424275818
log_cosh
89879e2c76e86c685e44c47a6cdb82f7e645c142
keras
losses.py
13
5
https://github.com/keras-team/keras.git
1
47
1
26
157
Python
{ "docstring": "Logarithm of the hyperbolic cosine of the prediction error.\n\n `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and\n to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly\n like the mean squared error, but will not be so strongly affected by the\n occasional wildly incorrect prediction.\n\n Standalone usage:\n\n >>> y_true = np.random.random(size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.logcosh(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> x = y_pred - y_true\n >>> assert np.allclose(\n ... loss.numpy(),\n ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.), axis=-1),\n ... atol=1e-5)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.\n ", "language": "en", "n_whitespaces": 169, "n_words": 131, "vocab_size": 93 }
def log_cosh(y_true, y_pred):
    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, y_pred.dtype)

    def _logcosh(x):
        return x + tf.math.softplus(-2. * x) - tf.cast(
            tf.math.log(2.), x.dtype)

    return backend.mean(_logcosh(y_pred - y_true), axis=-1)


@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
80,821
271,597
38
keras/engine/training.py
13
4
def make_predict_function(self, force=False): if self.predict_function is not None and not force: return self.predict_function
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
make_predict_function
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training.py
8
17
https://github.com/keras-team/keras.git
6
83
0
11
41
Python
{ "docstring": "Creates a function that executes one step of inference.\n\n This method can be overridden to support custom inference logic.\n This method is called by `Model.predict` and `Model.predict_on_batch`.\n\n Typically, this method directly controls `tf.function` and\n `tf.distribute.Strategy` settings, and delegates the actual evaluation\n logic to `Model.predict_step`.\n\n This function is cached the first time `Model.predict` or\n `Model.predict_on_batch` is called. The cache is cleared whenever\n `Model.compile` is called. You can skip the cache and generate again the\n function with `force=True`.\n\n Args:\n force: Whether to regenerate the predict function and skip the cached\n function if available.\n\n Returns:\n Function. The function created by this method should accept a\n `tf.data.Iterator`, and return the outputs of the `Model`.\n ", "language": "en", "n_whitespaces": 232, "n_words": 110, "vocab_size": 71 }
def make_predict_function(self, force=False): if self.predict_function is not None and not force: return self.predict_function
76,436
260,715
31
sklearn/feature_selection/_rfe.py
10
7
def fit(self, X, y, **fit_params):
MAINT Add parameter validation to RFE and RFECV. (#24137) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
fit
c9d4e1f86e6d8c58441b1aa01d0a79f25cf3a999
scikit-learn
_rfe.py
8
3
https://github.com/scikit-learn/scikit-learn.git
1
30
0
9
47
Python
{ "docstring": "Fit the RFE model and then the underlying estimator on the selected features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,)\n The target values.\n\n **fit_params : dict\n Additional parameters passed to the `fit` method of the underlying\n estimator.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "language": "en", "n_whitespaces": 176, "n_words": 58, "vocab_size": 45 }
def fit(self, X, y, **fit_params):
    self._validate_params()
    return self._fit(X, y, **fit_params)
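A hedged usage sketch of the fit API documented above, using standard scikit-learn classes; the dataset and estimator choices are illustrative:

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=100, n_features=8, random_state=0)
selector = RFE(LogisticRegression(max_iter=500), n_features_to_select=3).fit(X, y)
print(selector.support_)  # boolean mask over the 8 features; exactly 3 are True
print(selector.ranking_)  # selected features have rank 1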
11,321
55,468
52
tests/cli/test_storage.py
31
8
def test_invalid_number_selection_fails(): number_string = "99999999" result = get_first_menu_and_fail(number_string) lines = result.stdout.splitlines() # Strange string addition are due to coloring, I believe assert lines[-1] == f"\x1b[31mInvalid selection {number_string}\x1b[0m" assert result.exit_code == 1
basic tests for storage cli
test_invalid_number_selection_fails
21b8eed6887646c8c2a752961a84c855dd4fed22
prefect
test_storage.py
9
6
https://github.com/PrefectHQ/prefect.git
1
38
0
27
74
Python
{ "docstring": "\n We need to make sure that if we give an invalid number that the CLI\n will exit.\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 16 }
def test_invalid_number_selection_fails(): number_string = "99999999" result = get_first_menu_and_fail(number_string) lines = result.stdout.splitlines() # Strange string addition are due to coloring, I believe assert lines[-1] == f"\x1b[31mInvalid selection {number_string}\x1b[0m" assert result.exit_code == 1
48,569
197,470
16
sympy/utilities/misc.py
10
9
def filldedent(s, w=70, **kwargs): return '\n' + fill(de
Pass keyword arguments to filldedent() through to fill()
filldedent
2047f4855577845b1b99e0926b887d313725a6e7
sympy
misc.py
14
2
https://github.com/sympy/sympy.git
1
38
0
10
66
Python
{ "docstring": "\n Strips leading and trailing empty lines from a copy of ``s``, then dedents,\n fills and returns it.\n\n Empty line stripping serves to deal with docstrings like this one that\n start with a newline after the initial triple quote, inserting an empty\n line at the beginning of the string.\n\n Additional keyword arguments will be passed to ``textwrap.fill()``.\n\n See Also\n ========\n strlines, rawlines\n\n ", "language": "en", "n_whitespaces": 92, "n_words": 61, "vocab_size": 52 }
def filldedent(s, w=70, **kwargs): return '\n' + fill(dedent(str(s)).strip('\n'), width=w, **kwargs)
38,732
160,788
1,357
numpy/lib/arraysetops.py
528
56
def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Convert booleans to uint8 so we can use the fast integer algorithm if ar1.dtype == bool: ar1 = ar1.view(np.uint8) if ar2.dtype == bool: ar2 = ar2.view(np.uint8) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if kind not in {None, 'sort', 'table'}: raise ValueError( "Invalid kind: {0}. ".format(kind) + "Please use None, 'sort' or 'table'.") if integer_arrays and kind in {None, 'table'}: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = int(ar2_max) - int(ar2_min) # Constraints on whether we can actually use the table method: range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. # However, here we set the requirement that by default # the intermediate array can only be 6x # the combined memory allocation of the original # arrays. See discussion on # https://github.com/numpy/numpy/pull/12065. if ( range_safe_from_overflow and (below_memory_constraint or kind == 'table') ): if invert: outgoing_array = np.ones_like(ar1, dtype=bool) else: outgoing_array = np.zeros_like(ar1, dtype=bool) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array elif kind == 'table': # not range_safe_from_overflow raise RuntimeError( "You have specified kind='table', " "but the range of values in `ar2` exceeds the " "maximum integer of the datatype. " "Please set `kind` to None or 'sort'." ) elif kind == 'table': raise ValueError( "The 'table' method is only " "supported for boolean or integer arrays. " "Please select 'sort' or None for kind." ) # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(
MAINT: change kind names for in1d - Switch dictionary->table, mergesort->sort
in1d
4ed458f16d9dd64554ccf49e315c5b8fb577d4cd
numpy
arraysetops.py
16
80
https://github.com/numpy/numpy.git
24
598
0
272
978
Python
{ "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n kind : {None, 'sort', 'table'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed. Default will select automatically\n based on memory considerations.\n\n * If 'sort', will use a mergesort-based approach. This will have\n a memory usage of roughly 6 times the sum of the sizes of\n `ar1` and `ar2`, not accounting for size of dtypes.\n * If 'table', will use a key-dictionary approach similar\n to a counting sort. This is only available for boolean and\n integer arrays. This will have a memory usage of the\n size of `ar1` plus the max-min value of `ar2`. This tends\n to be the faster method if the following formula is true:\n ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,\n but may use greater memory.\n * If `None`, will automatically choose 'table' if\n the required memory allocation is less than or equal to\n 6 times the sum of the sizes of `ar1` and `ar2`,\n otherwise will use 'sort'. This is done to not use\n a large amount of memory by default, even though\n 'table' may be faster in most cases.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "language": "en", "n_whitespaces": 921, "n_words": 485, "vocab_size": 257 }
def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Convert booleans to uint8 so we can use the fast integer algorithm if ar1.dtype == bool: ar1 = ar1.view(np.uint8) if ar2.dtype == bool: ar2 = ar2.view(np.uint8) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if kind not in {None, 'sort', 'table'}: raise ValueError( "Invalid kind: {0}. ".format(kind) + "Please use None, 'sort' or 'table'.") if integer_arrays and kind in {None, 'table'}: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = int(ar2_max) - int(ar2_min) # Constraints on whether we can actually use the table method: range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. # However, here we set the requirement that by default # the intermediate array can only be 6x # the combined memory allocation of the original # arrays. See discussion on # https://github.com/numpy/numpy/pull/12065. if ( range_safe_from_overflow and (below_memory_constraint or kind == 'table') ): if invert: outgoing_array = np.ones_like(ar1, dtype=bool) else: outgoing_array = np.zeros_like(ar1, dtype=bool) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array elif kind == 'table': # not range_safe_from_overflow raise RuntimeError( "You have specified kind='table', " "but the range of values in `ar2` exceeds the " "maximum integer of the datatype. " "Please set `kind` to None or 'sort'." ) elif kind == 'table': raise ValueError( "The 'table' method is only " "supported for boolean or integer arrays. " "Please select 'sort' or None for kind." ) # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. 
order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx]
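The in1d docstring above explains the counting-sort-like 'table' kind; a self-contained NumPy sketch of that idea for integer inputs is given below (the function name and details are illustrative, not the NumPy internals). It trades memory, a boolean table the size of ar2's value range, for speed, which is exactly the trade-off the kind=None heuristic weighs:

import numpy as np

def in1d_table_sketch(ar1, ar2):
    # Build a boolean lookup table spanning ar2's value range, then index it
    # with ar1, masking out values that fall outside the range.
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()
    lo, hi = int(ar2.min()), int(ar2.max())
    table = np.zeros(hi - lo + 1, dtype=bool)
    table[ar2 - lo] = True
    out = np.zeros(ar1.shape, dtype=bool)
    in_range = (ar1 >= lo) & (ar1 <= hi)
    out[in_range] = table[ar1[in_range] - lo]
    return out

print(in1d_table_sketch([0, 1, 2, 5, 0], [0, 2]))  # [ True False  True False  True]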
71,096
246,202
124
tests/rest/admin/test_user.py
50
14
def test_devices(self) -> None: # Login in as the user self._get_token() # Check that we don't see a new device in our devices list channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # We should only see the one device (from the login in `prepare`) self.assertEqual(len(
Add type hints to `tests/rest/admin` (#11851)
test_devices
901b264c0c88f39cbfb8b2229e0dc57968882658
synapse
test_user.py
11
8
https://github.com/matrix-org/synapse.git
1
66
0
42
111
Python
{ "docstring": "Tests that logging in as a user doesn't create a new device for them.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def test_devices(self) -> None: # Login in as the user self._get_token() # Check that we don't see a new device in our devices list channel = self.make_request( "GET", "devices", b"{}", access_token=self.other_user_tok ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # We should only see the one device (from the login in `prepare`) self.assertEqual(len(channel.json_body["devices"]), 1)
2,845
14,346
51
pydantic/_internal/_validation_functions.py
17
4
def in_ipython() -> bool: try: eval('__IPYTHON__') except NameError:
Switching to `pydantic_core` (#4516) * working on core schema generation * adapting main.py * getting tests to run * fix tests * disable pyright, fix mypy * moving to class-based model generation * working on validators * change how models are created * start fixing test_main.py * fixing mypy * SelfType * recursive models working, more tests fixed * fix tests on <3.10 * get docs build to pass * starting to cleanup types.py * starting works on custom types * working on using annotated-types * using annoated types for constraints * lots of cleanup, fixing network tests * network tests passing :tada: * working on types * working on types and cleanup * fixing UUID type, restructing again * more types and newer pydantic-core * working on Iterable * more test_types tests * support newer pydantic-core, fixing more test_types.py * working through more test_types.py * test_types.py at last passing locally :tada: * fixing more tests in test_types.py * fix datetime_parse tests and linting * get tests running again, rename to test_datetime.py * renaming internal modules * working through mypy errors * fixing mypy * refactoring _generate_schema.py * test_main.py passing * uprev deps * fix conftest and linting? * importing Annotated * ltining * import Annotated from typing_extensions * fixing 3.7 compatibility * fixing tests on 3.9 * fix linting * fixing SecretField and 3.9 tests * customising get_type_hints * ignore warnings on 3.11 * spliting repr out of utils * removing unused bits of _repr, fix tests for 3.7 * more cleanup, removing many type aliases * clean up repr * support namedtuples and typeddicts * test is_union * removing errors, uprev pydantic-core * fix tests on 3.8 * fixing private attributes and model_post_init * renaming and cleanup * remove unnecessary PydanticMetadata inheritance * fixing forward refs and mypy tests * fix signatures, change how xfail works * revert mypy tests to 3.7 syntax * correct model title * try to fix tests * fixing ClassVar forward refs * uprev pydantic-core, new error format * add "force" argument to model_rebuild * Apply suggestions from code review Suggestions from @tiangolo and @hramezani :pray: Co-authored-by: Hasan Ramezani <hasan.r67@gmail.com> Co-authored-by: Sebastián Ramírez <tiangolo@gmail.com> * more suggestions from @tiangolo * extra -> json_schema_extra on Field Co-authored-by: Hasan Ramezani <hasan.r67@gmail.com> Co-authored-by: Sebastián Ramírez <tiangolo@gmail.com>
in_ipython
594effa279668bd955e98f1cd5c036b37d3bbd40
pydantic
_validation_functions.py
10
10
https://github.com/pydantic/pydantic.git
2
22
0
16
44
Python
{ "docstring": "\n Check whether we're in an ipython environment, including jupyter notebooks.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
def in_ipython() -> bool: try: eval('__IPYTHON__') except NameError: return False else: # pragma: no cover return True
76,072
260,097
51
sklearn/utils/tests/test_param_validation.py
28
8
def test_stroptions_deprecated_internal_subset():
MNT Param validation: do not expose internal values in error msg (#23459) * allow to not expose internal valid params in error msg * ensure deprecated and internal do not overlap * deprecated and internal must be subsets of options * black
test_stroptions_deprecated_internal_subset
122876e9ab1ab494b4bf0ca3360d5a1527caf2e7
scikit-learn
test_param_validation.py
12
5
https://github.com/scikit-learn/scikit-learn.git
1
65
0
17
124
Python
{ "docstring": "Check that the deprecated and internal parameters must be subsets of options.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_stroptions_deprecated_internal_subset(): with pytest.raises(ValueError, match="deprecated options must be a subset"): StrOptions({"a", "b", "c"}, deprecated={"a", "d"}) with pytest.raises(ValueError, match="internal options must be a subset"): StrOptions({"a", "b", "c"}, internal={"a", "d"})
52,110
207,805
114
tests/admin_views/tests.py
29
15
def test_change_view_without_object_change_permission(self): change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,)) self.client.force_login(self.viewuser) response = self.client.get(change_url) self.assertEqual(response.context["title"], "View article") self.assertContains(response, "<title>View article | Django site admin</title>") self.assertContains(response, "<h1>View article</h1>") self.assertContains( response, '<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close</a>', )
Refs #33476 -- Reformatted code with Black.
test_change_view_without_object_change_permission
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
12
11
https://github.com/django/django.git
1
81
0
27
138
Python
{ "docstring": "\n The object should be read-only if the user has permission to view it\n and change objects of that type but not to change the current object.\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 23 }
def test_change_view_without_object_change_permission(self): change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,)) self.client.force_login(self.viewuser) response = self.client.get(change_url) self.assertEqual(response.context["title"], "View article") self.assertContains(response, "<title>View article | Django site admin</title>") self.assertContains(response, "<h1>View article</h1>") self.assertContains( response, '<a href="/test_admin/admin9/admin_views/article/" class="closelink">Close</a>', )
39,358
163,015
1,142
pandas/__init__.py
355
25
def __getattr__(name): import warnings if name in __deprecated_num_index_names: warnings.warn( f"pandas.{name} is deprecated " "and will be removed from pandas in a future version. " "Use pandas.Index with the appropriate dtype instead.", FutureWarning, stacklevel=2, ) from pandas.core.api import Float64Index, Int64Index, UInt64Index return { "Float64Index": Float64Index, "Int64Index": Int64Index, "UInt64Index": UInt64Index, }[name] elif name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. " "Import from datetime module instead.", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == "np": warnings.warn( "The pandas.np module is deprecated " "and will be removed from pandas in a future version. " "Import numpy directly instead.", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {"SparseSeries", "SparseDataFrame"}: warnings.warn( f"The {name} class is removed from pandas. Accessing it from " "the top-level namespace will also be removed in the next version.", FutureWarning, stacklevel=2, ) retur
API: hide NumericIndex from public top-level namespace in favor of pd.Index (#44819)
__getattr__
abd74363b3f911cbba79cfd4d78fb17521b39928
pandas
__init__.py
13
55
https://github.com/pandas-dev/pandas.git
6
187
0
229
793
Python
{ "docstring": "\npandas - a powerful data analysis and manipulation library for Python\n=====================================================================\n\n**pandas** is a Python package providing fast, flexible, and expressive data\nstructures designed to make working with \"relational\" or \"labeled\" data both\neasy and intuitive. It aims to be the fundamental high-level building block for\ndoing practical, **real world** data analysis in Python. Additionally, it has\nthe broader goal of becoming **the most powerful and flexible open source data\nanalysis / manipulation tool available in any language**. It is already well on\nits way toward this goal.\n\nMain Features\n-------------\nHere are just a few of the things that pandas does well:\n\n - Easy handling of missing data in floating point as well as non-floating\n point data.\n - Size mutability: columns can be inserted and deleted from DataFrame and\n higher dimensional objects\n - Automatic and explicit data alignment: objects can be explicitly aligned\n to a set of labels, or the user can simply ignore the labels and let\n `Series`, `DataFrame`, etc. automatically align the data for you in\n computations.\n - Powerful, flexible group by functionality to perform split-apply-combine\n operations on data sets, for both aggregating and transforming data.\n - Make it easy to convert ragged, differently-indexed data in other Python\n and NumPy data structures into DataFrame objects.\n - Intelligent label-based slicing, fancy indexing, and subsetting of large\n data sets.\n - Intuitive merging and joining data sets.\n - Flexible reshaping and pivoting of data sets.\n - Hierarchical labeling of axes (possible to have multiple labels per tick).\n - Robust IO tools for loading data from flat files (CSV and delimited),\n Excel files, databases, and saving/loading data from the ultrafast HDF5\n format.\n - Time series-specific functionality: date range generation and frequency\n conversion, moving window statistics, date shifting and lagging.\n", "language": "en", "n_whitespaces": 321, "n_words": 289, "vocab_size": 187 }
def __getattr__(name): import warnings if name in __deprecated_num_index_names: warnings.warn( f"pandas.{name} is deprecated " "and will be removed from pandas in a future version. " "Use pandas.Index with the appropriate dtype instead.", FutureWarning, stacklevel=2, ) from pandas.core.api import Float64Index, Int64Index, UInt64Index return { "Float64Index": Float64Index, "Int64Index": Int64Index, "UInt64Index": UInt64Index, }[name] elif name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. " "Import from datetime module instead.", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == "np": warnings.warn( "The pandas.np module is deprecated " "and will be removed from pandas in a future version. " "Import numpy directly instead.", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {"SparseSeries", "SparseDataFrame"}: warnings.warn( f"The {name} class is removed from pandas. Accessing it from " "the top-level namespace will also be removed in the next version.", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == "SparseArray": warnings.warn( "The pandas.SparseArray class is deprecated " "and will be removed from pandas in a future version. " "Use pandas.arrays.SparseArray instead.", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f"module 'pandas' has no attribute '{name}'") # module level doc-string __doc__ = # Use __all__ to let type checkers know what is part of the public API. # Pandas is not (yet) a py.typed library: the public API is determined # based on the documentation. __all__ = [ "BooleanDtype", "Categorical", "CategoricalDtype", "CategoricalIndex", "DataFrame", "DateOffset", "DatetimeIndex", "DatetimeTZDtype", "ExcelFile", "ExcelWriter", "Flags", "Float32Dtype", "Float64Dtype", "Grouper", "HDFStore", "Index", "IndexSlice", "Int16Dtype", "Int32Dtype", "Int64Dtype", "Int8Dtype", "Interval", "IntervalDtype", "IntervalIndex", "MultiIndex", "NA", "NaT", "NamedAgg", "Period", "PeriodDtype", "PeriodIndex", "RangeIndex", "Series", "SparseDtype", "StringDtype", "Timedelta", "TimedeltaIndex", "Timestamp", "UInt16Dtype", "UInt32Dtype", "UInt64Dtype", "UInt8Dtype", "api", "array", "arrays", "bdate_range", "concat", "crosstab", "cut", "date_range", "describe_option", "errors", "eval", "factorize", "get_dummies", "get_option", "infer_freq", "interval_range", "io", "isna", "isnull", "json_normalize", "lreshape", "melt", "merge", "merge_asof", "merge_ordered", "notna", "notnull", "offsets", "option_context", "options", "period_range", "pivot", "pivot_table", "plotting", "qcut", "read_clipboard", "read_csv", "read_excel", "read_feather", "read_fwf", "read_gbq", "read_hdf", "read_html", "read_json", "read_orc", "read_parquet", "read_pickle", "read_sas", "read_spss", "read_sql", "read_sql_query", "read_sql_table", "read_stata", "read_table", "read_xml", "reset_option", "set_eng_float_format", "set_option", "show_versions", "test", "testing", "timedelta_range", "to_datetime", "to_numeric", "to_pickle", "to_timedelta", "tseries", "unique", "value_counts", "wide_to_long", ]
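The pandas record above relies on a module-level __getattr__ (PEP 562) to warn on deprecated attribute access; below is a minimal, self-contained sketch of that pattern with invented names (mymodule, old_name, new_name are placeholders, not pandas API):

# mymodule.py -- hypothetical module showing the PEP 562 deprecation pattern.
import warnings

def new_name():
    return "use me instead"

_DEPRECATED = {"old_name": new_name}

def __getattr__(name):
    if name in _DEPRECATED:
        warnings.warn(
            f"mymodule.{name} is deprecated and will be removed in a future version.",
            FutureWarning,
            stacklevel=2,
        )
        return _DEPRECATED[name]
    raise AttributeError(f"module 'mymodule' has no attribute {name!r}")

Accessing mymodule.old_name would then emit a FutureWarning and return new_name, while unknown names still raise AttributeError, mirroring the structure of the pandas __getattr__ above.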
54,169
215,777
79
salt/modules/file.py
33
11
def readlink(path, canonicalize=False): path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError("Path to link must be absolute.") if not os.path.islink(path): raise SaltInvocationError("A valid link was not specified
Use salt.utils.path.readlink to handle junctions on Windows
readlink
6680407756aac9ee0eaf150f3a69622b658f7321
salt
file.py
12
10
https://github.com/saltstack/salt.git
4
77
0
26
130
Python
{ "docstring": "\n .. versionadded:: 2014.1.0\n\n Return the path that a symlink points to\n If canonicalize is set to True, then it return the final target\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' file.readlink /path/to/link\n ", "language": "en", "n_whitespaces": 58, "n_words": 32, "vocab_size": 29 }
def readlink(path, canonicalize=False): path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError("Path to link must be absolute.") if not os.path.islink(path): raise SaltInvocationError("A valid link was not specified.") if canonicalize: return os.path.realpath(path) else: return salt.utils.path.readlink(path)
@contextlib.contextmanager
48,128
196,714
18
sympy/testing/pytest.py
6
5
def warns_deprecated_sympy(): with warns(SymPyDeprecationWarning): yield @contextlib.contextmanager
Note that warns_deprecated_sympy should not be used outside of the test suite
warns_deprecated_sympy
fdaacf84861404f7857081bc85850a781a78979f
sympy
pytest.py
9
3
https://github.com/sympy/sympy.git
1
12
1
6
35
Python
{ "docstring": "\n Shorthand for ``warns(SymPyDeprecationWarning)``\n\n This is the recommended way to test that ``SymPyDeprecationWarning`` is\n emitted for deprecated features in SymPy. To test for other warnings use\n ``warns``. To suppress warnings without asserting that they are emitted\n use ``ignore_warnings``.\n\n .. note::\n\n ``warns_deprecated_sympy()`` is only intended for internal use in the\n SymPy test suite to test that a deprecation warning triggers properly.\n All other code in the SymPy codebase, including documentation examples,\n should not use deprecated behavior.\n\n If you are a user of SymPy and you want to disable\n SymPyDeprecationWarnings, use ``warnings`` filters (see\n :ref:`silencing-sympy-deprecation-warnings`).\n\n >>> from sympy.testing.pytest import warns_deprecated_sympy\n >>> from sympy.utilities.exceptions import SymPyDeprecationWarning\n >>> with warns_deprecated_sympy():\n ... SymPyDeprecationWarning(\"Don't use\", feature=\"old thing\",\n ... deprecated_since_version=\"1.0\", issue=123).warn()\n\n >>> with warns_deprecated_sympy():\n ... pass\n Traceback (most recent call last):\n ...\n Failed: DID NOT WARN. No warnings of type \\\n SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].\n\n ", "language": "en", "n_whitespaces": 256, "n_words": 143, "vocab_size": 97 }
def warns_deprecated_sympy(): with warns(SymPyDeprecationWarning): yield @contextlib.contextmanager
38,751
160,832
83
numpy/testing/_private/utils.py
25
11
def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False): __tracebackhide__ =
ENH: Add strict parameter to assert_array_equal. (#21595) Fixes #9542 Co-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com>
assert_array_equal
cafec60a5e28af98fb8798049edd7942720d2d74
numpy
utils.py
9
5
https://github.com/numpy/numpy.git
1
51
0
24
77
Python
{ "docstring": "\n Raises an AssertionError if two array_like objects are not equal.\n\n Given two array_like objects, check that the shape is equal and all\n elements of these objects are equal (but see the Notes for the special\n handling of a scalar). An exception is raised at shape mismatch or\n conflicting values. In contrast to the standard usage in numpy, NaNs\n are compared like numbers, no assertion is raised if both objects have\n NaNs in the same positions.\n\n The usual caution for verifying equality with floating point numbers is\n advised.\n\n Parameters\n ----------\n x : array_like\n The actual object to check.\n y : array_like\n The desired, expected object.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n strict : bool, optional\n If True, raise an AssertionError when either the shape or the data\n type of the array_like objects does not match. The special\n handling for scalars mentioned in the Notes section is disabled.\n\n Raises\n ------\n AssertionError\n If actual and desired objects are not equal.\n\n See Also\n --------\n assert_allclose: Compare two array_like objects for equality with desired\n relative and/or absolute precision.\n assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal\n\n Notes\n -----\n When one of `x` and `y` is a scalar and the other is array_like, the\n function checks that each element of the array_like object is equal to\n the scalar. This behaviour can be disabled with the `strict` parameter.\n\n Examples\n --------\n The first assert does not raise an exception:\n\n >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],\n ... [np.exp(0),2.33333, np.nan])\n\n Assert fails with numerical imprecision with floats:\n\n >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],\n ... [1, np.sqrt(np.pi)**2, np.nan])\n Traceback (most recent call last):\n ...\n AssertionError:\n Arrays are not equal\n <BLANKLINE>\n Mismatched elements: 1 / 3 (33.3%)\n Max absolute difference: 4.4408921e-16\n Max relative difference: 1.41357986e-16\n x: array([1. , 3.141593, nan])\n y: array([1. , 3.141593, nan])\n\n Use `assert_allclose` or one of the nulp (number of floating point values)\n functions for these cases instead:\n\n >>> np.testing.assert_allclose([1.0,np.pi,np.nan],\n ... [1, np.sqrt(np.pi)**2, np.nan],\n ... rtol=1e-10, atol=0)\n\n As mentioned in the Notes section, `assert_array_equal` has special\n handling for scalars. Here the test checks that each value in `x` is 3:\n\n >>> x = np.full((2, 5), fill_value=3)\n >>> np.testing.assert_array_equal(x, 3)\n\n Use `strict` to raise an AssertionError when comparing a scalar with an\n array:\n\n >>> np.testing.assert_array_equal(x, 3, strict=True)\n Traceback (most recent call last):\n ...\n AssertionError:\n Arrays are not equal\n <BLANKLINE>\n (shapes (2, 5), () mismatch)\n x: array([[3, 3, 3, 3, 3],\n [3, 3, 3, 3, 3]])\n y: array(3)\n\n The `strict` parameter also ensures that the array data types match:\n\n >>> x = np.array([2, 2, 2])\n >>> y = np.array([2., 2., 2.], dtype=np.float32)\n >>> np.testing.assert_array_equal(x, y, strict=True)\n Traceback (most recent call last):\n ...\n AssertionError:\n Arrays are not equal\n <BLANKLINE>\n (dtypes int64, float32 mismatch)\n x: array([2, 2, 2])\n y: array([2., 2., 2.], dtype=float32)\n ", "language": "en", "n_whitespaces": 937, "n_words": 461, "vocab_size": 239 }
def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False): __tracebackhide__ = True # Hide traceback for py.test assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not equal', strict=strict)
24,328
110,849
64
lib/matplotlib/transforms.py
11
11
def update_from_data_x(self, x, ignore=None): x = np.ravel(x) self.update
[Doc] Fix ndarray-links for arguments
update_from_data_x
f16da868d016363c4cd734b2abd6535230b094df
matplotlib
transforms.py
13
4
https://github.com/matplotlib/matplotlib.git
1
50
0
11
77
Python
{ "docstring": "\n Update the x-bounds of the `Bbox` based on the passed in data. After\n updating, the bounds will have positive *width*, and *x0* will be the\n minimal value.\n\n Parameters\n ----------\n x : `~numpy.ndarray`\n Array of x-values.\n\n ignore : bool, optional\n - When ``True``, ignore the existing bounds of the `Bbox`.\n - When ``False``, include the existing bounds of the `Bbox`.\n - When ``None``, use the last value passed to :meth:`ignore`.\n ", "language": "en", "n_whitespaces": 167, "n_words": 69, "vocab_size": 45 }
def update_from_data_x(self, x, ignore=None): x = np.ravel(x) self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]), ignore=ignore, updatey=False)
50,119
202,417
477
tests/csrf_tests/tests.py
157
15
def test_https_malformed_referer(self): malformed_referer_msg = "Referer checking failed - Referer is malformed." req = self._get_POST_request_with_token() req._is_secure_override = True req.META["HTTP_REFERER"] = "http://http://www.example.com/" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains( response, "Referer checking failed - Referer is insecure while host is secure.", status_code=403, ) # Empty req.META["HTTP_REFERER"] = "" self._check_referer_r
Refs #33476 -- Reformatted code with Black.
test_https_malformed_referer
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
9
33
https://github.com/django/django.git
1
292
0
65
472
Python
{ "docstring": "\n A POST HTTPS request with a bad referer is rejected.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def test_https_malformed_referer(self): malformed_referer_msg = "Referer checking failed - Referer is malformed." req = self._get_POST_request_with_token() req._is_secure_override = True req.META["HTTP_REFERER"] = "http://http://www.example.com/" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains( response, "Referer checking failed - Referer is insecure while host is secure.", status_code=403, ) # Empty req.META["HTTP_REFERER"] = "" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Non-ASCII req.META["HTTP_REFERER"] = "ØBöIß" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing scheme # >>> urlparse('//example.com/') # ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='') req.META["HTTP_REFERER"] = "//example.com/" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing netloc # >>> urlparse('https://') # ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='') req.META["HTTP_REFERER"] = "https://" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Invalid URL # >>> urlparse('https://[') # ValueError: Invalid IPv6 URL req.META["HTTP_REFERER"] = "https://[" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403)
21,477
102,162
32
tools/test/test_gen_backend_stubs.py
11
6
def test_missing_cpp_namespace(self) -> None: y
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
test_missing_cpp_namespace
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
pytorch
test_gen_backend_stubs.py
8
7
https://github.com/pytorch/pytorch.git
1
26
0
10
47
Python
{ "docstring": "\\\nbackend: XLA\nsupported:\n- absYou must provide a value for \"cpp_namespace\"", "language": "en", "n_whitespaces": 8, "n_words": 12, "vocab_size": 12 }
def test_missing_cpp_namespace(self) -> None: yaml_str = output_error = self.get_errors_from_gen_backend_stubs(yaml_str) self.assertExpectedInline(output_error, )
42,410
177,506
16
networkx/algorithms/shortest_paths/weighted.py
10
6
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"): return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
Hide edges with a weight of None in A*. (#5945) * Hide edges with a weight of None in A*. This matches the Dijkstra's weight interface. * Update Dijkstra's and A* docs for weights of None. * Add tests for A* with weight of None. * Add another test for A* with a weight function. * Document that None indicates a hidden edge.
single_source_dijkstra_path_length
d82815dba6c8ddce19cd49f700298dc82a58f066
networkx
weighted.py
8
2
https://github.com/networkx/networkx.git
1
33
0
10
50
Python
{ "docstring": "Find shortest weighted path lengths in G from a source node.\n\n Compute the shortest path length between source and all other\n reachable nodes for a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number or None to indicate a hidden edge.\n\n Returns\n -------\n length : dict\n Dict keyed by node to shortest path length from source.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> length = nx.single_source_dijkstra_path_length(G, 0)\n >>> length[4]\n 4\n >>> for node in [0, 1, 2, 3, 4]:\n ... print(f\"{node}: {length[node]}\")\n 0: 0\n 1: 1\n 2: 2\n 3: 3\n 4: 4\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The weight function can be used to hide edges by returning None.\n So ``weight = lambda u, v, d: 1 if d['color']==\"red\" else None``\n will find the shortest red path.\n\n See Also\n --------\n single_source_dijkstra, single_source_bellman_ford_path_length\n\n ", "language": "en", "n_whitespaces": 512, "n_words": 289, "vocab_size": 174 }
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"): return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
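As the docstring notes, a callable weight may return None to hide an edge from the search; a short usage sketch of that behaviour follows (the graph and attribute values are made up for illustration):

import networkx as nx

G = nx.path_graph(5)
nx.set_edge_attributes(G, "red", "color")
G.edges[2, 3]["color"] = "blue"

def red_only(u, v, d):
    # Returning None hides the edge from the shortest-path search.
    return 1 if d.get("color") == "red" else None

print(nx.single_source_dijkstra_path_length(G, 0, weight=red_only))
# {0: 0, 1: 1, 2: 2} -- nodes 3 and 4 become unreachable once the blue edge is hidden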
83,834
281,536
51
gamestonk_terminal/stocks/behavioural_analysis/ba_controller.py
24
11
def print_help(self): has_ticker_start = "" if self.ticker else "[unvl]" has_ticker_end = "" if self.ticker else "[/unvl]" help_text = f c
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: james <jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com>
print_help
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
ba_controller.py
12
40
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
39
0
18
119
Python
{ "docstring": "[cmds]\n load load a specific stock ticker for analysis\n\n[param]Ticker: [/param]{self.ticker.upper() or None}\n{has_ticker_start}\n[src][Finbrain][/src]\n headlines sentiment from 15+ major news headlines\n[src][Finnhub][/src]\n stats sentiment stats including comparison with sector{has_ticker_end}\n[src][Reddit][/src]\n wsb show what WSB gang is up to in subreddit wallstreetbets\n watchlist show other users watchlist\n popular show popular tickers\n spac_c show other users spacs announcements from subreddit SPACs community\n spac show other users spacs announcements from other subs{has_ticker_start}\n getdd gets due diligence from another user's post{has_ticker_end}\n[src][Stocktwits][/src]\n trending trending stocks\n stalker stalk stocktwits user's last messages{has_ticker_start}\n bullbear estimate quick sentiment from last 30 messages on board\n messages output up to the 30 last messages on the board\n[src][Twitter][/src]\n infer infer about stock's sentiment from latest tweets\n sentiment in-depth sentiment prediction from tweets over time\n[src][Google][/src]\n mentions interest over time based on stock's mentions\n regions regions that show highest interest in stock\n queries top related queries with this stock\n rise top rising related queries with stock{has_ticker_end}\n[src][SentimentInvestor][/src]\n popularsi show most popular stocks on social media right now\n emerging show stocks that are being talked about more than usual{has_ticker_start}\n metrics core social sentiment metrics for this stock\n social social media figures for stock popularity\n historical plot the past week of data for a selected metric{has_ticker_end}[/cmds]\n ", "language": "en", "n_whitespaces": 425, "n_words": 205, "vocab_size": 124 }
def print_help(self): has_ticker_start = "" if self.ticker else "[unvl]" has_ticker_end = "" if self.ticker else "[/unvl]" help_text = f console.print(text=help_text, menu="Stocks - Behavioural Analysis")
77,511
263,916
117
PyInstaller/lib/modulegraph/util.py
75
9
def iterate_instructions(code_object): # The arg extension the EXTENDED_ARG opcode represents is automatically handled by get_instructions() but the # instruction is left in. Get rid of it to make subsequent parsing easie
Make modulegraph EXTENDED_ARG opcode aware (#7055). Fix :class:`AssertionError` during build when analysing a ``.pyc`` file containing more that 255 variable names followed by an import statement all in the same namespace. Fixes #7055.
iterate_instructions
299ff39d1260cf80241b81ed39bbb89b78709583
pyinstaller
util.py
12
6
https://github.com/pyinstaller/pyinstaller.git
5
47
0
56
83
Python
{ "docstring": "Delivers the byte-code instructions as a continuous stream.\n\n Yields `dis.Instruction`. After each code-block (`co_code`), `None` is\n yielded to mark the end of the block and to interrupt the steam.\n ", "language": "en", "n_whitespaces": 38, "n_words": 29, "vocab_size": 25 }
def iterate_instructions(code_object): # The arg extension the EXTENDED_ARG opcode represents is automatically handled by get_instructions() but the # instruction is left in. Get rid of it to make subsequent parsing easier/safer. yield from (i for i in get_instructions(code_object) if i.opname != "EXTENDED_ARG") yield None # For each constant in this code object that is itself a code object, # parse this constant in the same manner. for constant in code_object.co_consts: if inspect.iscode(constant): yield from iterate_instructions(constant)
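The iterate_instructions record walks a code object's instructions and recurses into code objects found in co_consts, yielding None between blocks; the standalone sketch below (independent of PyInstaller's module, with illustrative names) shows the same traversal on a small function:

import dis
import inspect

def walk_instructions(code_object):
    # Yield this block's instructions (minus EXTENDED_ARG), then a None
    # separator, then recurse into nested code objects such as inner functions.
    yield from (i for i in dis.get_instructions(code_object)
                if i.opname != "EXTENDED_ARG")
    yield None
    for const in code_object.co_consts:
        if inspect.iscode(const):
            yield from walk_instructions(const)

def sample():
    def inner():
        return 42
    return inner

opnames = [i.opname for i in walk_instructions(sample.__code__) if i is not None]
print(opnames)  # opcodes of sample() followed by those of inner()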
28,451
127,486
367
python/ray/tune/tuner.py
61
14
def fit(self) -> ResultGrid: if not self._is_ray_client: try: return self._local_tuner.fit() except Exception as e:
[docs/air] Fix up some minor docstrings (#28361)
fit
5c500f6308dce526c50a5230029fb4909b492a35
ray
tuner.py
18
41
https://github.com/ray-project/ray.git
4
93
0
37
175
Python
{ "docstring": "Executes hyperparameter tuning job as configured and returns result.\n\n Failure handling:\n For the kind of exception that happens during the execution of a trial,\n one may inspect it together with stacktrace through the returned result grid.\n See ``ResultGrid`` for reference. Each trial may fail up to a certain number.\n This is configured by ``RunConfig.FailureConfig.max_failures``.\n\n Exception that happens beyond trials will be thrown by this method as well.\n In such cases, there will be instruction like the following printed out\n at the end of console output to inform users on how to resume.\n\n Please use tuner = Tuner.restore(\"~/ray_results/tuner_resume\")\n to resume.\n\n Raises:\n RayTaskError: If user-provided trainable raises an exception\n TuneError: General Ray Tune error.\n ", "language": "en", "n_whitespaces": 218, "n_words": 112, "vocab_size": 92 }
def fit(self) -> ResultGrid: if not self._is_ray_client: try: return self._local_tuner.fit() except Exception as e: raise TuneError( f"Tune run failed. " f'Please use tuner = Tuner.restore("' f'{self._local_tuner.get_experiment_checkpoint_dir()}") to resume.' ) from e else: experiment_checkpoint_dir = ray.get( self._remote_tuner.get_experiment_checkpoint_dir.remote() ) try: return ray.get(self._remote_tuner.fit.remote()) except Exception as e: raise TuneError( f"Tune run failed. " f'Please use tuner = Tuner.restore("' f'{experiment_checkpoint_dir}") to resume.' ) from e
109,870
311,203
27
homeassistant/components/tradfri/base_class.py
6
4
def _handle_coordinator_update(self) -> None: sel
Implement coordinator class for Tradfri integration (#64166) * Initial commit coordinator * More coordinator implementation * More coordinator implementation * Allow integration reload * Move API calls to try/catch block * Move back fixture * Remove coordinator test file * Ensure unchanged file * Ensure unchanged conftest.py file * Remove coordinator key check * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Import RequestError * Move async_setup_platforms to end of setup_entry * Remove centralised handling of device data and device controllers * Remove platform_type argument * Remove exception * Remove the correct exception * Refactor coordinator error handling * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Remove platform type from base class * Remove timeout context manager * Refactor exception callback * Simplify starting device observation * Update test * Move observe start into update method * Remove await self.coordinator.async_request_refresh() * Refactor cover.py * Uncomment const.py * Add back extra_state_attributes * Update homeassistant/components/tradfri/coordinator.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Refactor switch platform * Expose switch state * Refactor sensor platform * Put back accidentally deleted code * Add set_hub_available * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Fix tests for fan platform * Update homeassistant/components/tradfri/base_class.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Update homeassistant/components/tradfri/base_class.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Fix non-working tests * Refresh sensor state * Remove commented line * Add group coordinator * Add groups during setup * Refactor light platform * Fix tests * Move outside of try...except * Remove error handler * Remove unneeded methods * Update sensor * Update .coveragerc * Move signal * Add signals for groups * Fix signal Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
_handle_coordinator_update
9d404b749a0aa0d0527e145c911559df5ecf2afd
core
base_class.py
9
8
https://github.com/home-assistant/core.git
1
20
0
6
38
Python
{ "docstring": "\n Handle updated data from the coordinator.\n\n Tests fails without this method.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
def _handle_coordinator_update(self) -> None: self._refresh() super()._handle_coordinator_update()
36,562
156,109
195
dask/dataframe/core.py
67
35
def _skew_1d(self, column, bias=True, nan_policy="propagate"): # import depends on scipy, not installed by default from dask.array import stats as da_stats if pd.Int64Dtype.is_dtype(column._meta_nonempty): column = column.astype("f8") if not np.issubdtype(column.dtype, np.number): column = column.astype("f8") name = self._token_prefix + "skew-1d-" + tokenize(column) array_skew
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
_skew_1d
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
core.py
11
15
https://github.com/dask/dask.git
3
164
0
55
252
Python
{ "docstring": "1D version of the skew calculation.\n\n Uses the array version from da.stats in case we are passing in a single series\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
def _skew_1d(self, column, bias=True, nan_policy="propagate"): # import depends on scipy, not installed by default from dask.array import stats as da_stats if pd.Int64Dtype.is_dtype(column._meta_nonempty): column = column.astype("f8") if not np.issubdtype(column.dtype, np.number): column = column.astype("f8") name = self._token_prefix + "skew-1d-" + tokenize(column) array_skew = da_stats.skew( column.values, axis=0, bias=bias, nan_policy=nan_policy ) layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)} graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew]) return new_dd_object( graph, name, column._meta_nonempty.skew(), divisions=[None, None] )
89,989
290,876
34
tests/components/sensor/test_init.py
14
8
def test_device_classes_aligned(): for device_class in NumberDeviceClass: assert hasattr(SensorDeviceClass, device_class.name) assert getattr(SensorDeviceClass, device_class.name).value ==
Align number and sensor device classes (#81909) * Align number and sensor device classes * Add tests * Tweak tests
test_device_classes_aligned
b6586d5c34bf7ea5c30fbb1b62c438078ea14f39
core
test_init.py
12
4
https://github.com/home-assistant/core.git
2
34
0
13
54
Python
{ "docstring": "Make sure all number device classes are also available in SensorDeviceClass.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_device_classes_aligned(): for device_class in NumberDeviceClass: assert hasattr(SensorDeviceClass, device_class.name) assert getattr(SensorDeviceClass, device_class.name).value == device_class.value
42,316
177,252
220
networkx/algorithms/operators/all.py
73
23
def intersection_all(graphs): R = None for i, G in enumerate(graphs): G_nodes_set = set(G.nodes) G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges()) if i == 0: # create new graph R = G.__class__() node_intersection = G_nodes_set edge_intersection = G_edges_set
Make all.py generator friendly (#5984) * Make compose_all generator friendly * Make disjoint_union_all and intersection_all generator friendly * Refactor disjoint_union_all to yield relabeled graphs * Make union_all generator friendly * Fix intersection_all * Fix union_all signature * Allow passing an infinite rename generator to union_all * Copy over generalizations to binary.py * Clean up rename * Simplify first_label in disjoint_union_all * Simplify disjoint_union_all * Add missing R.graph.update in intersection_all
intersection_all
50ff08de69c6e9541cd6c029bede5dabf56cfe73
networkx
all.py
14
20
https://github.com/networkx/networkx.git
6
132
0
53
223
Python
{ "docstring": "Returns a new graph that contains only the nodes and the edges that exist in\n all graphs.\n\n Parameters\n ----------\n graphs : iterable\n Iterable of NetworkX graphs\n\n Returns\n -------\n R : A new graph with the same type as the first graph in list\n\n Raises\n ------\n ValueError\n If `graphs` is an empty list.\n\n Notes\n -----\n Attributes from the graph, nodes, and edges are not copied to the new\n graph.\n ", "language": "en", "n_whitespaces": 125, "n_words": 68, "vocab_size": 52 }
def intersection_all(graphs): R = None for i, G in enumerate(graphs): G_nodes_set = set(G.nodes) G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges()) if i == 0: # create new graph R = G.__class__() node_intersection = G_nodes_set edge_intersection = G_edges_set elif G.is_multigraph() != R.is_multigraph(): raise nx.NetworkXError("All graphs must be graphs or multigraphs.") else: node_intersection &= G_nodes_set edge_intersection &= G_edges_set R.graph.update(G.graph) if R is None: raise ValueError("cannot apply intersection_all to an empty list") R.add_nodes_from(node_intersection) R.add_edges_from(edge_intersection) return R
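A small usage example of the behaviour the intersection_all docstring describes, with made-up graphs that share the same node set so it also behaves the same on older NetworkX releases:

import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (3, 4)])
H = nx.Graph([(1, 2), (2, 3), (1, 4)])

R = nx.intersection_all([G, H])
print(sorted(R.nodes()))  # [1, 2, 3, 4]
print(sorted(R.edges()))  # [(1, 2), (2, 3)] -- only edges present in every graph survive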
@functools.lru_cache(maxsize=None)
2,983
19,466
62
pipenv/patched/notpip/_internal/locations/__init__.py
28
19
def _looks_like_red_hat_scheme() -> bool:
Vendor in pip 21.2.4 release (from pip 21.2.2 prior). (#5009) * Vendor in pip 21.2.4 release (from pip 21.2.2 prior). * Add news fragment for pip 21.2.4 vendor update. * Add potentially missing LICENSE files
_looks_like_red_hat_scheme
7e33fcae4384563b4c927fd44318c29dd524a097
pipenv
__init__.py
13
16
https://github.com/pypa/pipenv.git
2
52
1
25
137
Python
{ "docstring": "Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.\n\n Red Hat's ``00251-change-user-install-location.patch`` changes the install\n command's ``prefix`` and ``exec_prefix`` to append ``\"/local\"``. This is\n (fortunately?) done quite unconditionally, so we create a default command\n object without any configuration to detect this.\n ", "language": "en", "n_whitespaces": 53, "n_words": 38, "vocab_size": 35 }
def _looks_like_red_hat_scheme() -> bool: from distutils.command.install import install from distutils.dist import Distribution cmd: Any = install(Distribution()) cmd.finalize_options() return ( cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local" and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local" ) @functools.lru_cache(maxsize=None)
8,737
45,910
579
airflow/providers/databricks/operators/databricks_sql.py
184
33
def _create_sql_query(self) -> str: escaper = ParamEscaper() maybe_with = "" if self._encryption is not None or self._credential is not None: maybe_encryption = "" if self._encryption is not None: maybe_encryption = self._generate_options("ENCRYPTION", escaper, self._encryption, False) maybe_credential = "" if self._credential is not None: maybe_credential = self._generate_options("CREDENTIAL", escaper, self._credential, False) maybe_with = f" WITH ({maybe_credential} {maybe_encryption})" location = escaper.escape_item(self._file_location) + maybe_with if self._expression_list is not None: location = f"(SELECT {self._expression_list} FROM {location})" files_or_pattern = "" if self._pattern is not None: files_or_pattern = f"PATTERN = {escaper.escape_item(self._pattern)}\n" elif self._files is not None: files_or_pattern = f"FILES = {escaper.e
Add new options to DatabricksCopyIntoOperator (#22076) This includes: * `encryption` - to specify encryption options for a given location * `credential` - to specify authentication options for a given location * `validate` - to control validation of schema & data
_create_sql_query
401419432082d222b823e4f2a66f21e5cc3ab28d
airflow
databricks_sql.py
18
40
https://github.com/apache/airflow.git
13
257
0
105
522
Python
{ "docstring": "COPY INTO {self._table_name}\nFROM {location}\nFILEFORMAT = {self._file_format}\n{validation}{files_or_pattern}{format_options}{copy_options}\n", "language": "en", "n_whitespaces": 5, "n_words": 9, "vocab_size": 9 }
def _create_sql_query(self) -> str: escaper = ParamEscaper() maybe_with = "" if self._encryption is not None or self._credential is not None: maybe_encryption = "" if self._encryption is not None: maybe_encryption = self._generate_options("ENCRYPTION", escaper, self._encryption, False) maybe_credential = "" if self._credential is not None: maybe_credential = self._generate_options("CREDENTIAL", escaper, self._credential, False) maybe_with = f" WITH ({maybe_credential} {maybe_encryption})" location = escaper.escape_item(self._file_location) + maybe_with if self._expression_list is not None: location = f"(SELECT {self._expression_list} FROM {location})" files_or_pattern = "" if self._pattern is not None: files_or_pattern = f"PATTERN = {escaper.escape_item(self._pattern)}\n" elif self._files is not None: files_or_pattern = f"FILES = {escaper.escape_item(self._files)}\n" format_options = self._generate_options("FORMAT_OPTIONS", escaper, self._format_options) + "\n" copy_options = self._generate_options("COPY_OPTIONS", escaper, self._copy_options) + "\n" validation = "" if self._validate is not None: if isinstance(self._validate, bool): if self._validate: validation = "VALIDATE ALL\n" elif isinstance(self._validate, int): if self._validate < 0: raise AirflowException( "Number of rows for validation should be positive, got: " + str(self._validate) ) validation = f"VALIDATE {self._validate} ROWS\n" else: raise AirflowException("Incorrect data type for validate parameter: " + type(self._validate)) # TODO: think on how to make sure that table_name and expression_list aren't used for SQL injection sql = f return sql.strip()
81,887
277,193
80
keras/wrappers/scikit_learn.py
23
12
def predict(self, x, **kwargs):
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
predict
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
scikit_learn.py
13
7
https://github.com/keras-team/keras.git
2
69
0
19
109
Python
{ "docstring": "Returns the class predictions for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments\n of `Sequential.predict`.\n\n Returns:\n preds: array-like, shape `(n_samples,)`\n Class predictions.\n ", "language": "en", "n_whitespaces": 177, "n_words": 48, "vocab_size": 35 }
def predict(self, x, **kwargs): proba = self.model.predict(x, **kwargs) if proba.shape[-1] > 1: classes = proba.argmax(axis=-1) else: classes = (proba > 0.5).astype("int32") return self.classes_[classes]
9,744
49,439
340
modules/image/text_recognition/ppocrv3_det_ch/processor.py
106
23
def resize_image_type0(self, img): limit_side_len = self.max_side_len h, w, _ = img.shape # limit the max side if max(h, w) > limit_side_len: if h > w: ratio = float(limit_side_len) / h else: ratio = float(limit_side_len) / w else: ratio = 1. resize_h = int(h * ratio) resize_w = int(w * ratio) resize_h = int(round(resize_h / 32) * 32) resize_w = int(round(resize_w / 32) * 32) try: if int(resize_w) <= 0 or int(resize_h) <= 0: return None, (None, None) img = cv2.resize(img, (int(resize_w), int(resize_h))) except:
add module
resize_image_type0
9b3119dfb63c4cbb7acfb9f1f1c09ac24e6d68d2
PaddleHub
processor.py
14
24
https://github.com/PaddlePaddle/PaddleHub.git
6
190
0
66
302
Python
{ "docstring": "\n resize image to a size multiple of 32 which is required by the network\n args:\n img(array): array with shape [h, w, c]\n return(tuple):\n img, (ratio_h, ratio_w)\n ", "language": "en", "n_whitespaces": 77, "n_words": 26, "vocab_size": 26 }
def resize_image_type0(self, img): limit_side_len = self.max_side_len h, w, _ = img.shape # limit the max side if max(h, w) > limit_side_len: if h > w: ratio = float(limit_side_len) / h else: ratio = float(limit_side_len) / w else: ratio = 1. resize_h = int(h * ratio) resize_w = int(w * ratio) resize_h = int(round(resize_h / 32) * 32) resize_w = int(round(resize_w / 32) * 32) try: if int(resize_w) <= 0 or int(resize_h) <= 0: return None, (None, None) img = cv2.resize(img, (int(resize_w), int(resize_h))) except: print(img.shape, resize_w, resize_h) sys.exit(0) ratio_h = resize_h / float(h) ratio_w = resize_w / float(w) # return img, np.array([h, w]) return img, [ratio_h, ratio_w]
80,796
271,560
169
keras/engine/training.py
48
9
def metrics(self): metrics = [] if self._is_compiled: # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects # so that attr names are not l
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
metrics
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training.py
12
10
https://github.com/keras-team/keras.git
5
64
0
36
107
Python
{ "docstring": "Returns the model's metrics added using `compile()`, `add_metric()` APIs.\n\n Note: Metrics passed to `compile()` are available only after a `keras.Model`\n has been trained/evaluated on actual data.\n\n Examples:\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> outputs = tf.keras.layers.Dense(2)(inputs)\n >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"])\n >>> [m.name for m in model.metrics]\n []\n\n >>> x = np.random.random((2, 3))\n >>> y = np.random.randint(0, 2, (2, 2))\n >>> model.fit(x, y)\n >>> [m.name for m in model.metrics]\n ['loss', 'mae']\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> d = tf.keras.layers.Dense(2, name='out')\n >>> output_1 = d(inputs)\n >>> output_2 = d(inputs)\n >>> model = tf.keras.models.Model(\n ... inputs=inputs, outputs=[output_1, output_2])\n >>> model.add_metric(\n ... tf.reduce_sum(output_2), name='mean', aggregation='mean')\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"])\n >>> model.fit(x, (y, y))\n >>> [m.name for m in model.metrics]\n ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',\n 'out_1_acc', 'mean']\n\n ", "language": "en", "n_whitespaces": 330, "n_words": 128, "vocab_size": 83 }
def metrics(self): metrics = [] if self._is_compiled: # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects # so that attr names are not load-bearing. if self.compiled_loss is not None: metrics += self.compiled_loss.metrics if self.compiled_metrics is not None: metrics += self.compiled_metrics.metrics for l in self._flatten_layers(): metrics.extend(l._metrics) # pylint: disable=protected-access return metrics
50,661
204,162
33
django/contrib/messages/api.py
14
5
def set_level(request, level): if not hasattr(request, "_messages"): retur
Refs #33476 -- Reformatted code with Black.
set_level
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
api.py
9
5
https://github.com/django/django.git
2
28
0
13
48
Python
{ "docstring": "\n Set the minimum level of messages to be recorded, and return ``True`` if\n the level was recorded successfully.\n\n If set to ``None``, use the default level (see the get_level() function).\n ", "language": "en", "n_whitespaces": 43, "n_words": 30, "vocab_size": 24 }
def set_level(request, level): if not hasattr(request, "_messages"): return False request._messages.level = level return True
42,282
177,139
226
networkx/algorithms/lowest_common_ancestors.py
92
15
def all_pairs_lowest_common_ancestor(G, pairs=None): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError
Replace LCA with naive implementations (#5883) * WIP: Replace functions to evaluate tests. * Raise prompt exceptions by wrapping generator. * Fix erroneous ground-truth self-ancestor in tests. * Move pair creation outside of generator and validate. * Convert input with fromkeys to preserve order and rm duplicates. * Replace LCA implementations & update tests. * Test cleanup: move new tests into old class. Allows us to get rid of duplication/another test setup. * Rm naive fns from refguide. * Add release note. * Remove unused imports. * Remove missed duplicate function (bad rebase). Co-authored-by: Dilara Tekinoglu <dilaranurtuncturk@gmail.com>
all_pairs_lowest_common_ancestor
19c1454d3dfa70a893ea67f2d78515658e8c08e5
networkx
lowest_common_ancestors.py
19
17
https://github.com/networkx/networkx.git
6
100
0
67
171
Python
{ "docstring": "Return the lowest common ancestor of all pairs or the provided pairs\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n pairs : iterable of pairs of nodes, optional (default: all pairs)\n The pairs of nodes of interest.\n If None, will find the LCA of all pairs of nodes.\n\n Yields\n ------\n ((node1, node2), lca) : 2-tuple\n Where lca is least common ancestor of node1 and node2.\n Note that for the default case, the order of the node pair is not considered,\n e.g. you will not get both ``(a, b)`` and ``(b, a)``\n\n Raises\n ------\n NetworkXPointlessConcept\n If `G` is null.\n NetworkXError\n If `G` is not a DAG.\n\n Examples\n --------\n The default behavior is to yield the lowest common ancestor for all\n possible combinations of nodes in `G`, including self-pairings:\n\n >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)])\n >>> dict(nx.all_pairs_lowest_common_ancestor(G))\n {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2}\n\n The pairs argument can be used to limit the output to only the\n specified node pairings:\n\n >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)]))\n {(1, 2): 1, (2, 3): 0}\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n\n See Also\n --------\n lowest_common_ancestor\n ", "language": "en", "n_whitespaces": 344, "n_words": 208, "vocab_size": 126 }
def all_pairs_lowest_common_ancestor(G, pairs=None): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError("LCA only defined on directed acyclic graphs.") if len(G) == 0: raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") if pairs is None: pairs = combinations_with_replacement(G, 2) else: # Convert iterator to iterable, if necessary. Trim duplicates. pairs = dict.fromkeys(pairs) # Verify that each of the nodes in the provided pairs is in G nodeset = set(G) for pair in pairs: if set(pair) - nodeset: raise nx.NodeNotFound( f"Node(s) {set(pair) - nodeset} from pair {pair} not in G." ) # Once input validation is done, construct the generator
11,176
54,884
21
tests/test_task_runners.py
7
4
async def test_is_pickleable_after_start(self, task_runner): task_runner.client_kwargs["set_as_default"] = True
Introduces testing module and task runner standard test suite (PrefectHQ/orion#1655)
test_is_pickleable_after_start
7895fb5013aa98955bb31be96953ac13979a5b94
prefect
test_task_runners.py
8
6
https://github.com/PrefectHQ/prefect.git
1
50
0
7
30
Python
{ "docstring": "\n The task_runner must be picklable as it is attached to `PrefectFuture` objects\n Reimplemented to set Dask client as default to allow unpickling\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
async def test_is_pickleable_after_start(self, task_runner): task_runner.client_kwargs["set_as_default"] = True
55,971
220,349
26
python3.10.4/Lib/asyncio/base_events.py
5
5
def _add_callback_signalsafe(self, handle): self._add_callback(handle) self._write_to_self()
add python 3.10.4 for windows
_add_callback_signalsafe
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
base_events.py
7
3
https://github.com/XX-net/XX-Net.git
1
19
0
5
34
Python
{ "docstring": "Like _add_callback() but called from a signal handler.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def _add_callback_signalsafe(self, handle): self._add_callback(handle) self._write_to_self()
5,324
30,115
53
spotdl/utils/ffmpeg.py
24
8
def get_local_ffmpeg() -> Optional[Path]:
v4 init
get_local_ffmpeg
fa2ad657482aca9dc628e6d7062b8badf2706bb6
spotify-downloader
ffmpeg.py
12
10
https://github.com/spotDL/spotify-downloader.git
3
43
0
21
79
Python
{ "docstring": "\n Get local ffmpeg binary path or None if not found.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
def get_local_ffmpeg() -> Optional[Path]: ffmpeg_path = Path( get_spotdl_path(), "ffmpeg" + ".exe" if platform.system() == "Windows" else "" ) if ffmpeg_path.is_file(): return ffmpeg_path return None
54,474
216,231
184
salt/modules/cmdmod.py
88
20
def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None): if saltenv is None: saltenv = __opts__.get("saltenv", "base") if not template: return (cmd, cwd) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( "Attempted to render file paths with unavailable engine {}".format(template) ) kwargs = {} kwargs["salt"] = __salt__ if pillarenv is not None or pillar_override is not None: pillarenv = pillarenv or __opts__["pillarenv"] kwargs["pillar"] = _gather_pillar(pillarenv, pillar_override)
fixes salt bug 61507
_render_cmd
21d3f4bc9eb7b9fb1118c59073595a9e9ee836bd
salt
cmdmod.py
12
23
https://github.com/saltstack/salt.git
7
155
0
60
225
Python
{ "docstring": "\n If template is a valid template engine, process the cmd and cwd through\n that engine.\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 14 }
def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None): if saltenv is None: saltenv = __opts__.get("saltenv", "base") if not template: return (cmd, cwd) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( "Attempted to render file paths with unavailable engine {}".format(template) ) kwargs = {} kwargs["salt"] = __salt__ if pillarenv is not None or pillar_override is not None: pillarenv = pillarenv or __opts__["pillarenv"] kwargs["pillar"] = _gather_pillar(pillarenv, pillar_override) else: kwargs["pillar"] = __pillar__ kwargs["grains"] = __grains__ kwargs["opts"] = __opts__ kwargs["saltenv"] = saltenv
71,631
247,373
56
tests/rest/media/v1/test_html_preview.py
21
8
def test_windows_1252(self) -> None: html = b tree = decode_body(html, "http://example.com/test.html") og = parse
Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey`
test_windows_1252
7e91107be1a4287873266e588a3c5b415279f4c8
synapse
test_html_preview.py
10
13
https://github.com/matrix-org/synapse.git
1
44
0
18
83
Python
{ "docstring": "A body which uses cp1252, but doesn't declare that.\n <html>\n <head><title>\\xf3</title></head>\n <body>\n Some text.\n </body>\n </html>\n ", "language": "en", "n_whitespaces": 65, "n_words": 16, "vocab_size": 16 }
def test_windows_1252(self) -> None: html = b tree = decode_body(html, "http://example.com/test.html") og = parse_html_to_open_graph(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."})
99,466
300,606
58
homeassistant/helpers/template.py
17
10
def square_root(value, default=_SENTINEL): try: return m
Fail template functions when no default specified (#71687)
square_root
4885331509eeffe50f42d76b234996467b06170f
core
template.py
13
7
https://github.com/home-assistant/core.git
3
42
0
15
70
Python
{ "docstring": "Filter and function to get square root of the value.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def square_root(value, default=_SENTINEL): try: return math.sqrt(float(value)) except (ValueError, TypeError): if default is _SENTINEL: raise_no_default("sqrt", value) return default
90,842
291,738
73
tests/test_core.py
19
9
async def test_track_task_functions(event_loop): hass = ha.HomeAssistant() try: assert hass._track_task hass.async_stop_track_tasks() assert not hass._track_task hass.async_track_tasks() assert hass._track_task finally: await hass.async_stop()
Upgrade pytest-aiohttp (#82475) * Upgrade pytest-aiohttp * Make sure executors, tasks and timers are closed Some test will trigger warnings on garbage collect, these warnings spills over into next test. Some test trigger tasks that raise errors on shutdown, these spill over into next test. This is to mimic older pytest-aiohttp and it's behaviour on test cleanup. Discussions on similar changes for pytest-aiohttp are here: https://github.com/pytest-dev/pytest-asyncio/pull/309 * Replace loop with event_loop * Make sure time is frozen for tests * Make sure the ConditionType is not async /home-assistant/homeassistant/helpers/template.py:2082: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited def wrapper(*args, **kwargs): Enable tracemalloc to get traceback where the object was allocated. See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info. * Increase litejet press tests with a factor 10 The times are simulated anyway, and we can't stop the normal event from occuring. * Use async handlers for aiohttp tests/components/motioneye/test_camera.py::test_get_still_image_from_camera tests/components/motioneye/test_camera.py::test_get_still_image_from_camera tests/components/motioneye/test_camera.py::test_get_stream_from_camera tests/components/motioneye/test_camera.py::test_get_stream_from_camera tests/components/motioneye/test_camera.py::test_camera_option_stream_url_template tests/components/motioneye/test_camera.py::test_camera_option_stream_url_template /Users/joakim/src/hass/home-assistant/venv/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py:189: DeprecationWarning: Bare functions are deprecated, use async ones warnings.warn( * Switch to freezegun in modbus tests The tests allowed clock to tick in between steps * Make sure skybell object are fully mocked Old tests would trigger attempts to post to could services: ``` DEBUG:aioskybell:HTTP post https://cloud.myskybell.com/api/v3/login/ Request with headers: {'content-type': 'application/json', 'accept': '*/*', 'x-skybell-app-id': 'd2b542c7-a7e4-4e1e-b77d-2b76911c7c46', 'x-skybell-client-id': '1f36a3c0-6dee-4997-a6db-4e1c67338e57'} ``` * Fix sorting that broke after rebase
test_track_task_functions
c576a68d336bc91fd82c299d9b3e5dfdc1c14960
core
test_core.py
11
10
https://github.com/home-assistant/core.git
2
46
0
15
83
Python
{ "docstring": "Test function to start/stop track task and initial state.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def test_track_task_functions(event_loop): hass = ha.HomeAssistant() try: assert hass._track_task hass.async_stop_track_tasks() assert not hass._track_task hass.async_track_tasks() assert hass._track_task finally: await hass.async_stop()
92,344
293,281
46
homeassistant/components/kaleidescape/media_player.py
14
8
def media_position_updated_at(self) -> datetime | None:
Add Kaleidescape integration (#67711)
media_position_updated_at
ea82f2e293f43d3e5be103a64b68d088c4b65545
core
media_player.py
9
5
https://github.com/home-assistant/core.git
2
27
0
13
46
Python
{ "docstring": "When was the position of the current playing media valid.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def media_position_updated_at(self) -> datetime | None: if self._device.movie.play_status in KALEIDESCAPE_PLAYING_STATES: return utcnow() return None
56,101
220,720
100
python3.10.4/Lib/asyncio/sslproto.py
23
12
def write(self, data): if n
add python 3.10.4 for windows
write
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
sslproto.py
15
7
https://github.com/XX-net/XX-Net.git
3
44
0
21
84
Python
{ "docstring": "Write some data bytes to the transport.\n\n This does not block; it buffers the data and arranges for it\n to be sent out asynchronously.\n ", "language": "en", "n_whitespaces": 45, "n_words": 24, "vocab_size": 20 }
def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError(f"data: expecting a bytes-like instance, " f"got {type(data).__name__}") if not data: return self._ssl_protocol._write_appdata(data)
46,085
189,484
357
manim/mobject/svg/text_mobject.py
73
22
def _extract_color_tags(self): tags = re.finditer( r'<color\s+col="([^"]+)"(\s+offset="([^"]+)")?>(.+?)</color>', self.original_text, re.S, ) colormap = [] for tag in tags: start = self._count_real_chars(self.original_text[: tag.start(0)]) end = start + self._count_real_chars(tag.group(4)) offsets = tag.group(3).split(",") if tag.group(3) else [0] start_offset = int(offsets[0]) if offsets[0] else 0 end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0 colormap.append( { "start": start, "end": end, "color": tag.group(1), "start_offset": start_offset, "end_offset": end_offset,
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
_extract_color_tags
902e7eb4f0147b5882a613b67467e38a1d47f01e
manim
text_mobject.py
14
24
https://github.com/ManimCommunity/manim.git
6
188
0
58
296
Python
{ "docstring": "Used to determine which parts (if any) of the string should be formatted\n with a custom color.\n\n Removes the ``<color>`` tag, as it is not part of Pango's markup and would cause an error.\n\n Note: Using the ``<color>`` tags is deprecated. As soon as the legacy syntax is gone, this function\n will be removed.\n ", "language": "en", "n_whitespaces": 89, "n_words": 54, "vocab_size": 45 }
def _extract_color_tags(self): tags = re.finditer( r'<color\s+col="([^"]+)"(\s+offset="([^"]+)")?>(.+?)</color>', self.original_text, re.S, ) colormap = [] for tag in tags: start = self._count_real_chars(self.original_text[: tag.start(0)]) end = start + self._count_real_chars(tag.group(4)) offsets = tag.group(3).split(",") if tag.group(3) else [0] start_offset = int(offsets[0]) if offsets[0] else 0 end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0 colormap.append( { "start": start, "end": end, "color": tag.group(1), "start_offset": start_offset, "end_offset": end_offset, }, ) self.text = re.sub("<color[^>]+>(.+?)</color>", r"\1", self.text, 0, re.S) return colormap
39,268
162,729
888
frequency_response.py
239
44
def write_readme(self, file_path, parametric_eq_peqs=None, fixed_band_eq_peq=None): file_path = os.path.abspath(file_path) dir_path = os.path.dirname(file_path) model = self.name # Write model s = '# {}\n'.format(model) s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and info.\n\n' # Add parametric EQ settings if parametric_eq_peqs is not None: s += '### Parametric EQs\n'
Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.
write_readme
9120cdffe618c6c2ff16fe6a311b6a1367efdbc8
AutoEq
frequency_response.py
18
44
https://github.com/jaakkopasanen/AutoEq.git
11
312
0
135
638
Python
{ "docstring": "Writes README.md with picture and Equalizer APO settings.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def write_readme(self, file_path, parametric_eq_peqs=None, fixed_band_eq_peq=None): file_path = os.path.abspath(file_path) dir_path = os.path.dirname(file_path) model = self.name # Write model s = '# {}\n'.format(model) s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and info.\n\n' # Add parametric EQ settings if parametric_eq_peqs is not None: s += '### Parametric EQs\n' if len(parametric_eq_peqs) > 1: compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, []) n = 0 filter_ranges = '' preamps = '' for i, peq in enumerate(parametric_eq_peqs): for filt in peq.filters: compound.add_filter(filt) filter_ranges += f'1-{len(peq.filters) + n}' preamps += f'{-compound.max_gain - 0.1:.1f} dB' if i < len(parametric_eq_peqs) - 2: filter_ranges += ', ' preamps += ', ' elif i == len(parametric_eq_peqs) - 2: filter_ranges += ' or ' preamps += ' or ' n += len(peq.filters) s += f'You can use filters {filter_ranges}. Apply preamp of {preamps}, respectively.\n\n' else: compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, []) for peq in parametric_eq_peqs: for filt in peq.filters: compound.add_filter(filt) s += f'Apply preamp of -{compound.max_gain + 0.1:.1f} dB when using parametric equalizer.\n\n' s += compound.markdown_table() + '\n\n' # Add fixed band eq if fixed_band_eq_peq is not None: s += f'### Fixed Band EQs\nWhen using fixed band (also called graphic) equalizer, apply preamp of ' \ f'**-{fixed_band_eq_peq.max_gain + 0.1:.1f} dB** (if available) and set gains manually with these ' \ f'parameters.\n\n{fixed_band_eq_peq.markdown_table()}\n\n' # Write image link img_path = os.path.join(dir_path, model + '.png') if os.path.isfile(img_path): img_url = f'./{os.path.split(img_path)[1]}' img_url = urllib.parse.quote(img_url, safe="%/:=&?~#+!$,;'@()*[]") s += f'### Graphs\n![]({img_url})\n' # Write file with open(file_path, 'w', encoding='utf-8') as f: f.write(s)
74,967
256,950
158
haystack/telemetry.py
59
13
def _get_execution_environment(): if os.environ.get("CI", "False").lower() == "true": execution_env
Add basic telemetry features (#2314) * add basic telemetry features * change pipeline_config to _component_config * Update Documentation & Code Style * add super().__init__() calls to error classes * make posthog mock work with python 3.7 * Update Documentation & Code Style * update link to docs web page * log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH) * add comment on send_event in BaseComponent.init() and fix mypy * mock NonPrivateParameters and fix pylint undefined-variable * Update Documentation & Code Style * check model path contains multiple / * add test for writing to file * add test for en-/disable telemetry * Update Documentation & Code Style * merge file deletion methods and ignore pylint global statement * Update Documentation & Code Style * set env variable in demo to activate telemetry * fix mock of HAYSTACK_TELEMETRY_ENABLED * fix mypy and linter * add CI as env variable to execution contexts * remove threading, add test for custom error event * Update Documentation & Code Style * simplify config/log file deletion * add test for final event being sent * force writing config file in test * make test compatible with python 3.7 * switch to posthog production server * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
_get_execution_environment
ac5617e757e9ace6f30b7291686d9dbbc339f433
haystack
telemetry.py
15
17
https://github.com/deepset-ai/haystack.git
7
94
0
36
180
Python
{ "docstring": "\n Identifies the execution environment that Haystack is running in.\n Options are: colab notebook, kubernetes, CPU/GPU docker container, test environment, jupyter notebook, python script\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 22 }
def _get_execution_environment(): if os.environ.get("CI", "False").lower() == "true": execution_env = "ci" elif "google.colab" in sys.modules: execution_env = "colab" elif "KUBERNETES_SERVICE_HOST" in os.environ: execution_env = "kubernetes" elif HAYSTACK_DOCKER_CONTAINER in os.environ: execution_env = os.environ.get(HAYSTACK_DOCKER_CONTAINER) # check if pytest is imported elif "pytest" in sys.modules: execution_env = "test" else: try: execution_env = get_ipython().__class__.__name__ # pylint: disable=undefined-variable except NameError: execution_env = "script" return execution_env
12,258
60,716
58
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
30
17
def _ensure_html_response(url, session): # type: (str, PipSession) -> None scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) if scheme not in {'http', 'https'}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) raise_for_status(resp) _ensure_html
upd; format
_ensure_html_response
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
collector.py
9
7
https://github.com/jindongwang/transferlearning.git
2
60
0
29
98
Python
{ "docstring": "Send a HEAD request to the URL, and ensure the response contains HTML.\n\n Raises `_NotHTTP` if the URL is not available for a HEAD request, or\n `_NotHTML` if the content type is not text/html.\n ", "language": "en", "n_whitespaces": 43, "n_words": 34, "vocab_size": 26 }
def _ensure_html_response(url, session): # type: (str, PipSession) -> None scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) if scheme not in {'http', 'https'}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) raise_for_status(resp) _ensure_html_header(resp)
4,051
21,808
576
pipenv/vendor/tomlkit/parser.py
120
23
def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]: self.mark() with self._state as state: while True: c = self._current if c
Update tomlkit==0.9.2 Used: python -m invoke vendoring.update --package=tomlkit
_parse_item
8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9
pipenv
parser.py
19
25
https://github.com/pypa/pipenv.git
7
144
0
77
253
Python
{ "docstring": "\n Attempts to parse the next item and returns it, along with its key\n if the item is value-like.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]: self.mark() with self._state as state: while True: c = self._current if c == "\n": # Found a newline; Return all whitespace found up to this point. self.inc() return None, Whitespace(self.extract()) elif c in " \t\r": # Skip whitespace. if not self.inc(): return None, Whitespace(self.extract()) elif c == "#": # Found a comment, parse it indent = self.extract() cws, comment, trail = self._parse_comment_trail() return None, Comment(Trivia(indent, cws, comment, trail)) elif c == "[": # Found a table, delegate to the calling function. return else: # Begining of a KV pair. # Return to beginning of whitespace so it gets included # as indentation for the KV about to be parsed. state.restore = True break return self._parse_key_value(True)
49,705
200,570
296
sympy/tensor/tensor.py
55
12
def _xreplace(self, rule): if self in rule: return rule[self], True elif rule: rule = self._dedupe_indices_in_rule(rule) args = [] changed = False for a in self.args: _xreplace = getattr(a, '_xreplace', None) if _xreplace is not None: a_xr = _xreplace(rule) args.append(a_xr[0]) changed |= a_xr[1] else: args.append(a) args = tuple(args) if changed: r
Add TensMul._xreplace to dedupe dummy indices This apparently should not be done. https://github.com/sympy/sympy/pull/24333#issuecomment-1333783127
_xreplace
49222e1e3645ca6948277b8e1a9b526daeb0948d
sympy
tensor.py
15
19
https://github.com/sympy/sympy.git
6
113
0
39
182
Python
{ "docstring": "\n Helper for xreplace. Tracks whether a replacement actually occurred.\n\n Given that the rule has entries {old:new, ...}, this handles the fact\n that if a dummy index in new is the same as an index in self, the\n dummy index in new must be renamed.\n ", "language": "en", "n_whitespaces": 80, "n_words": 44, "vocab_size": 33 }
def _xreplace(self, rule): if self in rule: return rule[self], True elif rule: rule = self._dedupe_indices_in_rule(rule) args = [] changed = False for a in self.args: _xreplace = getattr(a, '_xreplace', None) if _xreplace is not None: a_xr = _xreplace(rule) args.append(a_xr[0]) changed |= a_xr[1] else: args.append(a) args = tuple(args) if changed: return self.func(*args), True return self, False
104,430
305,646
30
homeassistant/components/mpd/media_player.py
9
6
async def async_turn_on(self) -> None: await self._client.play() await self._update_playlists(no_throttle=True)
Improve entity type hints [m] (#77816)
async_turn_on
6355e682fa4aeb526570597d919ad1fb76755b9a
core
media_player.py
9
4
https://github.com/home-assistant/core.git
1
25
0
8
46
Python
{ "docstring": "Service to send the MPD the command to start playing.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
async def async_turn_on(self) -> None: await self._client.play() await self._update_playlists(no_throttle=True)
80,442
270,345
228
keras/distribute/distributed_training_utils_v1.py
122
18
def _build_network_on_replica(model, mode, inputs=None, targets=None): # Need to do imports here since we run into a circular dependency error. from keras import models # pylint: disable=g-import-not-at-top from keras.engine import sequential # pylint: disable=g-import-not-at-top # We rely on the internal methods to avoid having share_weights weights in the # public API. if isinstance(model, sequential.Sequential): updated_model = models._clone_sequential_model( model, input_tensors=inputs, layer_fn=models.share_weights ) else: updated_model = models._clone_functional_model( model, input_tensors=inputs, layer_fn=models.share_weights ) # Callable losses added directly to a functional Model need to be added # here. updated_model._callable_losses = model._callable_losses # Recast all low precision outputs back to float32 since we only casted # the inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value.
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_build_network_on_replica
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
distributed_training_utils_v1.py
13
33
https://github.com/keras-team/keras.git
6
188
0
88
133
Python
{ "docstring": "Build an updated model on replicas.\n\n We create a new Keras model while sharing the variables from the old graph.\n Building a new sub-graph is required since the original keras model creates\n placeholders for the input and the output that are not accessible till we\n call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.\n\n The sharing of weights and layers between the old and the new model guarantee\n that we're using Strategy variables and any updates on either model are\n reflected correctly in callbacks and loop iterations.\n\n We need to make sure we share the optimizers between the old and the new model\n as well so that optimizer state is not lost if the user is running fit\n multiple times.\n\n Args:\n model: Model to be replicated across Replicas\n mode: Which of fit/eval/predict is building the distributed network\n inputs: Input variables to be passed to the model\n targets: Target tensor to be passed to model.compile\n\n Returns:\n A new model with shared layers with the old model.\n ", "language": "en", "n_whitespaces": 227, "n_words": 163, "vocab_size": 103 }
def _build_network_on_replica(model, mode, inputs=None, targets=None): # Need to do imports here since we run into a circular dependency error. from keras import models # pylint: disable=g-import-not-at-top from keras.engine import sequential # pylint: disable=g-import-not-at-top # We rely on the internal methods to avoid having share_weights weights in the # public API. if isinstance(model, sequential.Sequential): updated_model = models._clone_sequential_model( model, input_tensors=inputs, layer_fn=models.share_weights ) else: updated_model = models._clone_functional_model( model, input_tensors=inputs, layer_fn=models.share_weights ) # Callable losses added directly to a functional Model need to be added # here. updated_model._callable_losses = model._callable_losses # Recast all low precision outputs back to float32 since we only casted # the inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value.
83,438
280,800
111
keras/utils/tf_utils.py
36
8
def can_jit_compile(warn=False): if platform.system() == "Darwin" and "arm" in pla
Set `jit_compile` only when TensorFlow XLA is available for the platform. Fixes issue of using new optimizers on Mac M1 as TF on Mac M1 is not built with XLA. PiperOrigin-RevId: 497158007
can_jit_compile
1b32391798a952176b733660c940b1589c2fc8a4
keras
tf_utils.py
13
9
https://github.com/keras-team/keras.git
4
43
0
34
82
Python
{ "docstring": "Returns True if TensorFlow XLA is available for the platform.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def can_jit_compile(warn=False): if platform.system() == "Darwin" and "arm" in platform.processor().lower(): if warn: logging.warning( "Tensorflow is not compiled with XLA on Mac M1 Arm processors, " "so cannot set `jit_compile` to True." ) return False return True
28,767
128,633
637
rllib/algorithms/algorithm.py
165
23
def __setstate__(self, state) -> None: # TODO (sven): Validate that our config and the config in state are compatible. # For example, the model architectures may differ. # Also, what should the behavior be if e.g. some training parameter # (e.g. lr) changed? if hasattr(self, "workers") and "worker" in state: self.workers.local_worker().set_state(state["worker"]) remote_state = ray.put(state["worker"]) for r in self.workers.remote_workers(): r.set_state.remote(remote_state) if self.evaluation_workers: # If evaluation workers are used, also restore the policies # there in case they are used for evaluation purpose. for r in self.evaluation_workers.remote_workers(): r.set_state.remote(remote_state) # If necessary, restore replay data as well. if self.local_replay_buffer is not None: # TODO: Experimental functionality: Restore contents of replay # buffer from checkpoint, only if user has configured this. if self.config.get("store_buffer_in_checkpoints"): if "local_replay_buffer" in state: self.local_replay_buffer.set_state(state["local_replay_buffer"]) else: logger.warning( "`store_buffer_in_checkpoints` is True, but no replay "
[RLlib] Algorithm/Policy checkpoint overhaul and Policy Model export (in native formats). (#28166)
__setstate__
23b3a599b9df8100558c477e94b0b19b1a38ac27
ray
algorithm.py
16
33
https://github.com/ray-project/ray.git
12
176
0
106
318
Python
{ "docstring": "Sets the algorithm to the provided state.\n\n Args:\n state: The state dict to restore this Algorithm instance to. `state` may\n have been returned by a call to an Algorithm's `__getstate__()` method.\n ", "language": "en", "n_whitespaces": 71, "n_words": 31, "vocab_size": 28 }
def __setstate__(self, state) -> None: # TODO (sven): Validate that our config and the config in state are compatible. # For example, the model architectures may differ. # Also, what should the behavior be if e.g. some training parameter # (e.g. lr) changed? if hasattr(self, "workers") and "worker" in state: self.workers.local_worker().set_state(state["worker"]) remote_state = ray.put(state["worker"]) for r in self.workers.remote_workers(): r.set_state.remote(remote_state) if self.evaluation_workers: # If evaluation workers are used, also restore the policies # there in case they are used for evaluation purpose. for r in self.evaluation_workers.remote_workers(): r.set_state.remote(remote_state) # If necessary, restore replay data as well. if self.local_replay_buffer is not None: # TODO: Experimental functionality: Restore contents of replay # buffer from checkpoint, only if user has configured this. if self.config.get("store_buffer_in_checkpoints"): if "local_replay_buffer" in state: self.local_replay_buffer.set_state(state["local_replay_buffer"]) else: logger.warning( "`store_buffer_in_checkpoints` is True, but no replay " "data found in state!" ) elif "local_replay_buffer" in state and log_once( "no_store_buffer_in_checkpoints_but_data_found" ): logger.warning( "`store_buffer_in_checkpoints` is False, but some replay " "data found in state!" ) if self.train_exec_impl is not None: self.train_exec_impl.shared_metrics.get().restore(state["train_exec_impl"])
73,880
251,891
33
test/mitmproxy/proxy/layers/http/test_http2.py
18
15
def test_http2_client_aborts(tctx, stream, when, how): server = P
make it black!
test_http2_client_aborts
b3587b52b25077f68116b9852b041d33e7fc6601
mitmproxy
test_http2.py
8
86
https://github.com/mitmproxy/mitmproxy.git
12
494
0
15
62
Python
{ "docstring": "\n Test handling of the case where a client aborts during request or response transmission.\n\n If the client aborts the request transmission, we must trigger an error hook,\n if the client disconnects during response transmission, no error hook is triggered.\n ", "language": "en", "n_whitespaces": 52, "n_words": 39, "vocab_size": 28 }
def test_http2_client_aborts(tctx, stream, when, how): server = Placeholder(Server) flow = Placeholder(HTTPFlow) playbook, cff = start_h2_client(tctx) resp = Placeholder(bytes)
50,366
203,421
277
django/contrib/admin/options.py
49
21
def get_changelist_instance(self, request): list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = ["action_checkbox", *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request),
Refs #33476 -- Reformatted code with Black.
get_changelist_instance
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
options.py
10
23
https://github.com/django/django.git
2
117
0
43
174
Python
{ "docstring": "\n Return a `ChangeList` instance based on `request`. May raise\n `IncorrectLookupParameters`.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
def get_changelist_instance(self, request): list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = ["action_checkbox", *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, self.search_help_text, )
26,878
120,516
156
jax/_src/lax/qdwh.py
103
29
def _use_cholesky(u, m, n, params): a, b, c = params _, N = u.shape x = c * (u.T.conj() @ u) + jnp.eye(N, dtype=jnp.dtype(u)) # Pads the lower-right corner with the identity matrix to prevent the Cholesky # decomposition from failing due to the matrix not being PSD if padded with # zeros. x = _mask(x, (n, n), jnp.eye(N,
Add support for padded arrays in QDWH algorithm. This change is in preparation for adding a jit-table QDWH-eig implementation. PiperOrigin-RevId: 448571523
_use_cholesky
db73670ec3fc72f75e6f832351620ac79e9b0c6f
jax
qdwh.py
13
13
https://github.com/google/jax.git
1
174
0
72
261
Python
{ "docstring": "QDWH iteration using Cholesky decomposition.\n\n Args:\n u: a matrix, with static (padded) shape M x N\n m, n: the dynamic shape of the matrix, where m <= M and n <= N.\n params: the QDWH parameters.\n ", "language": "en", "n_whitespaces": 41, "n_words": 36, "vocab_size": 29 }
def _use_cholesky(u, m, n, params): a, b, c = params _, N = u.shape x = c * (u.T.conj() @ u) + jnp.eye(N, dtype=jnp.dtype(u)) # Pads the lower-right corner with the identity matrix to prevent the Cholesky # decomposition from failing due to the matrix not being PSD if padded with # zeros. x = _mask(x, (n, n), jnp.eye(N, dtype=x.dtype)) # `y` is lower triangular. y = lax_linalg.cholesky(x, symmetrize_input=False) z = lax_linalg.triangular_solve( y, u.T, left_side=True, lower=True, conjugate_a=True).conj() z = lax_linalg.triangular_solve(y, z, left_side=True, lower=True, transpose_a=True, conjugate_a=True).T.conj() e = b / c u = e * u + (a - e) * z return u
47,450
195,863
39
sympy/functions/elementary/piecewise.py
18
11
def piecewise_integrate(self, x, **kwargs): from sympy.integrals import integrate return self.func(*[(integrate(e, x, **kwargs), c) for e, c in self.args])
Improved documentation formatting
piecewise_integrate
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
sympy
piecewise.py
13
3
https://github.com/sympy/sympy.git
2
47
0
17
71
Python
{ "docstring": "Return the Piecewise with each expression being\n replaced with its antiderivative. To obtain a continuous\n antiderivative, use the :func:`~.integrate` function or method.\n\n Examples\n ========\n\n >>> from sympy import Piecewise\n >>> from sympy.abc import x\n >>> p = Piecewise((0, x < 0), (1, x < 1), (2, True))\n >>> p.piecewise_integrate(x)\n Piecewise((0, x < 0), (x, x < 1), (2*x, True))\n\n Note that this does not give a continuous function, e.g.\n at x = 1 the 3rd condition applies and the antiderivative\n there is 2*x so the value of the antiderivative is 2:\n\n >>> anti = _\n >>> anti.subs(x, 1)\n 2\n\n The continuous derivative accounts for the integral *up to*\n the point of interest, however:\n\n >>> p.integrate(x)\n Piecewise((0, x < 0), (x, x < 1), (2*x - 1, True))\n >>> _.subs(x, 1)\n 1\n\n See Also\n ========\n Piecewise._eval_integral\n ", "language": "en", "n_whitespaces": 310, "n_words": 135, "vocab_size": 85 }
def piecewise_integrate(self, x, **kwargs): from sympy.integrals import integrate return self.func(*[(integrate(e, x, **kwargs), c) for e, c in self.args])
40,835
173,321
527
cps/helper.py
114
21
def check_send_to_ereader(entry): formats = list() book_formats = list() if len(entry.data): for ele in iter(entry.data): if ele.uncompressed_size < config.mail_size: formats.append(ele.format) if 'EPUB' in formats: book_formats.append({'format': 'Epub', 'convert': 0, 'text':
Eenabled send epubs to E-Reader devices
check_send_to_ereader
fbac3e38ac116855b930ee60fb3c997337ae17b7
calibre-web
helper.py
17
29
https://github.com/janeczku/calibre-web.git
9
202
0
65
370
Python
{ "docstring": "\n returns all available book formats for sending to E-Reader\n ", "language": "en", "n_whitespaces": 20, "n_words": 9, "vocab_size": 9 }
def check_send_to_ereader(entry): formats = list() book_formats = list() if len(entry.data): for ele in iter(entry.data): if ele.uncompressed_size < config.mail_size: formats.append(ele.format) if 'EPUB' in formats: book_formats.append({'format': 'Epub', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Epub')}) if 'MOBI' in formats: book_formats.append({'format': 'Mobi', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Mobi')}) if 'PDF' in formats: book_formats.append({'format': 'Pdf', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Pdf')}) if 'AZW' in formats: book_formats.append({'format': 'Azw', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Azw')}) if config.config_converterpath: book_formats.extend(check_send_to_ereader_with_converter(formats)) return book_formats else: log.error(u'Cannot find book entry %d', entry.id) return None # Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return # list with supported formats
48,606
197,528
37
sympy/stats/joint_rv_types.py
23
8
def Multinomial(syms, n, *p): if not isinstance(p[0], list): p = (list(p), ) return multivariate_rv(MultinomialDistribution, syms, n, p[0]) #------------------------------------------------------------------------------- # Negative
Improved some documentation in the stats module
Multinomial
7fe8e027ae1d7f683243c0229b961671a6cbb4c5
sympy
joint_rv_types.py
11
4
https://github.com/sympy/sympy.git
2
46
0
22
71
Python
{ "docstring": "\n Creates a discrete random variable with Multinomial Distribution.\n\n The density of the said distribution can be found at [1].\n\n Parameters\n ==========\n\n n : Positive integer\n Represents number of trials\n p : List of event probabilites\n Must be in the range of [0, 1]\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import density, Multinomial, marginal_distribution\n >>> from sympy import symbols\n >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)\n >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)\n >>> M = Multinomial('M', 3, p1, p2, p3)\n >>> density(M)(x1, x2, x3)\n Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)),\n Eq(x1 + x2 + x3, 3)), (0, True))\n >>> marginal_distribution(M, M[0])(x1).subs(x1, 1)\n 3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Multinomial_distribution\n .. [2] http://mathworld.wolfram.com/MultinomialDistribution.html\n\n ", "language": "en", "n_whitespaces": 210, "n_words": 117, "vocab_size": 91 }
def Multinomial(syms, n, *p): if not isinstance(p[0], list): p = (list(p), ) return multivariate_rv(MultinomialDistribution, syms, n, p[0]) #------------------------------------------------------------------------------- # Negative Multinomial Distribution --------------------------------------------
49,473
199,984
64
sympy/physics/qho_1d.py
43
16
def psi_n(n, x, m, omega): # sympify arguments n, x, m, omega = map(S, [n, x, m, omega]) nu = m * omega / hbar #
applied backtick correction to the remainder of the project
psi_n
a0989bcfd26470833cf03737941bfd80f511c745
sympy
qho_1d.py
14
5
https://github.com/sympy/sympy.git
1
97
0
31
146
Python
{ "docstring": "\n Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator.\n\n Parameters\n ==========\n\n n :\n the \"nodal\" quantum number. Corresponds to the number of nodes in the\n wavefunction. ``n >= 0``\n x :\n x coordinate.\n m :\n Mass of the particle.\n omega :\n Angular frequency of the oscillator.\n\n Examples\n ========\n\n >>> from sympy.physics.qho_1d import psi_n\n >>> from sympy.abc import m, x, omega\n >>> psi_n(0, x, m, omega)\n (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4))\n\n ", "language": "en", "n_whitespaces": 146, "n_words": 66, "vocab_size": 46 }
def psi_n(n, x, m, omega): # sympify arguments n, x, m, omega = map(S, [n, x, m, omega]) nu = m * omega / hbar # normalization coefficient C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n))) return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x)
55,321
218,459
201
python3.10.4/Lib/inspect.py
85
23
def getsourcefile(object): filename = getfile(object) all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] if any(filename.endswith(s) for s in all_bytecode_suffixes): filename = (os.path.splitext(filename)[0] + importlib.machinery.SOURCE_SUFFIXES[0]) elif any(filename.endswith(s) for s in
add python 3.10.4 for windows
getsourcefile
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
inspect.py
14
19
https://github.com/XX-net/XX-Net.git
9
158
0
49
250
Python
{ "docstring": "Return the filename that can be used to locate an object's source.\n Return None if no way can be identified to get the source.\n ", "language": "en", "n_whitespaces": 30, "n_words": 24, "vocab_size": 18 }
def getsourcefile(object): filename = getfile(object) all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] if any(filename.endswith(s) for s in all_bytecode_suffixes): filename = (os.path.splitext(filename)[0] + importlib.machinery.SOURCE_SUFFIXES[0]) elif any(filename.endswith(s) for s in importlib.machinery.EXTENSION_SUFFIXES): return None if os.path.exists(filename): return filename # only return a non-existent filename if the module has a PEP 302 loader module = getmodule(object, filename) if getattr(module, '__loader__', None) is not None: return filename elif getattr(getattr(module, "__spec__", None), "loader", None) is not None: return filename # or it is in the linecache elif filename in linecache.cache: return filename
55,086
218,024
76
python3.10.4/Lib/importlib/_abc.py
27
7
def module_repr(self, module): warnings.warn("importlib.abc.Loader.module_repr() is deprecated and " "slated for removal in Python 3.12", DeprecationWarning
add python 3.10.4 for windows
module_repr
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_abc.py
9
4
https://github.com/XX-net/XX-Net.git
1
19
0
27
37
Python
{ "docstring": "Return a module's repr.\n\n Used by the module type when the method does not raise\n NotImplementedError.\n\n This method is deprecated.\n\n ", "language": "en", "n_whitespaces": 48, "n_words": 20, "vocab_size": 18 }
def module_repr(self, module): warnings.warn("importlib.abc.Loader.module_repr() is deprecated and " "slated for removal in Python 3.12", DeprecationWarning) # The exception will cause ModuleType.__repr__ to ignore this method. raise NotImplementedError
72,960
249,509
275
tests/push/test_email.py
24
25
def test_need_validated_email(self): with
Support enabling/disabling pushers (from MSC3881) (#13799) Partial implementation of MSC3881
test_need_validated_email
8ae42ab8fa3c6b52d74c24daa7ca75a478fa4fbb
synapse
test_email.py
14
17
https://github.com/matrix-org/synapse.git
1
99
0
23
161
Python
{ "docstring": "Test that we can only add an email pusher if the user has validated\n their email.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 16 }
def test_need_validated_email(self): with self.assertRaises(SynapseError) as cm: self.get_success_or_raise( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, kind="email", app_id="m.email", app_display_name="Email Notifications", device_display_name="b@example.com", pushkey="b@example.com", lang=None, data={}, ) ) self.assertEqual(400, cm.exception.code) self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode)
117,028
319,962
419
src/documents/tests/test_api.py
64
28
def test_get_existing_comments(self): doc = Document.objects.create( title="test", mime_type="application/pdf", content="this is a document which will have comments!", ) comment = Comment.objects.create( comment="This is a comment.", document=doc, user=self.user, ) response = self.client.get( f"/api/documents/{doc.pk}/comments/", format="json", ) self.assertEqual(response.status_code, 200) resp_data = response.json() self.assertEqual(len(resp_data), 1) resp_data = resp_data[0] del resp_data["created"] self.assertDictEqual( resp_data,
Starts on implementing tests for the new API
test_get_existing_comments
6d5d308d6c7b7e359ba72964a300634e1065ace9
paperless-ngx
test_api.py
13
33
https://github.com/paperless-ngx/paperless-ngx.git
1
164
0
51
275
Python
{ "docstring": "\n GIVEN:\n - A document with a single comment\n WHEN:\n - API reuqest for document comments is made\n THEN:\n - The associated comment is returned\n ", "language": "en", "n_whitespaces": 86, "n_words": 24, "vocab_size": 19 }
def test_get_existing_comments(self): doc = Document.objects.create( title="test", mime_type="application/pdf", content="this is a document which will have comments!", ) comment = Comment.objects.create( comment="This is a comment.", document=doc, user=self.user, ) response = self.client.get( f"/api/documents/{doc.pk}/comments/", format="json", ) self.assertEqual(response.status_code, 200) resp_data = response.json() self.assertEqual(len(resp_data), 1) resp_data = resp_data[0] del resp_data["created"] self.assertDictEqual( resp_data, { "id": comment.id, "comment": comment.comment, "user": { "id": comment.user.id, "username": comment.user.username, "firstname": comment.user.first_name, "lastname": comment.user.last_name, }, }, )
77,276
262,565
155
TTS/vocoder/datasets/wavegrad_dataset.py
62
16
def collate_full_clips(batch): max_mel_length = max([b[0].sh
Fix tune wavegrad (#1844) * fix imports in tune_wavegrad * load_config returns Coqpit object instead None * set action (store true) for flag "--use_cuda"; start to tune if module is running as the main program * fix var order in the result of batch collating * make style * make style with black and isort
collate_full_clips
2c9f00a808e0aa76a82af2e8b325abb71f50d1df
TTS
wavegrad_dataset.py
13
11
https://github.com/coqui-ai/TTS.git
6
185
0
38
272
Python
{ "docstring": "This is used in tune_wavegrad.py.\n It pads sequences to the max length.", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
def collate_full_clips(batch): max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1] max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0] mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length]) audios = torch.zeros([len(batch), max_audio_length]) for idx, b in enumerate(batch): mel = b[0] audio = b[1] mels[idx, :, : mel.shape[1]] = mel audios[idx, : audio.shape[0]] = audio return mels, audios
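A minimal sketch of the padding behaviour, assuming the function is exposed as a staticmethod on WaveGradDataset (the commit message above suggests it was converted to one); the tensor shapes are made up.

import torch
from TTS.vocoder.datasets.wavegrad_dataset import WaveGradDataset

# Two (mel, audio) pairs of different lengths, mimicking dataset items.
batch = [
    (torch.randn(80, 50), torch.randn(12800)),
    (torch.randn(80, 73), torch.randn(18688)),
]
mels, audios = WaveGradDataset.collate_full_clips(batch)
print(mels.shape)    # torch.Size([2, 80, 73])  -- padded to the longest mel
print(audios.shape)  # torch.Size([2, 18688])   -- padded to the longest audio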
38,550
160,178
249
numpy/distutils/ccompiler_opt.py
81
19
def feature_test(self, name, force_flags=None, macros=[]): if force_flags is None: force_flags = self.feature_flags(name) self.dist_log( "testing feature '%s' with flags (%s)" % ( name, ' '.join(force_flags) )) # Each CPU feature must have C source code contains at #
Add space after argument name
feature_test
f404e9e92e87a3990712d723d5c562a89300ac01
numpy
ccompiler_opt.py
12
18
https://github.com/numpy/numpy.git
4
123
0
65
204
Python
{ "docstring": "\n Test a certain CPU feature against the compiler through its own\n check file.\n\n Parameters\n ----------\n name : str\n Supported CPU feature name.\n\n force_flags : list or None, optional\n If None(default), the returned flags from `feature_flags()`\n will be used.\n\n macros : list of tuples, optional\n A list of C macro definitions.\n ", "language": "en", "n_whitespaces": 151, "n_words": 50, "vocab_size": 41 }
def feature_test(self, name, force_flags=None, macros=[]): if force_flags is None: force_flags = self.feature_flags(name) self.dist_log( "testing feature '%s' with flags (%s)" % ( name, ' '.join(force_flags) )) # Each CPU feature must have C source code contains at # least one intrinsic or instruction related to this feature. test_path = os.path.join( self.conf_check_path, "cpu_%s.c" % name.lower() ) if not os.path.exists(test_path): self.dist_fatal("feature test file is not exist", test_path) test = self.dist_test( test_path, force_flags + self.cc_flags["werror"], macros=macros ) if not test: self.dist_log("testing failed", stderr=True) return test
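The core idea of feature_test — compile a tiny C file that uses a feature-specific intrinsic and treat a clean compile as proof the compiler supports the feature — can be sketched without the full CCompilerOpt machinery. The compiler name, flags and the AVX2 snippet below are illustrative assumptions, not what NumPy ships.

import os
import subprocess
import tempfile

def has_cpu_feature(compiler, flags, test_source):
    # Return True if the feature-specific C snippet compiles cleanly.
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "cpu_feature.c")
        obj = os.path.join(tmp, "cpu_feature.o")
        with open(src, "w") as fh:
            fh.write(test_source)
        result = subprocess.run(
            [compiler, *flags, "-c", src, "-o", obj],
            capture_output=True,
        )
        return result.returncode == 0

# Illustrative AVX2 probe for a GCC/Clang-style toolchain (assumed to be installed).
AVX2_SRC = """
#include <immintrin.h>
int main(void)
{
    __m256i a = _mm256_set1_epi32(1);
    __m256i b = _mm256_add_epi32(a, a);   /* 256-bit integer add requires AVX2 */
    return _mm_cvtsi128_si32(_mm256_castsi256_si128(b)) - 2;
}
"""
print(has_cpu_feature("gcc", ["-mavx2", "-Werror"], AVX2_SRC))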
46,282
189,987
48
manim/mobject/svg/svg_mobject.py
16
6
def get_file_path(self) -> str: if self.file_name is None: raise ValueError("Must specify file for SVGMobject") return get_full_vector_image_path(self.f
Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898) * port SVGMobject from 3b1b/manim * added svgelements as dependency * revert change of default values * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * set default stroke_width of svg elements to 0 if not set * fix handling of circles with different rx/ry * turn more methods into staticmethods * removed duplicated method * set/adapt stroke-width of some test SVGs * updated control data * forgot some control data * fixed init_colors in tex_mobject and text_mobject * minor changes, added docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * module docstring, removed import * vector_to_coords changed again * nail sphinx version to below 5.1 to fix rtd (?) * update test_text control data for science * changed Brace to use VMobjectFromSVGPath * remove unused classes and methods depending on old SVG path implementation * remove style_utils and svg_path modules * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change test_text to use monospace font * restore geometry.polygram * added get_mobject_type_class auxiliary method; changed polyline implementation to ad-hoc approach * restore test_text to previous version * skip Use tags as svgelements already populates them Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
get_file_path
309c9d41eb734ca85a7aea5533f88a6d4ee7c944
manim
svg_mobject.py
10
5
https://github.com/ManimCommunity/manim.git
2
27
0
16
48
Python
{ "docstring": "Search for an existing file based on the specified file name.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def get_file_path(self) -> str: if self.file_name is None: raise ValueError("Must specify file for SVGMobject") return get_full_vector_image_path(self.file_name)
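A small usage sketch, assuming a Manim version that exposes this accessor and an SVG file that actually exists on the configured search path; the file name is hypothetical.

from manim import SVGMobject

# Hypothetical asset; construction fails if the file cannot be located.
logo = SVGMobject("example_logo.svg")
print(logo.get_file_path())  # absolute path resolved via get_full_vector_image_path
# With no file name at all, the same call raises ValueError, as in the snippet above.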
72,223
248,340
42
tests/replication/_base.py
14
7
def default_config(self) -> Dict[str, Any]: base = super().default_config() base["redis"] = {"enabled": True}
Lay some foundation work to allow workers to only subscribe to some kinds of messages, reducing replication traffic. (#12672)
default_config
177b884ad7cc1ecdd92ff74188732734df203150
synapse
_base.py
10
10
https://github.com/matrix-org/synapse.git
1
34
0
12
60
Python
{ "docstring": "\n Overrides the default config to enable Redis.\n Even if the test only uses make_worker_hs, the main process needs Redis\n enabled otherwise it won't create a Fake Redis server to listen on the\n Redis port and accept fake TCP connections.\n ", "language": "en", "n_whitespaces": 75, "n_words": 39, "vocab_size": 33 }
def default_config(self) -> Dict[str, Any]: base = super().default_config() base["redis"] = {"enabled": True} return base
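The same override pattern, sketched for a hypothetical subclass: further config keys are layered on top of the Redis-enabled base. The base-class name, its import path and the start_pushers key are assumptions; only the Redis toggle comes from the record above.

from typing import Any, Dict

from tests.replication._base import BaseMultiWorkerStreamTestCase  # assumed location

class MyWorkerStreamTestCase(BaseMultiWorkerStreamTestCase):  # hypothetical subclass
    def default_config(self) -> Dict[str, Any]:
        config = super().default_config()
        # Redis is already enabled by the base class; add test-specific overrides here.
        config["start_pushers"] = False
        return config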