Dataset schema (column name, type, and the reported min/max value or string length):

column           type           min      max
ast_errors       stringlengths  0        3.2k
d_id             int64          44       121k
id               int64          70       338k
n_whitespaces    int64          3        14k
path             stringlengths  8        134
n_words          int64          4        4.82k
n_identifiers    int64          1        131
random_cut       stringlengths  16       15.8k
commit_message   stringlengths  2        15.3k
fun_name         stringlengths  1        84
commit_id        stringlengths  40       40
repo             stringlengths  3        28
file_name        stringlengths  5        79
ast_levels       int64          6        31
nloc             int64          1        548
url              stringlengths  31       59
complexity       int64          1        66
token_counts     int64          6        2.13k
n_ast_errors     int64          0        28
vocab_size       int64          4        1.11k
n_ast_nodes      int64          15       19.2k
language         stringclasses  1 value
documentation    dict
code             stringlengths  101      62.2k

The records that follow list one field value per line in this column order; empty fields (such as ast_errors) appear to be omitted, and the long string fields (random_cut, commit_message, documentation, code) are shown as single flattened lines.
53,111
211,511
214
ppdet/modeling/rbox_utils.py
128
28
def box2corners(box): B = box.shape[0] x, y, w, h, alpha = paddle.split(box, 5, axis=-1) x4 = paddle.to_tensor( [0.5, 0.5, -0.5, -0.5], dtype=paddle.float32).reshape( (1, 1, 4)) # (1,1,4) x4 = x4 * w # (B, N, 4) y4 = paddle.to_tensor( [-0.5, 0.5, 0.5, -0.5], dtype=paddle.float32).reshape((1, 1, 4)) y4 = y4 * h # (B, N, 4) corners = paddle.stack([x4, y4], axis=-1) # (B, N, 4, 2) sin = paddle.sin(alpha) cos = paddle.cos(alpha) row1 = paddle.concat([cos, sin], axis=-1) row2 = paddle.concat([-sin, cos], axis=-1) # (B, N, 2) rot_T = paddle.stack([row1, row2], axis=-2) # (B, N, 2, 2) rotated = paddle.bmm(corners.reshape([-1, 4, 2]), rot_T.reshape([-1, 2, 2])) rotated = rotated.reshape([B, -1, 4, 2]) # (B*N, 4, 2) -> (B, N, 4, 2) rotated[..., 0] += x rotated[..., 1] += y return rotated
add fcosr model (#6765) * add fcosr * fix some problem * add docs for fcosr * modify code * modify focsr reader * finish tensorrt deployment with dynamic shape * modify according to review comment Co-authored-by: wangxinxin08 <>
box2corners
92078713cced4f0d9450a6fc80a449fa75fd8c10
PaddleDetection
rbox_utils.py
12
21
https://github.com/PaddlePaddle/PaddleDetection.git
1
287
0
71
403
Python
{ "docstring": "convert box coordinate to corners\n Args:\n box (Tensor): (B, N, 5) with (x, y, w, h, alpha) angle is in [0, 90)\n Returns:\n corners (Tensor): (B, N, 4, 2) with (x1, y1, x2, y2, x3, y3, x4, y4)\n ", "language": "en", "n_whitespaces": 61, "n_words": 38, "vocab_size": 32 }
def box2corners(box): B = box.shape[0] x, y, w, h, alpha = paddle.split(box, 5, axis=-1) x4 = paddle.to_tensor( [0.5, 0.5, -0.5, -0.5], dtype=paddle.float32).reshape( (1, 1, 4)) # (1,1,4) x4 = x4 * w # (B, N, 4) y4 = paddle.to_tensor( [-0.5, 0.5, 0.5, -0.5], dtype=paddle.float32).reshape((1, 1, 4)) y4 = y4 * h # (B, N, 4) corners = paddle.stack([x4, y4], axis=-1) # (B, N, 4, 2) sin = paddle.sin(alpha) cos = paddle.cos(alpha) row1 = paddle.concat([cos, sin], axis=-1) row2 = paddle.concat([-sin, cos], axis=-1) # (B, N, 2) rot_T = paddle.stack([row1, row2], axis=-2) # (B, N, 2, 2) rotated = paddle.bmm(corners.reshape([-1, 4, 2]), rot_T.reshape([-1, 2, 2])) rotated = rotated.reshape([B, -1, 4, 2]) # (B*N, 4, 2) -> (B, N, 4, 2) rotated[..., 0] += x rotated[..., 1] += y return rotated
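A minimal usage sketch for the box2corners helper above. The import path is taken from this record's `path` field and is otherwise an assumption, and the single box below is a made-up value chosen only to illustrate the expected (B, N, 5) input and (B, N, 4, 2) output shapes from the docstring.

    import paddle
    from ppdet.modeling.rbox_utils import box2corners  # module path assumed from the record's `path`

    # One batch, one box: (x, y, w, h, alpha)
    boxes = paddle.to_tensor([[[10.0, 10.0, 4.0, 2.0, 0.3]]], dtype=paddle.float32)
    corners = box2corners(boxes)
    print(corners.shape)  # [1, 1, 4, 2] -> four (x, y) corners per box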
18,753
91,256
155
src/sentry/incidents/subscription_processor.py
29
14
def get_crash_rate_alert_metrics_aggregation_value(self, subscription_update): rows = subscription_update["values"]["data"] if BaseMetricsEntitySubscription.is_crash_rate_format_v2(rows): version = "v2" result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update) else: version = "v1" result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update) metrics.incr( "incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value", tags={"format": version}, sample_rate=1.0, ) ret
fix(cra-metrics): Count all users in metrics alerts (#34957) Use conditional aggregates in order to get both the total user count and the number of crashed users in the same snuba query. To maintain compatibility until existing subscriptions have been migrated, make the subscription processor able to handle both the old and the new format. The actual migration of existing subscriptions will be in a separate PR.
get_crash_rate_alert_metrics_aggregation_value
65f43fd4e0f1821b468547fc08136bbad9cd8446
sentry
subscription_processor.py
11
14
https://github.com/getsentry/sentry.git
2
72
0
22
123
Python
{ "docstring": "Handle both update formats. Once all subscriptions have been updated\n to v2, we can remove v1 and replace this function with current v2.\n ", "language": "en", "n_whitespaces": 37, "n_words": 23, "vocab_size": 23 }
def get_crash_rate_alert_metrics_aggregation_value(self, subscription_update): rows = subscription_update["values"]["data"] if BaseMetricsEntitySubscription.is_crash_rate_format_v2(rows): version = "v2" result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update) else: version = "v1" result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update) metrics.incr( "incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value", tags={"format": version}, sample_rate=1.0, ) return result
26,998
120,945
20
jax/_src/test_util.py
14
6
def strict_promotion_if_dtypes_match(dtypes): if all(dtype == dtypes[0] for dtype in dtypes): return jax.
Add jtu.strict_promotion_if_dtypes_match utility
strict_promotion_if_dtypes_match
4c0d61a1435b70760814f1f678cb041d36b8408d
jax
test_util.py
10
4
https://github.com/google/jax.git
3
35
0
13
61
Python
{ "docstring": "\n Context manager to enable strict promotion if all dtypes match,\n and enable standard dtype promotion otherwise.\n ", "language": "en", "n_whitespaces": 20, "n_words": 16, "vocab_size": 14 }
def strict_promotion_if_dtypes_match(dtypes): if all(dtype == dtypes[0] for dtype in dtypes): return jax.numpy_dtype_promotion('strict') return jax.numpy_dtype_promotion('standard')
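A short sketch of how the context manager returned above is typically used. jax.numpy_dtype_promotion is the public config helper the function wraps; the dtypes below are illustrative.

    import jax
    import jax.numpy as jnp

    dtypes = [jnp.float32, jnp.float32]
    # Mirror the helper: strict promotion only when every dtype matches.
    promotion = 'strict' if all(d == dtypes[0] for d in dtypes) else 'standard'
    with jax.numpy_dtype_promotion(promotion):
        out = jnp.ones(3, jnp.float32) + jnp.ones(3, jnp.float32)
    print(out.dtype)  # float32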
76,104
260,170
28
sklearn/utils/tests/test_param_validation.py
15
7
def test_stroptions_deprecated_subset(): with pytest.raises(ValueError, match="deprecated options must be a subset"): StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
MNT Param validation: Make it possible to mark a constraint as hidden (#23558)
test_stroptions_deprecated_subset
de659b9dee2054efb4830eff8e98ece60f4a1758
scikit-learn
test_param_validation.py
12
3
https://github.com/scikit-learn/scikit-learn.git
1
35
0
15
68
Python
{ "docstring": "Check that the deprecated parameter must be a subset of options.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_stroptions_deprecated_subset(): with pytest.raises(ValueError, match="deprecated options must be a subset"): StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
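A hedged sketch of the constraint being tested. StrOptions is a private scikit-learn helper; the import path is inferred from this test module's location and may differ between versions.

    import pytest
    from sklearn.utils._param_validation import StrOptions  # private API; path is an assumption

    # A valid constraint: the deprecated values form a subset of the options.
    opts = StrOptions({"a", "b", "c"}, deprecated={"a"})
    print(opts.is_satisfied_by("b"))  # True

    # The case exercised by the test above: "d" is not one of the options.
    with pytest.raises(ValueError, match="deprecated options must be a subset"):
        StrOptions({"a", "b", "c"}, deprecated={"a", "d"})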
80,793
271,557
337
keras/engine/training.py
148
5
def _validate_target_and_loss(self, y, loss): # `self.loss` references the loss added via `compile` call. If users have # provided such, the target must be provided; otherwise it's a user error. # Note that `self.loss` does not include losses added via `add_loss`, and it # is a valid use when such loss from `add_loss` exists and target does not. if self.loss and y is None: raise ValueError( "Target data is missing. Your model was compiled with " f"loss={self.loss}, " "and therefore expects target data to be provided in `fit()`." ) # For training, there must be compiled loss or regularization loss to exist # in ord
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_validate_target_and_loss
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training.py
13
12
https://github.com/keras-team/keras.git
4
38
0
95
84
Python
{ "docstring": "Raises error if target or loss is not found.\n\n This method verifies that the target and loss are properly populated\n when applicable, or raises errors.\n\n Args:\n y: the target for training.\n loss: the total loss tensor including loss added via `compile` and\n `add_loss`.\n ", "language": "en", "n_whitespaces": 100, "n_words": 43, "vocab_size": 34 }
def _validate_target_and_loss(self, y, loss): # `self.loss` references the loss added via `compile` call. If users have # provided such, the target must be provided; otherwise it's a user error. # Note that `self.loss` does not include losses added via `add_loss`, and it # is a valid use when such loss from `add_loss` exists and target does not. if self.loss and y is None: raise ValueError( "Target data is missing. Your model was compiled with " f"loss={self.loss}, " "and therefore expects target data to be provided in `fit()`." ) # For training, there must be compiled loss or regularization loss to exist # in order to apply the gradients. If one is not found, it means no loss # was supplied via `compile` or `add_loss`. elif loss is None: raise ValueError( "No loss found. You may have forgotten to provide a `loss` argument " "in the `compile()` method." )
39,728
165,883
222
pandas/core/window/rolling.py
52
15
def _validate_datetimelike_monotonic(self): # GH 46061 if self._on.hasnans: self._raise_monoton
BUG: groupby().rolling(freq) with monotonic dates within groups #46065 (#46567)
_validate_datetimelike_monotonic
d2aa44f50f6ac4789d4e351e4e52a53a358da42e
pandas
rolling.py
14
13
https://github.com/pandas-dev/pandas.git
6
75
0
44
135
Python
{ "docstring": "\n Validate that each group in self._on is monotonic\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def _validate_datetimelike_monotonic(self): # GH 46061 if self._on.hasnans: self._raise_monotonic_error("values must not have NaT") for group_indices in self._grouper.indices.values(): group_on = self._on.take(group_indices) if not ( group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing ): on = "index" if self.on is None else self.on raise ValueError( f"Each group within {on} must be monotonic. " f"Sort the values in {on} first." )
56,528
221,825
31
python3.10.4/Lib/ctypes/macholib/framework.py
12
6
def framework_info(filename): is_framework = STRICT_FRAMEWORK_RE.match(filename) if not is_framework: return None return is_framewo
add python 3.10.4 for windows
framework_info
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
framework.py
8
5
https://github.com/XX-net/XX-Net.git
2
26
0
11
46
Python
{ "docstring": "\n A framework name can take one of the following four forms:\n Location/Name.framework/Versions/SomeVersion/Name_Suffix\n Location/Name.framework/Versions/SomeVersion/Name\n Location/Name.framework/Name_Suffix\n Location/Name.framework/Name\n\n returns None if not found, or a mapping equivalent to:\n dict(\n location='Location',\n name='Name.framework/Versions/SomeVersion/Name_Suffix',\n shortname='Name',\n version='SomeVersion',\n suffix='Suffix',\n )\n\n Note that SomeVersion and Suffix are optional and may be None\n if not present\n ", "language": "en", "n_whitespaces": 159, "n_words": 46, "vocab_size": 42 }
def framework_info(filename): is_framework = STRICT_FRAMEWORK_RE.match(filename) if not is_framework: return None return is_framework.groupdict()
13,090
62,994
26
.venv/lib/python3.8/site-packages/pip/_vendor/pep517/in_process/_in_process.py
14
8
def contained_in(filename, directory): filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([f
upd; format
contained_in
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
_in_process.py
11
4
https://github.com/jindongwang/transferlearning.git
1
57
0
12
91
Python
{ "docstring": "Test if a file is located within the given directory.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def contained_in(filename, directory): filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory
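The helper above is self-contained, so a usage sketch needs only the standard library; the paths below are made up.

    import os

    def contained_in(filename, directory):
        filename = os.path.normcase(os.path.abspath(filename))
        directory = os.path.normcase(os.path.abspath(directory))
        return os.path.commonprefix([filename, directory]) == directory

    print(contained_in("/tmp/build/pkg/setup.py", "/tmp/build"))  # True
    print(contained_in("/etc/passwd", "/tmp/build"))              # False

Note that os.path.commonprefix compares character by character, so a sibling path that merely shares a textual prefix (e.g. /tmp/build2/x against /tmp/build) would also report True; that caveat belongs to the original helper, not to this sketch.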
6,193
34,064
25
src/transformers/activations_tf.py
17
12
def glu(x, axis=-1): a, b = tf.split(x, 2, axis=axis) return a *
add TF glu activation function (#15146)
glu
c4f7eb124b218741d66dd1d86b5d744024a78f6f
transformers
activations_tf.py
9
3
https://github.com/huggingface/transformers.git
1
38
0
17
92
Python
{ "docstring": "\n Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where\n the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B).\n\n Args:\n `x`: float Tensor to perform activation\n `axis`: dimension across which `x` be split in half\n\n Returns:\n `x` with the GLU activation applied (with its size halved across the dimension `axis`).\n ", "language": "en", "n_whitespaces": 100, "n_words": 63, "vocab_size": 49 }
def glu(x, axis=-1): a, b = tf.split(x, 2, axis=axis) return a * tf.math.sigmoid(b) if version.parse(tf.version.VERSION) >= version.parse("2.4"):
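A minimal TensorFlow sketch of the GLU behaviour described in the docstring: split the chosen axis in half and gate one half with the sigmoid of the other. The input shape is illustrative.

    import tensorflow as tf

    x = tf.random.normal((2, 6))
    a, b = tf.split(x, 2, axis=-1)   # two halves of the last axis
    out = a * tf.math.sigmoid(b)     # A * sigmoid(B)
    print(out.shape)                 # (2, 3): the gated axis is halved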
22,471
106,848
624
py/visdom/__init__.py
149
41
def matplot(self, plot, opts=None, env=None, win=None): opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) # write plot to SVG buffer: buffer = StringIO() plot.savefig(buffer, format="svg") buffer.seek(0) svg = buffer.read() buffer.close() if opts.get("resizable", False): if not BS4_AVAILABLE: raise ImportError("No module named 'bs4'") else: try: soup = bs4.BeautifulSoup(svg, "x
apply black py to all python files
matplot
5b8b7f267cfaf76a2a39a727ef31a62b3909a093
visdom
__init__.py
18
39
https://github.com/fossasia/visdom.git
13
329
0
88
543
Python
{ "docstring": "\n This function draws a Matplotlib `plot`. The function supports\n one plot-specific option: `resizable`. When set to `True` the plot\n is resized with the pane. You need `beautifulsoup4` and `lxml`\n packages installed to use this option.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 32 }
def matplot(self, plot, opts=None, env=None, win=None): opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) # write plot to SVG buffer: buffer = StringIO() plot.savefig(buffer, format="svg") buffer.seek(0) svg = buffer.read() buffer.close() if opts.get("resizable", False): if not BS4_AVAILABLE: raise ImportError("No module named 'bs4'") else: try: soup = bs4.BeautifulSoup(svg, "xml") except bs4.FeatureNotFound as e: import six six.raise_from(ImportError("No module named 'lxml'"), e) height = soup.svg.attrs.pop("height", None) width = soup.svg.attrs.pop("width", None) svg = str(soup) else: height = None width = None # show SVG: if "height" not in opts: height = height or re.search(r'height\="([0-9\.]*)pt"', svg) if height is not None: if not isstr(height): height = height.group(1) height = height.replace("pt", "00") opts["height"] = 1.4 * int(math.ceil(float(height))) if "width" not in opts: width = width or re.search(r'width\="([0-9\.]*)pt"', svg) if width is not None: if not isstr(width): width = width.group(1) width = width.replace("pt", "00") opts["width"] = 1.35 * int(math.ceil(float(width))) return self.svg(svgstr=svg, opts=opts, env=env, win=win)
54,514
216,309
243
salt/modules/consul.py
89
14
def acl_clone(consul_url=None, token=None, **kwargs): ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error("No Consul URL found.") ret["message"] = "No Consul URL found." ret["res"] = False return ret if "id" not in kwargs: ret["message"] = 'Required parameter "id" is missing.' ret["res"] = False return ret function = "acl/clone/{}".format(kwargs["id"]) res = _query( consul_url=consul_url, token=token, data=data, method="PUT", function=function ) if res["res"]: ret["res"] = True ret["message"] = "ACL {} cloned.".format(kwargs["name"]) ret["ID"] = res["data"] else: ret["res"] = False ret["message"] = "Cloning ACL item {} failed.".format(kwargs["name"]) return ret
fix(consul): serialize to JSON only non string objects. Fixes 35215
acl_clone
50a17432015fb712ec4dc7d3ead79e8939e2bf96
salt
consul.py
13
26
https://github.com/saltstack/salt.git
5
170
0
51
306
Python
{ "docstring": "\n Information about an ACL token.\n\n :param consul_url: The Consul server URL.\n :param id: Unique identifier for the ACL to update.\n :return: Boolean, message of success or\n failure, and new ID of cloned ACL.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'\n\n ", "language": "en", "n_whitespaces": 83, "n_words": 42, "vocab_size": 39 }
def acl_clone(consul_url=None, token=None, **kwargs): ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error("No Consul URL found.") ret["message"] = "No Consul URL found." ret["res"] = False return ret if "id" not in kwargs: ret["message"] = 'Required parameter "id" is missing.' ret["res"] = False return ret function = "acl/clone/{}".format(kwargs["id"]) res = _query( consul_url=consul_url, token=token, data=data, method="PUT", function=function ) if res["res"]: ret["res"] = True ret["message"] = "ACL {} cloned.".format(kwargs["name"]) ret["ID"] = res["data"] else: ret["res"] = False ret["message"] = "Cloning ACL item {} failed.".format(kwargs["name"]) return ret
40,253
168,242
178
pandas/core/indexes/datetimes.py
81
14
def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default): self._deprecated_arg(kind, "kind", "slice_indexer") # For historical reasons DatetimeIndex supports slices between two # instances of datetime.time as if it were applying a slice mask to # an
PERF cache find_stack_level (#48023) cache stacklevel
slice_indexer
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
datetimes.py
12
38
https://github.com/pandas-dev/pandas.git
14
269
0
63
149
Python
{ "docstring": "\n Return indexer for specified label slice.\n Index.slice_indexer, customized to handle time slicing.\n\n In addition to functionality provided by Index.slice_indexer, does the\n following:\n\n - if both `start` and `end` are instances of `datetime.time`, it\n invokes `indexer_between_time`\n - if `start` and `end` are both either string or None perform\n value-based selection in non-monotonic cases.\n\n ", "language": "en", "n_whitespaces": 120, "n_words": 52, "vocab_size": 43 }
def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default): self._deprecated_arg(kind, "kind", "slice_indexer") # For historical reasons DatetimeIndex supports slices between two # instances of datetime.time as if it were applying a slice mask to # an array of (self.hour, self.minute, self.seconds, self.microsecond). if isinstance(start, time) and isinstance(end, time): if step is not None and step != 1: raise ValueError("Must have step size of 1 with time slices") return self.indexer_between_time(start, end) if isinstance(start, time) or isinstance(end, time): raise KeyError("Cannot mix time and non-time slice keys")
34,618
149,967
120
freqtrade/persistence/migrations.py
14
6
def fix_old_dry_orders(engine):
Cleanup old, left open dry-run orders
fix_old_dry_orders
c0ff554d5be871098cd10424fdd579322b5370df
freqtrade
migrations.py
12
27
https://github.com/freqtrade/freqtrade.git
1
32
0
9
61
Python
{ "docstring": "\n update orders\n set ft_is_open = 0\n where ft_is_open = 1 and (ft_trade_id, order_id) not in (\n select id, stoploss_order_id from trades where stoploss_order_id is not null\n ) and ft_order_side = 'stoploss'\n and order_id like 'dry_%'\n \n update orders\n set ft_is_open = 0\n where ft_is_open = 1\n and (ft_trade_id, order_id) not in (\n select id, open_order_id from trades where open_order_id is not null\n ) and ft_order_side != 'stoploss'\n and order_id like 'dry_%'\n ", "language": "en", "n_whitespaces": 305, "n_words": 70, "vocab_size": 29 }
def fix_old_dry_orders(engine): with engine.begin() as connection: connection.execute( text( ) ) connection.execute( text( ) )
73,833
251,829
104
test/mitmproxy/proxy/layers/http/hyper_h2_test_helpers.py
33
11
def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0): flags = set(flags) if flags is not None else set() f = DataFrame(stream_id) f.data = data f.flags = flags if padding_len: flags.add("PADDED") f.pad_length = padding_len return f
make it black!
build_data_frame
b3587b52b25077f68116b9852b041d33e7fc6601
mitmproxy
hyper_h2_test_helpers.py
10
9
https://github.com/mitmproxy/mitmproxy.git
3
67
0
25
107
Python
{ "docstring": "\n Builds a single data frame out of a chunk of data.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 9 }
def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0): flags = set(flags) if flags is not None else set() f = DataFrame(stream_id) f.data = data f.flags = flags if padding_len: flags.add("PADDED") f.pad_length = padding_len return f
2,421
12,837
106
jina/parsers/dryrun.py
29
9
def set_dryrun_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument( 'host', type=str, help='The full host address of the Gateway, e.g. grpc://localhost:12345', ) parser.add_argument( '--timeout', type=int, default=3000, help=, ) return parser
feat: add dryrun to cli (#5050) * feat: add dryrun to cli * style: fix overload and cli autocomplete * feat: add parsing for dryrun * feat: update checker dryrun * docs: add dryrun cli to healt check page * style: fix overload and cli autocomplete * feat: add exit Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
set_dryrun_parser
124045351137d80d118f9692de4295d50561f1e1
jina
dryrun.py
10
18
https://github.com/jina-ai/jina.git
2
53
0
26
90
Python
{ "docstring": "Set the parser for `dryrun`\n\n :param parser: an existing parser to build upon\n :return: the parser\n \nTimeout in millisecond of one check\n-1 for waiting forever\n", "language": "en", "n_whitespaces": 33, "n_words": 26, "vocab_size": 22 }
def set_dryrun_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument( 'host', type=str, help='The full host address of the Gateway, e.g. grpc://localhost:12345', ) parser.add_argument( '--timeout', type=int, default=3000, help=, ) return parser
12,521
61,339
202
.venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py
83
19
def wheel_dist_info_dir(source, name): # type: (ZipFile, str) -> str # Zip file path separators must be / subdirs = {p.split("/", 1)[0] for p in source.namelist()} info_dirs = [s for s in subdirs if s.endswith(".dist-info")] if not info_dirs: raise UnsupportedWheel(".dist-info directory not found") if len(info_dirs) > 1: raise UnsupportedWheel( "multiple .dist-info directories found: {}".format(", ".join(info_dirs)) ) info_dir = info_dirs[0] info_dir_name = canonicalize_name(info_dir) canonical_name = canonicalize_name(name) if not info_dir_name.startswith(canonical_name): raise Unsupported
upd; format
wheel_dist_info_dir
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
wheel.py
14
19
https://github.com/jindongwang/transferlearning.git
7
120
0
61
204
Python
{ "docstring": "Returns the name of the contained .dist-info directory.\n\n Raises AssertionError or UnsupportedWheel if not found, >1 found, or\n it doesn't match the provided name.\n ", "language": "en", "n_whitespaces": 33, "n_words": 24, "vocab_size": 20 }
def wheel_dist_info_dir(source, name): # type: (ZipFile, str) -> str # Zip file path separators must be / subdirs = {p.split("/", 1)[0] for p in source.namelist()} info_dirs = [s for s in subdirs if s.endswith(".dist-info")] if not info_dirs: raise UnsupportedWheel(".dist-info directory not found") if len(info_dirs) > 1: raise UnsupportedWheel( "multiple .dist-info directories found: {}".format(", ".join(info_dirs)) ) info_dir = info_dirs[0] info_dir_name = canonicalize_name(info_dir) canonical_name = canonicalize_name(name) if not info_dir_name.startswith(canonical_name): raise UnsupportedWheel( ".dist-info directory {!r} does not start with {!r}".format( info_dir, canonical_name ) ) return info_dir
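A self-contained sketch of the same lookup run against an in-memory wheel. It reuses packaging's canonicalize_name (pip vendors the same helper) and collapses the error handling into assertions, so it is an illustration of the idea rather than pip's implementation; the package names are hypothetical.

    import io
    import zipfile
    from packaging.utils import canonicalize_name

    def find_dist_info_dir(source, name):
        # Zip paths always use "/" separators.
        subdirs = {p.split("/", 1)[0] for p in source.namelist()}
        info_dirs = [s for s in subdirs if s.endswith(".dist-info")]
        assert len(info_dirs) == 1, info_dirs
        info_dir = info_dirs[0]
        assert canonicalize_name(info_dir).startswith(canonicalize_name(name))
        return info_dir

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("demo_pkg-1.0.dist-info/METADATA", "Name: demo-pkg\n")
        zf.writestr("demo_pkg/__init__.py", "")
    print(find_dist_info_dir(zipfile.ZipFile(buf), "demo-pkg"))  # demo_pkg-1.0.dist-info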
38,667
160,611
976
numpy/lib/arraysetops.py
367
51
def in1d(ar1, ar2, assume_unique=False, invert=False): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size
MAINT: Optimize np.isin for integer arrays - This optimization indexes with an intermediary boolean array to speed up numpy.isin and numpy.in1d for integer arrays over a range of optimal parameters which are calculated.
in1d
cedba623b110caf83f46edfa38cb4fbc0191e285
numpy
arraysetops.py
17
59
https://github.com/numpy/numpy.git
16
504
0
205
792
Python
{ "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "language": "en", "n_whitespaces": 577, "n_words": 303, "vocab_size": 181 }
def in1d(ar1, ar2, assume_unique=False, invert=False): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if integer_arrays: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = ar2_max - ar2_min ar2_size = ar2.size # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927, see discussion on # https://github.com/numpy/numpy/pull/12065 optimal_parameters = ( np.log10(ar2_size + 1) > ((np.log10(ar2_range + 1) - 2.27) / 0.927) ) if optimal_parameters: if invert: outgoing_array = np.ones_like(ar1, dtype=np.bool_) else: outgoing_array = np.zeros_like(ar1, dtype=np.bool_) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx]
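The docstring's own example, reproduced as a runnable snippet showing the membership mask and its inverse.

    import numpy as np

    test = np.array([0, 1, 2, 5, 0])
    states = [0, 2]
    mask = np.in1d(test, states)
    print(mask)                                 # [ True False  True False  True]
    print(test[mask])                           # [0 2 0]
    print(np.in1d(test, states, invert=True))   # [False  True False  True False]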
42,201
176,973
55
networkx/algorithms/centrality/degree_alg.py
33
8
def out_degree_centrality(G): if len(G) <= 1: return {n: 1 for n in G} s = 1.0 / (len(G) - 1.0) centrality = {n: d * s for n, d in G.out_degree()} return centralit
added examples to degree_alg.py (#5644) * added example on degree centrality * added example on in degree centrality * added example on out degree centrality * added opening braces
out_degree_centrality
b8d1438e4ea3d8190c650110b3b7d7c141224842
networkx
degree_alg.py
11
6
https://github.com/networkx/networkx.git
4
61
0
25
91
Python
{ "docstring": "Compute the out-degree centrality for nodes.\n\n The out-degree centrality for a node v is the fraction of nodes its\n outgoing edges are connected to.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with out-degree centrality as values.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is undirected.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> nx.out_degree_centrality(G)\n {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}\n\n See Also\n --------\n degree_centrality, in_degree_centrality\n\n Notes\n -----\n The degree centrality values are normalized by dividing by the maximum\n possible degree in a simple graph n-1 where n is the number of nodes in G.\n\n For multigraphs or graphs with self loops the maximum degree might\n be higher than n-1 and values of degree centrality greater than 1\n are possible.\n ", "language": "en", "n_whitespaces": 238, "n_words": 136, "vocab_size": 93 }
def out_degree_centrality(G): if len(G) <= 1: return {n: 1 for n in G} s = 1.0 / (len(G) - 1.0) centrality = {n: d * s for n, d in G.out_degree()} return centrality
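The docstring example, runnable as-is.

    import networkx as nx

    G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    print(nx.out_degree_centrality(G))
    # {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}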
67,386
235,950
42
packages/python/plotly/plotly/tests/test_optional/test_offline/test_offline.py
14
8
def _read_html(self, file_url): with open(file_url.replace("file://", "").replace(" ", "")) as f: return f.read() if matplotlylib:
switch to black .22
_read_html
43e3a4011080911901176aab919c0ecf5046ddd3
plotly.py
test_offline.py
15
3
https://github.com/plotly/plotly.py.git
1
36
0
14
74
Python
{ "docstring": "Read and return the HTML contents from a file_url in the\n form e.g. file:///Users/chriddyp/Repos/plotly.py/plotly-temp.html\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 13 }
def _read_html(self, file_url): with open(file_url.replace("file://", "").replace(" ", "")) as f: return f.read() if matplotlylib:
30,974
136,708
260
python/ray/_private/utils.py
129
22
def set_omp_num_threads_if_unset() -> bool: num_threads_from_env = os.environ.get("OMP_NUM_THREADS") if num_threads_from_env is not None: # No ops if it's set return False # If unset, try setting the correct CPU count assigned. runtime_ctx = ray.get_runtime_context() if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE: # Non worker mode, no ops. return False num_assigned_cpus = runtime_ctx.get_assigned_resources().get("CPU") if num_assigned_cpus is None: # This is an acto
[core] Set OMP_NUM_THREADS to `num_cpus` required by task/actors by default (#30496) Ray currently sets OMP_NUM_THREADS=1 when the environ variable is not set. This PR: Sets OMP_NUM_THREADS to the number of cpus assigned to the worker that runs a task before running, and reset it after running. If num_cpus is a fractional smaller than 1, it will set OMP_NUM_THREADS to 1. Doesn't override OMP_NUM_THREADS if it's already being specified in runtime env or through os.environ. Signed-off-by: Ricky Xu <xuchen727@hotmail.com> Co-authored-by: Eric Liang <ekhliang@gmail.com> Co-authored-by: Simon Mo <simon.mo@hey.com>
set_omp_num_threads_if_unset
7c8859f1428224710e4c2db2abf0d9ec28536301
ray
utils.py
11
27
https://github.com/ray-project/ray.git
4
105
0
94
189
Python
{ "docstring": "Set the OMP_NUM_THREADS to default to num cpus assigned to the worker\n\n This function sets the environment variable OMP_NUM_THREADS for the worker,\n if the env is not previously set and it's running in worker (WORKER_MODE).\n\n Returns True if OMP_NUM_THREADS is set in this function.\n\n ", "language": "en", "n_whitespaces": 56, "n_words": 44, "vocab_size": 31 }
def set_omp_num_threads_if_unset() -> bool: num_threads_from_env = os.environ.get("OMP_NUM_THREADS") if num_threads_from_env is not None: # No ops if it's set return False # If unset, try setting the correct CPU count assigned. runtime_ctx = ray.get_runtime_context() if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE: # Non worker mode, no ops. return False num_assigned_cpus = runtime_ctx.get_assigned_resources().get("CPU") if num_assigned_cpus is None: # This is an actor task w/o any num_cpus specified, set it to 1 logger.debug( "[ray] Forcing OMP_NUM_THREADS=1 to avoid performance " "degradation with many workers (issue #6998). You can override this " "by explicitly setting OMP_NUM_THREADS, or changing num_cpus." ) num_assigned_cpus = 1 import math # For num_cpu < 1: Set to 1. # For num_cpus >= 1: Set to the floor of the actual assigned cpus. omp_num_threads = max(math.floor(num_assigned_cpus), 1) os.environ["OMP_NUM_THREADS"] = str(omp_num_threads) return True
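A hedged sketch of the core decision above with Ray's runtime context stubbed out as a plain argument, to show how the assigned CPU count maps onto OMP_NUM_THREADS.

    import math
    import os

    def set_omp_num_threads_if_unset(num_assigned_cpus=None):
        if os.environ.get("OMP_NUM_THREADS") is not None:
            return False                      # respect an explicit setting
        if num_assigned_cpus is None:
            num_assigned_cpus = 1             # no num_cpus on the task/actor
        # Fractional CPUs below 1 still get one OpenMP thread.
        os.environ["OMP_NUM_THREADS"] = str(max(math.floor(num_assigned_cpus), 1))
        return True

    print(set_omp_num_threads_if_unset(2.5), os.environ.get("OMP_NUM_THREADS"))
    # True 2 (when OMP_NUM_THREADS was not already set in the environment)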
88,292
289,145
97
tests/components/homekit/test_type_sensors.py
43
21
async def test_binary_device_classes(hass, hk_driver): entity_id = "binary_sensor.demo" aid = 1 for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items(): hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class}) await hass.async_block_till_done() aid += 1 acc = BinarySensor(hass, hk_driver, "Binary Sensor", entity_id, aid, None) assert acc.get_service(
Add support for restoring HomeKit IIDs (#79913)
test_binary_device_classes
3b33e0d832b238b40360383099391e2093ea05cb
core
test_type_sensors.py
11
10
https://github.com/home-assistant/core.git
2
91
0
37
142
Python
{ "docstring": "Test if services and characteristics are assigned correctly.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
async def test_binary_device_classes(hass, hk_driver): entity_id = "binary_sensor.demo" aid = 1 for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items(): hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class}) await hass.async_block_till_done() aid += 1 acc = BinarySensor(hass, hk_driver, "Binary Sensor", entity_id, aid, None) assert acc.get_service(service).display_name == service assert acc.char_detected.display_name == char
55,002
217,902
144
python3.10.4/Lib/imaplib.py
76
24
def Internaldate2tuple(resp): mo = InternalDate.match(resp) if not mo: return
add python 3.10.4 for windows
Internaldate2tuple
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
imaplib.py
11
19
https://github.com/XX-net/XX-Net.git
3
178
0
57
302
Python
{ "docstring": "Parse an IMAP4 INTERNALDATE string.\n\n Return corresponding local time. The return value is a\n time.struct_time tuple or None if the string has wrong format.\n ", "language": "en", "n_whitespaces": 34, "n_words": 24, "vocab_size": 24 }
def Internaldate2tuple(resp): mo = InternalDate.match(resp) if not mo: return None mon = Mon2num[mo.group('mon')] zonen = mo.group('zonen') day = int(mo.group('day')) year = int(mo.group('year')) hour = int(mo.group('hour')) min = int(mo.group('min')) sec = int(mo.group('sec')) zoneh = int(mo.group('zoneh')) zonem = int(mo.group('zonem')) # INTERNALDATE timezone must be subtracted to get UT zone = (zoneh*60 + zonem)*60 if zonen == b'-': zone = -zone tt = (year, mon, day, hour, min, sec, -1, -1, -1) utc = calendar.timegm(tt) - zone return time.localtime(utc)
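A hedged usage sketch: Internaldate2tuple parses an RFC 3501 INTERNALDATE string out of a fetch response and returns local time. The byte string below is a made-up example of that response format.

    import imaplib
    import time

    resp = b'25 (INTERNALDATE "17-Jul-1996 02:44:25 -0700")'
    tt = imaplib.Internaldate2tuple(resp)       # time.struct_time in local time, or None
    print(time.strftime("%Y-%m-%d %H:%M:%S", tt))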
16,319
74,795
69
wagtail/documents/tests/test_admin_views.py
16
11
def test_delete_get(self): # Send request response = self.client.get( reverse("wagtaildocs:delete_multiple", args=(self.doc.id,)) ) # Check response self.assertEqual(response.status_code, 405)
Reformat with black
test_delete_get
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_admin_views.py
14
5
https://github.com/wagtail/wagtail.git
1
40
0
14
68
Python
{ "docstring": "\n This tests that a GET request to the delete view returns a 405 \"METHOD NOT ALLOWED\" response\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
def test_delete_get(self): # Send request response = self.client.get( reverse("wagtaildocs:delete_multiple", args=(self.doc.id,)) ) # Check response self.assertEqual(response.status_code, 405)
17,733
83,840
141
zerver/tests/test_subs.py
22
15
def test_stream_admin_remove_others_from_public_stream(self) -> None: result = self.attempt_unsubscribe_of_principal( query_count=15, target_users=[self.example_user("cordelia")], is_realm_admin=False, is_stream_admin=True, is_subbed=True, invite_only=False, target_users_subbed=True, ) json = self.assert_json_success(result) self.assert_length(json["re
message_flags: Short-circuit if no messages changed. Omit sending an event, and updating the database, if there are no matching messages.
test_stream_admin_remove_others_from_public_stream
803982e87254e3b1ebcb16ed795e224afceea3a3
zulip
test_subs.py
13
16
https://github.com/zulip/zulip.git
1
80
0
21
125
Python
{ "docstring": "\n You can remove others from public streams you're a stream administrator of.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
def test_stream_admin_remove_others_from_public_stream(self) -> None: result = self.attempt_unsubscribe_of_principal( query_count=15, target_users=[self.example_user("cordelia")], is_realm_admin=False, is_stream_admin=True, is_subbed=True, invite_only=False, target_users_subbed=True, ) json = self.assert_json_success(result) self.assert_length(json["removed"], 1) self.assert_length(json["not_removed"], 0)
70,422
244,539
724
mmdet/datasets/pipelines/transforms.py
201
14
def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right':
Refactor RandomCrop and SegRescale
_mosaic_combine
c407e970a8ee2544f27d1c233855a31129dca158
mmdetection
transforms.py
15
36
https://github.com/open-mmlab/mmdetection.git
4
406
0
71
562
Python
{ "docstring": "Calculate global coordinate of mosaic image and local coordinate of\n cropped sub-image.\n\n Args:\n loc (str): Index for the sub-image, loc in ('top_left',\n 'top_right', 'bottom_left', 'bottom_right').\n center_position_xy (Sequence[float]): Mixing center for 4 images,\n (x, y).\n img_shape_wh (Sequence[int]): Width and height of sub-image\n\n Returns:\n tuple[tuple[float]]: Corresponding coordinate of pasting and\n cropping\n - paste_coord (tuple): paste corner coordinate in mosaic image.\n - crop_coord (tuple): crop corner coordinate in mosaic image.\n ", "language": "en", "n_whitespaces": 212, "n_words": 67, "vocab_size": 48 }
def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right': # index1 to top right part of image x1, y1, x2, y2 = center_position_xy[0], \ max(center_position_xy[1] - img_shape_wh[1], 0), \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ center_position_xy[1] crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( img_shape_wh[0], x2 - x1), img_shape_wh[1] elif loc == 'bottom_left': # index2 to bottom left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ center_position_xy[1], \ center_position_xy[0], \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( y2 - y1, img_shape_wh[1]) else: # index3 to bottom right part of image x1, y1, x2, y2 = center_position_xy[0], \ center_position_xy[1], \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = 0, 0, min(img_shape_wh[0], x2 - x1), min(y2 - y1, img_shape_wh[1]) paste_coord = x1, y1, x2, y2 return paste_coord, crop_coord
47,857
196,357
517
sympy/matrices/common.py
164
25
def permute(self, perm, orientation='rows', direction='forward'): r from sympy.combinatorics import Permutation # allow british variants and `columns` if direction == 'forwards': direction = 'forward' if direction == 'backwards': direction = 'backward' if orientation == 'columns': orientation = 'cols' if direction not in ('forward', 'backward'): raise TypeError("direction='{}' is an invalid kwarg. " "Try 'forward' or 'backward'".format(direction)) if orientation not in ('rows', 'cols'): raise TypeError("orientation='{}' is an invalid kwarg. " "Try 'rows' or 'cols'".format(orientation)) if not isinstance(perm, (Permutation, Iterable)): raise ValueError( "{} must be a list, a list of lists, " "or a SymPy permutation object.".format(perm)) # ensure all swaps are in range max_index = self.rows if orientation == 'rows' else self.cols if not all(0 <= t <= max_index for t in flatten(list(perm))): raise IndexError("`swap` indices out of range.") if perm and not isinstance(perm, Permutation) and \ isinstance(perm[0], Iterable): if direction == 'forward': perm = list(reversed(perm)) perm = Permutation(perm, size=max_index+1) else: perm = Permutation(perm, size=max_index+1) if orientation == 'rows': return self._eval_permute_rows(perm) if orientation == 'cols': return self._eval_permute_cols(perm)
Moved imports to higher level
permute
59d22b6bb7287613d598611027f640d068ca5748
sympy
common.py
14
128
https://github.com/sympy/sympy.git
16
238
0
95
413
Python
{ "docstring": "Permute the rows or columns of a matrix by the given list of\n swaps.\n\n Parameters\n ==========\n\n perm : Permutation, list, or list of lists\n A representation for the permutation.\n\n If it is ``Permutation``, it is used directly with some\n resizing with respect to the matrix size.\n\n If it is specified as list of lists,\n (e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed\n from applying the product of cycles. The direction how the\n cyclic product is applied is described in below.\n\n If it is specified as a list, the list should represent\n an array form of a permutation. (e.g., ``[1, 2, 0]``) which\n would would form the swapping function\n `0 \\mapsto 1, 1 \\mapsto 2, 2\\mapsto 0`.\n\n orientation : 'rows', 'cols'\n A flag to control whether to permute the rows or the columns\n\n direction : 'forward', 'backward'\n A flag to control whether to apply the permutations from\n the start of the list first, or from the back of the list\n first.\n\n For example, if the permutation specification is\n ``[[0, 1], [0, 2]]``,\n\n If the flag is set to ``'forward'``, the cycle would be\n formed as `0 \\mapsto 2, 2 \\mapsto 1, 1 \\mapsto 0`.\n\n If the flag is set to ``'backward'``, the cycle would be\n formed as `0 \\mapsto 1, 1 \\mapsto 2, 2 \\mapsto 0`.\n\n If the argument ``perm`` is not in a form of list of lists,\n this flag takes no effect.\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward')\n Matrix([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]])\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward')\n Matrix([\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0]])\n\n Notes\n =====\n\n If a bijective function\n `\\sigma : \\mathbb{N}_0 \\rightarrow \\mathbb{N}_0` denotes the\n permutation.\n\n If the matrix `A` is the matrix to permute, represented as\n a horizontal or a vertical stack of vectors:\n\n .. math::\n A =\n \\begin{bmatrix}\n a_0 \\\\ a_1 \\\\ \\vdots \\\\ a_{n-1}\n \\end{bmatrix} =\n \\begin{bmatrix}\n \\alpha_0 & \\alpha_1 & \\cdots & \\alpha_{n-1}\n \\end{bmatrix}\n\n If the matrix `B` is the result, the permutation of matrix rows\n is defined as:\n\n .. math::\n B := \\begin{bmatrix}\n a_{\\sigma(0)} \\\\ a_{\\sigma(1)} \\\\ \\vdots \\\\ a_{\\sigma(n-1)}\n \\end{bmatrix}\n\n And the permutation of matrix columns is defined as:\n\n .. math::\n B := \\begin{bmatrix}\n \\alpha_{\\sigma(0)} & \\alpha_{\\sigma(1)} &\n \\cdots & \\alpha_{\\sigma(n-1)}\n \\end{bmatrix}\n ", "language": "en", "n_whitespaces": 1054, "n_words": 395, "vocab_size": 170 }
def permute(self, perm, orientation='rows', direction='forward'): r from sympy.combinatorics import Permutation # allow british variants and `columns` if direction == 'forwards': direction = 'forward' if direction == 'backwards': direction = 'backward' if orientation == 'columns': orientation = 'cols' if direction not in ('forward', 'backward'): raise TypeError("direction='{}' is an invalid kwarg. " "Try 'forward' or 'backward'".format(direction)) if orientation not in ('rows', 'cols'): raise TypeError("orientation='{}' is an invalid kwarg. " "Try 'rows' or 'cols'".format(orientation)) if not isinstance(perm, (Permutation, Iterable)): raise ValueError( "{} must be a list, a list of lists, " "or a SymPy permutation object.".format(perm)) # ensure all swaps are in range max_index = self.rows if orientation == 'rows' else self.cols if not all(0 <= t <= max_index for t in flatten(list(perm))): raise IndexError("`swap` indices out of range.") if perm and not isinstance(perm, Permutation) and \ isinstance(perm[0], Iterable): if direction == 'forward': perm = list(reversed(perm)) perm = Permutation(perm, size=max_index+1) else: perm = Permutation(perm, size=max_index+1) if orientation == 'rows': return self._eval_permute_rows(perm) if orientation == 'cols': return self._eval_permute_cols(perm)
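One of the docstring's own examples, runnable as-is.

    from sympy import eye

    M = eye(3)
    print(M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward'))
    # Matrix([[0, 0, 1], [1, 0, 0], [0, 1, 0]])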
7,482
42,082
31
seaborn/axisgrid.py
10
5
def apply(self, func, *args, **kwargs):
Add apply and pipe methods to Grid objects for fluent customization (#2928) * Return self from tight_layout and refline * Add apply and pipe methods to FacetGrid for fluent customization * Move apply/pipe down to base class so JointGrid/PaiGrid get them too * Tweak docstrings
apply
949dec3666ab12a366d2fc05ef18d6e90625b5fa
seaborn
axisgrid.py
8
3
https://github.com/mwaskom/seaborn.git
1
26
0
9
41
Python
{ "docstring": "\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n ", "language": "en", "n_whitespaces": 103, "n_words": 53, "vocab_size": 43 }
def apply(self, func, *args, **kwargs): func(self, *args, **kwargs) return self
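A short sketch of the fluent customization the docstring describes (added in seaborn v0.12.0); the tiny DataFrame is illustrative.

    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns

    df = pd.DataFrame({"x": [1, 2, 3, 4], "grp": ["a", "a", "b", "b"]})
    g = sns.FacetGrid(df, col="grp")
    g.map(plt.hist, "x")
    # apply() hands the grid itself to the callable and returns the grid,
    # so tweaks can be chained without breaking the fluent style.
    g.apply(lambda grid: grid.figure.suptitle("Histogram per group"))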
17,256
81,766
521
awx/main/utils/common.py
110
27
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): for field_name in fields: if hasattr(obj1, field_name): try: field_obj = obj1._meta.get_field(field_name) except FieldDoesNotExist: continue if isinstance(field_obj, ManyToManyField): # Many to Many can be specified as field_name src_field_value = getattr(obj1, field_name) if kwargs and field_name in kwargs: override_field_val = kwargs[field_name] # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order? if field_name == 'instance_groups': # instance_groups are a list but we need to preserve the order for ig_id in override_field_val: getattr(obj2, field_name).add(ig_id) continue
JT param everything (#12646) * Making almost all fields promptable on job templates and config models * Adding EE, IG and label access checks * Changing jobs preferred instance group function to handle the new IG cache field * Adding new ask fields to job template modules * Address unit/functional tests * Adding migration file
copy_m2m_relationships
33c0fb79d66f56374d7c042ba79887faa85e2885
awx
common.py
21
22
https://github.com/ansible/awx.git
11
164
0
77
263
Python
{ "docstring": "\n In-place operation.\n Given two saved objects, copies related objects from obj1\n to obj2 to field of same name, if field occurs in `fields`\n ", "language": "en", "n_whitespaces": 36, "n_words": 23, "vocab_size": 21 }
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): for field_name in fields: if hasattr(obj1, field_name): try: field_obj = obj1._meta.get_field(field_name) except FieldDoesNotExist: continue if isinstance(field_obj, ManyToManyField): # Many to Many can be specified as field_name src_field_value = getattr(obj1, field_name) if kwargs and field_name in kwargs: override_field_val = kwargs[field_name] # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order? if field_name == 'instance_groups': # instance_groups are a list but we need to preserve the order for ig_id in override_field_val: getattr(obj2, field_name).add(ig_id) continue if isinstance(override_field_val, (set, list, QuerySet)): getattr(obj2, field_name).add(*override_field_val) continue if override_field_val.__class__.__name__ == 'ManyRelatedManager': src_field_value = override_field_val dest_field = getattr(obj2, field_name) dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
42,923
179,238
1,541
gradio/external.py
440
43
def load_from_pipeline(pipeline): try: import transformers except ImportError: raise ImportError( "transformers not installed. Please try `pip install transformers`" ) if not isinstance(pipeline, transformers.Pipeline): raise ValueError("pipeline must be a transformers.Pipeline") # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the # version of the transformers library that the user has installed. if hasattr(transformers, "AudioClassificationPipeline") and isinstance( pipeline, transformers.AudioClassificationPipeline ): pipeline_info = { "inputs": inputs.Audio(label="Input", source="microphone", type="filepath"), "outputs": outputs.Label(label="Class", type="confidences"), "preprocess": lambda i: {"inputs": i}, "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}, } elif hasattr(transformers, "AutomaticSpeechRecognitionPipeline") and isinstance( pipeline, transformers.AutomaticSpeechRecognitionPipeline ): pipeline_info = { "inputs": inputs.Audio(label="Input", source="microphone", type="filepath"), "outputs": outputs.Textbox(label="Output"), "preprocess": lambda i: {"inputs": i}, "postprocess": lambda r: r["text"], } elif hasattr(transformers, "FeatureExtractionPipeline") and isinstance( pipeline, transformers.FeatureExtractionPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Dataframe(label="Output"), "preprocess": lambda x: {"inputs": x}, "postprocess": lambda r: r[0], } elif hasattr(transformers, "FillMaskPipeline") and isinstance( pipeline, transformers.FillMaskPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Label(label="Classification", type="confidences"), "preprocess": lambda x: {"inputs": x}, "postprocess": lambda r: {i["token_str"]: i["score"] for i in r}, } elif hasattr(transformers, "ImageClassificationPipeline") and isinstance( pipeline, transformers.ImageClassificationPipeline ): pipeline_info = { "inputs": inputs.Image(label="Input Image", type="filepath"), "outputs": outputs.Label(label="Classification", type="confidences"), "preprocess": lambda i: {"images": i}, "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}, } elif hasattr(transformers, "QuestionAnsweringPipeline") and isinstance( pipeline, transformers.QuestionAnsweringPipeline ): pipeline_info = { "inputs": [ inputs.Textbox(label="Context", lines=7), inputs.Textbox(label="Question"), ], "outputs": [outputs.Textbox(label="Answer"), outputs.Label(label="Score")], "preprocess": lambda c, q: {"context": c, "question": q}, "postprocess": lambda r: (r["answer"], r["score"]), } elif hasattr(transformers, "SummarizationPipeline") and isinstance( pipeline, transformers.SummarizationPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input", lines=7), "outputs": outputs.Textbox(label="Summary"), "preprocess": lambda x: {"inputs": x}, "postprocess": lambda r: r[0]["summary_text"], } elif hasattr(transformers, "TextClassificationPipeline") and isinstance( pipeline, transformers.TextClassificationPipeline ):
Format The Codebase - black formatting - isort formatting
load_from_pipeline
cc0cff893f9d7d472788adc2510c123967b384fe
gradio
external.py
20
139
https://github.com/gradio-app/gradio.git
32
1,075
0
165
1,781
Python
{ "docstring": "\n Gets the appropriate Interface kwargs for a given Hugging Face transformers.Pipeline.\n pipeline (transformers.Pipeline): the transformers.Pipeline from which to create an interface\n Returns:\n (dict): a dictionary of kwargs that can be used to construct an Interface object\n ", "language": "en", "n_whitespaces": 52, "n_words": 36, "vocab_size": 30 }
def load_from_pipeline(pipeline): try: import transformers except ImportError: raise ImportError( "transformers not installed. Please try `pip install transformers`" ) if not isinstance(pipeline, transformers.Pipeline): raise ValueError("pipeline must be a transformers.Pipeline") # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the # version of the transformers library that the user has installed. if hasattr(transformers, "AudioClassificationPipeline") and isinstance( pipeline, transformers.AudioClassificationPipeline ): pipeline_info = { "inputs": inputs.Audio(label="Input", source="microphone", type="filepath"), "outputs": outputs.Label(label="Class", type="confidences"), "preprocess": lambda i: {"inputs": i}, "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}, } elif hasattr(transformers, "AutomaticSpeechRecognitionPipeline") and isinstance( pipeline, transformers.AutomaticSpeechRecognitionPipeline ): pipeline_info = { "inputs": inputs.Audio(label="Input", source="microphone", type="filepath"), "outputs": outputs.Textbox(label="Output"), "preprocess": lambda i: {"inputs": i}, "postprocess": lambda r: r["text"], } elif hasattr(transformers, "FeatureExtractionPipeline") and isinstance( pipeline, transformers.FeatureExtractionPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Dataframe(label="Output"), "preprocess": lambda x: {"inputs": x}, "postprocess": lambda r: r[0], } elif hasattr(transformers, "FillMaskPipeline") and isinstance( pipeline, transformers.FillMaskPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Label(label="Classification", type="confidences"), "preprocess": lambda x: {"inputs": x}, "postprocess": lambda r: {i["token_str"]: i["score"] for i in r}, } elif hasattr(transformers, "ImageClassificationPipeline") and isinstance( pipeline, transformers.ImageClassificationPipeline ): pipeline_info = { "inputs": inputs.Image(label="Input Image", type="filepath"), "outputs": outputs.Label(label="Classification", type="confidences"), "preprocess": lambda i: {"images": i}, "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}, } elif hasattr(transformers, "QuestionAnsweringPipeline") and isinstance( pipeline, transformers.QuestionAnsweringPipeline ): pipeline_info = { "inputs": [ inputs.Textbox(label="Context", lines=7), inputs.Textbox(label="Question"), ], "outputs": [outputs.Textbox(label="Answer"), outputs.Label(label="Score")], "preprocess": lambda c, q: {"context": c, "question": q}, "postprocess": lambda r: (r["answer"], r["score"]), } elif hasattr(transformers, "SummarizationPipeline") and isinstance( pipeline, transformers.SummarizationPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input", lines=7), "outputs": outputs.Textbox(label="Summary"), "preprocess": lambda x: {"inputs": x}, "postprocess": lambda r: r[0]["summary_text"], } elif hasattr(transformers, "TextClassificationPipeline") and isinstance( pipeline, transformers.TextClassificationPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Label(label="Classification", type="confidences"), "preprocess": lambda x: [x], "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}, } elif hasattr(transformers, "TextGenerationPipeline") and isinstance( pipeline, transformers.TextGenerationPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": 
outputs.Textbox(label="Output"), "preprocess": lambda x: {"text_inputs": x}, "postprocess": lambda r: r[0]["generated_text"], } elif hasattr(transformers, "TranslationPipeline") and isinstance( pipeline, transformers.TranslationPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Textbox(label="Translation"), "preprocess": lambda x: [x], "postprocess": lambda r: r[0]["translation_text"], } elif hasattr(transformers, "Text2TextGenerationPipeline") and isinstance( pipeline, transformers.Text2TextGenerationPipeline ): pipeline_info = { "inputs": inputs.Textbox(label="Input"), "outputs": outputs.Textbox(label="Generated Text"), "preprocess": lambda x: [x], "postprocess": lambda r: r[0]["generated_text"], } elif hasattr(transformers, "ZeroShotClassificationPipeline") and isinstance( pipeline, transformers.ZeroShotClassificationPipeline ): pipeline_info = { "inputs": [ inputs.Textbox(label="Input"), inputs.Textbox(label="Possible class names (" "comma-separated)"), inputs.Checkbox(label="Allow multiple true classes"), ], "outputs": outputs.Label(label="Classification", type="confidences"), "preprocess": lambda i, c, m: { "sequences": i, "candidate_labels": c, "multi_label": m, }, "postprocess": lambda r: { r["labels"][i]: r["scores"][i] for i in range(len(r["labels"])) }, } else: raise ValueError("Unsupported pipeline type: {}".format(type(pipeline))) # define the function that will be called by the Interface
126
819
61
packages/syft/src/syft/core/adp/vectorized_publish.py
36
11
def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array): # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_no
Implemented working vectorized_publish method into codebase Took 26 minutes
calculate_bounds_for_mechanism
56137bacda6fea5a0053c65eb6fd88688f5298cc
PySyft
vectorized_publish.py
14
8
https://github.com/OpenMined/PySyft.git
1
67
0
30
113
Python
{ "docstring": "Calculates the squared L2 norm values needed to create a Mechanism, and calculate privacy budget + spend If you calculate the privacy budget spend with the worst case bound, you can show this number to the D.S.\n If you calculate it with the regular value (the value computed below when public_only = False, you cannot show the \n privacy budget to the DS because this violates privacy.\n ", "language": "en", "n_whitespaces": 76, "n_words": 66, "vocab_size": 43 }
def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array): # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_norm values!! worst_case_l2_norm = np.sqrt(np.sum(np.square(max_val_array - min_val_array))) * np.ones_like(value_array) l2_norm = np.sqrt(np.sum(np.square(value_array))) * np.ones_like(value_array) # print(l2_norm.shape, worst_case_l2_norm.shape) # print(l2_norm.shape) return l2_norm, worst_case_l2_norm
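A rough usage sketch for the helper above (toy inputs, assuming NumPy is imported as np, matching the module):

import numpy as np

# Hypothetical data: three values, each bounded elementwise by [0, 10].
values = np.array([1.0, 2.0, 2.0])
min_vals = np.zeros(3)
max_vals = np.full(3, 10.0)

l2, worst_case_l2 = calculate_bounds_for_mechanism(values, min_vals, max_vals)
# l2 == sqrt(1 + 4 + 4) == 3.0, broadcast to shape (3,)
# worst_case_l2 == sqrt(3 * 10**2) ~= 17.32, broadcast to shape (3,)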
13,947
65,572
5
erpnext/buying/report/procurement_tracker/procurement_tracker.py
11
7
def get_po_entries(conditions): ret
style: format code with black
get_po_entries
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
procurement_tracker.py
10
34
https://github.com/frappe/erpnext.git
1
26
0
11
43
Python
{ "docstring": "\n\t\tSELECT\n\t\t\tchild.name,\n\t\t\tchild.parent,\n\t\t\tchild.cost_center,\n\t\t\tchild.project,\n\t\t\tchild.warehouse,\n\t\t\tchild.material_request,\n\t\t\tchild.material_request_item,\n\t\t\tchild.item_code,\n\t\t\tchild.stock_uom,\n\t\t\tchild.qty,\n\t\t\tchild.amount,\n\t\t\tchild.base_amount,\n\t\t\tchild.schedule_date,\n\t\t\tparent.transaction_date,\n\t\t\tparent.supplier,\n\t\t\tparent.status,\n\t\t\tparent.owner\n\t\tFROM `tabPurchase Order` parent, `tabPurchase Order Item` child\n\t\tWHERE\n\t\t\tparent.docstatus = 1\n\t\t\tAND parent.name = child.parent\n\t\t\tAND parent.status not in (\"Closed\",\"Completed\",\"Cancelled\")\n\t\t\t{conditions}\n\t\tGROUP BY\n\t\t\tparent.name, child.item_code\n\t\t", "language": "en", "n_whitespaces": 19, "n_words": 44, "vocab_size": 41 }
def get_po_entries(conditions): return frappe.db.sql( .format( conditions=conditions ), as_dict=1, ) # nosec
51,751
206,849
160
django/views/generic/dates.py
27
9
def get_year(self): year = self.year if year is None: try: year = self.kwargs["year"]
Refs #33476 -- Reformatted code with Black.
get_year
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
dates.py
18
11
https://github.com/django/django.git
4
54
0
17
96
Python
{ "docstring": "Return the year for which this view should display data.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def get_year(self): year = self.year if year is None: try: year = self.kwargs["year"] except KeyError: try: year = self.request.GET["year"] except KeyError: raise Http404(_("No year specified")) return year
117,022
319,934
67
src/documents/tests/test_management_retagger.py
18
13
def test_overwrite_storage_path(self): call_command("document_retagger", "--storage_path", "--overwrite") d_first, d_second, d_unrelated, d_auto = self.get_updated_docs() self.assertEqual(d_first.storage_path, self.sp2) self.assertEqual(d_auto.storage_path, self.sp1) self.assertIsNone(d_second.storage_path) self.assertEqual(d_unrelated.storage_path, self.sp2)
Adds the storage paths to the re-tagger command
test_overwrite_storage_path
c8e838e3a0828e82efac1fd93ebb9aba6a000ff8
paperless-ngx
test_management_retagger.py
8
7
https://github.com/paperless-ngx/paperless-ngx.git
1
71
0
17
116
Python
{ "docstring": "\n GIVEN:\n - 2 storage paths with documents which match them\n - 1 document which matches but has a storage path\n WHEN:\n - document retagger is called with overwrite\n THEN:\n - Matching document's storage paths updated\n - Non-matching documents have no storage path\n - Existing storage patch overwritten\n ", "language": "en", "n_whitespaces": 142, "n_words": 47, "vocab_size": 32 }
def test_overwrite_storage_path(self): call_command("document_retagger", "--storage_path", "--overwrite") d_first, d_second, d_unrelated, d_auto = self.get_updated_docs() self.assertEqual(d_first.storage_path, self.sp2) self.assertEqual(d_auto.storage_path, self.sp1) self.assertIsNone(d_second.storage_path) self.assertEqual(d_unrelated.storage_path, self.sp2)
35,635
153,820
18
modin/core/storage_formats/base/query_compiler.py
4
7
def invert(self): return DataFrameDefault.regis
REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514) Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu> Signed-off-by: jeffreykennethli <jkli@ponder.io>
invert
57e29bc5d82348006c5170ef9ac0a9eedcd9acf9
modin
query_compiler.py
10
2
https://github.com/modin-project/modin.git
1
20
0
4
35
Python
{ "docstring": "\n Apply bitwise inversion for each element of the QueryCompiler.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing bitwise inversion for each value.\n ", "language": "en", "n_whitespaces": 67, "n_words": 20, "vocab_size": 16 }
def invert(self): return DataFrameDefault.register(pandas.DataFrame.__invert__)(self)
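Since the defaulted implementation above simply falls back to pandas' elementwise `__invert__`, the observable behaviour matches plain pandas; a minimal sketch of that fallback semantics (pure pandas, not Modin internals):

import pandas as pd

df = pd.DataFrame({"flag": [True, False, True], "bits": [5, 3, 8]})
# Elementwise logical/bitwise inversion, which is what the query compiler defers to.
print(~df["flag"])   # False, True, False
print(~df["bits"])   # -6, -4, -9 (two's-complement bitwise NOT)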
70,506
244,739
532
tests/test_models/test_dense_heads/test_centernet_head.py
183
34
def test_center_head_loss(self): s = 256 img_metas = [{'batch_input_shape': (s, s, 3)}] test_cfg = dict(topK=100, max_per_img=100) centernet_head = CenterNetHead( num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg) feat = [torch.rand(1, 1, s, s)] center_out, wh_out, offset_out = centernet_head.forward(feat) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = empty_gt_losses['loss_center_heatmap'] loss_wh = empty_gt_losses['loss_wh'] loss_offset = empty_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() == 0, ( 'there should be no loss_wh when there are no true
[Refactor] CenterNet
test_center_head_loss
96aa909c19dbe753852ac6dba13bbbc35329b99f
mmdetection
test_centernet_head.py
10
33
https://github.com/open-mmlab/mmdetection.git
1
295
0
101
457
Python
{ "docstring": "Tests center head loss when truth is empty and non-empty.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_center_head_loss(self): s = 256 img_metas = [{'batch_input_shape': (s, s, 3)}] test_cfg = dict(topK=100, max_per_img=100) centernet_head = CenterNetHead( num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg) feat = [torch.rand(1, 1, s, s)] center_out, wh_out, offset_out = centernet_head.forward(feat) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = empty_gt_losses['loss_center_heatmap'] loss_wh = empty_gt_losses['loss_wh'] loss_offset = empty_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() == 0, ( 'there should be no loss_wh when there are no true boxes') assert loss_offset.item() == 0, ( 'there should be no loss_offset when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = one_gt_losses['loss_center_heatmap'] loss_wh = one_gt_losses['loss_wh'] loss_offset = one_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() > 0, 'loss_wh should be non-zero' assert loss_offset.item() > 0, 'loss_offset should be non-zero'
@contextlib.contextmanager
55,183
218,181
22
python3.10.4/Lib/importlib/_common.py
11
11
def from_package(package): spec = wrap_spec(package) reader = spec.loader.get_resource_reader(spec.name) return reader.files() @contextlib.contex
add python 3.10.4 for windows
from_package
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_common.py
9
4
https://github.com/XX-net/XX-Net.git
1
30
1
10
59
Python
{ "docstring": "\n Return a Traversable object for the given package.\n\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
def from_package(package): spec = wrap_spec(package) reader = spec.loader.get_resource_reader(spec.name) return reader.files() @contextlib.contextmanager
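A rough usage sketch for the function above, assuming an ordinary importable package on the filesystem (the reader/Traversable machinery is internal to importlib, so treat this as illustrative):

import importlib

pkg = importlib.import_module("email")   # any regular package works here
root = from_package(pkg)                 # a Traversable for the package directory
for entry in root.iterdir():
    print(entry.name)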
5,042
26,683
61
saleor/checkout/complete_checkout.py
13
9
def _is_refund_ongoing(payment): return ( payment.transactions.filter( kind=TransactionKind.REFUND_ONGOING, is_s
Fix payment flow (#9504) * Do not capture payment again when it should be refunded or voided * Do not create order when then is ongoing refund
_is_refund_ongoing
0881beec1ac02dfa97525c5173687defb356d85c
saleor
complete_checkout.py
13
8
https://github.com/saleor/saleor.git
2
33
0
13
53
Python
{ "docstring": "Return True if refund is ongoing for given payment.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _is_refund_ongoing(payment): return ( payment.transactions.filter( kind=TransactionKind.REFUND_ONGOING, is_success=True ).exists() if payment else False )
54,320
216,011
699
salt/states/win_wua.py
215
37
def installed(name, updates=None): if isinstance(updates, str): updates = [updates] if not updates: updates = name ret = {"name": name, "changes": {}, "result": True, "comment": ""} wua = salt.utils.win_update.WindowsUpdateAgent() # Search for updates install_list = wua.search(updates) # No updates found if install_list.count() == 0: ret["comment"] = "No updates found" return ret # List of updates to download download = salt.utils.win_update.Updates() for item in install_list.updates: if not salt.utils.data.is_true(item.IsDownloaded): download.updates.Add(item) # List of updates to install
Remove 40 character limit to update Title
installed
52c922760e8447f0c9efd23b12481ba1a7509dcd
salt
win_wua.py
17
59
https://github.com/saltstack/salt.git
15
441
0
114
772
Python
{ "docstring": "\n Ensure Microsoft Updates are installed. Updates will be downloaded if\n needed.\n\n Args:\n\n name (str):\n The identifier of a single update to install.\n\n updates (list):\n A list of identifiers for updates to be installed. Overrides\n ``name``. Default is None.\n\n .. note:: Identifiers can be the GUID, the KB number, or any part of the\n Title of the Microsoft update. GUIDs and KBs are the preferred method\n to ensure you're installing the correct update.\n\n .. warning:: Using a partial KB number or a partial Title could result in\n more than one update being installed.\n\n Returns:\n dict: A dictionary containing the results of the update\n\n CLI Example:\n\n .. code-block:: yaml\n\n # using a GUID\n install_update:\n wua.installed:\n - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n\n # using a KB\n install_update:\n wua.installed:\n - name: KB3194343\n\n # using the full Title\n install_update:\n wua.installed:\n - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)\n\n # Install multiple updates\n install_updates:\n wua.installed:\n - updates:\n - KB3194343\n - 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n ", "language": "en", "n_whitespaces": 423, "n_words": 161, "vocab_size": 101 }
def installed(name, updates=None): if isinstance(updates, str): updates = [updates] if not updates: updates = name ret = {"name": name, "changes": {}, "result": True, "comment": ""} wua = salt.utils.win_update.WindowsUpdateAgent() # Search for updates install_list = wua.search(updates) # No updates found if install_list.count() == 0: ret["comment"] = "No updates found" return ret # List of updates to download download = salt.utils.win_update.Updates() for item in install_list.updates: if not salt.utils.data.is_true(item.IsDownloaded): download.updates.Add(item) # List of updates to install install = salt.utils.win_update.Updates() installed_updates = [] for item in install_list.updates: if not salt.utils.data.is_true(item.IsInstalled): install.updates.Add(item) else: installed_updates.extend("KB" + kb for kb in item.KBArticleIDs) if install.count() == 0: ret["comment"] = "Updates already installed: " ret["comment"] += "\n - ".join(installed_updates) return ret # Return comment of changes if test. if __opts__["test"]: ret["result"] = None ret["comment"] = "Updates will be installed:" for update in install.updates: ret["comment"] += "\n" ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title]) return ret # Download updates wua.download(download) # Install updates wua.install(install) # Refresh windows update info wua.refresh() post_info = wua.updates().list() # Verify the installation for item in install.list(): if not salt.utils.data.is_true(post_info[item]["Installed"]): ret["changes"]["failed"] = { item: { "Title": post_info[item]["Title"], "KBs": post_info[item]["KBs"], } } ret["result"] = False else: ret["changes"]["installed"] = { item: { "Title": post_info[item]["Title"], "NeedsReboot": post_info[item]["NeedsReboot"], "KBs": post_info[item]["KBs"], } } if ret["changes"].get("failed", False): ret["comment"] = "Updates failed" else: ret["comment"] = "Updates installed successfully" return ret
42,673
178,349
200
nuitka/plugins/standard/KivyPlugin.py
36
8
def _getKivyInformation(self): setup_codes = r info = self.queryRuntimeInformationMultiple( info_name="kivy_info", setup_codes=setup_codes, values=( ("libs_loaded", "kivy.core.image.libs_loaded"), ("window_impl", "kivy.core.window.window_impl"), ("label_libs", "kivy.core.text.label_libs"), (
Plugins: Add DLL folders needed on Windows for Kivy plugin * Make DLL reporting code part of plugin base class. * Added new method to scan for DLLs in folders.
_getKivyInformation
6ed4d787519d7075d7ff492bc40a291bc12f088c
Nuitka
KivyPlugin.py
12
32
https://github.com/Nuitka/Nuitka.git
2
72
0
32
125
Python
{ "docstring": "\nimport kivy.core.image\nimport kivy.core.text\n# Prevent Window from being created at compile time.\nkivy.core.core_select_lib=(lambda *args, **kwargs: None)\nimport kivy.core.window\n\n# Kivy has packages designed to provide these on Windows\ntry:\n from kivy_deps.sdl2 import dep_bins as sdl2_dep_bins\nexcept ImportError:\n sdl2_dep_bins = []\ntry:\n from kivy_deps.glew import dep_bins as glew_dep_bins\nexcept ImportError:\n glew_dep_bins = []\n", "language": "en", "n_whitespaces": 55, "n_words": 53, "vocab_size": 37 }
def _getKivyInformation(self): setup_codes = r info = self.queryRuntimeInformationMultiple( info_name="kivy_info", setup_codes=setup_codes, values=( ("libs_loaded", "kivy.core.image.libs_loaded"), ("window_impl", "kivy.core.window.window_impl"), ("label_libs", "kivy.core.text.label_libs"), ("sdl2_dep_bins", "sdl2_dep_bins"), ("glew_dep_bins", "glew_dep_bins"), ), ) if info is None: self.sysexit("Error, it seems Kivy is not installed.") return info
40,375
169,019
32
pandas/core/generic.py
15
5
def __iter__(self) -> Iterator: return
TYP: Autotyping (#48191) * annotate-magics * annotate-imprecise-magics * none-return * scalar-return * pyi files * ignore vendored file * manual changes * ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments) * run autotyping in pre-commit * remove final and expand safe (and add annotate-imprecise-magics)
__iter__
54347fe684e0f7844bf407b1fb958a5269646825
pandas
generic.py
8
10
https://github.com/pandas-dev/pandas.git
1
15
0
15
28
Python
{ "docstring": "\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n ", "language": "en", "n_whitespaces": 58, "n_words": 11, "vocab_size": 11 }
def __iter__(self) -> Iterator: return iter(self._info_axis) # can we get a better explanation of this?
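A small illustration of the DataFrame case described by the docstring above: the info axis is the columns, so plain iteration yields column labels, not rows:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
assert list(df) == ["a", "b"]      # iteration walks the info axis (columns)
for col in df:
    print(col, df[col].sum())      # a 3 / b 7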
21,833
104,397
213
src/datasets/table.py
55
26
def cast(self, target_schema, *args, **kwargs): table = table_cast(self.table, target_schema, *args, **kwargs) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Mishig Davaadorj <dmishig@gmail.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr> Co-authored-by: Mishig Davaadorj <dmishig@gmail.com> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
cast
e35be138148333078284b942ccc9ed7b1d826f97
datasets
table.py
20
14
https://github.com/huggingface/datasets.git
6
134
0
39
208
Python
{ "docstring": "\n Cast table values to another schema\n\n Args:\n target_schema (:obj:`Schema`):\n Schema to cast to, the names and order of fields must match\n safe (:obj:`bool`, defaults to :obj:`True`):\n Check for overflows or other unsafe conversions\n\n Returns:\n :class:`datasets.table.Table`:\n ", "language": "en", "n_whitespaces": 127, "n_words": 35, "vocab_size": 33 }
def cast(self, target_schema, *args, **kwargs): table = table_cast(self.table, target_schema, *args, **kwargs) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subschema = pa.schema(subfields) new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks)
40,850
173,537
100
magenta/models/coconet/lib_util.py
65
16
def softmax(p, axis=None, temperature=1): if axis is None: axis = p.ndim - 1 if temperature == 0.: # NOTE: in case o
Work around tensor2tensor/gym issues, fix pylint errors. PiperOrigin-RevId: 433019701
softmax
e833848c6dda95dbcf17e84d935dcdb8cff6f47d
magenta
lib_util.py
12
14
https://github.com/magenta/magenta.git
4
120
0
44
188
Python
{ "docstring": "Apply the softmax transform to an array of categorical distributions.\n\n Args:\n p: an array of categorical probability vectors, possibly unnormalized.\n axis: the axis that spans the categories (default: -1).\n temperature: if not 1, transform the distribution by dividing the log\n probabilities and renormalizing. Values greater than 1 increase entropy,\n values less than 1 decrease entropy. A value of 0 yields a deterministic\n distribution that chooses the mode.\n\n Returns:\n An array of categorical probability vectors, like `p` but tempered and\n normalized.\n ", "language": "en", "n_whitespaces": 119, "n_words": 80, "vocab_size": 59 }
def softmax(p, axis=None, temperature=1): if axis is None: axis = p.ndim - 1 if temperature == 0.: # NOTE: in case of multiple equal maxima, returns uniform distribution. p = p == np.max(p, axis=axis, keepdims=True) else: # oldp = p logp = np.log(p) logp /= temperature logp -= logp.max(axis=axis, keepdims=True) p = np.exp(logp) p /= p.sum(axis=axis, keepdims=True) if np.isnan(p).any(): pdb.set_trace() # pylint: disable=forgotten-debug-statement return p
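A quick numerical check of the tempering behaviour the docstring describes (toy probabilities; assumes NumPy as np, matching the module):

import numpy as np

p = np.array([0.1, 0.2, 0.7])
print(softmax(p, temperature=1))     # unchanged for an already-normalized input
print(softmax(p, temperature=10.0))  # pushed toward uniform (higher entropy)
print(softmax(p, temperature=0))     # [False False  True] -- indicator of the mode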
1,308
8,001
280
ludwig/benchmarking/profiler.py
77
25
def _populate_static_information(self) -> None: self.info["ludwig_version"] = LUDWIG_VERSION self.info["start_disk_usage"] = shutil.disk_usage(os.path.expanduser("~")).used # CPU information cpu_info = get_my_cpu_info() self.info["cpu_architecture"] = cpu_info["arch"] self.info["num_cpu"] = psutil.cpu_count() self.info["cpu_name"] = cpu_info["brand_raw"] self.info["total_cpu_memory_size"] = psutil.virtual_memory().total # GPU information if self.cuda_is_ava
More precise resource usage tracking (#2363) * added `torch.profiler.record_function` decorator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * export torch profiler metric draft/pseudocode * exporting cpu and cuda memory usage * exporting CPU and CUDA execution time * formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * adding basic comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed `_str` entries in the exported JSONs * attempting to speed up result collection from kineto and function event lists * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * style improvements * speed improvements while calculating averages/summaries * style improvements * using variable defined in torch instead of hard coding it * specifying torch in the tracked metrics * added torch.profiler to ResrouceUsageTracker * combining torch.profiler metrics with ResourceUsageMetrics * handling multiple context mangers with exitstack * making it a decorator * flattening output dict * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * using logging instead of print * flake8 formatting * removed intermediary write/read to/from disk * cleaning after last change * adjusted number of args * replaced tag with code_block_tag * changed total_duration to total_execution_time * support nested use of the context manager and the decorator * remove torch.record_function decorator * adding LUDWIG_TAG to label torch profiler main events * using logging instead of print * style changes * preventing cases of empty list when code block execution is too quick * removing experimental code * fixed gpu tracking * style improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * remove experimental code from trainer.py * style improvements * more accurate torch op cpu and cuda execution time. * flake8 fixes * rename to LudwigProfiler * updated docstrings * dont collect torch metrics when use_torch_profiler=False * test for LudwigProfiler * formatting improvements * update test to remove repetitive asserts * make the tag->os directory relationship more obvious * explaining what LUDWIG_TAG is used for * removing unncessary asserts * added explanation for `LUDWIG_TAG` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * formatting fixes * removing `get_python_packages_and_versions` Co-authored-by: Joppe Geluykens <joppe@predibase.com> * dataclasses for base profiler * dataclasses for torch profiler * adding OOM event tracking * formatting * normalizing cpu_utilization * pull out flattening dataclass function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added total CPU memory size and CPU memory available * adding system-wide CPU utilization Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Joppe Geluykens <joppe@predibase.com>
_populate_static_information
c50997c2b27e7f7f59a96c0158f3737e22419ed8
ludwig
profiler.py
13
19
https://github.com/ludwig-ai/ludwig.git
3
187
0
58
348
Python
{ "docstring": "Populate the report with static software and hardware information.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _populate_static_information(self) -> None: self.info["ludwig_version"] = LUDWIG_VERSION self.info["start_disk_usage"] = shutil.disk_usage(os.path.expanduser("~")).used # CPU information cpu_info = get_my_cpu_info() self.info["cpu_architecture"] = cpu_info["arch"] self.info["num_cpu"] = psutil.cpu_count() self.info["cpu_name"] = cpu_info["brand_raw"] self.info["total_cpu_memory_size"] = psutil.virtual_memory().total # GPU information if self.cuda_is_available: gpu_infos = get_gpu_info() for i, gpu_info in enumerate(gpu_infos): gpu_key = f"cuda_{i}" self.info[f"{gpu_key}_memory_used"] = [] self.info[f"{gpu_key}_name"] = gpu_info["name"] self.info[f"{gpu_key}_total_memory"] = gpu_info["total_memory"] self.info[f"{gpu_key}_driver_version"] = gpu_info["driver_version"] self.info[f"{gpu_key}_cuda_version"] = gpu_info["cuda_version"] # recording in microseconds to be in line with torch profiler time recording. self.info["start_time"] = time.perf_counter_ns() / 1000
16,457
76,116
212
wagtail/tests/utils/page_tests.py
29
11
def assertCanNotCreateAt(self, parent_model, child_model, msg=None): if self._testCanCreateAt(parent_model, child_model): msg = self._formatMessage( msg, "Can create a %s.%s unde
Reformat with black
assertCanNotCreateAt
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
page_tests.py
14
13
https://github.com/wagtail/wagtail.git
2
69
0
28
103
Python
{ "docstring": "\n Assert a particular child Page type can not be created under a parent\n Page type. ``parent_model`` and ``child_model`` should be the Page\n classes being tested.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 21 }
def assertCanNotCreateAt(self, parent_model, child_model, msg=None): if self._testCanCreateAt(parent_model, child_model): msg = self._formatMessage( msg, "Can create a %s.%s under a %s.%s" % ( child_model._meta.app_label, child_model._meta.model_name, parent_model._meta.app_label, parent_model._meta.model_name, ), ) raise self.failureException(msg)
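A hedged usage sketch of the assertion above inside a WagtailPageTests case (BlogPage and EventPage are hypothetical page models standing in for real ones):

from wagtail.tests.utils import WagtailPageTests
# from myapp.models import BlogPage, EventPage   # hypothetical page models

class PageHierarchyRules(WagtailPageTests):
    def test_cannot_create_event_under_blog(self):
        # Fails with a descriptive message if EventPage is creatable under BlogPage.
        self.assertCanNotCreateAt(BlogPage, EventPage)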
18,815
91,818
37
src/sentry/features/manager.py
16
11
def get_feature_objects(self) -> Mapping[Project, Feature]: cls = self._manager._get_feature_class(self.feature_name) return {obj: cls(self.feature_name, obj) for obj in self.objects}
feat(notifications): add a feature flag to make slack the default for new users (#35652) We want to have some users automatically get notifications on Slack and email instead of just Slack. But we don't want to impact existing users so instead I introduce the concept of a UserFeature which isn't dependent on the user. I'm adding a flag called users:notification-slack-automatic which is controlled by the age of the user (see getsentry/getsentry#7644). If there is no value for a particular notification setting and provider, it will fall back to being enabled. This is different than the current behavior which defaults to being off for Slack for issue and workflow notifications. This PR is based off a previous PR which had a similar feature flag: #28190
get_feature_objects
f64e58203b6c6d5121a4f6685dace0a4627d49b0
sentry
manager.py
10
9
https://github.com/getsentry/sentry.git
2
44
0
16
68
Python
{ "docstring": "\n Iterate over individual Feature objects.\n\n This is a fallback mode for applying a FeatureHandler that doesn't\n support checking the entire batch at once.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 22 }
def get_feature_objects(self) -> Mapping[Project, Feature]: cls = self._manager._get_feature_class(self.feature_name) return {obj: cls(self.feature_name, obj) for obj in self.objects}
6,335
34,796
49
tests/test_pipelines_automatic_speech_recognition.py
16
9
def require_ffmpeg(test_case): import subprocess try: s
Adding support for `microphone` streaming within pipeline. (#15046) * Adding support for `microphone` streaming within pipeline. - Uses `ffmpeg` to get microphone data. - Makes sure alignment is made to `size_of_sample`. - Works by sending `{"raw": ..data.., "stride": (n, left, right), "partial": bool}` directly to the pipeline enabling to stream partial results and still get inference. - Let's `partial` information flow through the pipeline to enable caller to get it back and choose to display text or not. - The striding reconstitution is bound to have errors since CTC does not keep previous state. Currently most of the errors are we don't know if there's a space or not between two chunks. Since we have some left striding info, we could use that during decoding to choose what to do with those spaces and even extra letters maybe (if the stride is long enough, it's bound to cover at least a few symbols) Fixing tests. Protecting with `require_torch`. `raw_ctc` support for nicer demo. Post rebase fixes. Revamp to split raw_mic_data from it's live chunking. - Requires a refactor to make everything a bit cleaner. Automatic resampling. Small fix. Small fix. * Post rebase fix (need to let super handle more logic, reorder args.) * Update docstrings * Docstring format. * Remove print. * Prevent flow of `input_values`. * Fixing `stride` too. * Fixing the PR by removing `raw_ctc`. * Better docstrings. * Fixing init. * Update src/transformers/pipelines/audio_utils.py Co-authored-by: Anton Lozhkov <aglozhkov@gmail.com> * Update tests/test_pipelines_automatic_speech_recognition.py Co-authored-by: Anton Lozhkov <aglozhkov@gmail.com> * Quality. Co-authored-by: Anton Lozhkov <aglozhkov@gmail.com>
require_ffmpeg
623d8cb475804f2b0f85a47b04b8b2e522db06ef
transformers
test_pipelines_automatic_speech_recognition.py
12
7
https://github.com/huggingface/transformers.git
2
41
0
15
74
Python
{ "docstring": "\n Decorator marking a test that requires FFmpeg.\n\n These tests are skipped when FFmpeg isn't installed.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
def require_ffmpeg(test_case): import subprocess try: subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip("test requires ffmpeg")(test_case)
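A usage sketch mirroring how such decorators are typically applied to test cases (the test class below is hypothetical):

import unittest

@require_ffmpeg
class MicrophoneStreamingTests(unittest.TestCase):
    def test_chunked_audio(self):
        ...  # the whole class is skipped when the ffmpeg binary is not on PATH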
9,364
48,117
636
tests/system/providers/google/tasks/example_queue.py
221
59
def generate_random_string(): import random import string return "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) random_string = generate_random_string() # [START create_queue] create_queue = CloudTasksQueueCreateOperator( location=LOCATION, task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)), queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", retry=Retry(maximum=10.0), timeout=5, task_id="create_queue", ) # [END create_queue] # [START delete_queue] delete_queue = CloudTasksQueueDeleteOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="delete_queue", ) # [END delete_queue] delete_queue.trigger_rule = TriggerRule.ALL_DONE # [START resume_queue] resume_queue = CloudTasksQueueResumeOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="resume_queue", ) # [END resume_queue] # [START pause_queue] pause_queue = CloudTasksQueuePauseOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
CloudTasks assets & system tests migration (AIP-47) (#23282)
generate_random_string
3977e1798d8294ba628b5f330f43702c1a5c79fc
airflow
example_queue.py
13
4
https://github.com/apache/airflow.git
1
31
0
115
517
Python
{ "docstring": "\n Generate random string for queue and task names.\n Queue name cannot be repeated in preceding 7 days and\n task name in the last 1 hour.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 21 }
def generate_random_string(): import random import string return "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) random_string = generate_random_string() # [START create_queue] create_queue = CloudTasksQueueCreateOperator( location=LOCATION, task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)), queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", retry=Retry(maximum=10.0), timeout=5, task_id="create_queue", ) # [END create_queue] # [START delete_queue] delete_queue = CloudTasksQueueDeleteOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="delete_queue", ) # [END delete_queue] delete_queue.trigger_rule = TriggerRule.ALL_DONE # [START resume_queue] resume_queue = CloudTasksQueueResumeOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="resume_queue", ) # [END resume_queue] # [START pause_queue] pause_queue = CloudTasksQueuePauseOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="pause_queue", ) # [END pause_queue] # [START purge_queue] purge_queue = CloudTasksQueuePurgeOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="purge_queue", ) # [END purge_queue] # [START get_queue] get_queue = CloudTasksQueueGetOperator( location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", task_id="get_queue", ) get_queue_result = BashOperator( task_id="get_queue_result", bash_command=f"echo {get_queue.output}", ) # [END get_queue] # [START update_queue] update_queue = CloudTasksQueueUpdateOperator( task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=1)), location=LOCATION, queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}", update_mask=FieldMask(paths=["stackdriver_logging_config.sampling_ratio"]), task_id="update_queue", ) # [END update_queue] # [START list_queue] list_queue = CloudTasksQueuesListOperator(location=LOCATION, task_id="list_queue") # [END list_queue] chain( random_string, create_queue, update_queue, pause_queue, resume_queue, purge_queue, get_queue, get_queue_result, list_queue, delete_queue, ) from tests.system.utils.watcher import watcher # This test needs watcher in order to properly mark success/failure # when "tearDown" task with trigger rule is part of the DAG list(dag.tasks) >> watcher() from tests.system.utils import get_test_run # noqa: E402 # Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest) test_run = get_test_run(dag)
81,432
275,619
23
keras/optimizers/optimizer_v2/utils.py
10
3
def make_gradient_clipvalue_fn(clipvalue): if clipvalue is None:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
make_gradient_clipvalue_fn
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
utils.py
9
5
https://github.com/keras-team/keras.git
2
20
0
10
29
Python
{ "docstring": "Creates a gradient transformation function for clipping by value.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def make_gradient_clipvalue_fn(clipvalue): if clipvalue is None: return lambda grads_and_vars: grads_and_vars
26,583
119,312
184
jax/_src/scipy/signal.py
83
16
def odd_ext(x, n, axis=-1): if n < 1: return x if n > x.shape[axis] - 1: raise ValueError( f"The extension length n ({n}) is too big. " f"It must not exceed x.shape[axis]-1, which is {x.shape[axis] - 1}.") left_end = lax.slice_in_di
Add some functions for spectral analysis. This commit adds "stft", "csd", and "welch" functions in scipy.signal.
odd_ext
e085370ec4137cf0f73c5163cb664bc4e1c46082
jax
signal.py
15
16
https://github.com/google/jax.git
3
159
0
54
252
Python
{ "docstring": "Extends `x` along with `axis` by odd-extension.\n\n This function was previously a part of \"scipy.signal.signaltools\" but is no\n longer exposed.\n\n Args:\n x : input array\n n : the number of points to be added to the both end\n axis: the axis to be extended\n ", "language": "en", "n_whitespaces": 57, "n_words": 44, "vocab_size": 37 }
def odd_ext(x, n, axis=-1): if n < 1: return x if n > x.shape[axis] - 1: raise ValueError( f"The extension length n ({n}) is too big. " f"It must not exceed x.shape[axis]-1, which is {x.shape[axis] - 1}.") left_end = lax.slice_in_dim(x, 0, 1, axis=axis) left_ext = jnp.flip(lax.slice_in_dim(x, 1, n + 1, axis=axis), axis=axis) right_end = lax.slice_in_dim(x, -1, None, axis=axis) right_ext = jnp.flip(lax.slice_in_dim(x, -(n + 1), -1, axis=axis), axis=axis) ext = jnp.concatenate((2 * left_end - left_ext, x, 2 * right_end - right_ext), axis=axis) return ext
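A worked example of the odd extension above (values follow from the slicing logic; assumes jax.numpy as jnp, matching the module):

import jax.numpy as jnp

x = jnp.array([1., 2., 3., 4., 5.])
# n=2: left end 2*1 - [3, 2] = [-1, 0]; right end 2*5 - [4, 3] = [6, 7]
print(odd_ext(x, 2))   # [-1.  0.  1.  2.  3.  4.  5.  6.  7.]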
79,737
268,868
29
keras/tests/keras_doctest.py
20
7
def filter_on_submodules(all_modules, submodule): filtered_modules = [ mod for mod in all_modules if PACKAGE + submodule in mod.__name__ ] return filtered_modules
Add a keras doctest modeled on tensorflow doctest PiperOrigin-RevId: 424672415
filter_on_submodules
a449efe29b092e658a29cd847e0494979a47d252
keras
keras_doctest.py
10
5
https://github.com/keras-team/keras.git
3
27
0
17
43
Python
{ "docstring": "Filters all the modules based on the module flag.\n\n The module flag has to be relative to the core package imported.\n For example, if `submodule=keras.layers` then, this function will return\n all the modules in the submodule.\n\n Args:\n all_modules: All the modules in the core package.\n submodule: Submodule to filter from all the modules.\n\n Returns:\n All the modules in the submodule.\n ", "language": "en", "n_whitespaces": 75, "n_words": 60, "vocab_size": 38 }
def filter_on_submodules(all_modules, submodule): filtered_modules = [ mod for mod in all_modules if PACKAGE + submodule in mod.__name__ ] return filtered_modules
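A small sketch of the filtering logic above, assuming the function and a "keras." PACKAGE prefix constant live in the same namespace as this snippet (the constant's exact value is an assumption):

import keras.layers
import keras.losses

PACKAGE = "keras."   # assumed prefix constant used by the doctest module
all_modules = [keras.layers, keras.losses]
print(filter_on_submodules(all_modules, "layers"))   # keeps only keras.layers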
15,095
69,776
91
erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py
124
24
def get_pe_matching_query(amount_condition, account_from_to, transaction): # get matching payment entries query from_date = frappe.db.get_single_value("Bank Reconciliation Tool", "bank_statement_from_date") to_date = frappe.db.get_single_value("Bank Reconciliation Tool", "bank_statement_to_date") from_reference_date = frappe.db.get_single_value( "Bank Reconciliation Tool", "from_reference_date" ) to_reference_date = frappe.db.get_single_value("Bank Reconciliation Tool", "to_reference_date") filtered_by_reference_date = frappe.db.get_single_value( "Bank Reconcil
Feat: Filter on Payment Entries and Journal Entries Applying filters on Payment entries and Journal Entries as per reference date and posting date
get_pe_matching_query
408c89df030998fe36df135570c9edd90a522996
erpnext
bank_reconciliation_tool.py
10
61
https://github.com/frappe/erpnext.git
4
149
0
60
336
Python
{ "docstring": "\n\t\tSELECT\n\t\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Payment Entry' as doctype,\n\t\t\tname,\n\t\t\tpaid_amount,\n\t\t\treference_no,\n\t\t\treference_date,\n\t\t\tparty,\n\t\t\tparty_type,\n\t\t\tposting_date,\n\t\t\t{currency_field}\n\t\tFROM\n\t\t\t`tabPayment Entry`\n\t\tWHERE\n\t\t\tpaid_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND {account_from_to} = %(bank_account)s\n\t\t\tAND reference_no = '{transaction.reference_number}'\n\t\t\t{cond_filtered_from_ref_date} \"{from_ref_date}\"\n\t\t\t{cond_filtered_to_ref_date} \"{to_ref_date}\"\n\t\t\t{cond_filtered_from_posting_date} \"{from_post_date}\"\n\t\t\t{cond_filtered_to_posting_date} \"{to_post_date}\"\n\t\t", "language": "en", "n_whitespaces": 55, "n_words": 80, "vocab_size": 60 }
def get_pe_matching_query(amount_condition, account_from_to, transaction): # get matching payment entries query from_date = frappe.db.get_single_value("Bank Reconciliation Tool", "bank_statement_from_date") to_date = frappe.db.get_single_value("Bank Reconciliation Tool", "bank_statement_to_date") from_reference_date = frappe.db.get_single_value( "Bank Reconciliation Tool", "from_reference_date" ) to_reference_date = frappe.db.get_single_value("Bank Reconciliation Tool", "to_reference_date") filtered_by_reference_date = frappe.db.get_single_value( "Bank Reconciliation Tool", "filtered_by_reference_date" ) if transaction.deposit > 0: currency_field = "paid_to_account_currency as currency" else: currency_field = "paid_from_account_currency as currency" cond_filtered_from_ref_date = "" cond_filtered_to_ref_date = "" cond_filtered_from_posting_date = "" cond_filtered_to_posting_date = "" from_ref_date ="" to_ref_date ="" from_post_date = "" to_post_date = "" if(filtered_by_reference_date): cond_filtered_from_ref_date = " AND reference_date >=" cond_filtered_to_ref_date = " AND reference_date <=" from_ref_date = from_reference_date to_ref_date = to_reference_date elif(not filtered_by_reference_date): cond_filtered_from_posting_date = " AND posting_date >=" cond_filtered_to_posting_date = " AND posting_date <=" from_post_date = from_date to_post_date = to_date pe_data= f return pe_data
40,231
168,207
97
pandas/core/arrays/interval.py
22
15
def closed(self) -> IntervalInclusiveType: warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.dtype.inclusive _interval_shared_docs["set_closed"] = textwrap.dedent( )
PERF cache find_stack_level (#48023) cache stacklevel
closed
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
interval.py
12
12
https://github.com/pandas-dev/pandas.git
1
34
0
21
79
Python
{ "docstring": "\n String describing the inclusive side the intervals.\n\n Either ``left``, ``right``, ``both`` or ``neither`.\n \n Return an identical %(klass)s closed on the specified side.\n\n .. deprecated:: 1.5.0\n\n Parameters\n ----------\n closed : {'left', 'right', 'both', 'neither'}\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n new_index : %(klass)s\n\n %(examples)s\\\n ", "language": "en", "n_whitespaces": 166, "n_words": 51, "vocab_size": 41 }
def closed(self) -> IntervalInclusiveType: warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.dtype.inclusive _interval_shared_docs["set_closed"] = textwrap.dedent( )
47,745
196,245
95
sympy/functions/elementary/exponential.py
31
15
def as_real_imag(self, deep=True, **hints): from sympy.functions.elementary.trigonometric import cos, sin re, im = self.a
Updated import locations
as_real_imag
498015021131af4dbb07eb110e5badaba8250c7b
sympy
exponential.py
11
8
https://github.com/sympy/sympy.git
2
93
0
24
144
Python
{ "docstring": "\n Returns this function as a 2-tuple representing a complex number.\n\n Examples\n ========\n\n >>> from sympy import I, exp\n >>> from sympy.abc import x\n >>> exp(x).as_real_imag()\n (exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x)))\n >>> exp(1).as_real_imag()\n (E, 0)\n >>> exp(I).as_real_imag()\n (cos(1), sin(1))\n >>> exp(1+I).as_real_imag()\n (E*cos(1), E*sin(1))\n\n See Also\n ========\n\n sympy.functions.elementary.complexes.re\n sympy.functions.elementary.complexes.im\n ", "language": "en", "n_whitespaces": 171, "n_words": 44, "vocab_size": 35 }
def as_real_imag(self, deep=True, **hints): from sympy.functions.elementary.trigonometric import cos, sin re, im = self.args[0].as_real_imag() if deep: re = re.expand(deep, **hints) im = im.expand(deep, **hints) cos, sin = cos(im), sin(im) return (exp(re)*cos, exp(re)*sin)
38,346
159,579
199
rasa/core/exporter.py
51
11
async def _get_conversation_ids_to_process(self) -> Set[Text]: conversation_ids_in_tracker_store = ( await self._get_conversation_ids_in_tracker() ) if not self.requested_conversation_ids: return conversation_ids_in_tracker_store self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store) conversation_ids_to_process = conversation_ids_in_tracker_store & set( self.request
Async Tracker Store Support (#10696) Make `TrackerStore` interface methods asynchronous and supply an `AwaitableTrackerstore` wrapper for custom tracker stores which do not implement the methods as asynchronous. Squashed commits: * refactor tracker store and tests to be async * update core modules with async tracker store calls * update server with async tracker store calls * await tracker store call in twilio voice * add await in test rasa export * add awaits to test_agent for tracker store * add awaits to tracker store functions in processor tests * refactor exporter tests for async tracker store * use asyncmock from unittest instead of custom * add async in test_rasa_export * fixture update for async tracker store * update marker logic to handle async tracker store * fix mark tracker loader tests for async tracker store * add awaits to server and server tests * add await to dialogue test with tracker store * add await to tracker test * formatting in tracker store * more formatting fixes * more formatting fixes * address formatting changes * change return type and remove awaitable tracker store wrapper in create * make stream_events async * address comments (remove redundant methods in awaitable tracker store + raise exception) * make _run_markers and _run_markers_cli sync to ensure CLI can be run * add warning and test for creating async tracker store from endpoint config * add changelog entry * use TrackerStore instead of "TrackerStore" in typehint Co-authored-by: Joe Juzl <joejuzl@gmail.com> * use TrackerStore instead of "TrackerStore" in typehint Co-authored-by: Joe Juzl <joejuzl@gmail.com> * change user warning to deprecation warning * fix typo in comment * have fallback_tracker_store return in memory tracker store without awaitable wrapper * import async mock from conftest instead of unittest to suport Python 3.7 * delete unused imports in marker_tracker_loader * apply black to modules which failed ci linter * resolve line length linting in tracker_store.py * refer to request.app.ctx.agent object instead of request.app.agent * resolve ci failures from not adding async/await * applied black to reformat three modules failing code quality * correct most docstring linting errors * fix docstring linting errors * fix flake8 line length errors * fix mypy type checking errors * linting corrections after adding type ignores to methods * delete extra periods in docstring Co-authored-by: Joe Juzl <joejuzl@gmail.com>
_get_conversation_ids_to_process
ca316fc80cb490ecf1e2e7261fb7fcef22fccc4a
rasa
exporter.py
11
27
https://github.com/RasaHQ/rasa.git
3
57
0
40
101
Python
{ "docstring": "Get conversation IDs that are good for processing.\n\n Finds the intersection of events that are contained in the tracker store with\n those events requested as a command-line argument.\n\n Returns:\n Conversation IDs that are both requested and contained in the tracker\n store. If no conversation IDs are requested, all conversation IDs in the\n tracker store are returned.\n\n ", "language": "en", "n_whitespaces": 117, "n_words": 56, "vocab_size": 34 }
async def _get_conversation_ids_to_process(self) -> Set[Text]: conversation_ids_in_tracker_store = ( await self._get_conversation_ids_in_tracker() ) if not self.requested_conversation_ids: return conversation_ids_in_tracker_store self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store) conversation_ids_to_process = conversation_ids_in_tracker_store & set( self.requested_conversation_ids ) if not conversation_ids_to_process: raise NoEventsToMigrateError( "Could not find an overlap between the requested " "conversation IDs and those found in the tracker store. Exiting." ) return conversation_ids_to_process
80,808
271,577
115
keras/engine/training.py
39
6
def call(self, inputs, training=None, mask=None): raise NotImplementedError( "Unimplemented `tf.keras.Model.call()`: if you " "intend to create a `Model` with the Functional " "API, please provide `inputs` and
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
call
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training.py
9
8
https://github.com/keras-team/keras.git
1
25
0
34
48
Python
{ "docstring": "Calls the model on new inputs and returns the outputs as tensors.\n\n In this case `call()` just reapplies\n all ops in the graph to the new inputs\n (e.g. build a new computational graph from the provided inputs).\n\n Note: This method should not be called directly. It is only meant to be\n overridden when subclassing `tf.keras.Model`.\n To call a model on an input, always use the `__call__()` method,\n i.e. `model(inputs)`, which relies on the underlying `call()` method.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n training: Boolean or boolean scalar tensor, indicating whether to run\n the `Network` in training mode or inference mode.\n mask: A mask or list of masks. A mask can be either a boolean tensor or\n None (no mask). For more details, check the guide\n [here](https://www.tensorflow.org/guide/keras/masking_and_padding).\n\n Returns:\n A tensor if there is a single output, or\n a list of tensors if there are more than one outputs.\n ", "language": "en", "n_whitespaces": 316, "n_words": 150, "vocab_size": 106 }
def call(self, inputs, training=None, mask=None): raise NotImplementedError( "Unimplemented `tf.keras.Model.call()`: if you " "intend to create a `Model` with the Functional " "API, please provide `inputs` and `outputs` " "arguments. Otherwise, subclass `Model` with an " "overridden `call()` method." )
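What the docstring above asks for in practice, sketched as a tiny subclassed model that overrides call() (layer sizes are arbitrary):

import tensorflow as tf

class TwoLayerNet(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.hidden = tf.keras.layers.Dense(16, activation="relu")
        self.out = tf.keras.layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        x = self.hidden(inputs)
        return self.out(x)

model = TwoLayerNet()
y = model(tf.random.normal((4, 8)))   # goes through __call__, which relies on call()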
35,438
153,549
655
modin/core/io/text/json_dispatcher.py
157
58
def _read(cls, path_or_buf, **kwargs): path_or_buf = cls.get_path_or_buffer(path_or_buf) if isinstance(path_or_buf, str): if not cls.file_exists(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) path_or_buf = cls.get_path(path_or_buf) elif not cls.pathlib_or_pypath(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) if not kwargs.get("lines", False): return cls.single_worker_read(path_or_buf, **kwargs) with OpenFile(path_or_buf, "rb") as f: columns = pandas.read_json(BytesIO(b"" + f.readline()), lines=True).columns kwargs["columns"] = columns empty_pd_df = pandas.DataFrame(columns=columns) with OpenFile(path_or_buf, "rb", kwargs.get("compression", "in
REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854) Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Co-authored-by: Dmitry Chigarev <dchigarev@users.noreply.github.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
_read
97769988a6f19e4b76f34238c97bf159ee7626a5
modin
json_dispatcher.py
16
48
https://github.com/modin-project/modin.git
7
398
0
106
641
Python
{ "docstring": "\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n ", "language": "en", "n_whitespaces": 141, "n_words": 44, "vocab_size": 35 }
def _read(cls, path_or_buf, **kwargs): path_or_buf = cls.get_path_or_buffer(path_or_buf) if isinstance(path_or_buf, str): if not cls.file_exists(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) path_or_buf = cls.get_path(path_or_buf) elif not cls.pathlib_or_pypath(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) if not kwargs.get("lines", False): return cls.single_worker_read(path_or_buf, **kwargs) with OpenFile(path_or_buf, "rb") as f: columns = pandas.read_json(BytesIO(b"" + f.readline()), lines=True).columns kwargs["columns"] = columns empty_pd_df = pandas.DataFrame(columns=columns) with OpenFile(path_or_buf, "rb", kwargs.get("compression", "infer")) as f: partition_ids = [] index_ids = [] dtypes_ids = [] column_widths, num_splits = cls._define_metadata(empty_pd_df, columns) args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs} splits = cls.partitioned_file( f, num_partitions=NPartitions.get(), ) for start, end in splits: args.update({"start": start, "end": end}) partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args) partition_ids.append(partition_id[:-3]) index_ids.append(partition_id[-3]) dtypes_ids.append(partition_id[-2]) # partition_id[-1] contains the columns for each partition, which will be useful # for implementing when `lines=False`. row_lengths = cls.materialize(index_ids) new_index = pandas.RangeIndex(sum(row_lengths)) dtypes = cls.get_dtypes(dtypes_ids) partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths) if isinstance(dtypes, pandas.Series): dtypes.index = columns else: dtypes = pandas.Series(dtypes, index=columns) new_frame = cls.frame_cls( np.array(partition_ids), new_index, columns, row_lengths, column_widths, dtypes=dtypes, ) new_frame.synchronize_labels(axis=0) return cls.query_compiler_cls(new_frame)
46,339
190,124
248
manim/scene/three_d_scene.py
40
21
def stop_ambient_camera_rotation(self, about="theta"): about: str = about.lower() try: if config.renderer == RendererType.CAIRO: trackers = { "theta": self.camera.theta_tracker, "phi": self.camera.phi_tracker, "gamma": self.camera.gamma_tracker, } x: ValueTracker = trackers[about] x.clear_updaters() self.remove(x) elif config.renderer == RendererType.OPENGL: self.camera.clear_updaters() except Exception: raise ValueError("Invalid ambient rotation angl
Replaced renderer strings with :class:`.RendererType` enum entries (#3017) * remove unused constants * remove deprecated --use_opengl_renderer flag * remove unnecessary workaround with class initialization * add OpenGLMobject.name to get rid of one renderer check * add VMobject.n_points_per_curve property to get rid of more renderer checks * replace renderer string checks with enum check * added mobject.utils module with renderer-dependent class getters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ensure that capitalization of passed renderer type is irrelevant * remove unused entries from mobject.utils.__all__ * fixed isort ignore in manim.__init__ * fixed lower-case casting of passed renderer * fixed doctests * more documentation + doctests for mobject.utils * removed incorrect paragraph about ConverToOpenGL metaclass * added docstring for RendererType enum * renderer compatibility section in plugin dev documentation * added mobject.utils to reference manual * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove actual doctest (it ran the compatibility code) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Naveen M K <naveen521kk@gmail.com>
stop_ambient_camera_rotation
bd844f46d804c8cad50d06ad20ab5bebaee9987b
manim
three_d_scene.py
14
16
https://github.com/ManimCommunity/manim.git
4
101
0
36
171
Python
{ "docstring": "\n This method stops all ambient camera rotation.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def stop_ambient_camera_rotation(self, about="theta"): about: str = about.lower() try: if config.renderer == RendererType.CAIRO: trackers = { "theta": self.camera.theta_tracker, "phi": self.camera.phi_tracker, "gamma": self.camera.gamma_tracker, } x: ValueTracker = trackers[about] x.clear_updaters() self.remove(x) elif config.renderer == RendererType.OPENGL: self.camera.clear_updaters() except Exception: raise ValueError("Invalid ambient rotation angle.")
71,842
247,689
100
tests/rest/client/test_relations.py
25
13
def _get_bundled_aggregations(self) -> JsonDict: # Fetch the bundled aggregations of the event. channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) return channel.json_body["unsigned"].get("m.relations", {})
Refactor relations tests (#12232) * Moves the relation pagination tests to a separate class. * Move the assertion of the response code into the `_send_relation` helper. * Moves some helpers into the base-class.
_get_bundled_aggregations
1da0f79d5455b594f2aa989106a672786f5b990f
synapse
test_relations.py
11
11
https://github.com/matrix-org/synapse.git
1
55
0
24
105
Python
{ "docstring": "\n Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
def _get_bundled_aggregations(self) -> JsonDict: # Fetch the bundled aggregations of the event. channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) return channel.json_body["unsigned"].get("m.relations", {})
21,558
102,634
394
chia/types/spend_bundle.py
153
27
def get_memos(self) -> Dict[bytes32, List[bytes]]: memos: Dict[bytes32, List[bytes]] = {} for coin_spend in self.coin_spends: result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run( Program.from_bytes(bytes(coin_spend.solution)) ) for condition in result.as_python(): if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4: #
Merge standalone wallet into main (#9793) * wallet changes from pac * cat changes * pool tests * pooling tests passing * offers * lint * mempool_mode * black * linting * workflow files * flake8 * more cleanup * renamed * remove obsolete test, don't cast announcement * memos are not only bytes32 * trade renames * fix rpcs, block_record * wallet rpc, recompile settlement clvm * key derivation * clvm tests * lgtm issues and wallet peers * stash * rename * mypy linting * flake8 * bad initializer * flaky tests * Make CAT wallets only create on verified hints (#9651) * fix clvm tests * return to log lvl warn * check puzzle unhardened * public key, not bytes. api caching change * precommit changes * remove unused import * mypy ci file, tests * ensure balance before creating a tx * Remove CAT logic from full node test (#9741) * Add confirmations and sleeps for wallet (#9742) * use pool executor * rever merge mistakes/cleanup * Fix trade test flakiness (#9751) * remove precommit * older version of black * lint only in super linter * Make announcements in RPC be objects instead of bytes (#9752) * Make announcements in RPC be objects instead of bytes * Lint * misc hint'ish cleanup (#9753) * misc hint'ish cleanup * unremove some ci bits * Use main cached_bls.py * Fix bad merge in main_pac (#9774) * Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75 * Remove unused ignores * more unused ignores * Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e * One more byte32.from_hexstr * Remove obsolete test * remove commented out * remove duplicate payment object * remove long sync * remove unused test, noise * memos type * bytes32 * make it clear it's a single state at a time * copy over asset ids from pacr * file endl linter * Update chia/server/ws_connection.py Co-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com> Co-authored-by: Matt Hauff <quexington@gmail.com> Co-authored-by: Kyle Altendorf <sda@fstab.net> Co-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>
get_memos
89f15f591cc3cc3e8ae40e95ffc802f7f2561ece
chia-blockchain
spend_bundle.py
17
18
https://github.com/Chia-Network/chia-blockchain.git
6
146
0
109
235
Python
{ "docstring": "\n Retrieves the memos for additions in this spend_bundle, which are formatted as a list in the 3rd parameter of\n CREATE_COIN. If there are no memos, the addition coin_id is not included. If they are not formatted as a list\n of bytes, they are not included. This is expensive to call, it should not be used in full node code.\n ", "language": "en", "n_whitespaces": 88, "n_words": 59, "vocab_size": 40 }
def get_memos(self) -> Dict[bytes32, List[bytes]]: memos: Dict[bytes32, List[bytes]] = {} for coin_spend in self.coin_spends: result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run( Program.from_bytes(bytes(coin_spend.solution)) ) for condition in result.as_python(): if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4: # If only 3 elements (opcode + 2 args), there is no memo, this is ph, amount coin_added = Coin(coin_spend.coin.name(), bytes32(condition[1]), int_from_bytes(condition[2])) if type(condition[3]) != list: # If it's not a list, it's not the correct format continue memos[coin_added.name()] = condition[3] return memos # Note that `coin_spends` used to have the bad name `coin_solutions`. # Some API still expects this name. For now, we accept both names. # # TODO: continue this deprecation. Eventually, all code below here should be removed. # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary) # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary) # 3. remove all references to `include_legacy_keys=True` # 4. remove all code below this point
12,256
60,712
34
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
22
6
def _clean_url_path_part(part): # type: (str) -> str # We unquote prior
upd; format
_clean_url_path_part
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
collector.py
10
2
https://github.com/jindongwang/transferlearning.git
1
22
0
20
40
Python
{ "docstring": "\n Clean a \"part\" of a URL path (i.e. after splitting on \"@\" characters).\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
def _clean_url_path_part(part): # type: (str) -> str # We unquote prior to quoting to make sure nothing is double quoted. return urllib.parse.quote(urllib.parse.unquote(part))
12,155
60,427
161
code/deep/BJMMD/caffe/scripts/cpp_lint.py
114
14
def CheckAltTokens(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(lin
Balanced joint maximum mean discrepancy for deep transfer learning
CheckAltTokens
cc4d0564756ca067516f71718a3d135996525909
transferlearning
cpp_lint.py
14
10
https://github.com/jindongwang/transferlearning.git
5
91
0
84
154
Python
{ "docstring": "Check alternative keywords being used in boolean expressions.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ", "language": "en", "n_whitespaces": 54, "n_words": 40, "vocab_size": 33 }
def CheckAltTokens(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
29,950
133,165
126
python/ray/util/joblib/__init__.py
39
10
def register_ray(): try: from ray.util.joblib.ray_backend import RayBackend register_parallel_backend("ray", RayBackend) except ImportError: msg = ( "T
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
register_ray
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
__init__.py
12
12
https://github.com/ray-project/ray.git
2
39
0
37
83
Python
{ "docstring": "Register Ray Backend to be called with parallel_backend(\"ray\").", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def register_ray(): try: from ray.util.joblib.ray_backend import RayBackend register_parallel_backend("ray", RayBackend) except ImportError: msg = ( "To use the ray backend you must install ray." "Try running 'pip install ray'." "See https://docs.ray.io/en/master/installation.html" "for more information." ) raise ImportError(msg) __all__ = ["register_ray"]
50,470
203,603
22
django/contrib/auth/backends.py
8
5
def get_group_permissions(self, user_obj, obj=None): return self._get_permissi
Refs #33476 -- Reformatted code with Black.
get_group_permissions
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
backends.py
8
2
https://github.com/django/django.git
1
23
0
8
37
Python
{ "docstring": "\n Return a set of permission strings the user `user_obj` has from the\n groups they belong.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def get_group_permissions(self, user_obj, obj=None): return self._get_permissions(user_obj, obj, "group")
74,993
257,048
20
haystack/document_stores/deepsetcloud.py
6
5
def get_evaluation_sets(self) -> List[dict]: return self.evaluation_set_client.get
EvaluationSetClient for deepset cloud to fetch evaluation sets and la… (#2345) * EvaluationSetClient for deepset cloud to fetch evaluation sets and labels for one specific evaluation set * make DeepsetCloudDocumentStore able to fetch uploaded evaluation set names * fix missing renaming of get_evaluation_set_names in DeepsetCloudDocumentStore * update documentation for evaluation set functionality in deepset cloud document store * DeepsetCloudDocumentStore tests for evaluation set functionality * rename index to evaluation_set_name for DeepsetCloudDocumentStore evaluation set functionality * raise DeepsetCloudError when no labels were found for evaluation set * make use of .get_with_auto_paging in EvaluationSetClient * Return result of get_with_auto_paging() as it parses the response already * Make schema import source more specific * fetch all evaluation sets for a workspace in deepset Cloud * Rename evaluation_set_name to label_index * make use of generator functionality for fetching labels * Update Documentation & Code Style * Adjust function input for DeepsetCloudDocumentStore.get_all_labels, adjust tests for it, fix typos, make linter happy * Match error message with pytest.raises * Update Documentation & Code Style * DeepsetCloudDocumentStore.get_labels_count raises DeepsetCloudError when no evaluation set was found to count labels on * remove unneeded import in tests * DeepsetCloudDocumentStore tests, make reponse bodies a string through json.dumps * DeepsetcloudDocumentStore.get_label_count - move raise to return * stringify uuid before json.dump as uuid is not serilizable * DeepsetcloudDocumentStore - adjust response mocking in tests * DeepsetcloudDocumentStore - json dump response body in test * DeepsetCloudDocumentStore introduce label_index, EvaluationSetClient rename label_index to evaluation_set * Update Documentation & Code Style * DeepsetCloudDocumentStore rename evaluation_set to evaluation_set_response as there is a name clash with the input variable * DeepsetCloudDocumentStore - rename missed variable in test * DeepsetCloudDocumentStore - rename missed label_index to index in doc string, rename label_index to evaluation_set in EvaluationSetClient * Update Documentation & Code Style * DeepsetCloudDocumentStore - update docstrings for EvaluationSetClient * DeepsetCloudDocumentStore - fix typo in doc string Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
get_evaluation_sets
a273c3a51dd432bd125e5b35df4be94260a2cdb7
haystack
deepsetcloud.py
8
8
https://github.com/deepset-ai/haystack.git
1
19
0
6
33
Python
{ "docstring": "\n Returns a list of uploaded evaluation sets to deepset cloud.\n\n :return: list of evaluation sets as dicts\n These contain (\"name\", \"evaluation_set_id\", \"created_at\", \"matched_labels\", \"total_labels\") as fields.\n ", "language": "en", "n_whitespaces": 64, "n_words": 26, "vocab_size": 21 }
def get_evaluation_sets(self) -> List[dict]: return self.evaluation_set_client.get_evaluation_sets()
42,555
177,986
231
label_studio/data_import/uploader.py
34
16
def allowlist_svg(dirty_xml): from lxml.html import clean allow_tags = [ 'xml', 'svg', 'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect' ] cleaner = clean.Cleaner( allow_tags=allow_tags,
fix: DEV-2236: Stored XSS via SVG file (#2273) * user uploaded content rendered as plain text or known image only * allow list for svg in progress * allow list for svg basic pass * add error handling * add to file processing re: code review * rm uneeded code * add env var to disable svg cleaning * add test * update env setting * rm lxml string methods * Update uploader.py * Update base.py Co-authored-by: Max Tkachenko <makseq@gmail.com>
allowlist_svg
53f6308186aa131946e196b0409f3d732ec9e007
label-studio
uploader.py
9
23
https://github.com/heartexlabs/label-studio.git
1
77
0
31
126
Python
{ "docstring": "Filter out malicious/harmful content from SVG files\n by defining allowed tags\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
def allowlist_svg(dirty_xml): from lxml.html import clean allow_tags = [ 'xml', 'svg', 'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect' ] cleaner = clean.Cleaner( allow_tags=allow_tags, style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=False, remove_unknown_tags=False) clean_xml = cleaner.clean_html(dirty_xml) return clean_xml
21,021
101,613
87
tools/sort/sort_methods.py
28
15
def _sort_filelist(self) -> None: for filename, image, alignments in self._iterator(): self.score_image(filename, image, alignments) self.sort() logger.debug("sorted list: %s", [r[0] if isinsta
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
_sort_filelist
98d01760e469fd2108eed8d0b0a1ba6297c3177c
faceswap
sort_methods.py
12
16
https://github.com/deepfakes/faceswap.git
4
68
0
24
104
Python
{ "docstring": " Call the sort method's logic to populate the :attr:`_results` attribute.\n\n Put logic for scoring an individual frame in in :attr:`score_image` of the child\n\n Returns\n -------\n list\n The sorted file. A list of tuples with the filename in the first position and score in\n the second position\n ", "language": "en", "n_whitespaces": 104, "n_words": 46, "vocab_size": 34 }
def _sort_filelist(self) -> None: for filename, image, alignments in self._iterator(): self.score_image(filename, image, alignments) self.sort() logger.debug("sorted list: %s", [r[0] if isinstance(r, (tuple, list)) else r for r in self._result])
81,369
275,284
76
keras/optimizers/optimizer_experimental/optimizer.py
29
5
def finalize_variable_values(self, var_list): if self.use_ema: # If the optimizer uses EMA, then when finalizing, we replace the model # variable value with its moving average stored inside optimizer. self._overwrite_model_variable
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
finalize_variable_values
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
optimizer.py
9
3
https://github.com/keras-team/keras.git
2
19
0
27
35
Python
{ "docstring": "Set the final value of model's trainable variables.\n\n Sometimes there are some extra steps before ending the variable updates,\n such as overriding the model variables with its average value.\n\n Args:\n var_list: list of model variables.\n ", "language": "en", "n_whitespaces": 72, "n_words": 35, "vocab_size": 30 }
def finalize_variable_values(self, var_list): if self.use_ema: # If the optimizer uses EMA, then when finalizing, we replace the model # variable value with its moving average stored inside optimizer. self._overwrite_model_variables_with_average_value(var_list)
2,402
12,761
34
jina/serve/stream/__init__.py
9
5
async def wait_floating_requests_end(self): while self.total_num_floating_tasks_alive > 0:
feat: wait for floating Executor tasks (#5004)
wait_floating_requests_end
1b3edacf531e4e8d29eac4ea73785f8d201255d6
jina
__init__.py
10
3
https://github.com/jina-ai/jina.git
2
20
0
9
37
Python
{ "docstring": "\n Await this coroutine to make sure that all the floating tasks that the request handler may bring are properly consumed\n ", "language": "en", "n_whitespaces": 35, "n_words": 20, "vocab_size": 18 }
async def wait_floating_requests_end(self): while self.total_num_floating_tasks_alive > 0: await asyncio.sleep(0)
21,504
102,266
61
torch/functional.py
46
13
def _lu_impl(A, pivot=True, get_infos=False, out=None): # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor] r # If get_infos is True, then we don't need to ch
Add linalg.lu_factor (#66933) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/66933 This PR exposes `torch.lu` as `torch.linalg.lu_factor` and `torch.linalg.lu_factor_ex`. This PR also adds support for matrices with zero elements both in the size of the matrix and the batch. Note that this function simply returns empty tensors of the correct size in this case. We add a test and an OpInfo for the new function. This PR also adds documentation for this new function in line of the documentation in the rest of `torch.linalg`. Fixes https://github.com/pytorch/pytorch/issues/56590 Fixes https://github.com/pytorch/pytorch/issues/64014 cc jianyuh nikitaved pearu mruberry walterddr IvanYashchuk xwang233 Lezcano Test Plan: Imported from OSS Reviewed By: gchanan Differential Revision: D32834069 Pulled By: mruberry fbshipit-source-id: 51ef12535fa91d292f419acf83b800b86ee9c7eb
_lu_impl
a35b4b49d2e2a215a64e455101c779ae623b3321
pytorch
functional.py
10
77
https://github.com/pytorch/pytorch.git
1
37
0
42
83
Python
{ "docstring": "Computes the LU factorization of a matrix or batches of matrices\n :attr:`A`. Returns a tuple containing the LU factorization and\n pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to\n ``True``.\n\n .. note::\n * The returned permutation matrix for every matrix in the batch is\n represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.\n ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,\n the ``i``-th row was permuted with the ``j-1``-th row.\n * LU factorization with :attr:`pivot` = ``False`` is not available\n for CPU, and attempting to do so will throw an error. However,\n LU factorization with :attr:`pivot` = ``False`` is available for\n CUDA.\n * This function does not check if the factorization was successful\n or not if :attr:`get_infos` is ``True`` since the status of the\n factorization is present in the third element of the return tuple.\n * In the case of batches of square matrices with size less or equal\n to 32 on a CUDA device, the LU factorization is repeated for\n singular matrices due to the bug in the MAGMA library\n (see magma issue 13).\n * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.\n\n .. warning::\n The gradients of this function will only be finite when :attr:`A` is full rank.\n This is because the LU decomposition is just differentiable at full rank matrices.\n Furthermore, if :attr:`A` is close to not being full rank,\n the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.\n\n Args:\n A (Tensor): the tensor to factor of size :math:`(*, m, n)`\n pivot (bool, optional): controls whether pivoting is done. Default: ``True``\n get_infos (bool, optional): if set to ``True``, returns an info IntTensor.\n Default: ``False``\n out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,\n then the elements in the tuple are Tensor, IntTensor,\n and IntTensor. If :attr:`get_infos` is ``False``, then the\n elements in the tuple are Tensor, IntTensor. Default: ``None``\n\n Returns:\n (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing\n\n - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`\n\n - **pivots** (*IntTensor*): the pivots of size :math:`(*, \\text{min}(m, n))`.\n ``pivots`` stores all the intermediate transpositions of rows.\n The final permutation ``perm`` could be reconstructed by\n applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,\n where ``perm`` is initially the identity permutation of :math:`m` elements\n (essentially this is what :func:`torch.lu_unpack` is doing).\n\n - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of\n size :math:`(*)` where non-zero values indicate whether factorization for the matrix or\n each minibatch has succeeded or failed\n\n Example::\n\n >>> A = torch.randn(2, 3, 3)\n >>> A_LU, pivots = torch.lu(A)\n >>> A_LU\n tensor([[[ 1.3506, 2.5558, -0.0816],\n [ 0.1684, 1.1551, 0.1940],\n [ 0.1193, 0.6189, -0.5497]],\n\n [[ 0.4526, 1.2526, -0.3285],\n [-0.7988, 0.7175, -0.9701],\n [ 0.2634, -0.9255, -0.3459]]])\n >>> pivots\n tensor([[ 3, 3, 3],\n [ 3, 3, 3]], dtype=torch.int32)\n >>> A_LU, pivots, info = torch.lu(A, get_infos=True)\n >>> if info.nonzero().size(0) == 0:\n ... 
print('LU factorization succeeded for all samples!')\n LU factorization succeeded for all samples!\n ", "language": "en", "n_whitespaces": 1147, "n_words": 497, "vocab_size": 265 }
def _lu_impl(A, pivot=True, get_infos=False, out=None): # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor] r # If get_infos is True, then we don't need to check for errors and vice versa return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) if TYPE_CHECKING: _ListOrSeq = Sequence[Tensor] else: _ListOrSeq = List[Tensor]
72,153
248,205
324
tests/config/test_workers.py
32
9
def test_new_configs_appservice_worker(self) -> None: appservice_worker_config = self._make_worker_config( worker_app="synapse.app.generic_worker", worker_name="worker1" ) self.assertTrue( appservice_worker_config._should_this_worker_perform_duty( { "notify_appservices_from_worker": "worker1", }, "notify_appservices", "synapse.app.appservice", "notify_appservices_from_worker", ) ) self.assertFalse( appservice_worker_config._should_this_worker_perform_duty( { "notify_appservices_from_worker"
Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. (#12452)
test_new_configs_appservice_worker
c2d50e9f6c5f7b01cbd8bf1dca36cb8c0e7b007f
synapse
test_workers.py
12
27
https://github.com/matrix-org/synapse.git
1
68
0
21
125
Python
{ "docstring": "\n Tests new config options. This is for the worker's config.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def test_new_configs_appservice_worker(self) -> None: appservice_worker_config = self._make_worker_config( worker_app="synapse.app.generic_worker", worker_name="worker1" ) self.assertTrue( appservice_worker_config._should_this_worker_perform_duty( { "notify_appservices_from_worker": "worker1", }, "notify_appservices", "synapse.app.appservice", "notify_appservices_from_worker", ) ) self.assertFalse( appservice_worker_config._should_this_worker_perform_duty( { "notify_appservices_from_worker": "worker2", }, "notify_appservices", "synapse.app.appservice", "notify_appservices_from_worker", ) )
48,477
197,334
41
sympy/physics/hydrogen.py
22
6
def E_nl(n, Z=1): n
Remove abbreviations in documentation
E_nl
65be461082dda54c8748922f9c29a19af1279fe1
sympy
hydrogen.py
10
5
https://github.com/sympy/sympy.git
3
52
0
22
86
Python
{ "docstring": "\n Returns the energy of the state (n, l) in Hartree atomic units.\n\n The energy does not depend on \"l\".\n\n Parameters\n ==========\n\n n : integer\n Principal Quantum Number which is\n an integer with possible values as 1, 2, 3, 4,...\n Z :\n Atomic number (1 for Hydrogen, 2 for Helium, ...)\n\n Examples\n ========\n\n >>> from sympy.physics.hydrogen import E_nl\n >>> from sympy.abc import n, Z\n >>> E_nl(n, Z)\n -Z**2/(2*n**2)\n >>> E_nl(1)\n -1/2\n >>> E_nl(2)\n -1/8\n >>> E_nl(3)\n -1/18\n >>> E_nl(3, 47)\n -2209/18\n\n ", "language": "en", "n_whitespaces": 165, "n_words": 80, "vocab_size": 66 }
def E_nl(n, Z=1): n, Z = S(n), S(Z) if n.is_integer and (n < 1): raise ValueError("'n' must be positive integer") return -Z**2/(2*n**2)
69,687
241,760
96
tests/checkpointing/test_model_checkpoint.py
30
21
def test_model_checkpoint_no_extraneous_invocations(tmpdir): model = LogInTwoMethods() num_epochs = 4 model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1) trainer = Trainer( strategy="ddp_spawn", accelerator="cpu", devices=2, default_root_dir=tmpdir, callbacks=[model_checkpoint], max_epochs=num_epochs, ) trainer.fit(model) assert trainer.state.finished, f"Training failed with {trainer.state}"
Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408) Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
test_model_checkpoint_no_extraneous_invocations
d2d284fd6e3e8f53e9a44ab233771850af1e4dab
lightning
test_model_checkpoint.py
10
14
https://github.com/Lightning-AI/lightning.git
1
77
0
27
130
Python
{ "docstring": "Test to ensure that the model callback saves the checkpoints only once in distributed mode.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
def test_model_checkpoint_no_extraneous_invocations(tmpdir): model = LogInTwoMethods() num_epochs = 4 model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1) trainer = Trainer( strategy="ddp_spawn", accelerator="cpu", devices=2, default_root_dir=tmpdir, callbacks=[model_checkpoint], max_epochs=num_epochs, ) trainer.fit(model) assert trainer.state.finished, f"Training failed with {trainer.state}"
55,204
218,207
175
python3.10.4/Lib/importlib/abc.py
45
12
def find_module(self, fullname, path): warnings.warn("MetaPathFinder.find_module() is deprecated since Python " "3.4 in favor of MetaPathFinder.find_spec() and is "
add python 3.10.4 for windows
find_module
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
abc.py
9
10
https://github.com/XX-net/XX-Net.git
3
56
0
34
92
Python
{ "docstring": "Return a loader for the module.\n\n If no module is found, return None. The fullname is a str and\n the path is a list of strings or None.\n\n This method is deprecated since Python 3.4 in favor of\n finder.find_spec(). If find_spec() exists then backwards-compatible\n functionality is provided for this method.\n\n ", "language": "en", "n_whitespaces": 93, "n_words": 50, "vocab_size": 39 }
def find_module(self, fullname, path): warnings.warn("MetaPathFinder.find_module() is deprecated since Python " "3.4 in favor of MetaPathFinder.find_spec() and is " "slated for removal in Python 3.12", DeprecationWarning, stacklevel=2) if not hasattr(self, 'find_spec'): return None found = self.find_spec(fullname, path) return found.loader if found is not None else None
24,533
111,992
440
nni/algorithms/hpo/evolution_tuner.py
103
28
def _generate_individual(self, parameter_id): pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) else: random.shuffle(self.population) # avoid only 1 individual has result if len(self.population) > 1 and self.population[0].result < self.population[1].result: self.population[0] = self.population[1] # mutation on the worse individual space = json2space(self.searchspace_json, self.population[0].config) is_rand = dict() mutation_pos = space[random.randint(0, len(space)-1)]
[WIP] add doc for evolution (#4575)
_generate_individual
de6662a4a0fbfc557614b6c022edaf8117de7a5a
nni
evolution_tuner.py
15
27
https://github.com/microsoft/nni.git
8
259
0
70
403
Python
{ "docstring": "\n This function will generate the config for a trial.\n If at the first generation, randomly generates individuals to satisfy self.population_size.\n Otherwise, random choose a pair of individuals and compare their fitnesses.\n The worst of the pair will be removed. Copy the best of the pair and mutate it to generate a new individual.\n\n Parameters\n ----------\n\n parameter_id : int\n\n Returns\n -------\n dict\n A group of candidate parameters that evolution tuner generated.\n ", "language": "en", "n_whitespaces": 159, "n_words": 70, "vocab_size": 54 }
def _generate_individual(self, parameter_id): pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) else: random.shuffle(self.population) # avoid only 1 individual has result if len(self.population) > 1 and self.population[0].result < self.population[1].result: self.population[0] = self.population[1] # mutation on the worse individual space = json2space(self.searchspace_json, self.population[0].config) is_rand = dict() mutation_pos = space[random.randint(0, len(space)-1)] for i in range(len(self.space)): is_rand[self.space[i]] = (self.space[i] == mutation_pos) config = json2parameter( self.searchspace_json, is_rand, self.random_state, self.population[0].config) if len(self.population) > 1: self.population.pop(1) indiv = Individual(config=config) # remove "_index" from config and save params-id self.running_trials[parameter_id] = indiv config = split_index(indiv.config) return config
5,417
30,232
50
spotdl/console/web.py
15
11
async def connect(self): connection = {"client_id": self.client_id, "websocket": self.websocket} logging.info(f"Connect
fixed docstrings
connect
448bd75fe5de981995446a536963c5bd11e491ec
spotify-downloader
web.py
9
5
https://github.com/spotDL/spotify-downloader.git
1
44
0
15
83
Python
{ "docstring": "\n Called when a new client connects to the websocket.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
async def connect(self): connection = {"client_id": self.client_id, "websocket": self.websocket} logging.info(f"Connecting WebSocket: {connection}") await self.websocket.accept() WSProgressHandler.instances.append(self)
70,180
243,990
73
mmdet/datasets/custom.py
20
10
def prepare_test_img(self, idx): img_info = self.data_infos[idx] results = dict(img_info=img_in
[Feature] Support OpenImages Dataset (#6331) * [Feature] support openimage group of eval * [Feature] support openimage group of eval * support openimage dataset * support openimage challenge dataset * fully support OpenImages-V6 and OpenImages Challenge 2019 * Fix some logic error * update config file * fix get data_infos error * fully support OpenImages evaluation * update OpenImages config files * [Feature] support OpenImages datasets * fix bug * support load image metas from pipeline * fix bug * fix get classes logic error * update code * support get image metas * support openimags * support collect image metas * support Open Images * fix openimages logic * minor fix * add a new function to compute openimages tpfp * minor fix * fix ci error * minor fix * fix indication * minor fix * fix returns * fix returns * fix returns * fix returns * fix returns * minor fix * update readme * support loading image level labels and fix some logic * minor fix * minor fix * add class names * minor fix * minor fix * minor fix * add openimages test unit * minor fix * minor fix * fix test unit * minor fix * fix logic error * minor fix * fully support openimages * minor fix * fix docstring * fix docstrings in readthedocs * update get image metas script * label_description_file -> label_file * update openimages readme * fix test unit * fix test unit * minor fix * update readme file * Update get_image_metas.py
prepare_test_img
1516986a616fee8bb741d0ab2be40683045efccd
mmdetection
custom.py
10
7
https://github.com/open-mmlab/mmdetection.git
2
56
0
18
92
Python
{ "docstring": "Get testing data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Testing data after pipeline with new keys introduced by \\\n pipeline.\n ", "language": "en", "n_whitespaces": 82, "n_words": 24, "vocab_size": 21 }
def prepare_test_img(self, idx): img_info = self.data_infos[idx] results = dict(img_info=img_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) return self.pipeline(results)
@frappe.whitelist()
14,456
67,253
67
erpnext/regional/report/provident_fund_deductions/provident_fund_deductions.py
107
26
def get_data(filters): data = [] conditions = get_conditions(filters) salary_slips = frappe.db.sql( % (conditions), as_dict=1, ) component_type_dict = frappe._dict( frappe.db.sql( ) ) if not len(component_type_dict): return [] entry = frappe.db.sql( % (conditions, ", ".join(["%s"] * len(component_type_dict))), tuple(component_type_dict.keys()), as_dict=1, ) data_list = prepare_data(entry, component_type_dict) for d in salary_slips: total = 0 if data_list.get(d.name): employee = { "employee": data_list.get(d.name).get("employee"), "employee_name": data_list.get(d.name).get("employee_name"), "pf_account": data_list.get(d.name).get("pf_account"), } if data_list.get(d.name).get("Provident Fund"): employee["pf_amount"] = data_list.get(d.name).get("Provident Fund") total += data_list.get(
style: format code with black
get_data
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
provident_fund_deductions.py
17
52
https://github.com/frappe/erpnext.git
7
337
1
60
586
Python
{ "docstring": " select sal.name from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\t select name, component_type from `tabSalary Component`\n\t\twhere component_type in ('Provident Fund', 'Additional Provident Fund', 'Provident Fund Loan') select sal.name, sal.employee, sal.employee_name, ded.salary_component, ded.amount\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "language": "en", "n_whitespaces": 55, "n_words": 63, "vocab_size": 40 }
def get_data(filters): data = [] conditions = get_conditions(filters) salary_slips = frappe.db.sql( % (conditions), as_dict=1, ) component_type_dict = frappe._dict( frappe.db.sql( ) ) if not len(component_type_dict): return [] entry = frappe.db.sql( % (conditions, ", ".join(["%s"] * len(component_type_dict))), tuple(component_type_dict.keys()), as_dict=1, ) data_list = prepare_data(entry, component_type_dict) for d in salary_slips: total = 0 if data_list.get(d.name): employee = { "employee": data_list.get(d.name).get("employee"), "employee_name": data_list.get(d.name).get("employee_name"), "pf_account": data_list.get(d.name).get("pf_account"), } if data_list.get(d.name).get("Provident Fund"): employee["pf_amount"] = data_list.get(d.name).get("Provident Fund") total += data_list.get(d.name).get("Provident Fund") if data_list.get(d.name).get("Additional Provident Fund"): employee["additional_pf"] = data_list.get(d.name).get("Additional Provident Fund") total += data_list.get(d.name).get("Additional Provident Fund") if data_list.get(d.name).get("Provident Fund Loan"): employee["pf_loan"] = data_list.get(d.name).get("Provident Fund Loan") total += data_list.get(d.name).get("Provident Fund Loan") employee["total"] = total data.append(employee) return data @frappe.whitelist()
106,449
307,681
55
homeassistant/components/trace/models.py
12
7
def as_dict(self) -> dict[str, Any]: return { "extended_dict": self.a
Move Trace classes to separate module (#78433)
as_dict
219cee2ca9f6cd9eb7e0abcbda6d9540240e20d3
core
models.py
9
6
https://github.com/home-assistant/core.git
1
32
0
12
55
Python
{ "docstring": "Return an dictionary version of this ActionTrace for saving.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def as_dict(self) -> dict[str, Any]: return { "extended_dict": self.as_extended_dict(), "short_dict": self.as_short_dict(), }
@keras_export( "keras.__internal__.optimizers.convert_to_legacy_optimizer", v1=[] )
83,359
280,502
761
keras/optimizers/__init__.py
218
49
def deserialize(config, custom_objects=None, **kwargs): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic
Move new optimizer out of optimizer_experimental/ directory. PiperOrigin-RevId: 488998585
deserialize
5a105aadbdc6fde2c2529280c4789864adbb81c7
keras
__init__.py
12
55
https://github.com/keras-team/keras.git
6
311
1
130
547
Python
{ "docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during\n deserialization.\n\n Returns:\n A Keras Optimizer instance.\n ", "language": "en", "n_whitespaces": 84, "n_words": 32, "vocab_size": 30 }
def deserialize(config, custom_objects=None, **kwargs): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) use_legacy_optimizer = kwargs.pop("use_legacy_optimizer", False) if len(config["config"]) > 0: # If the optimizer config is not empty, then we use the value of # `is_legacy_optimizer` to override `use_legacy_optimizer`. If # `is_legacy_optimizer` does not exist in config, it means we are # using the legacy optimzier. use_legacy_optimizer = config["config"].get("is_legacy_optimizer", True) if ( tf.__internal__.tf2.enabled() and tf.executing_eagerly() and not use_legacy_optimizer ): all_classes = { "adadelta": adadelta_experimental.Adadelta, "adagrad": adagrad_experimental.Adagrad, "adam": adam_experimental.Adam, "adamax": adamax_experimental.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_experimental.Nadam, "rmsprop": rmsprop_experimental.RMSprop, "sgd": sgd_experimental.SGD, "ftrl": ftrl_experimental.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizerV3, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } else: all_classes = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, "ftrl": ftrl_v2.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in all_classes: config["class_name"] = config["class_name"].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name="optimizer", ) @keras_export( "keras.__internal__.optimizers.convert_to_legacy_optimizer", v1=[] )
55,689
219,661
85
python3.10.4/Lib/_pydecimal.py
28
11
def multiply(self, a, b): a = _convert_other(a, raiseit=True) r = a.__mul__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" %
add python 3.10.4 for windows
multiply
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_pydecimal.py
11
7
https://github.com/XX-net/XX-Net.git
2
48
0
24
78
Python
{ "docstring": "multiply multiplies two operands.\n\n If either operand is a special value then the general rules apply.\n Otherwise, the operands are multiplied together\n ('long multiplication'), resulting in a number which may be as long as\n the sum of the lengths of the two operands.\n\n >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))\n Decimal('3.60')\n >>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))\n Decimal('21')\n >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))\n Decimal('0.72')\n >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))\n Decimal('-0.0')\n >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))\n Decimal('4.28135971E+11')\n >>> ExtendedContext.multiply(7, 7)\n Decimal('49')\n >>> ExtendedContext.multiply(Decimal(7), 7)\n Decimal('49')\n >>> ExtendedContext.multiply(7, Decimal(7))\n Decimal('49')\n ", "language": "en", "n_whitespaces": 222, "n_words": 75, "vocab_size": 53 }
def multiply(self, a, b): a = _convert_other(a, raiseit=True) r = a.__mul__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r
7,871
43,210
111
tests/utils/test_db_cleanup.py
17
12
def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip): run_cleanup( clean_before_timestamp=None, table_
Don't rely on current ORM structure for db clean command (#23574) For command DB clean, by not relying on the ORM models, we will be able to use the command even when the metadatabase is not yet upgraded to the version of Airflow you have installed. Additionally we archive all rows before deletion.
test_run_cleanup_skip_archive
95bd6b71cc9f5da377e272707f7b68000d980939
airflow
test_db_cleanup.py
10
10
https://github.com/apache/airflow.git
1
52
0
17
78
Python
{ "docstring": "test that delete confirmation input is called when appropriate", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip): run_cleanup( clean_before_timestamp=None, table_names=['log'], dry_run=None, verbose=None, confirm=False, **kwargs, ) assert cleanup_table_mock.call_args[1]['skip_archive'] is should_skip
976
6,409
41
ludwig/datasets/base_dataset.py
9
5
def process(self) -> None: if not self.is_downloaded(): self.download()
Add and expand docstrings in base_dataset.py (#1819)
process
d0bcbb2a6e2ab82501fd34ef583329ff2ac22a15
ludwig
base_dataset.py
9
5
https://github.com/ludwig-ai/ludwig.git
2
26
0
9
48
Python
{ "docstring": "Process the dataset into a dataframe and save it at self.processed_dataset_path.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def process(self) -> None: if not self.is_downloaded(): self.download() self.process_downloaded_dataset()
30,799
136,008
66
rllib/utils/tests/test_actor_manager.py
24
10
def test_healthy_only_works_for_list_of_functions(self): act
[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938) Signed-off-by: Jun Gong <jungong@anyscale.com>
test_healthy_only_works_for_list_of_functions
e707ce4fb3717e3c05118c57f503dfbd03552ca9
ray
test_actor_manager.py
10
11
https://github.com/ray-project/ray.git
4
115
0
22
77
Python
{ "docstring": "Test healthy only mode works when a list of funcs are provided.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_healthy_only_works_for_list_of_functions(self): actors = [Actor.remote(i) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # Mark first and second actor as unhealthy. manager.set_actor_state(1, False) manager.set_actor_state(2, False)
19,665
99,587
235
tests/sentry/integrations/slack/notifications/test_unassigned.py
42
21
def test_unassignment(self, mock_func): notification = UnassignedActivityNotification(
fix(notifications): Use `metrics_key` (#34572)
test_unassignment
1730c481f1a8a71446326fa1ff72e10663016385
sentry
test_unassigned.py
14
19
https://github.com/getsentry/sentry.git
1
93
0
34
171
Python
{ "docstring": "\n Test that a Slack message is sent with the expected payload when an issue is unassigned\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
def test_unassignment(self, mock_func): notification = UnassignedActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.ASSIGNED, data={"assignee": ""}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert text == f"Issue unassigned by {self.name}" assert attachment["title"] == self.group.title assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=unassigned_activity-slack-user|Notification Settings>" )
83,843
281,546
61
gamestonk_terminal/stocks/insider/insider_controller.py
26
11
def print_help(self): has_ticker_start = "
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: james <jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com>
print_help
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
insider_controller.py
9
45
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
42
0
19
100
Python
{ "docstring": "Print help[cmds]\n view view available presets\n set set one of the available presets[/cmds]\n\n[param]PRESET: [/param]{self.preset}[cmds]\n\n filter filter insiders based on preset [src][Open Insider][/src]\n\n\n load load a specific stock ticker for analysis[/cmds]\n{has_ticker_start}\n[param]Ticker: [/param]{self.ticker}\n\n stats insider stats of the company [src][Open Insider][/src]\n act insider activity over time [src][Business Insider][/src]\n lins last insider trading of the company [src][Finviz][/src]\n{has_ticker_end}\n\n[info]Latest Insiders[/info] [src][Open Insider][/src][cmds]\n lcb latest cluster boys\n lpsb latest penny stock buys\n lit latest insider trading (all filings)\n lip latest insider purchases\n blip big latest insider purchases ($25k+)\n blop big latest officer purchases ($25k+)\n blcp big latest CEO/CFO purchases ($25k+)\n lis latest insider sales\n blis big latest insider sales ($100k+)\n blos big latest officer sales ($100k+)\n blcs big latest CEO/CFO sales ($100k+)\n[info]Top Insiders [src][Open Insider][/src][/info]\n topt top officer purchases today\n toppw top officer purchases past week\n toppm top officer purchases past month\n tipt top insider purchases today\n tippw top insider purchases past week\n tippm top insider purchases past month\n tist top insider sales today\n tispw top insider sales past week\n tispm top insider sales past month[/cmds]\n", "language": "en", "n_whitespaces": 490, "n_words": 176, "vocab_size": 88 }
def print_help(self): has_ticker_start = "[unvl]" if not self.ticker else "" has_ticker_end = "[/unvl]" if not self.ticker else "" help_text = f console.print(text=help_text, menu="Stocks - Insider Trading")
@pytest.mark.django_db
18,216
87,078
47
tests/sentry/relay/test_config.py
23
12
def test_project_config_dynamic_sampling_is_none(default_project): default_project.update_option("sentry:dynamic_sampling", None) with Feature({"organizations:server-side-sampling": True}): cfg = get_project_config(default_project) cfg = cfg.to_dict() dynamic_sampling = get_path(cfg, "config", "dynamicSampling") assert dynamic_sampling is None
feat(dynamic-sampling): Handles updating ProjectConfig with uniform DS rule for v2 [TET-465] (#40268) This PR forces your uniform rule by your plan or respect old logic. If both feature flags are enabled dynamic-sampling-basic flag takes the highest precedence. Original PR https://github.com/getsentry/sentry/pull/40180 was reverted via https://github.com/getsentry/sentry/pull/40266 due to issue of removing incorrect line. Co-authored-by: Joris Bayer <joris.bayer@sentry.io>
test_project_config_dynamic_sampling_is_none
e0e2c4ff4248042abda3cc93024930dada416af8
sentry
test_config.py
12
7
https://github.com/getsentry/sentry.git
1
51
1
19
103
Python
{ "docstring": "\n Tests test check inc-237 that dynamic sampling is None,\n so it's pass when we have fix and fails when we dont\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
def test_project_config_dynamic_sampling_is_none(default_project): default_project.update_option("sentry:dynamic_sampling", None) with Feature({"organizations:server-side-sampling": True}): cfg = get_project_config(default_project) cfg = cfg.to_dict() dynamic_sampling = get_path(cfg, "config", "dynamicSampling") assert dynamic_sampling is None @pytest.mark.django_db
56,268
221,198
29
python3.10.4/Lib/bz2.py
8
8
def seek(self, offset, whence=io.SEEK_SET):
add python 3.10.4 for windows
seek
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bz2.py
8
3
https://github.com/XX-net/XX-Net.git
1
30
0
8
48
Python
{ "docstring": "Change the file position.\n\n The new position is specified by offset, relative to the\n position indicated by whence. Values for whence are:\n\n 0: start of stream (default); offset must not be negative\n 1: current stream position\n 2: end of stream; offset must not be positive\n\n Returns the new file position.\n\n Note that seeking is emulated, so depending on the parameters,\n this operation may be extremely slow.\n ", "language": "en", "n_whitespaces": 141, "n_words": 66, "vocab_size": 49 }
def seek(self, offset, whence=io.SEEK_SET): self._check_can_seek() return self._buffer.seek(offset, whence)
50,903
204,818
100
django/db/backends/base/base.py
26
13
def savepoint(self): if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace("-", "") self.savepoint_state += 1 sid = "s%s_x%d" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid
Refs #33476 -- Reformatted code with Black.
savepoint
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
10
10
https://github.com/django/django.git
2
64
0
22
113
Python
{ "docstring": "\n Create a savepoint inside the current transaction. Return an\n identifier for the savepoint that will be used for the subsequent\n rollback or commit. Do nothing if savepoints are not supported.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 26 }
def savepoint(self): if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace("-", "") self.savepoint_state += 1 sid = "s%s_x%d" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid
13,876
65,388
9
erpnext/accounts/report/unpaid_expense_claim/unpaid_expense_claim.py
20
9
def get_unclaimed_expese_claims(filters): cond = "1=1" if filters.get("employee"):
style: format code with black
get_unclaimed_expese_claims
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
unpaid_expense_claim.py
10
22
https://github.com/frappe/erpnext.git
2
42
0
17
73
Python
{ "docstring": "\n\t\tselect\n\t\t\tec.employee, ec.employee_name, ec.name, ec.total_sanctioned_amount, ec.total_amount_reimbursed,\n\t\t\tsum(gle.credit_in_account_currency - gle.debit_in_account_currency) as outstanding_amt\n\t\tfrom\n\t\t\t`tabExpense Claim` ec, `tabGL Entry` gle\n\t\twhere\n\t\t\tgle.against_voucher_type = \"Expense Claim\" and gle.against_voucher = ec.name\n\t\t\tand gle.party is not null and ec.docstatus = 1 and ec.is_paid = 0 and {cond} group by ec.name\n\t\thaving\n\t\t\toutstanding_amt > 0\n\t", "language": "en", "n_whitespaces": 39, "n_words": 49, "vocab_size": 39 }
def get_unclaimed_expese_claims(filters): cond = "1=1" if filters.get("employee"): cond = "ec.employee = %(employee)s" return frappe.db.sql( .format( cond=cond ), filters, as_list=1, )
3,279
20,227
73
pipenv/patched/notpip/_vendor/platformdirs/unix.py
27
9
def site_config_dir(self) -> str: # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False path = os.environ.get
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
site_config_dir
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
unix.py
9
10
https://github.com/pypa/pipenv.git
2
38
0
24
71
Python
{ "docstring": "\n :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`\n is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS\n path separator), e.g. ``/etc/xdg/$appname/$version``\n ", "language": "en", "n_whitespaces": 65, "n_words": 34, "vocab_size": 25 }
def site_config_dir(self) -> str: # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False path = os.environ.get("XDG_CONFIG_DIRS", "") if not path.strip(): path = "/etc/xdg" return self._with_multi_path(path)
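An illustrative sketch of the multipath behaviour described in the docstring, using the public platformdirs package rather than pip's vendored copy; the output shown assumes a Linux host and the application name "myapp" is made up:

import os
from platformdirs import PlatformDirs

os.environ["XDG_CONFIG_DIRS"] = "/etc/xdg:/opt/xdg"
dirs = PlatformDirs("myapp", multipath=True)
print(dirs.site_config_dir)   # e.g. /etc/xdg/myapp:/opt/xdg/myapp

With multipath=False (the default) only the first entry is used, and when XDG_CONFIG_DIRS is blank the method above falls back to /etc/xdg.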
76,206
260,360
29
sklearn/decomposition/_fastica.py
8
7
def fit_transform(self, X, y=None): self._validate_params() return self._fit_transform(X, compute_sources=
MAINT Use _validate_params in FastICA (#23711) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr>
fit_transform
4cc347d4d0cbbfdcbd353f08842e0668fed78c9f
scikit-learn
_fastica.py
8
3
https://github.com/scikit-learn/scikit-learn.git
1
28
0
8
45
Python
{ "docstring": "Fit the model and recover the sources from X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Estimated sources obtained by transforming the data with the\n estimated unmixing matrix.\n ", "language": "en", "n_whitespaces": 177, "n_words": 66, "vocab_size": 49 }
def fit_transform(self, X, y=None): self._validate_params() return self._fit_transform(X, compute_sources=True)
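A short, hedged example of calling fit_transform on synthetic mixed signals (the data and parameter choices are illustrative only):

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.default_rng(0)
t = np.linspace(0, 8, 2000)
sources = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]   # two independent signals
X = sources @ np.array([[1.0, 0.5], [0.5, 2.0]]).T       # observed mixtures
X += 0.02 * rng.standard_normal(X.shape)                 # a little noise

ica = FastICA(n_components=2, random_state=0)
S_est = ica.fit_transform(X)    # shape (2000, 2): estimated sources
print(S_est.shape)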
18,148
86,689
672
src/sentry/api/endpoints/project_dynamic_sampling.py
92
53
def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range): sampling_factor = self.__generate_transactions_sampling_factor( project=project, query=query, sample_size=sample_size, query_time_range=query_time_range, ) builder = QueryBuilder( Dataset.Discover, params={ "start": query_time_range.start_time, "end": query_time_range.end_time, "project_id": [project.id], "organization_id": project.organization.id, }, query=f"{query} event.type:transaction", selected_columns=[ "id", "trace", "random_number() as rand_num", f"modulo(rand_num, {sampling_factor}) as modulo_num", ], equations=[], orderby=None, auto_fields=True, auto_aggregations=True, use_aggregate_conditions=True, functions_acl=["random_number", "modulo"], limit=sample_size, offset=0, equation_config={"auto_add": False}, ) builder.add_conditions([Condition(lhs=Column("modulo_num"), op=Op.EQ, rhs=0)]) snuba_query = builder.get_snql_query().query snuba_query = snuba_query.set_select( snuba_query.select + [ Function( "not", [Function("has", [Column("contexts.key"), TRACE_PARENT_SPAN_CONTEXT
feat(dynamic-sampling): Improve empty transaction breakdown message [TET-338] (#39539) This PR add new attribute parentProjectBreakdown to /api/0/projects/<organization_slug>/<project_slug>/dynamic-sampling/distribution/ api: ``` { "projectBreakdown": null, "sampleSize": 0, "startTimestamp": null, "endTimestamp": null, "parentProjectBreakdown": [ { "projectId": 1, "percentage": 0.9, "project": "sentry" }, { "projectId": 2, "percentage": 0.1, "project": "javascript" } ] } ``` TODO: - [x] Update src/sentry/snuba/referrer.py https://github.com/getsentry/sentry/blob/0fbbf1626f86399b1ca4a2781d66ef96aac69de7/src/sentry/snuba/referrer.py#L208-L210 - [x] Add missing tests Co-authored-by: Andrii Soldatenko <andrii.soldatenko@gmail.io> Co-authored-by: ahmedetefy <ahmed.etefy12@gmail.com>
__fetch_randomly_sampled_transactions
ceee9dfd8d6fed70d34546e7b46ebb7bf1d49745
sentry
project_dynamic_sampling.py
19
52
https://github.com/getsentry/sentry.git
1
275
0
78
436
Python
{ "docstring": "\n Fetches a random sample of transactions of size `sample_size` in the last period\n defined by `stats_period`. The random sample is fetched by generating a random number by\n for every row, and then doing a modulo operation on it, and if that number is divisible\n by the sampling factor then its kept, otherwise is discarded. This is an alternative to\n sampling the query before applying the conditions. The goal here is to fetch the\n transaction ids, their sample rates and their trace ids.\n ", "language": "en", "n_whitespaces": 132, "n_words": 82, "vocab_size": 56 }
def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range): sampling_factor = self.__generate_transactions_sampling_factor( project=project, query=query, sample_size=sample_size, query_time_range=query_time_range, ) builder = QueryBuilder( Dataset.Discover, params={ "start": query_time_range.start_time, "end": query_time_range.end_time, "project_id": [project.id], "organization_id": project.organization.id, }, query=f"{query} event.type:transaction", selected_columns=[ "id", "trace", "random_number() as rand_num", f"modulo(rand_num, {sampling_factor}) as modulo_num", ], equations=[], orderby=None, auto_fields=True, auto_aggregations=True, use_aggregate_conditions=True, functions_acl=["random_number", "modulo"], limit=sample_size, offset=0, equation_config={"auto_add": False}, ) builder.add_conditions([Condition(lhs=Column("modulo_num"), op=Op.EQ, rhs=0)]) snuba_query = builder.get_snql_query().query snuba_query = snuba_query.set_select( snuba_query.select + [ Function( "not", [Function("has", [Column("contexts.key"), TRACE_PARENT_SPAN_CONTEXT])], alias="is_root", ) ] ) snuba_query = snuba_query.set_groupby( snuba_query.groupby + [Column("modulo_num"), Column("contexts.key")] ) data = raw_snql_query( SnubaRequest(dataset=Dataset.Discover.value, app_id="default", query=snuba_query), referrer=Referrer.DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_TRANSACTIONS.value, )["data"] return data
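The docstring describes a modulo-based random sampling trick; the following is a plain-Python sketch of that idea only, not the Snuba/SnQL query built above, and the row layout is invented for illustration:

import random

def sample_rows(rows, sampling_factor, seed=0):
    # Keep a row only when its per-row random number is divisible by the factor,
    # mirroring modulo(rand_num, sampling_factor) == 0 in the query above.
    rnd = random.Random(seed)
    return [row for row in rows if rnd.randrange(10**9) % sampling_factor == 0]

transactions = [{"id": i, "trace": f"trace-{i}"} for i in range(10_000)]
sample = sample_rows(transactions, sampling_factor=100)   # keeps roughly 1% of rows
print(len(sample))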
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
13,591
64,270
175
erpnext/controllers/queries.py
235
55
def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False): conditions = [] if isinstance(filters, str): filters = json.loads(filters) #Get searchfields from meta and use in Item Link field query meta = frappe.get_meta("Item", cached=True) searchfields = meta.get_search_fields() # these are handled separately ignored_search_fields = ("item_name", "description") for ignored_field in ignored_search_fields: if ignored_field in searchfields: searchfields.remove(ignored_field) columns = '' extra_searchfields = [field for field in searchfields if not field in ["name", "item_group", "description", "item_name"]] if extra_searchfields: columns = ", " + ", ".join(extra_searchfields) searchfields = searchfields + [field for field in[searchfield or "name", "item_code", "item_group", "item_name"] if not field in searchfields] searchfields = " or ".join([field + " like %(txt)s" for field in searchfields]) if filters and isinstance(filters, dict): if filters.get('customer') or filters.get('supplier'): party = filters.get('customer') or filters.get('supplier') item_rules_list = frappe.get_all('Party Specific Item', filters = {'party': party}, fields = ['restrict_based_on', 'based_on_value']) filters_dict = {} for rule in item_rules_list: if rule['restrict_based_on'] == 'Item': rule['restrict_based_on'] = 'name' filters_dict[rule.restrict_based_on] = [] for rule in item_rules_list: filters_dict[rule.restrict_based_on].append(rule.based_on_value) for filter in filters_dict: filters[scrub(filter)] = ['in', filters_dict[filter]] if filters.get('customer'): del filters['customer'] else: del filters['supplier'] else: filters.pop('customer', None) filters.pop('supplier', None) description_cond = '' if frappe.db.count('Item', cache=True) < 50000: # scan description only if items are less than 50000 description_cond = 'or tabItem.description LIKE %(txt)s' return frappe.db.sql(.format(
fix: ignore empty customer/supplier in item query (#29610) * fix: dont try to filter by customer/supplier if None * test: item query with emtpy supplier
item_query
41a95e56241ff8f3dceac7285f0bc6b9a43d7a06
erpnext
queries.py
16
73
https://github.com/frappe/erpnext.git
22
449
1
145
783
Python
{ "docstring": "select\n\t\t\ttabItem.name, tabItem.item_name, tabItem.item_group,\n\t\tif(length(tabItem.description) > 40, \\\n\t\t\tconcat(substr(tabItem.description, 1, 40), \"...\"), description) as description\n\t\t{columns}\n\t\tfrom tabItem\n\t\twhere tabItem.docstatus < 2\n\t\t\tand tabItem.disabled=0\n\t\t\tand tabItem.has_variants=0\n\t\t\tand (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')\n\t\t\tand ({scond} or tabItem.item_code IN (select parent from `tabItem Barcode` where barcode LIKE %(txt)s)\n\t\t\t\t{description_cond})\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, item_name\n\t\tlimit %(start)s, %(page_len)s ", "language": "en", "n_whitespaces": 51, "n_words": 69, "vocab_size": 57 }
def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False): conditions = [] if isinstance(filters, str): filters = json.loads(filters) #Get searchfields from meta and use in Item Link field query meta = frappe.get_meta("Item", cached=True) searchfields = meta.get_search_fields() # these are handled separately ignored_search_fields = ("item_name", "description") for ignored_field in ignored_search_fields: if ignored_field in searchfields: searchfields.remove(ignored_field) columns = '' extra_searchfields = [field for field in searchfields if not field in ["name", "item_group", "description", "item_name"]] if extra_searchfields: columns = ", " + ", ".join(extra_searchfields) searchfields = searchfields + [field for field in[searchfield or "name", "item_code", "item_group", "item_name"] if not field in searchfields] searchfields = " or ".join([field + " like %(txt)s" for field in searchfields]) if filters and isinstance(filters, dict): if filters.get('customer') or filters.get('supplier'): party = filters.get('customer') or filters.get('supplier') item_rules_list = frappe.get_all('Party Specific Item', filters = {'party': party}, fields = ['restrict_based_on', 'based_on_value']) filters_dict = {} for rule in item_rules_list: if rule['restrict_based_on'] == 'Item': rule['restrict_based_on'] = 'name' filters_dict[rule.restrict_based_on] = [] for rule in item_rules_list: filters_dict[rule.restrict_based_on].append(rule.based_on_value) for filter in filters_dict: filters[scrub(filter)] = ['in', filters_dict[filter]] if filters.get('customer'): del filters['customer'] else: del filters['supplier'] else: filters.pop('customer', None) filters.pop('supplier', None) description_cond = '' if frappe.db.count('Item', cache=True) < 50000: # scan description only if items are less than 50000 description_cond = 'or tabItem.description LIKE %(txt)s' return frappe.db.sql(.format( columns=columns, scond=searchfields, fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'), mcond=get_match_cond(doctype).replace('%', '%%'), description_cond = description_cond), { "today": nowdate(), "txt": "%%%s%%" % txt, "_txt": txt.replace("%", ""), "start": start, "page_len": page_len }, as_dict=as_dict) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
36,680
156,567
106
dask/array/core.py
44
10
def apply_and_enforce(*args, **kwargs): func = kwargs.pop("_func") expected_ndim = kwargs.pop("expected_ndim") out = func(*args, **kwargs) if getattr(out, "ndim", 0) != expected_ndim: out_ndim = getattr(out, "ndim", 0) raise ValueError( f"Dimensio
Add kwarg ``enforce_ndim`` to ``dask.array.map_blocks()`` (#8865)
apply_and_enforce
2b90415b02d3ad1b08362889e0818590ca3133f4
dask
core.py
12
11
https://github.com/dask/dask.git
2
68
0
36
129
Python
{ "docstring": "Apply a function, and enforce the output.ndim to match expected_ndim\n\n Ensures the output has the expected dimensionality.", "language": "en", "n_whitespaces": 19, "n_words": 17, "vocab_size": 15 }
def apply_and_enforce(*args, **kwargs): func = kwargs.pop("_func") expected_ndim = kwargs.pop("expected_ndim") out = func(*args, **kwargs) if getattr(out, "ndim", 0) != expected_ndim: out_ndim = getattr(out, "ndim", 0) raise ValueError( f"Dimension mismatch: expected output of {func} " f"to have dims = {expected_ndim}. Got {out_ndim} instead." ) return out
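A hedged example of calling apply_and_enforce directly with NumPy inputs, assuming the function is importable from dask.array.core as the record's path suggests:

import numpy as np
from dask.array.core import apply_and_enforce

x = np.arange(12).reshape(3, 4)

col_sums = apply_and_enforce(x, _func=lambda a: a.sum(axis=0), expected_ndim=1)  # 1-D output, accepted

try:
    apply_and_enforce(x, _func=lambda a: a.sum(), expected_ndim=2)  # 0-D output, 2 expected
except ValueError as err:
    print(err)   # Dimension mismatch: expected output ... to have dims = 2 ...

In normal use this check is reached through da.map_blocks(..., enforce_ndim=True) rather than by calling the helper directly.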
40,570
170,534
26
pandas/core/construction.py
13
8
def _sanitize_non_ordered(data) -> None: if isinstance(data, (set, frozenset)): raise TypeError(f"'{type(data).__name__}' type is unordered")
REF: simplify sanitize_array (#49347) REF: simpify sanitize_array
_sanitize_non_ordered
6b4fa02e10480c4ddae0714e36b7fe765fa42eac
pandas
construction.py
14
6
https://github.com/pandas-dev/pandas.git
2
26
0
13
55
Python
{ "docstring": "\n Raise only for unordered sets, e.g., not for dict_keys\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 8 }
def _sanitize_non_ordered(data) -> None: if isinstance(data, (set, frozenset)): raise TypeError(f"'{type(data).__name__}' type is unordered")
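A quick illustration of the behaviour this helper guards: constructing a Series from a plain set raises, because sets have no defined order, while ordered inputs pass through untouched.

import pandas as pd

try:
    pd.Series({1, 2, 3})      # set input is rejected
except TypeError as err:
    print(err)                # "'set' type is unordered"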
56,619
222,529
421
python3.10.4/Lib/dis.py
145
29
def dis(x=None, *, file=None, depth=None): if x is Non
add python 3.10.4 for windows
dis
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
dis.py
17
33
https://github.com/XX-net/XX-Net.git
14
249
0
96
413
Python
{ "docstring": "Disassemble classes, methods, functions, and other compiled objects.\n\n With no argument, disassemble the last traceback.\n\n Compiled objects currently include generator objects, async generator\n objects, and coroutine objects, all of which store their code object\n in a special attribute.\n ", "language": "en", "n_whitespaces": 53, "n_words": 38, "vocab_size": 34 }
def dis(x=None, *, file=None, depth=None): if x is None: distb(file=file) return # Extract functions from methods. if hasattr(x, '__func__'): x = x.__func__ # Extract compiled code objects from... if hasattr(x, '__code__'): # ...a function, or x = x.__code__ elif hasattr(x, 'gi_code'): #...a generator object, or x = x.gi_code elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or x = x.ag_code elif hasattr(x, 'cr_code'): #...a coroutine. x = x.cr_code # Perform the disassembly. if hasattr(x, '__dict__'): # Class or module items = sorted(x.__dict__.items()) for name, x1 in items: if isinstance(x1, _have_code): print("Disassembly of %s:" % name, file=file) try: dis(x1, file=file, depth=depth) except TypeError as msg: print("Sorry:", msg, file=file) print(file=file) elif hasattr(x, 'co_code'): # Code object _disassemble_recursive(x, file=file, depth=depth) elif isinstance(x, (bytes, bytearray)): # Raw bytecode _disassemble_bytes(x, file=file) elif isinstance(x, str): # Source code _disassemble_str(x, file=file, depth=depth) else: raise TypeError("don't know how to disassemble %s objects" % type(x).__name__)
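Typical ways of invoking dis() from the standard library, matching the branches above (function, code object, source string, raw bytecode):

import dis

def add(a, b):
    return a + b

dis.dis(add)                    # a function: its __code__ is extracted and disassembled
dis.dis(add.__code__)           # a code object directly
dis.dis("a + b")                # a source string is compiled, then disassembled
dis.dis(add.__code__.co_code)   # raw bytecode bytes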
49,336
199,680
18
sympy/polys/appellseqs.py
13
7
def bernoulli_poly(n, x=None, polys=False):
Run orthopolys and appellseqs through a common interface Including unifying the two Chebyshev generators into one function. There are also two kinds of Hermite polynomials, and they too share the same recurrence, but the second type He_n(x) (aka the probabilist, reduced or small polynomials) will not be added here.
bernoulli_poly
d1d46df73ebaad94089847558d00a8b7269f554d
sympy
appellseqs.py
8
54
https://github.com/sympy/sympy.git
1
33
0
13
47
Python
{ "docstring": "Generates the Bernoulli polynomial `\\operatorname{B}_n(x)`.\n\n `\\operatorname{B}_n(x)` is the unique polynomial satisfying\n\n .. math :: \\int_{x}^{x+1} \\operatorname{B}_n(t) \\,dt = x^n.\n\n Based on this, we have for nonnegative integer `s` and integer\n `a` and `b`\n\n .. math :: \\sum_{k=a}^{b} k^s = \\frac{\\operatorname{B}_{s+1}(b+1) -\n \\operatorname{B}_{s+1}(a)}{s+1}\n\n which is related to Jakob Bernoulli's original motivation for introducing\n the Bernoulli numbers, the values of these polynomials at `x = 1`.\n\n Examples\n ========\n\n >>> from sympy import summation\n >>> from sympy.abc import x\n >>> from sympy.polys import bernoulli_poly\n >>> bernoulli_poly(5, x)\n x**5 - 5*x**4/2 + 5*x**3/3 - x/6\n\n >>> def psum(p, a, b):\n ... return (bernoulli_poly(p+1,b+1) - bernoulli_poly(p+1,a)) / (p+1)\n >>> psum(4, -6, 27)\n 3144337\n >>> summation(x**4, (x, -6, 27))\n 3144337\n\n >>> psum(1, 1, x).factor()\n x*(x + 1)/2\n >>> psum(2, 1, x).factor()\n x*(x + 1)*(2*x + 1)/6\n >>> psum(3, 1, x).factor()\n x**2*(x + 1)**2/4\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Bernoulli_polynomials\n ", "language": "en", "n_whitespaces": 302, "n_words": 168, "vocab_size": 117 }
def bernoulli_poly(n, x=None, polys=False): r return named_poly(n, dup_bernoulli, QQ, "Bernoulli polynomial", (x,), polys)
13,510
63,813
49
.venv/lib/python3.8/site-packages/pip/_vendor/tenacity/after.py
26
6
def after_log(logger, log_level, sec_format="%0.3f"): log_tpl = ( "Finished call to '%s' af
upd; format
after_log
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
after.py
11
7
https://github.com/jindongwang/transferlearning.git
1
29
0
24
49
Python
{ "docstring": "After call strategy that logs to some logger the finished attempt.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def after_log(logger, log_level, sec_format="%0.3f"): log_tpl = ( "Finished call to '%s' after " + str(sec_format) + "(s), " "this was the %s time calling it." )
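The code field above stops at the log-message template; the sketch below shows how after_log is normally supplied to tenacity's retry decorator (using the upstream tenacity package rather than pip's vendored copy):

import logging
from tenacity import retry, stop_after_attempt, after_log

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@retry(stop=stop_after_attempt(3), after=after_log(logger, logging.INFO))
def flaky():
    raise RuntimeError("still failing")   # each finished attempt is logged

Calling flaky() would emit one log line per attempt, along the lines of "Finished call to '...flaky' after 0.001(s), this was the 1st time calling it."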
24,214
110,568
34
lib/matplotlib/offsetbox.py
17
11
def _compat_get_offset(meth): sigs = [lambda self,
Reparametrize offsetbox calculations in terms of bboxes. Passing a single bbox instead of (xdescent, ydescent, width, height) separately is easier to follow (see e.g. the changes in VPacker and HPacker, which no longer have to repeatedly pack/unpack whd_list), and avoids having to figure out e.g. the sign of the descents and whether width/height includes the descents, for example. Currently get_offset keeps a back compatible signature (we *could* consider killing the old signature but let's not do that for now), and _get_bbox_and_child_offsets is private because I *may* want to later also change the convention to make offsets relative to the bbox (0, 0) point rather than the bbox lower-left corner.
_compat_get_offset
de2192589f8ea50c9dc90be87b649399ff623feb
matplotlib
offsetbox.py
10
6
https://github.com/matplotlib/matplotlib.git
1
48
0
15
55
Python
{ "docstring": "\n Decorator for the get_offset method of OffsetBox and subclasses, that\n allows supporting both the new signature (self, bbox, renderer) and the old\n signature (self, width, height, xdescent, ydescent, renderer).\n ", "language": "en", "n_whitespaces": 42, "n_words": 29, "vocab_size": 24 }
def _compat_get_offset(meth): sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(), lambda self, bbox, renderer: locals()]
55,381
218,549
48
python3.10.4/Lib/ipaddress.py
16
4
def sixtofour(self):
add python 3.10.4 for windows
sixtofour
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
ipaddress.py
11
4
https://github.com/XX-net/XX-Net.git
2
34
0
14
53
Python
{ "docstring": "Return the IPv4 6to4 embedded address.\n\n Returns:\n The IPv4 6to4-embedded address if present or None if the\n address doesn't appear to contain a 6to4 embedded address.\n\n ", "language": "en", "n_whitespaces": 62, "n_words": 26, "vocab_size": 19 }
def sixtofour(self): if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
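A small illustration of the property, using addresses from the documentation ranges:

from ipaddress import IPv6Address

print(IPv6Address("2002:c000:204::1").sixtofour)   # IPv4Address('192.0.2.4')
print(IPv6Address("2001:db8::1").sixtofour)        # None: not under the 2002::/16 6to4 prefix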