Dataset columns (name: type, value range or string lengths):

ast_errors: string, lengths 0 – 3.2k
d_id: int64, 44 – 121k
id: int64, 70 – 338k
n_whitespaces: int64, 3 – 14k
path: string, lengths 8 – 134
n_words: int64, 4 – 4.82k
n_identifiers: int64, 1 – 131
random_cut: string, lengths 16 – 15.8k
commit_message: string, lengths 2 – 15.3k
fun_name: string, lengths 1 – 84
commit_id: string, lengths 40 – 40
repo: string, lengths 3 – 28
file_name: string, lengths 5 – 79
ast_levels: int64, 6 – 31
nloc: int64, 1 – 548
url: string, lengths 31 – 59
complexity: int64, 1 – 66
token_counts: int64, 6 – 2.13k
n_ast_errors: int64, 0 – 28
vocab_size: int64, 4 – 1.11k
n_ast_nodes: int64, 15 – 19.2k
language: string, 1 distinct value (Python)
documentation: dict
code: string, lengths 101 – 62.2k

Each example below lists its field values in this column order: ast_errors (shown only when non-empty), the scalar metadata, the random_cut snippet, the commit_message, the remaining metadata, the documentation dict, and finally the full code string.
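For orientation, here is a minimal sketch of how records with this schema could be loaded and inspected. The file name and the JSON Lines export format are assumptions for illustration; only the column names and the `documentation["docstring"]` key come from the rows below.

```python
import pandas as pd

# Hypothetical export: one JSON object per record, using the column names
# from the schema above. "code_records.jsonl" is an assumed file name.
df = pd.read_json("code_records.jsonl", lines=True)

# Sanity checks implied by the schema: commit_id is always a 40-char SHA,
# and language has a single class, "Python".
assert df["commit_id"].str.len().eq(40).all()
assert df["language"].eq("Python").all()

# Look at the three most complex functions and the start of their docstrings.
for _, row in df.nlargest(3, "complexity").iterrows():
    print(row["repo"], row["fun_name"], "complexity:", row["complexity"])
    print(row["documentation"]["docstring"][:80])
```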
d_id: 39,462 · id: 163,605 · n_whitespaces: 58 · path: pandas/core/indexers/utils.py
n_words: 34 · n_identifiers: 11
def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,)
REF: simplify Block.setitem (#45403)
fun_name: is_empty_indexer
commit_id: 6b43a78f2f1036ebae205d2d35ab96f07549fe96 · repo: pandas · file_name: utils.py · ast_levels: 11 · nloc: 17
url: https://github.com/pandas-dev/pandas.git
complexity: 6 · token_counts: 60 · n_ast_errors: 0 · vocab_size: 29 · n_ast_nodes: 98 · language: Python
{ "docstring": "\n Check if we have an empty indexer.\n\n Parameters\n ----------\n indexer : object\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 40, "n_words": 15, "vocab_size": 15 }
def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) # ----------------------------------------------------------- # Indexer Validation
d_id: 22,565 · id: 107,046 · n_whitespaces: 510 · path: lib/matplotlib/_constrained_layout.py
n_words: 134 · n_identifiers: 29
def make_layoutgrids_gs(layoutgrids, gs): if gs in layoutgrids or gs.figure is None: return layoutgrids # in order to do constrained_layout there has to be at least *one* # gridspec in the tree: layoutgrids['hasgrids'] = True if not hasattr(gs, '_subplot_spec'): # normal gridspec parent = layoutgrids[gs.figure] layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) else: # this is a gridspecfromsubplotspec: subplot_spec = g
FIX: better repr for subgridspecs
fun_name: make_layoutgrids_gs
commit_id: c682ca40c647770a967b6b8a7615eb91c7cb3fc9 · repo: matplotlib · file_name: _constrained_layout.py · ast_levels: 16 · nloc: 33
url: https://github.com/matplotlib/matplotlib.git
complexity: 6 · token_counts: 230 · n_ast_errors: 0 · vocab_size: 80 · n_ast_nodes: 361 · language: Python
{ "docstring": "\n Make the layoutgrid for a gridspec (and anything nested in the gridspec)\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 11 }
def make_layoutgrids_gs(layoutgrids, gs): if gs in layoutgrids or gs.figure is None: return layoutgrids # in order to do constrained_layout there has to be at least *one* # gridspec in the tree: layoutgrids['hasgrids'] = True if not hasattr(gs, '_subplot_spec'): # normal gridspec parent = layoutgrids[gs.figure] layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) else: # this is a gridspecfromsubplotspec: subplot_spec = gs._subplot_spec parentgs = subplot_spec.get_gridspec() # if a nested gridspec it is possible the parent is not in there yet: if parentgs not in layoutgrids: layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs) subspeclb = layoutgrids[parentgs] # get a unique representation: rep = object.__repr__(gs) + 'top' # gridspecfromsubplotspec need an outer container: if rep not in layoutgrids: layoutgrids[rep] = mlayoutgrid.LayoutGrid( parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan)) layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) return layoutgrids
d_id: 17,396 · id: 82,430 · n_whitespaces: 212 · path: cms/tests/test_sitemap.py
n_words: 44 · n_identifiers: 22
def test_sitemap_published_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() for item in urlset: locations.append(item['location']) for title in Title.objects.public(): page = title.page.get_public_object()
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <cclauss@me.com> * ci: codespell config taken from #7292
fun_name: test_sitemap_published_titles
commit_id: c1290c9ff89cb00caa5469129fd527e9d82cd820 · repo: django-cms · file_name: test_sitemap.py · ast_levels: 14 · nloc: 16
url: https://github.com/django-cms/django-cms.git
complexity: 6 · token_counts: 102 · n_ast_errors: 0 · vocab_size: 31 · n_ast_nodes: 203 · language: Python
{ "docstring": "\n Check that published titles are in the urls\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def test_sitemap_published_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() for item in urlset: locations.append(item['location']) for title in Title.objects.public(): page = title.page.get_public_object() if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' if page.is_published('en') and not page.publisher_is_draft: self.assertTrue(url in locations) else: self.assertFalse(url in locations)
ast_errors: @derived_from(np)
d_id: 36,471 · id: 155,800 · n_whitespaces: 343 · path: dask/array/creation.py
n_words: 121 · n_identifiers: 27
def eye(N, chunks="auto", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError("chunks must be an int or string") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = "eye-" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype)
Fix eye inconsistency with NumPy for dtype=None (#8669) (#8685)
fun_name: eye
commit_id: e25284dced9749f02bd5d8c80b6225153aa282d8 · repo: dask · file_name: creation.py · ast_levels: 17 · nloc: 25
url: https://github.com/dask/dask.git
complexity: 7 · token_counts: 230 · n_ast_errors: 1 · vocab_size: 80 · n_ast_nodes: 342 · language: Python
{ "docstring": "\n Return a 2-D Array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n chunks : int, str\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : Array of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n ", "language": "en", "n_whitespaces": 295, "n_words": 162, "vocab_size": 103 }
def eye(N, chunks="auto", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError("chunks must be an int or string") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = "eye-" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) @derived_from(np)
d_id: 80,885 · id: 271,876 · n_whitespaces: 93 · path: keras/engine/training_utils_v1.py
n_words: 24 · n_identifiers: 11
def is_composite_or_composite_value(tensor): # TODO(b/125094323): This sho
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
fun_name: is_composite_or_composite_value
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf · repo: keras · file_name: training_utils_v1.py · ast_levels: 12 · nloc: 9
url: https://github.com/keras-team/keras.git
complexity: 1 · token_counts: 39 · n_ast_errors: 0 · vocab_size: 23 · n_ast_nodes: 61 · language: Python
{ "docstring": "Returns true if 'tensor' is a CompositeTensor or a CT Value object.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
def is_composite_or_composite_value(tensor): # TODO(b/125094323): This should be isinstance(CompositeTensor) or # isinstance(CompositeTensorValue) once we support that. return isinstance( tensor, ( tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue, tf.compat.v1.ragged.RaggedTensorValue, ), )
d_id: 17,125 · id: 80,990 · n_whitespaces: 346 · path: awx/main/utils/common.py
n_words: 99 · n_identifiers: 28
def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False): current_time = now() if not start: if minutely: start = current_time.replace(microsecond=0, second=0) else: start = current_time.replace(microsecond=0, second=0, minute=0) if not end: if minutely: end = start.replace(microsecond=0, second=0) + timedelta(minutes=1) else: end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1) start_timestamp = str(start) end_timestamp = str(end) if not partition_label: if minutely: partition_label = start.strftime('%Y%m%d_%H%M') else: partition_label = start.strftime('%Y%m%d_%H') try: with transaction.atomic(): with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} ' f'PARTITION OF {tblname} ' f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');' ) exce
Handle error for create_partition Occasionally the create_partition will error with, relation "main_projectupdateevent_20220323_19" already exists This change wraps the db command into a try except block with its own transaction
fun_name: create_partition
commit_id: 24152555c5d1b52d5024197bcaf80fdb87b8b14e · repo: awx · file_name: common.py · ast_levels: 16 · nloc: 29
url: https://github.com/ansible/awx.git
complexity: 8 · token_counts: 201 · n_ast_errors: 0 · vocab_size: 67 · n_ast_nodes: 360 · language: Python
{ "docstring": "Creates new partition table for events.\n - start defaults to beginning of current hour\n - end defaults to end of current hour\n - partition_label defaults to YYYYMMDD_HH\n\n - minutely will create partitions that span _a single minute_ for testing purposes\n ", "language": "en", "n_whitespaces": 55, "n_words": 40, "vocab_size": 28 }
def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False): current_time = now() if not start: if minutely: start = current_time.replace(microsecond=0, second=0) else: start = current_time.replace(microsecond=0, second=0, minute=0) if not end: if minutely: end = start.replace(microsecond=0, second=0) + timedelta(minutes=1) else: end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1) start_timestamp = str(start) end_timestamp = str(end) if not partition_label: if minutely: partition_label = start.strftime('%Y%m%d_%H%M') else: partition_label = start.strftime('%Y%m%d_%H') try: with transaction.atomic(): with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} ' f'PARTITION OF {tblname} ' f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');' ) except ProgrammingError as e: logger.debug(f'Caught known error due to existing partition: {e}')
d_id: 76,319 · id: 260,529 · n_whitespaces: 63 · path: sklearn/metrics/pairwise.py
n_words: 34 · n_identifiers: 11
def rbf_kernel(X, Y=None, gamma=None): X, Y = check_pairwise_arrays(X, Y)
DOC Ensure `rbf_kernel` passes numpydoc validation (#23954) Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
fun_name: rbf_kernel
commit_id: 095e46670a1e21e8c49972b23e75f2d2a48c6c93 · repo: scikit-learn · file_name: pairwise.py · ast_levels: 11 · nloc: 8
url: https://github.com/scikit-learn/scikit-learn.git
complexity: 2 · token_counts: 68 · n_ast_errors: 0 · vocab_size: 28 · n_ast_nodes: 101 · language: Python
{ "docstring": "Compute the rbf (gaussian) kernel between X and Y.\n\n K(x, y) = exp(-gamma ||x-y||^2)\n\n for each pair of rows x in X and y in Y.\n\n Read more in the :ref:`User Guide <rbf_kernel>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_features)\n A feature array.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. If `None`, uses `Y=X`.\n\n gamma : float, default=None\n If None, defaults to 1.0 / n_features.\n\n Returns\n -------\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\n The RBF kernel.\n ", "language": "en", "n_whitespaces": 153, "n_words": 85, "vocab_size": 63 }
def rbf_kernel(X, Y=None, gamma=None): X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K
d_id: 78,517 · id: 266,698 · n_whitespaces: 974 · path: lib/ansible/module_utils/common/parameters.py
n_words: 248 · n_identifiers: 35
def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list):
parameters: handle blank values when argument is a list (#77119) Fixes: #77108 Signed-off-by: Abhijeet Kasurde <akasurde@redhat.com>
fun_name: _validate_argument_values
commit_id: 4f48f375a0203b0d09c55522a86300a52da5b24a · repo: ansible · file_name: parameters.py · ast_levels: 24 · nloc: 38
url: https://github.com/ansible/ansible.git
complexity: 22 · token_counts: 356 · n_ast_errors: 0 · vocab_size: 128 · n_ast_nodes: 578 · language: Python
{ "docstring": "Ensure all arguments have the requested values, and there are no stray arguments", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list): diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = ", ".join([to_native(c) for c in choices]) diff_str = ", ".join(diff_list) msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) elif parameters[param] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. if parameters[param] == 'False': overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (parameters[param],) = overlap if parameters[param] == 'True': overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (parameters[param],) = overlap if parameters[param] not in choices: choices_str = ", ".join([to_native(c) for c in choices]) msg = "value of %s must be one of: %s, got: %s" % (param, choices_str, parameters[param]) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) else: msg = "internal error: choices for argument %s are not iterable: %s" % (param, choices) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentTypeError(msg))
d_id: 2,892 · id: 19,145 · n_whitespaces: 41 · path: mlflow/models/evaluation/base.py
n_words: 9 · n_identifiers: 4
def content(self): if self._content is None: self._load()
Improve evaluation api (#5256) * init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap limitation on value type Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix format Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
fun_name: content
commit_id: 4c58179509e6f6047789efb0a95c2b0e20cb6c8f · repo: mlflow · file_name: base.py · ast_levels: 9 · nloc: 4
url: https://github.com/mlflow/mlflow.git
complexity: 2 · token_counts: 22 · n_ast_errors: 0 · vocab_size: 8 · n_ast_nodes: 39 · language: Python
{ "docstring": "\n The content of the artifact (representation varies)\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def content(self): if self._content is None: self._load() return self._content
ast_errors: @frappe.whitelist(allow_guest=True)
d_id: 13,657 · id: 64,540 · n_whitespaces: 95 · path: erpnext/e_commerce/api.py
n_words: 143 · n_identifiers: 37
def get_product_filter_data(query_args=None): if isinstance(query_args, str): query_args = json.loads(query_args) query_args = frappe._dict(query_args) if query_args: search = query_args.get("search") field_filters = query_args.get("field_filters", {}) attribute_filters = query_args.get("attribute_filters", {}) start = cint(query_args.start) if query_args.get("start") else 0 item_group = query_args.get("item_group") from_filters = query_args.get("from_filters") else: search, attri
feat: Include child item group products in Item Group Page & cleanup - Added 'Include descendants' checkbox, which will pull child item group products too - Build item group filters in query engine file - Include logic in filter engine - Clean up Website section of Item Group page (UX) - Add util to fetch child item groups including self
fun_name: get_product_filter_data
commit_id: b2755f6fdddd3e1b0a305b57c18651c98fee8f7e · repo: erpnext · file_name: api.py · ast_levels: 13 · nloc: 46
url: https://github.com/frappe/erpnext.git
complexity: 9 · token_counts: 271 · n_ast_errors: 1 · vocab_size: 100 · n_ast_nodes: 464 · language: Python
{ "docstring": "\n\t\tReturns filtered products and discount filters.\n\t\t:param query_args (dict): contains filters to get products list\n\n\t\tQuery Args filters:\n\t\tsearch (str): Search Term.\n\t\tfield_filters (dict): Keys include item_group, brand, etc.\n\t\tattribute_filters(dict): Keys include Color, Size, etc.\n\t\tstart (int): Offset items by\n\t\titem_group (str): Valid Item Group\n\t\tfrom_filters (bool): Set as True to jump to page 1\n\t", "language": "en", "n_whitespaces": 46, "n_words": 55, "vocab_size": 47 }
def get_product_filter_data(query_args=None): if isinstance(query_args, str): query_args = json.loads(query_args) query_args = frappe._dict(query_args) if query_args: search = query_args.get("search") field_filters = query_args.get("field_filters", {}) attribute_filters = query_args.get("attribute_filters", {}) start = cint(query_args.start) if query_args.get("start") else 0 item_group = query_args.get("item_group") from_filters = query_args.get("from_filters") else: search, attribute_filters, item_group, from_filters = None, None, None, None field_filters = {} start = 0 # if new filter is checked, reset start to show filtered items from page 1 if from_filters: start = 0 sub_categories = [] if item_group: sub_categories = get_child_groups_for_website(item_group, immediate=True) engine = ProductQuery() try: result = engine.query( attribute_filters, field_filters, search_term=search, start=start, item_group=item_group ) except Exception: traceback = frappe.get_traceback() frappe.log_error(traceback, frappe._("Product Engine Error")) return {"exc": "Something went wrong!"} # discount filter data filters = {} discounts = result["discounts"] if discounts: filter_engine = ProductFiltersBuilder() filters["discount_filters"] = filter_engine.get_discount_filters(discounts) return { "items": result["items"] or [], "filters": filters, "settings": engine.settings, "sub_categories": sub_categories, "items_count": result["items_count"] } @frappe.whitelist(allow_guest=True)
d_id: 89,421 · id: 290,303 · n_whitespaces: 35 · path: homeassistant/components/mqtt/light/schema_basic.py
n_words: 13 · n_identifiers: 7
async def async_turn_on(self, **kwargs): # noqa: C901 should
Use `_attr_` for MQTT light (#81465) * Schema basic * Schema json * Schema template * add color_mode - follow up comments * Fix regression * Follow up comments 2 * Fix mypy errors * Update homeassistant/components/mqtt/light/schema_template.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
fun_name: async_turn_on
commit_id: d66d079330b92c02c38fb1c9dca539617161fdbc · repo: core · file_name: schema_basic.py · ast_levels: 8 · nloc: 126
url: https://github.com/home-assistant/core.git
complexity: 43 · token_counts: 909 · n_ast_errors: 0 · vocab_size: 12 · n_ast_nodes: 36 · language: Python
{ "docstring": "Turn the device on.\n\n This method is a coroutine.\n ", "language": "en", "n_whitespaces": 23, "n_words": 9, "vocab_size": 9 }
async def async_turn_on(self, **kwargs): # noqa: C901 should_update = False on_command_type = self._config[CONF_ON_COMMAND_TYPE]
d_id: 117,683 · id: 321,374 · n_whitespaces: 54 · path: tests/unit/keyinput/test_keyutils.py
n_words: 19 · n_identifiers: 16
def test_fake_mac(self, modifiers, expected): seq = keyutils.KeySequence() info = keyutils.KeyInfo(key=Qt.K
Run scripts/dev/rewrite_enums.py
fun_name: test_fake_mac
commit_id: 0877fb0d78635692e481c8bde224fac5ad0dd430 · repo: qutebrowser · file_name: test_keyutils.py · ast_levels: 11 · nloc: 5
url: https://github.com/qutebrowser/qutebrowser.git
complexity: 1 · token_counts: 65 · n_ast_errors: 0 · vocab_size: 17 · n_ast_nodes: 102 · language: Python
{ "docstring": "Make sure Control/Meta are swapped with a simulated Mac.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_fake_mac(self, modifiers, expected): seq = keyutils.KeySequence() info = keyutils.KeyInfo(key=Qt.Key.Key_A, modifiers=modifiers) new = seq.append_event(info.to_event()) assert new[0] == keyutils.KeyInfo(Qt.Key.Key_A, expected)
d_id: 628 · id: 4,171 · n_whitespaces: 25 · path: airbyte-integrations/connectors/source-recurly/source_recurly/streams.py
n_words: 11 · n_identifiers: 5
def default_params(self) -> dict: return {"order": "asc", "sort": self.sort_key, "limit": self.limit}
🎉 Recurly Schema Revamp (#9866) * Cleanup Recurly connector schemas * Add more Recurly schemas to the connector - `billing_infos` - `shipping_addresses` - `shipping_methods` - `subscription_changes` * Add Recurly `add-on` resouce * Add Recurly's account notes resource schema * Add unique coupons to Recurly source * Add credit payments to Recurly connector * Add Recurly resources to integration tests configurations * Bump Recurly source version to `0.4.0` * Add `line_items` Recurly resource * Add `line_items` to Recurly documentation * Add missing `line_items` JSON schema * Replace Subscription Change Recurly API call with Subscription `pending_changes` field * Replace Recurly unique coupon codes API call with coupons `unique_coupon` field To avoid the extra API call to import unique coupon calls * Revert "Replace Recurly unique coupon codes API call with coupons `unique_coupon` field" This reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8. * Add `end_time` parameter to Recurly connector * Order Recurly specs * Set the Recurly `begin_time` and `end_time` to be optional * Add `order` to Recurly `source_spec.yaml` * Add `maxLength` to Recurly source schemas * Set `maxLength` for Recurly Subscription and Transaction `uuid` * Fix Recurly `export_dates` acceptance tests
fun_name: default_params
commit_id: 63af98e3b999d4b223237b51472a819915c5a558 · repo: airbyte · file_name: streams.py · ast_levels: 8 · nloc: 5
url: https://github.com/airbytehq/airbyte.git
complexity: 1 · token_counts: 26 · n_ast_errors: 0 · vocab_size: 11 · n_ast_nodes: 49 · language: Python
{ "docstring": "\n Returns the parameters to be sent together with the API call to Recurly\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 11 }
def default_params(self) -> dict: return {"order": "asc", "sort": self.sort_key, "limit": self.limit}
d_id: 33,312 · id: 144,810 · n_whitespaces: 154 · path: python/ray/serve/deployment_state.py
n_words: 69 · n_identifiers: 11
def _should_start_new_health_check(self) -> bool: if self._health_check_ref is not None: # There's already an active health check. return False # If there's no active health check, kick off another and reset # the timer if it's been long enough since the last health # check. Add some randomness to avo
[serve] Improve health check failure semantics (#22297)
fun_name: _should_start_new_health_check
commit_id: 610930ae6aeafb37be75851a8c1b9ff39d5f7d22 · repo: ray · file_name: deployment_state.py · ast_levels: 9 · nloc: 16
url: https://github.com/ray-project/ray.git
complexity: 2 · token_counts: 51 · n_ast_errors: 0 · vocab_size: 55 · n_ast_nodes: 81 · language: Python
{ "docstring": "Determines if a new health check should be kicked off.\n\n A health check will be started if:\n 1) There is not already an active health check.\n 2) It has been more than self._health_check_period_s since the\n previous health check was *started*.\n\n This assumes that self._health_check_ref is reset to `None` when an\n active health check succeeds or fails (due to returning or timeout).\n ", "language": "en", "n_whitespaces": 125, "n_words": 61, "vocab_size": 48 }
def _should_start_new_health_check(self) -> bool: if self._health_check_ref is not None: # There's already an active health check. return False # If there's no active health check, kick off another and reset # the timer if it's been long enough since the last health # check. Add some randomness to avoid synchronizing across all # replicas. time_since_last = time.time() - self._last_health_check_time randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1) return time_since_last > randomized_period
d_id: 6,885 · id: 37,910 · n_whitespaces: 104 · path: src/transformers/trainer_pt_utils.py
n_words: 64 · n_identifiers: 14
def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleas
Ensure tensors are at least 1d for pad and concat (#17179) * Ensure tensors are at least 1d for pad and concat * Compatibility * Fix * Fix * Add test * Retrigger CI * Consistency with master * Retrigger CI
fun_name: numpy_pad_and_concatenate
commit_id: 47412c7d434f6ddfc02a9b7ecd6182b86ae0a164 · repo: transformers · file_name: trainer_pt_utils.py · ast_levels: 12 · nloc: 10
url: https://github.com/huggingface/transformers.git
complexity: 3 · token_counts: 162 · n_ast_errors: 0 · vocab_size: 49 · n_ast_nodes: 242 · language: Python
{ "docstring": "Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result
d_id: 10,926 · id: 53,859 · n_whitespaces: 146 · path: src/prefect/task_runners.py
n_words: 32 · n_identifiers: 6
def _ray(self) -> "ray": global ray if r
First draft `RayTaskRunner` implementation
fun_name: _ray
commit_id: f97603bba836c215e153d7d3d5b3b9de4d0ae822 · repo: prefect · file_name: task_runners.py · ast_levels: 13 · nloc: 14
url: https://github.com/PrefectHQ/prefect.git
complexity: 3 · token_counts: 33 · n_ast_errors: 0 · vocab_size: 29 · n_ast_nodes: 61 · language: Python
{ "docstring": "\n Delayed import of `ray` allowing configuration of the task runner\n without the extra installed and improves `prefect` import times.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 16 }
def _ray(self) -> "ray": global ray if ray is None: try: import ray except ImportError as exc: raise RuntimeError( "Using the `RayTaskRunner` requires `ray` to be installed." ) from exc return ray
d_id: 89,349 · id: 290,231 · n_whitespaces: 90 · path: homeassistant/components/zwave_js/climate.py
n_words: 18 · n_identifiers: 10
def temperature_unit(self) -> str: if ( self._unit_value and self._unit_v
Use enums instead of deprecated constants (#81591)
fun_name: temperature_unit
commit_id: 9a747bafa398185eb3d4fe041c52acfbb8264372 · repo: core · file_name: climate.py · ast_levels: 13 · nloc: 9
url: https://github.com/home-assistant/core.git
complexity: 4 · token_counts: 45 · n_ast_errors: 0 · vocab_size: 16 · n_ast_nodes: 75 · language: Python
{ "docstring": "Return the unit of measurement used by the platform.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def temperature_unit(self) -> str: if ( self._unit_value and self._unit_value.metadata.unit and "f" in self._unit_value.metadata.unit.lower() ): return UnitOfTemperature.FAHRENHEIT return UnitOfTemperature.CELSIUS
d_id: 23,870 · id: 109,993 · n_whitespaces: 544 · path: examples/text_labels_and_annotations/angles_on_bracket_arrows.py
n_words: 221 · n_identifiers: 48
def get_point_of_rotated_vertical(origin, line_length, degrees): rad = np.deg2rad(-degrees) return [origin[0] + line_length * np.sin(rad), origin[1] + line_length * np.cos(rad)] fig, ax = plt.subplots(figsize=(8, 7)) ax.set(xlim=(0, 6), ylim=(-1, 4)) ax.set_title("Orientation of the bracket arrows relative to angleA and angleB") for i, style in enumerate(["]-[", "|-|"]): for j, angle in enumerate([-40, 60]): y = 2*i + j arrow_centers = ((1, y), (5, y)) vlines = ((1, y + 0.5), (5, y + 0.5)) anglesAB = (angle, -angle) bracketstyle = f"{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}" bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle, mutation_scale=42) ax.add_patch(bracket) ax.text(3, y + 0.05, bracketstyle, ha="center", va="bottom") ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines], linestyles="--", color="C0") # Get the top coordinates for the drawn patches at A and B patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle) for center, angle in zip(arrow_centers, anglesAB)] # Define the connection directions for the annotation arrows connection_dirs = (1, -1) if angle > 0 else (-1, 1) # Add arrows and annotation text arrowstyle = "Simple, tail_width=0.5, head_width=4, head_length=8" for vline, dir, patch_top, angle in zip(vlines, connection_dirs, patch_tops, anglesAB): kw = dict(connectionstyle=f"arc3,rad={dir * 0.5}", arrowst
Updated Angles on Bracket arrow styles example to make angles clear #23176 (#24145) * removed AngleAnnotation from angle_on_bracket_arrow example * Fixes indentation mistake. * rebase to main, remove conflicting commit
fun_name: get_point_of_rotated_vertical
commit_id: f15aeee5e8d380c2ea04bcbed202a8940a7db1d0 · repo: matplotlib · file_name: angles_on_bracket_arrows.py · ast_levels: 16 · nloc: 4
url: https://github.com/matplotlib/matplotlib.git
complexity: 1 · token_counts: 49 · n_ast_errors: 0 · vocab_size: 150 · n_ast_nodes: 608 · language: Python
{ "docstring": "Return xy coordinates of the vertical line end rotated by degrees.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def get_point_of_rotated_vertical(origin, line_length, degrees): rad = np.deg2rad(-degrees) return [origin[0] + line_length * np.sin(rad), origin[1] + line_length * np.cos(rad)] fig, ax = plt.subplots(figsize=(8, 7)) ax.set(xlim=(0, 6), ylim=(-1, 4)) ax.set_title("Orientation of the bracket arrows relative to angleA and angleB") for i, style in enumerate(["]-[", "|-|"]): for j, angle in enumerate([-40, 60]): y = 2*i + j arrow_centers = ((1, y), (5, y)) vlines = ((1, y + 0.5), (5, y + 0.5)) anglesAB = (angle, -angle) bracketstyle = f"{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}" bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle, mutation_scale=42) ax.add_patch(bracket) ax.text(3, y + 0.05, bracketstyle, ha="center", va="bottom") ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines], linestyles="--", color="C0") # Get the top coordinates for the drawn patches at A and B patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle) for center, angle in zip(arrow_centers, anglesAB)] # Define the connection directions for the annotation arrows connection_dirs = (1, -1) if angle > 0 else (-1, 1) # Add arrows and annotation text arrowstyle = "Simple, tail_width=0.5, head_width=4, head_length=8" for vline, dir, patch_top, angle in zip(vlines, connection_dirs, patch_tops, anglesAB): kw = dict(connectionstyle=f"arc3,rad={dir * 0.5}", arrowstyle=arrowstyle, color="C0") ax.add_patch(FancyArrowPatch(vline, patch_top, **kw)) ax.text(vline[0] - dir * 0.15, y + 0.3, f'{angle}°', ha="center", va="center") ############################################################################# # # .. admonition:: References # # The use of the following functions, methods, classes and modules is shown # in this example: # # - `matplotlib.patches.ArrowStyle`
d_id: 12,977 · id: 62,436 · n_whitespaces: 19 · path: .venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_tokenizer.py
n_words: 5 · n_identifiers: 5
def processEntityInAttribute(self, allowedChar): self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
upd; format
fun_name: processEntityInAttribute
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 · repo: transferlearning · file_name: _tokenizer.py · ast_levels: 8 · nloc: 2
url: https://github.com/jindongwang/transferlearning.git
complexity: 1 · token_counts: 20 · n_ast_errors: 0 · vocab_size: 5 · n_ast_nodes: 33 · language: Python
{ "docstring": "This method replaces the need for \"entityInAttributeValueState\".\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def processEntityInAttribute(self, allowedChar): self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
d_id: 49,638 · id: 200,424 · n_whitespaces: 148 · path: sympy/physics/secondquant.py
n_words: 67 · n_identifiers: 21
def _get_ordered_dummies(mul, verbose=False): # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' d
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
fun_name: _get_ordered_dummies
commit_id: 24f1e7730119fe958cc8e28411f790c9a5ec04eb · repo: sympy · file_name: secondquant.py · ast_levels: 12 · nloc: 26
url: https://github.com/sympy/sympy.git
complexity: 13 · token_counts: 258 · n_ast_errors: 0 · vocab_size: 43 · n_ast_nodes: 211 · language: Python
{ "docstring": "Returns all dummies in the mul sorted in canonical order.\n\n Explanation\n ===========\n\n The purpose of the canonical ordering is that dummies can be substituted\n consistently across terms with the result that equivalent terms can be\n simplified.\n\n It is not possible to determine if two terms are equivalent based solely on\n the dummy order. However, a consistent substitution guided by the ordered\n dummies should lead to trivially (non-)equivalent terms, thereby revealing\n the equivalence. This also means that if two terms have identical sequences of\n dummies, the (non-)equivalence should already be apparent.\n\n Strategy\n --------\n\n The canonical order is given by an arbitrary sorting rule. A sort key\n is determined for each dummy as a tuple that depends on all factors where\n the index is present. The dummies are thereby sorted according to the\n contraction structure of the term, instead of sorting based solely on the\n dummy symbol itself.\n\n After all dummies in the term has been assigned a key, we check for identical\n keys, i.e. unorderable dummies. If any are found, we call a specialized\n method, _determine_ambiguous(), that will determine a unique order based\n on recursive calls to _get_ordered_dummies().\n\n Key description\n ---------------\n\n A high level description of the sort key:\n\n 1. Range of the dummy index\n 2. Relation to external (non-dummy) indices\n 3. Position of the index in the first factor\n 4. Position of the index in the second factor\n\n The sort key is a tuple with the following components:\n\n 1. A single character indicating the range of the dummy (above, below\n or general.)\n 2. A list of strings with fully masked string representations of all\n factors where the dummy is present. By masked, we mean that dummies\n are represented by a symbol to indicate either below fermi, above or\n general. No other information is displayed about the dummies at\n this point. The list is sorted stringwise.\n 3. An integer number indicating the position of the index, in the first\n factor as sorted in 2.\n 4. An integer number indicating the position of the index, in the second\n factor as sorted in 2.\n\n If a factor is either of type AntiSymmetricTensor or SqOperator, the index\n position in items 3 and 4 is indicated as 'upper' or 'lower' only.\n (Creation operators are considered upper and annihilation operators lower.)\n\n If the masked factors are identical, the two factors cannot be ordered\n unambiguously in item 2. In this case, items 3, 4 are left out. If several\n indices are contracted between the unorderable factors, it will be handled by\n _determine_ambiguous()\n\n\n ", "language": "en", "n_whitespaces": 650, "n_words": 415, "vocab_size": 207 }
def _get_ordered_dummies(mul, verbose=False): # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' dum_repr = {d: __kprint(d) for d in all_dums}
d_id: 9,378 · id: 48,153 · n_whitespaces: 385 · path: airflow/providers/amazon/aws/example_dags/example_athena.py
n_words: 107 · n_identifiers: 51
def read_results_from_s3(query_execution_id): s3_hook = S3Hook() file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv') file_content = file_obj['Body'].read().decode('utf-8') print(file_content) QUERY_CREATE_TABLE = f QUERY_READ_TABLE = f QUERY_DROP_TABLE = f with DAG( dag_id='example_athena', schedule_interval=None, start_date=datetime(2021, 1, 1), tags=['example'], catchup=False, ) as dag: upload_sample_data = S3CreateObjectOperator( task_id='upload_sample_data', s3_bucket=S3_BUCKET, s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}', data=SAMPLE_DATA, replace=True, ) create_table = AthenaOperator( task_id='create_table', query=QUERY_CREATE_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [START howto_athena_operator] read_table = AthenaOperator( task_id='read_table', query=QUERY_READ_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{
Update the Athena Sample DAG and Docs (#23428) * Update the Athena Sample DAG and Docs
fun_name: read_results_from_s3
commit_id: 46af5baba810a07eec395e89db08fc5dab175e23 · repo: airflow · file_name: example_athena.py · ast_levels: 12 · nloc: 5
url: https://github.com/apache/airflow.git
complexity: 1 · token_counts: 48 · n_ast_errors: 0 · vocab_size: 63 · n_ast_nodes: 462 · language: Python
{ "docstring": "\nCREATE EXTERNAL TABLE IF NOT EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ( `name` string, `age` int )\nROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'\nWITH SERDEPROPERTIES ( 'serialization.format' = ',', 'field.delim' = ','\n) LOCATION 's3://{S3_BUCKET}/{S3_KEY}/{ATHENA_TABLE}'\nTBLPROPERTIES ('has_encrypted_data'='false')\n\nSELECT * from {ATHENA_DATABASE}.{ATHENA_TABLE}\n\nDROP TABLE IF EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE}\n", "language": "en", "n_whitespaces": 33, "n_words": 40, "vocab_size": 32 }
def read_results_from_s3(query_execution_id): s3_hook = S3Hook() file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv') file_content = file_obj['Body'].read().decode('utf-8') print(file_content) QUERY_CREATE_TABLE = f QUERY_READ_TABLE = f QUERY_DROP_TABLE = f with DAG( dag_id='example_athena', schedule_interval=None, start_date=datetime(2021, 1, 1), tags=['example'], catchup=False, ) as dag: upload_sample_data = S3CreateObjectOperator( task_id='upload_sample_data', s3_bucket=S3_BUCKET, s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}', data=SAMPLE_DATA, replace=True, ) create_table = AthenaOperator( task_id='create_table', query=QUERY_CREATE_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [START howto_athena_operator] read_table = AthenaOperator( task_id='read_table', query=QUERY_READ_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [END howto_athena_operator] # [START howto_athena_sensor] await_query = AthenaSensor( task_id='await_query', query_execution_id=read_table.output, ) # [END howto_athena_sensor] drop_table = AthenaOperator( task_id='drop_table', query=QUERY_DROP_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) remove_s3_files = S3DeleteObjectsOperator( task_id='remove_s3_files', bucket=S3_BUCKET, prefix=S3_KEY, ) ( upload_sample_data >> create_table >> read_table >> await_query >> read_results_from_s3(read_table.output) >> drop_table >> remove_s3_files )
d_id: 40,074 · id: 167,667 · n_whitespaces: 112 · path: pandas/core/dtypes/common.py
n_words: 37 · n_identifiers: 12
def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = get_dtype(arr_or_dtype) except TypeError:
ENH: DTI/DTA.astype support non-nano (#47579) * ENH: DTI/DTA.astype support non-nano * whatsnew * GH ref * pyright fixup
fun_name: is_datetime64_ns_dtype
commit_id: 67e8c4c3761ab1da4b0a341a472c0fe2ea393e8b · repo: pandas · file_name: common.py · ast_levels: 14 · nloc: 47
url: https://github.com/pandas-dev/pandas.git
complexity: 6 · token_counts: 63 · n_ast_errors: 0 · vocab_size: 29 · n_ast_nodes: 106 · language: Python
{ "docstring": "\n Check whether the provided array or dtype is of the datetime64[ns] dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n bool\n Whether or not the array or dtype is of the datetime64[ns] dtype.\n\n Examples\n --------\n >>> is_datetime64_ns_dtype(str)\n False\n >>> is_datetime64_ns_dtype(int)\n False\n >>> is_datetime64_ns_dtype(np.datetime64) # no unit\n False\n >>> is_datetime64_ns_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_ns_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_ns_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=\"datetime64\")) # no unit\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=\"datetime64[ps]\")) # wrong unit\n False\n >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype=\"datetime64[ns]\"))\n True\n ", "language": "en", "n_whitespaces": 188, "n_words": 86, "vocab_size": 49 }
def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or ( isinstance(tipo, DatetimeTZDtype) and tipo._unit == "ns" )
ast_errors: @register.tag
d_id: 50,267 · id: 203,239 · n_whitespaces: 290 · path: django/template/defaulttags.py
n_words: 131 · n_identifiers: 15
def regroup(parser, token): bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise Template
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
fun_name: regroup
commit_id: c5cd8783825b5f6384417dac5f3889b4210b7d08 · repo: django · file_name: defaulttags.py · ast_levels: 10 · nloc: 16
url: https://github.com/django/django.git
complexity: 4 · token_counts: 95 · n_ast_errors: 1 · vocab_size: 95 · n_ast_nodes: 172 · language: Python
{ "docstring": "\n Regroup a list of alike objects by a common attribute.\n\n This complex tag is best illustrated by use of an example: say that\n ``musicians`` is a list of ``Musician`` objects that have ``name`` and\n ``instrument`` attributes, and you'd like to display a list that\n looks like:\n\n * Guitar:\n * Django Reinhardt\n * Emily Remler\n * Piano:\n * Lovie Austin\n * Bud Powell\n * Trumpet:\n * Duke Ellington\n\n The following snippet of template code would accomplish this dubious task::\n\n {% regroup musicians by instrument as grouped %}\n <ul>\n {% for group in grouped %}\n <li>{{ group.grouper }}\n <ul>\n {% for musician in group.list %}\n <li>{{ musician.name }}</li>\n {% endfor %}\n </ul>\n {% endfor %}\n </ul>\n\n As you can see, ``{% regroup %}`` populates a variable with a list of\n objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the\n item that was grouped by; ``list`` contains the list of objects that share\n that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano``\n and ``Trumpet``, and ``list`` is the list of musicians who play this\n instrument.\n\n Note that ``{% regroup %}`` does not work when the list to be grouped is not\n sorted by the key you are grouping by! This means that if your list of\n musicians was not sorted by instrument, you'd need to make sure it is sorted\n before using it, i.e.::\n\n {% regroup musicians|dictsort:\"instrument\" by instrument as grouped %}\n ", "language": "en", "n_whitespaces": 478, "n_words": 230, "vocab_size": 128 }
def regroup(parser, token): bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise TemplateSyntaxError( "next-to-last argument to 'regroup' tag must be 'as'" ) var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag
d_id: 5,427 · id: 30,242 · n_whitespaces: 42 · path: spotdl/types/saved.py
n_words: 14 · n_identifiers: 7
def create_basic_list(cls) -> "Saved": metadata
fixed arguments for frozen env fixed pylint errors fixed arguments black fixed argument parser for all scenarios black docs black
fun_name: create_basic_list
commit_id: 773398048b7990ab58e2998fe4d15355f7998774 · repo: spotify-downloader · file_name: saved.py · ast_levels: 9 · nloc: 10
url: https://github.com/spotDL/spotify-downloader.git
complexity: 1 · token_counts: 39 · n_ast_errors: 0 · vocab_size: 13 · n_ast_nodes: 70 · language: Python
{ "docstring": "\n Create a basic list with only the required metadata and urls.\n\n ### Returns\n - The Saved object.\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 17 }
def create_basic_list(cls) -> "Saved": metadata = cls.get_metadata("saved") urls = cls.get_urls("saved") return cls(**metadata, urls=urls, songs=[])
d_id: 95,675 · id: 296,701 · n_whitespaces: 49 · path: tests/common.py
n_words: 23 · n_identifiers: 5
def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a
Mark device actions from hidden or auxiliary entities as secondary (#70278)
fun_name: assert_lists_same
commit_id: 64381acbaf2930cda5dfa538d00bfa9f5172e690 · repo: core · file_name: common.py · ast_levels: 8 · nloc: 6
url: https://github.com/home-assistant/core.git
complexity: 3 · token_counts: 36 · n_ast_errors: 0 · vocab_size: 14 · n_ast_nodes: 57 · language: Python
{ "docstring": "Compare two lists, ignoring order.\n\n Check both that all items in a are in b and that all items in b are in a,\n otherwise assert_lists_same([\"1\", \"1\"], [\"1\", \"2\"]) could be True.\n ", "language": "en", "n_whitespaces": 41, "n_words": 32, "vocab_size": 24 }
def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a
d_id: 36,771 · id: 156,780 · n_whitespaces: 453 · path: dask/dataframe/tests/test_format.py
n_words: 100 · n_identifiers: 20
def test_dataframe_format_with_index(): pytest.importorskip("jinja2") df = pd.DataFrame( { "A": [1, 2, 3, 4, 5, 6, 7, 8], "B": list("ABCDEFGH"), "C": pd.Categorical(list("AAABBBCC")), }, index=list("ABCDEFGH"), ) ddf = dd.from_pandas(df, 3) exp = ( "Dask DataFrame Structure:\n" " A B C\n" "npartitions=3 \n" "A int64 object category[known]\n" "D ... ... ...\n" "G ... ... ...\n" "H ... ... ...\n" "Dask Name: from_pandas, 1 graph layer" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp
Change repr methods to avoid Layer materialization (#9289) * change task count to layer count in DataFrame and Array reprs * add test * address doctest failure * simplify test * support pluralization * use 'graph layers' instead of 'layers' to be more explicit
fun_name: test_dataframe_format_with_index
commit_id: ddcb841903f8f180aa359bd8db0054aa3b5964e3 · repo: dask · file_name: test_format.py · ast_levels: 15 · nloc: 79
url: https://github.com/dask/dask.git
complexity: 1 · token_counts: 145 · n_ast_errors: 0 · vocab_size: 70 · n_ast_nodes: 259 · language: Python
{ "docstring": "<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>A</th>\n <th>B</th>\n <th>C</th>\n </tr>\n <tr>\n <th>npartitions=3</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>A</th>\n <td>int64</td>\n <td>object</td>\n <td>category[known]</td>\n </tr>\n <tr>\n <th>D</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>G</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>H</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n </tbody>\n</table><div><strong>Dask DataFrame Structure:</strong></div>\n{exp_table}\n<div>Dask Name: from_pandas, 1 graph layer</div><div><strong>Dask DataFrame Structure:</strong></div>\n<div>\n{style}{exp_table}\n</div>\n<div>Dask Name: from_pandas, 1 graph layer</div>", "language": "en", "n_whitespaces": 218, "n_words": 66, "vocab_size": 38 }
def test_dataframe_format_with_index(): pytest.importorskip("jinja2") df = pd.DataFrame( { "A": [1, 2, 3, 4, 5, 6, 7, 8], "B": list("ABCDEFGH"), "C": pd.Categorical(list("AAABBBCC")), }, index=list("ABCDEFGH"), ) ddf = dd.from_pandas(df, 3) exp = ( "Dask DataFrame Structure:\n" " A B C\n" "npartitions=3 \n" "A int64 object category[known]\n" "D ... ... ...\n" "G ... ... ...\n" "H ... ... ...\n" "Dask Name: from_pandas, 1 graph layer" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp
d_id: 51,942 · id: 207,372 · n_whitespaces: 88 · path: tests/admin_scripts/tests.py
n_words: 28 · n_identifiers: 15
def test_run_from_argv_closes_connections(self):
Refs #33476 -- Reformatted code with Black.
fun_name: test_run_from_argv_closes_connections
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 · repo: django · file_name: tests.py · ast_levels: 11 · nloc: 7
url: https://github.com/django/django.git
complexity: 1 · token_counts: 61 · n_ast_errors: 0 · vocab_size: 26 · n_ast_nodes: 111 · language: Python
{ "docstring": "\n A command called from the command line should close connections after\n being executed (#21255).\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
def test_run_from_argv_closes_connections(self): command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch("django.core.management.base.connections") as mock_connections: command.run_from_argv(["", ""]) # Test connections have been closed self.assertTrue(mock_connections.close_all.called)
d_id: 26,025 · id: 117,547 · n_whitespaces: 1,445 · path: tests/unit/test_project_structure.py
n_words: 536 · n_identifiers: 31
def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label a
update and delete model version renaming (predictor->model)
fun_name: test_version_managing
commit_id: 3f1a5c30c2ccbd78b21f1f41b7dfdfca87bb7135 · repo: mindsdb · file_name: test_project_structure.py · ast_levels: 13 · nloc: 130
url: https://github.com/mindsdb/mindsdb.git
complexity: 5 · token_counts: 716 · n_ast_errors: 0 · vocab_size: 173 · n_ast_nodes: 1,293 · language: Python
{ "docstring": "\n CREATE PREDICTOR proj.task_model\n from pg (select * from tasks)\n PREDICT a\n using engine='dummy_ml', tag = 'first'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT b\n using tag = 'second'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT a\n using tag='third', active=0\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model.3 as m\n \n update proj.models_versions \n set active=1\n where version=1 and name='task_model' \n \n delete from proj.models_versions \n where version=2 \n and name='task_model'\n \n delete from proj.models_versions \n where version=3 \n and model='task_model'\n ", "language": "en", "n_whitespaces": 654, "n_words": 109, "vocab_size": 43 }
def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label assert ret['TAG'][0] == 'second' # check integration sql assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2' # use model ret = self.run_sql() assert ret.predicted[0] == 42 # used model has tag 'second' models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # -- retrain again with active=0 -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'third'}) ret = self.run_sql('select * from proj.models') # check target is from previous retrain assert ret['PREDICT'][0] == 'b' # use model ret = self.run_sql() # used model has tag 'second' (previous) models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # ================ working with inactive versions ================= # run 3st version model and check used model version ret = self.run_sql() models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # one-line query model by version ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2') model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # not existing version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.4 where a=1 and b=2', ) assert 'does not exists' in str(exc_info.value) # ================== managing versions ========================= # show models command # Show models <from | in> <project> where <expr> ret = self.run_sql('Show models') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models from proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models in proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models where name='task_model'") assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models from proj where name='xxx'") assert len(ret) == 0 # ---------------- # See all versions ret = self.run_sql('select * from proj.models_versions') # we have all tags in versions assert set(ret['TAG']) == {'first', 'second', 'third'} # Set active selected version self.run_sql() # get active version ret = self.run_sql('select * from proj.models_versions where active = 1') assert ret['TAG'][0] == 'first' # use active version ? 
# Delete specific version self.run_sql() # deleted version not in list ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 2 assert 'second' not in ret['TAG'] # try to use deleted version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.2 where a=1', ) assert 'does not exists' in str(exc_info.value) # exception with deleting active version with pytest.raises(Exception) as exc_info: self.run_sql() assert 'is not found' in str(exc_info.value) # drop predictor and check model is deleted and no versions self.run_sql('drop predictor proj.task_model') ret = self.run_sql('select * from proj.models') assert len(ret) == 0 ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 0
ast_errors: @functools.lru_cache(maxsize=None)
d_id: 2,979 · id: 19,462 · n_whitespaces: 58 · path: pipenv/patched/notpip/_internal/locations/__init__.py
n_words: 25 · n_identifiers: 12
def _looks_like_red_hat_lib() -> bool: from distutils.command.install import INSTALL_SCHEMES # type: ignore return all( k in INSTALL_SCHEMES and _looks_like_red_hat_patched_platlib
Vendor in pip 21.2.4 release (from pip 21.2.2 prior). (#5009) * Vendor in pip 21.2.4 release (from pip 21.2.2 prior). * Add news fragment for pip 21.2.4 vendor update. * Add potentially missing LICENSE files
_looks_like_red_hat_lib
7e33fcae4384563b4c927fd44318c29dd524a097
pipenv
__init__.py
11
11
https://github.com/pypa/pipenv.git
3
38
1
22
79
Python
{ "docstring": "Red Hat patches platlib in unix_prefix and unix_home, but not purelib.\n\n This is the only way I can see to tell a Red Hat-patched Python.\n ", "language": "en", "n_whitespaces": 31, "n_words": 25, "vocab_size": 24 }
def _looks_like_red_hat_lib() -> bool: from distutils.command.install import INSTALL_SCHEMES # type: ignore return all( k in INSTALL_SCHEMES and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) for k in ("unix_prefix", "unix_home") ) @functools.lru_cache(maxsize=None)
15,958
73,144
331
wagtail/contrib/modeladmin/helpers/permission.py
83
27
def get_valid_parent_pages(self, user): # Get queryset of pages where this page type can be added allowed_parent_page_content_types = list( ContentType.objects.get_for_models( *self.model.allowed_parent_page_models() ).values() ) allowed_parent_pages = Page.objects.filter( content_type__in=allowed_parent_page_content_types ) # Get queryset of pages where the user has permission to add subpages if user.is_superuser: pages_where_user_can_add = Page.objects.all() else: pages_where_user_can_add = Page.objects.none() user_perms = UserPagePermissionsProxy(user)
Reformat with black
get_valid_parent_pages
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
permission.py
16
19
https://github.com/wagtail/wagtail.git
3
109
0
58
184
Python
{ "docstring": "\n Identifies possible parent pages for the current user by first looking\n at allowed_parent_page_models() on self.model to limit options to the\n correct type of page, then checking permissions on those individual\n pages to make sure we have permission to add a subpage to it.\n ", "language": "en", "n_whitespaces": 79, "n_words": 43, "vocab_size": 36 }
def get_valid_parent_pages(self, user): # Get queryset of pages where this page type can be added allowed_parent_page_content_types = list( ContentType.objects.get_for_models( *self.model.allowed_parent_page_models() ).values() ) allowed_parent_pages = Page.objects.filter( content_type__in=allowed_parent_page_content_types ) # Get queryset of pages where the user has permission to add subpages if user.is_superuser: pages_where_user_can_add = Page.objects.all() else: pages_where_user_can_add = Page.objects.none() user_perms = UserPagePermissionsProxy(user) for perm in user_perms.permissions.filter(permission_type="add"): # user has add permission on any subpage of perm.page # (including perm.page itself) pages_where_user_can_add |= Page.objects.descendant_of( perm.page, inclusive=True ) # Combine them return allowed_parent_pages & pages_where_user_can_add
14,149
66,255
22
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
35
19
def get_attendance_list(conditions, filters): attendance_list = frapp
style: format code with black
get_attendance_list
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
monthly_attendance_sheet.py
13
15
https://github.com/frappe/erpnext.git
3
94
0
31
149
Python
{ "docstring": "select employee, day(attendance_date) as day_of_month,\n\t\tstatus from tabAttendance where docstatus = 1 %s order by employee, attendance_date", "language": "en", "n_whitespaces": 15, "n_words": 17, "vocab_size": 16 }
def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql( % conditions, filters, as_dict=1, ) if not attendance_list: msgprint(_("No attendance record found"), alert=True, indicator="orange") att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "") att_map[d.employee][d.day_of_month] = d.status return att_map
@add_start_docstrings( """ RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. """, REMBERT_START_DOCSTRING, )
6,559
35,987
60
src/transformers/models/rembert/modeling_tf_rembert.py
25
12
def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, bea
TF generate refactor - past without encoder outputs (#15944) * Remove packed past from generation_tf_utils * update models with the new past format * update template accordingly
_reorder_cache
70203b59379b1841013980b6941bddfd34bfe816
transformers
modeling_tf_rembert.py
14
5
https://github.com/huggingface/transformers.git
3
42
1
21
76
Python
{ "docstring": "\n RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 14 }
def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) return reordered_past @add_start_docstrings( , REMBERT_START_DOCSTRING, )
19,756
100,089
223
tests/sentry/api/endpoints/test_team_details.py
66
27
def test_remove_as_admin_not_in_team(self): # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) admin_user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member( organization=org, user=admin_user, role="admin", teams=[], # note that admin_user isn't a member of `team` ) self.login_as(admin_user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, status_code=403) self.assert_team_not_deleted(
ref(tests): Remove `get_valid_response()` (#34822)
test_remove_as_admin_not_in_team
096b5511e244eecd8799b2a0324655207ce8985e
sentry
test_team_details.py
10
17
https://github.com/getsentry/sentry.git
1
138
0
49
221
Python
{ "docstring": "Admins can't remove teams of which they're not a part, unless\n open membership is on.", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 15 }
def test_remove_as_admin_not_in_team(self): # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) admin_user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member( organization=org, user=admin_user, role="admin", teams=[], # note that admin_user isn't a member of `team` ) self.login_as(admin_user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, status_code=403) self.assert_team_not_deleted(team.id) # now, with open membership on org.flags.allow_joinleave = True org.save() self.get_success_response(team.organization.slug, team.slug, status_code=204) self.assert_team_deleted(team.id)
29,474
131,087
239
python/ray/tests/aws/test_aws_batch_tag_update.py
61
29
def batch_test(num_threads, delay): with mock.patch( "ray.autoscaler._private.aws.node_provider.make_ec2_client" ), mock.patch.object(AWSNodeProvider, "_create_tags", mock_create_tags): provider = AWSNodeProvider( provider_config={"region": "nowhere"}, cluster_name="default" ) provider.batch_counter = 0 provider.tag_update_count
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
batch_test
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
test_aws_batch_tag_update.py
17
22
https://github.com/ray-project/ray.git
5
154
0
43
256
Python
{ "docstring": "Run AWSNodeProvider.set_node_tags in several threads, with a\n specified delay between thread launches.\n\n Return the number of batches of tag updates and the number of tags\n updated.\n ", "language": "en", "n_whitespaces": 38, "n_words": 26, "vocab_size": 22 }
def batch_test(num_threads, delay): with mock.patch( "ray.autoscaler._private.aws.node_provider.make_ec2_client" ), mock.patch.object(AWSNodeProvider, "_create_tags", mock_create_tags): provider = AWSNodeProvider( provider_config={"region": "nowhere"}, cluster_name="default" ) provider.batch_counter = 0 provider.tag_update_counter = 0 provider.tag_cache = {str(x): {} for x in range(num_threads)} threads = [] for x in range(num_threads): thread = threading.Thread( target=provider.set_node_tags, args=(str(x), {"foo": "bar"}) ) threads.append(thread) for thread in threads: thread.start() time.sleep(delay) for thread in threads: thread.join() return provider.batch_counter, provider.tag_update_counter
@PLUGIN_LAYERS.register_module()
70,215
244,048
142
mmdet/models/plugins/pixel_decoder.py
42
22
def forward(self, feats, img_metas): y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_fpn = self.lateral_convs[i](x) y = cur_fpn + \ F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module()
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
forward
cac356380d505bf15587f07c0529218cc36b9652
mmdetection
pixel_decoder.py
15
11
https://github.com/open-mmlab/mmdetection.git
2
113
1
32
186
Python
{ "docstring": "\n Args:\n feats (list[Tensor]): Feature maps of each level. Each has\n shape of (batch_size, c, h, w).\n img_metas (list[dict]): List of image information. Pass in\n for creating more accurate padding mask. Not used here.\n\n Returns:\n tuple: a tuple containing the following:\n\n - mask_feature (Tensor): Shape (batch_size, c, h, w).\n - memory (Tensor): Output of last stage of backbone.\\\n Shape (batch_size, c, h, w).\n ", "language": "en", "n_whitespaces": 196, "n_words": 62, "vocab_size": 47 }
def forward(self, feats, img_metas): y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_fpn = self.lateral_convs[i](x) y = cur_fpn + \ F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module()
89,709
290,594
97
tests/components/bluetooth/test_models.py
23
12
async def test_remote_scanner_expires_non_connectable(hass): manager = _get_manager() switchbot_device = BLEDevice(
Move bluetooth remote scanner implementation into a base class (#82012)
test_remote_scanner_expires_non_connectable
f584efa0c24df19ef1f805ecf95a95cecec5ff99
core
test_models.py
12
64
https://github.com/home-assistant/core.git
1
301
0
19
95
Python
{ "docstring": "Test the remote scanner expires stale non connectable data.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def test_remote_scanner_expires_non_connectable(hass): manager = _get_manager() switchbot_device = BLEDevice( "44:44:33:11:23:45", "wohand", {}, rssi=-100, ) switchbot_device_adv = generate_advertisement_data( local_name="wohand", service_uuids=[], manufacturer_data={1: b"\x01"}, rssi=-100, )
78,972
267,605
73
lib/ansible/plugins/inventory/toml.py
30
9
def convert_yaml_objects_to_native(obj): if isinstance(obj, dict):
Support for Python 3.11+ tomllib for inventory (#77435)
convert_yaml_objects_to_native
bcdc2e167af61cf978e589c753764f76e301a6fa
ansible
toml.py
12
9
https://github.com/ansible/ansible.git
6
72
0
21
113
Python
{ "docstring": "Older versions of the ``toml`` python library, and tomllib, don't have\n a pluggable way to tell the encoder about custom types, so we need to\n ensure objects that we pass are native types.\n\n Used with:\n - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing\n - ``tomli`` or ``tomllib``\n\n This function recurses an object and ensures we cast any of the types from\n ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing\n the data before we hand it over to the toml library.\n\n This function doesn't directly check for the types from ``ansible.parsing.yaml.objects``\n but instead checks for the types those objects inherit from, to offer more flexibility.\n ", "language": "en", "n_whitespaces": 138, "n_words": 101, "vocab_size": 76 }
def convert_yaml_objects_to_native(obj): if isinstance(obj, dict): return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items()) elif isinstance(obj, list): return [convert_yaml_objects_to_native(v) for v in obj] elif isinstance(obj, text_type): return text_type(obj) else: return obj
437
3,302
274
python/prophet/forecaster.py
94
19
def make_future_dataframe(self, periods, freq='D', include_history=True): if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:per
Speed Up Uncertainty Predictions (#2186)
make_future_dataframe
8fbf8ba2a5bfcdb892e8ca596e338894614000b5
prophet
forecaster.py
14
17
https://github.com/facebook/prophet.git
5
135
0
66
223
Python
{ "docstring": "Simulate the trend using the extrapolated generative model.\n\n Parameters\n ----------\n periods: Int number of periods to forecast forward.\n freq: Any valid frequency for pd.date_range, such as 'D' or 'M'.\n include_history: Boolean to include the historical dates in the data\n frame for predictions.\n\n Returns\n -------\n pd.Dataframe that extends forward from the end of self.history for the\n requested number of periods.\n ", "language": "en", "n_whitespaces": 140, "n_words": 59, "vocab_size": 48 }
def make_future_dataframe(self, periods, freq='D', include_history=True): if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates})
12,044
60,251
41
code/deep/BJMMD/caffe/python/caffe/io.py
16
13
def array_to_blobproto(arr, diff=None): blob = caffe_pb2.BlobProt
Balanced joint maximum mean discrepancy for deep transfer learning
array_to_blobproto
cc4d0564756ca067516f71718a3d135996525909
transferlearning
io.py
12
7
https://github.com/jindongwang/transferlearning.git
2
67
0
15
109
Python
{ "docstring": "Converts a N-dimensional array to blob proto. If diff is given, also\n convert the diff. You need to make sure that arr and diff have the same\n shape, and this function does not do sanity check.\n ", "language": "en", "n_whitespaces": 45, "n_words": 36, "vocab_size": 32 }
def array_to_blobproto(arr, diff=None): blob = caffe_pb2.BlobProto() blob.shape.dim.extend(arr.shape) blob.data.extend(arr.astype(float).flat) if diff is not None: blob.diff.extend(diff.astype(float).flat) return blob
72,114
248,122
767
tests/test_federation.py
145
27
def test_cross_signing_keys_retry(self): remote_user_id = "@john:test_remote" remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { "user_id": remote_user_id, "stream_id": 1, "devices": [], "master_key": { "user_id": remote_user_id, "usage": ["master"], "keys": {"ed25519:" + remote_master_key: remote_master_key}, }, "self_signing_key": { "user_id": remote_user_id, "usage": ["self_signing"], "keys": { "ed25
Prefer `make_awaitable` over `defer.succeed` in tests (#12505) When configuring the return values of mocks, prefer awaitables from `make_awaitable` over `defer.succeed`. `Deferred`s are only awaitable once, so it is inappropriate for a mock to return the same `Deferred` multiple times. Also update `run_in_background` to support functions that return arbitrary awaitables. Signed-off-by: Sean Quah <seanq@element.io>
test_cross_signing_keys_retry
78b99de7c206b106340e12cdee0af9aa246bd5ad
synapse
test_federation.py
19
45
https://github.com/matrix-org/synapse.git
1
263
0
87
464
Python
{ "docstring": "Tests that resyncing a device list correctly processes cross-signing keys from\n the remote server.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
def test_cross_signing_keys_retry(self): remote_user_id = "@john:test_remote" remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { "user_id": remote_user_id, "stream_id": 1, "devices": [], "master_key": { "user_id": remote_user_id, "usage": ["master"], "keys": {"ed25519:" + remote_master_key: remote_master_key}, }, "self_signing_key": { "user_id": remote_user_id, "usage": ["self_signing"], "keys": { "ed25519:" + remote_self_signing_key: remote_self_signing_key }, }, } ) ) # Resync the device list. device_handler = self.homeserver.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) # Retrieve the cross-signing keys for this user. keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) self.assertTrue(remote_user_id in keys) # Check that the master key is the one returned by the mock. master_key = keys[remote_user_id]["master"] self.assertEqual(len(master_key["keys"]), 1) self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) self.assertTrue(remote_master_key in master_key["keys"].values()) # Check that the self-signing key is the one returned by the mock. self_signing_key = keys[remote_user_id]["self_signing"] self.assertEqual(len(self_signing_key["keys"]), 1) self.assertTrue( "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), ) self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values())
8,783
46,113
239
tests/providers/databricks/operators/test_databricks.py
50
30
def test_exec_success(self, db_mock_class): run = { 'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, } op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run) db_mock = db_mock_class.return_value db_mock.submit_run.return_value = 1 db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '') op.execute(None) expected = databricks_operator._deep_string_coerce( {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID} ) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay, retry_args=None, ) db_mock.submit_run.assert_called_once_with(expected) db_mock.get_run_page_url.assert_called_once_with(RUN_ID) db_mock.get_run_state.assert_called_once_with(RUN_ID) assert RUN_ID == op.run_
Databricks hook - retry on HTTP Status 429 as well (#21852) * Databricks hook - retry on HTTP Status 429 as well this fixes #21559 * Reimplement retries using tenacity it's now uses exponential backoff by default
test_exec_success
12e9e2c695f9ebb9d3dde9c0f7dfaa112654f0d6
airflow
test_databricks.py
11
23
https://github.com/apache/airflow.git
1
137
0
41
224
Python
{ "docstring": "\n Test the execute function in case where the run is successful.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
def test_exec_success(self, db_mock_class): run = { 'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, } op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run) db_mock = db_mock_class.return_value db_mock.submit_run.return_value = 1 db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '') op.execute(None) expected = databricks_operator._deep_string_coerce( {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID} ) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay, retry_args=None, ) db_mock.submit_run.assert_called_once_with(expected) db_mock.get_run_page_url.assert_called_once_with(RUN_ID) db_mock.get_run_state.assert_called_once_with(RUN_ID) assert RUN_ID == op.run_id
7,513
42,253
62
seaborn/palettes.py
41
18
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb
Convert color palette docstrings to notebooks (#3034) * Convert color palette docstrings to notebooks and rerun all with py310 kernel * Add v0.12.1 release notes to index * Improve failure mode when ipywidgets is not involved * Update palettes docstrings * Remove all other doctest-style examples * Remove doctest-oriented testing infrastructure * Mention in release notes * Skip colormap patch test on matplotlib's where it's not relevant * Use more robust approach to mpl backcompat
dark_palette
e644793f0ac2b1be178425f20f529121f37f29de
seaborn
palettes.py
10
7
https://github.com/mwaskom/seaborn.git
2
93
0
35
137
Python
{ "docstring": "Make a sequential palette that blends from dark to ``color``.\n\n This kind of palette is good for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_dark_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex, rgb-tuple, or html color name\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n input : {'rgb', 'hls', 'husl', xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n .. include:: ../docstrings/dark_palette.rst\n\n ", "language": "en", "n_whitespaces": 328, "n_words": 201, "vocab_size": 128 }
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb((h, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap)
22,462
106,834
26
py/visdom/__init__.py
12
8
def contour(self, X, win=None, env=None, opts=None): return self._surface(X=X, stype="contour", opts=opts, win=win, env=env
apply black py to all python files
contour
5b8b7f267cfaf76a2a39a727ef31a62b3909a093
visdom
__init__.py
9
2
https://github.com/fossasia/visdom.git
1
45
0
12
66
Python
{ "docstring": "\n This function draws a contour plot. It takes as input an `NxM` tensor\n `X` that specifies the value at each location in the contour plot.\n\n The following `opts` are supported:\n\n - `opts.colormap`: colormap (`string`; default = `'Viridis'`)\n - `opts.xmin` : clip minimum value (`number`; default = `X:min()`)\n - `opts.xmax` : clip maximum value (`number`; default = `X:max()`)\n ", "language": "en", "n_whitespaces": 113, "n_words": 57, "vocab_size": 43 }
def contour(self, X, win=None, env=None, opts=None): return self._surface(X=X, stype="contour", opts=opts, win=win, env=env)
57,207
224,060
230
mkdocs/utils/__init__.py
87
16
def get_themes(): themes = {} eps = set(importlib_metadata.entry_points(group='mkdocs.themes')) builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'} for theme in eps: if theme.name in builtins and
Remove spaces at the ends of docstrings, normalize quotes
get_themes
e7f07cc82ab2be920ab426ba07456d8b2592714d
mkdocs
__init__.py
19
17
https://github.com/mkdocs/mkdocs.git
7
96
0
60
224
Python
{ "docstring": "Return a dict of all installed themes as {name: EntryPoint}.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def get_themes(): themes = {} eps = set(importlib_metadata.entry_points(group='mkdocs.themes')) builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'} for theme in eps: if theme.name in builtins and theme.dist.name != 'mkdocs': raise exceptions.ConfigurationError( f"The theme '{theme.name}' is a builtin theme but the package '{theme.dist.name}' " "attempts to provide a theme with the same name." ) elif theme.name in themes: log.warning( f"A theme named '{theme.name}' is provided by the Python packages '{theme.dist.name}' " f"and '{themes[theme.name].dist.name}'. The one in '{theme.dist.name}' will be used." ) themes[theme.name] = theme return themes
19,757
100,127
94
tests/sentry/api/endpoints/test_user_notification_details.py
15
16
def test_subscribe_by_default(self): NotificationSetting
ref(tests): Remove `get_valid_response()` (#34822)
test_subscribe_by_default
096b5511e244eecd8799b2a0324655207ce8985e
sentry
test_user_notification_details.py
9
9
https://github.com/getsentry/sentry.git
1
50
0
15
82
Python
{ "docstring": "\n Test that we expect project-independent issue alert preferences to be\n returned as `subscribe_by_default`.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
def test_subscribe_by_default(self): NotificationSetting.objects.update_settings( ExternalProviders.EMAIL, NotificationSettingTypes.ISSUE_ALERTS, NotificationSettingOptionValues.NEVER, user=self.user, ) response = self.get_success_response("me") assert response.data.get("subscribeByDefault") is False
16,175
73,918
98
wagtail/core/permission_policies/base.py
19
15
def _get_users_with_any_permission_codenames_filter(self, permission_codenames): permissions = Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) return ( Q(is_superuser=True)
Reformat with black
_get_users_with_any_permission_codenames_filter
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
base.py
12
9
https://github.com/wagtail/wagtail.git
1
56
0
17
89
Python
{ "docstring": "\n Given a list of permission codenames, return a filter expression which\n will find all users which have any of those permissions - either\n through group permissions, user permissions, or implicitly through\n being a superuser.\n ", "language": "en", "n_whitespaces": 70, "n_words": 34, "vocab_size": 28 }
def _get_users_with_any_permission_codenames_filter(self, permission_codenames): permissions = Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) return ( Q(is_superuser=True) | Q(user_permissions__in=permissions) | Q(groups__permissions__in=permissions) ) & Q(is_active=True)
88,977
289,847
541
homeassistant/components/ibeacon/coordinator.py
56
21
def _async_update_rssi_and_transients(self) -> None: for ( unique_id, ibeacon_advert
Update ibeacon-ble to 1.0.1 (#80785)
_async_update_rssi_and_transients
e15f2e050e7afadbb19d32973104e4e2f5a172ae
core
coordinator.py
14
40
https://github.com/home-assistant/core.git
6
139
0
39
213
Python
{ "docstring": "Check to see if the rssi has changed and update any devices.\n\n We don't callback on RSSI changes so we need to check them\n here and send them over the dispatcher periodically to\n ensure the distance calculation is update.\n\n If the transient flag is set we also need to check to see\n if the device is still transmitting and increment the counter\n ", "language": "en", "n_whitespaces": 104, "n_words": 62, "vocab_size": 43 }
def _async_update_rssi_and_transients(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split("_")[-1] service_info = bluetooth.async_last_service_info( self.hass, address, connectable=False ) if not service_info: continue if address in self._transient_seen_count: self._transient_seen_count[address] += 1 if self._transient_seen_count[address] == MIN_SEEN_TRANSIENT_NEW: self._transient_seen_count.pop(address) _async_dispatch_update( self.hass, unique_id, service_info, ibeacon_advertisement, True, True, ) continue if service_info.rssi != ibeacon_advertisement.rssi: ibeacon_advertisement.update_rssi(service_info.rssi) async_dispatcher_send( self.hass, signal_seen(unique_id), ibeacon_advertisement, )
52,162
207,935
43
celery/contrib/testing/worker.py
22
11
def setup_app_for_worker(app, loglevel, logfile) -> None: # type: (Celery, Union[str, int], str) -> None app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile)
Add `mypy` to the pipeline (#7383) * Add typing to Celery This is a simple bootstrap of the process, adding some types to a few selected functions, based on comment annotations. MyPy is chosen as the default static analyzer for the types. * Add mypy to the pipeline * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove unused command from tox * Install mypy only on CPython * Remove wrong annotations * Update celery/utils/saferepr.py Co-authored-by: Mads Jensen <mje@inducks.org> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
setup_app_for_worker
fbda0089f08d7f2a8f00925dbc0b6e10bd779251
celery
worker.py
10
7
https://github.com/celery/celery.git
1
51
0
21
85
Python
{ "docstring": "Setup the app to be used for starting an embedded worker.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def setup_app_for_worker(app, loglevel, logfile) -> None: # type: (Celery, Union[str, int], str) -> None app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile)
47,598
196,098
44
sympy/combinatorics/graycode.py
12
6
def rank(self): if self._rank is None: self._rank
Updated import locations
rank
498015021131af4dbb07eb110e5badaba8250c7b
sympy
graycode.py
13
4
https://github.com/sympy/sympy.git
2
32
0
10
53
Python
{ "docstring": "\n Ranks the Gray code.\n\n A ranking algorithm determines the position (or rank)\n of a combinatorial object among all the objects w.r.t.\n a given order. For example, the 4 bit binary reflected\n Gray code (BRGC) '0101' has a rank of 6 as it appears in\n the 6th position in the canonical ordering of the family\n of 4 bit Gray codes.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(3)\n >>> list(a.generate_gray())\n ['000', '001', '011', '010', '110', '111', '101', '100']\n >>> GrayCode(3, start='100').rank\n 7\n >>> GrayCode(3, rank=7).current\n '100'\n\n See Also\n ========\n\n unrank\n\n References\n ==========\n\n .. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html\n\n ", "language": "en", "n_whitespaces": 266, "n_words": 97, "vocab_size": 73 }
def rank(self): if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank
88,457
289,315
500
homeassistant/components/rest/data.py
91
38
async def async_update(self, log_errors=True): if not self._async_client: self._async_client = get_async_client( self._hass, verify_ssl=self._verify_ssl ) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) _LOGGER.debug("Updating from %s", self._resource) try: response = await self._async_client.request( self._method, self._resource, headers=rendered_headers, params=rendered_params, auth=self._auth, content=self._request_data, timeout=self._timeout, follow_redirects=True, ) self.data = response.text self.headers = response.headers except httpx.TimeoutException as ex: if log_errors: _LOGGER.error("Timeout while fetching data: %s", self._resource) self.last_exception = ex self.data = None self.headers = None except httpx.RequestError as ex:
Fix payload in rest (#80544)
async_update
599d61a4da096227ce4d5ba1dc0eaabceea56f49
core
data.py
13
35
https://github.com/home-assistant/core.git
6
202
0
56
317
Python
{ "docstring": "Get the latest data from REST service with provided method.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
async def async_update(self, log_errors=True): if not self._async_client: self._async_client = get_async_client( self._hass, verify_ssl=self._verify_ssl ) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) _LOGGER.debug("Updating from %s", self._resource) try: response = await self._async_client.request( self._method, self._resource, headers=rendered_headers, params=rendered_params, auth=self._auth, content=self._request_data, timeout=self._timeout, follow_redirects=True, ) self.data = response.text self.headers = response.headers except httpx.TimeoutException as ex: if log_errors: _LOGGER.error("Timeout while fetching data: %s", self._resource) self.last_exception = ex self.data = None self.headers = None except httpx.RequestError as ex: if log_errors: _LOGGER.error( "Error fetching data: %s failed with %s", self._resource, ex ) self.last_exception = ex self.data = None self.headers = None
85,683
286,285
33
openbb_terminal/helper_funcs.py
14
8
def set_default_timezone() -> None: dotenv.load_dotenv(USER_ENV_FILE) user_tz = os.ge
[SDK] Allow silencing verbose output in commands that use stocks/load (#3180) * remove verbose on load * Revert implementation of the verbosity setting in stocks controller * Edit docstrings to comply with pydocstyle linting rules * Fix typos in variable names and help text * Add verbosity setting to forex load helper as it uses the stocks helper * Update docstrings to comply with pydocstyle linting rules * Update tests * Fix test relying on local sources settings * Remove old test cassettes * Add new test data * WIP: Fix futures tests * Clean up test file * Fix futures tests having a time component * Fix futures model tests Co-authored-by: James Maslek <jmaslek11@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com>
set_default_timezone
47549cbd9f52a436c06b040fda5b88a7d2bf700a
OpenBBTerminal
helper_funcs.py
10
6
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
35
0
14
65
Python
{ "docstring": "Set a default (America/New_York) timezone if one doesn't exist.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def set_default_timezone() -> None: dotenv.load_dotenv(USER_ENV_FILE) user_tz = os.getenv("OPENBB_TIMEZONE") if not user_tz: dotenv.set_key(USER_ENV_FILE, "OPENBB_TIMEZONE", "America/New_York")
46,839
191,733
30
langchain/agents/agent.py
16
5
def return_stopped_response(self) -> dict: return {k: "Agent stopped due to max iterations." for k in self.return_values}
add logic for agent stopping (#420)
return_stopped_response
d0f194de73c942cb89d731dbfa5ae809111fb07a
langchain
agent.py
8
3
https://github.com/hwchase17/langchain.git
2
20
0
16
35
Python
{ "docstring": "Return response when agent has been stopped due to max iterations.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def return_stopped_response(self) -> dict: return {k: "Agent stopped due to max iterations." for k in self.return_values}
77,696
264,354
48
netbox/utilities/forms/fields/dynamic.py
16
7
def clean(self, value): if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE: return Non
Refactor & document supported form fields
clean
cf3ca5a661cc015baf4ef462be07e91c09db0ede
netbox
dynamic.py
9
4
https://github.com/netbox-community/netbox.git
3
33
0
14
54
Python
{ "docstring": "\n When null option is enabled and \"None\" is sent as part of a form to be submitted, it is sent as the\n string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType.\n ", "language": "en", "n_whitespaces": 61, "n_words": 38, "vocab_size": 30 }
def clean(self, value): if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE: return None return super().clean(value)
72,397
248,647
320
tests/test_event_auth.py
76
24
def test_unexpected_auth_events(self): creator = "@creator:example.com" create_event = _create_event(RoomVersions.V9, creator) join_event = _join_event(RoomVersions.V9, creator) pl_event = _power_levels_event( RoomVersions.V9, creator, {"state_default": 30, "users": {"creator": 100}}, ) join_rules_event = _join_rules_event(RoomVersions.V9, creator, "public") event_store = _StubEventSourceStore() event_store.add_events([create_event, join_event, pl_event, join_rules_event]) good_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event] ) # join rules
Fix inconsistencies in event validation (#13088)
test_unexpected_auth_events
d4b1c0d800eaa83c4d56a9cf17881ad362b9194b
synapse
test_event_auth.py
13
27
https://github.com/matrix-org/synapse.git
1
154
0
52
239
Python
{ "docstring": "Events with excess auth_events should be rejected\n\n https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules\n 2. Reject if event has auth_events that:\n 2. have entries whose type and state_key don’t match those specified by the\n auth events selection algorithm described in the server specification.\n ", "language": "en", "n_whitespaces": 81, "n_words": 37, "vocab_size": 34 }
def test_unexpected_auth_events(self): creator = "@creator:example.com" create_event = _create_event(RoomVersions.V9, creator) join_event = _join_event(RoomVersions.V9, creator) pl_event = _power_levels_event( RoomVersions.V9, creator, {"state_default": 30, "users": {"creator": 100}}, ) join_rules_event = _join_rules_event(RoomVersions.V9, creator, "public") event_store = _StubEventSourceStore() event_store.add_events([create_event, join_event, pl_event, join_rules_event]) good_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event] ) # join rules should *not* be included in the auth events. bad_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event, join_rules_event], ) get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, good_event) ) with self.assertRaises(AuthError): get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, bad_event) )
44,000
182,900
506
src/textual/devtools/service.py
67
27
async def _consume_incoming(self) -> None: while True: message_json = await self.incoming_queue.get() if message_json is None: self.incoming_queue.task_done() break type = message_json["type"] if type == "client_log": path = message_json["payload"]["path"] line_number = message_json["payload"]["line_number"] timestamp = message_json["payload"]["timestamp"] encoded_segments = message_json["payload"]["encoded_segments"] decoded_segments = base64.b64decode(encoded_segments) segments = pickle.loads(decoded_segments) self.service.console.print( DevtoolsLogMessage( segments=segments, path=path, line_number=line_number, unix_timestamp=timestamp, ) ) elif type == "client_spillover":
Seperate server and client handling logic into classes for devtools
_consume_incoming
a72e347ed99333a090377ee438eaf63477cbf98b
textual
service.py
16
32
https://github.com/Textualize/textual.git
5
170
0
49
299
Python
{ "docstring": "Consume messages from the incoming (client -> server) Queue, and print\n the corresponding renderables to the console for each message.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 18 }
async def _consume_incoming(self) -> None: while True: message_json = await self.incoming_queue.get() if message_json is None: self.incoming_queue.task_done() break type = message_json["type"] if type == "client_log": path = message_json["payload"]["path"] line_number = message_json["payload"]["line_number"] timestamp = message_json["payload"]["timestamp"] encoded_segments = message_json["payload"]["encoded_segments"] decoded_segments = base64.b64decode(encoded_segments) segments = pickle.loads(decoded_segments) self.service.console.print( DevtoolsLogMessage( segments=segments, path=path, line_number=line_number, unix_timestamp=timestamp, ) ) elif type == "client_spillover": spillover = int(message_json["payload"]["spillover"]) info_renderable = DevtoolsInternalMessage( f"Discarded {spillover} messages", level="warning" ) self.service.console.print(info_renderable) self.incoming_queue.task_done()
33,397
145,231
75
docker/kuberay-autoscaler/test_autoscaling_config.py
21
12
def _get_basic_ray_cr() -> dict: cr_path = str( Path(__file__).resolve().paren
[KubeRay] Format autoscaling config based on RayCluster CR (#22348) Closes #21655. At the start of each autoscaler iteration, we read the Ray Cluster CR from K8s and use it to extract the autoscaling config.
_get_basic_ray_cr
a402e956a4e1ebe9bc4e2b404536466967c497af
ray
test_autoscaling_config.py
19
11
https://github.com/ray-project/ray.git
1
49
0
17
92
Python
{ "docstring": "Returns the example Ray CR included in the Ray documentation.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
def _get_basic_ray_cr() -> dict: cr_path = str( Path(__file__).resolve().parents[2] / "python" / "ray" / "autoscaler" / "kuberay" / "ray-cluster.complete.yaml" ) return yaml.safe_load(open(cr_path).read())
41,994
176,595
293
networkx/generators/spectral_graph_forge.py
169
45
def spectral_graph_forge(G, alpha, transformation="identity", seed=None): import numpy as np import scipy as sp import scipy.stats # call as sp.stats available_transformations = ["identity", "modularity"] alpha = np.clip(alpha, 0, 1) A = nx.to_numpy_array(G) n = A.shape[1] level = int(round(n * alpha)) if transformation not in available_transformations: msg = f"{transformation!r} is not a valid transformation. " msg += f"Transformations: {available_transformations}" raise nx.NetworkXError(msg) K = np.ones((1, n)) @ A B = A if transformation == "modularity": B -= K.T @ K / K.sum() # Compute low-rank approximation of B evals, evecs = np.
Remove `_mat_spect_approx` in favor of simpler procedure (#5624) * Replace _mat_spect_approx func internal usage. * Rm _mat_spect_approx helper function.
spectral_graph_forge
8bea55e3071ed71eab4fb6650a45f0cdf5c911d4
networkx
spectral_graph_forge.py
13
30
https://github.com/networkx/networkx.git
5
306
0
105
494
Python
{ "docstring": "Returns a random simple graph with spectrum resembling that of `G`\n\n This algorithm, called Spectral Graph Forge (SGF), computes the\n eigenvectors of a given graph adjacency matrix, filters them and\n builds a random graph with a similar eigenstructure.\n SGF has been proved to be particularly useful for synthesizing\n realistic social networks and it can also be used to anonymize\n graph sensitive data.\n\n Parameters\n ----------\n G : Graph\n alpha : float\n Ratio representing the percentage of eigenvectors of G to consider,\n values in [0,1].\n transformation : string, optional\n Represents the intended matrix linear transformation, possible values\n are 'identity' and 'modularity'\n seed : integer, random_state, or None (default)\n Indicator of numpy random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n H : Graph\n A graph with a similar eigenvector structure of the input one.\n\n Raises\n ------\n NetworkXError\n If transformation has a value different from 'identity' or 'modularity'\n\n Notes\n -----\n Spectral Graph Forge (SGF) generates a random simple graph resembling the\n global properties of the given one.\n It leverages the low-rank approximation of the associated adjacency matrix\n driven by the *alpha* precision parameter.\n SGF preserves the number of nodes of the input graph and their ordering.\n This way, nodes of output graphs resemble the properties of the input one\n and attributes can be directly mapped.\n\n It considers the graph adjacency matrices which can optionally be\n transformed to other symmetric real matrices (currently transformation\n options include *identity* and *modularity*).\n The *modularity* transformation, in the sense of Newman's modularity matrix\n allows the focusing on community structure related properties of the graph.\n\n SGF applies a low-rank approximation whose fixed rank is computed from the\n ratio *alpha* of the input graph adjacency matrix dimension.\n This step performs a filtering on the input eigenvectors similar to the low\n pass filtering common in telecommunications.\n\n The filtered values (after truncation) are used as input to a Bernoulli\n sampling for constructing a random adjacency matrix.\n\n References\n ----------\n .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, \"Spectral Graph Forge:\n Graph Generation Targeting Modularity\", IEEE Infocom, '18.\n https://arxiv.org/abs/1801.01715\n .. [2] M. Newman, \"Networks: an introduction\", Oxford university press,\n 2010\n\n Examples\n --------\n >>> G = nx.karate_club_graph()\n >>> H = nx.spectral_graph_forge(G, 0.3)\n >>>\n ", "language": "en", "n_whitespaces": 582, "n_words": 358, "vocab_size": 213 }
def spectral_graph_forge(G, alpha, transformation="identity", seed=None): import numpy as np import scipy as sp import scipy.stats # call as sp.stats available_transformations = ["identity", "modularity"] alpha = np.clip(alpha, 0, 1) A = nx.to_numpy_array(G) n = A.shape[1] level = int(round(n * alpha)) if transformation not in available_transformations: msg = f"{transformation!r} is not a valid transformation. " msg += f"Transformations: {available_transformations}" raise nx.NetworkXError(msg) K = np.ones((1, n)) @ A B = A if transformation == "modularity": B -= K.T @ K / K.sum() # Compute low-rank approximation of B evals, evecs = np.linalg.eigh(B) k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0 B = evecs @ np.diag(evals) @ evecs.T if transformation == "modularity": B += K.T @ K / K.sum() B = np.clip(B, 0, 1) np.fill_diagonal(B, 0) for i in range(n - 1): B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed) B[i + 1 :, i] = np.transpose(B[i, i + 1 :]) H = nx.from_numpy_array(B) return H
22,260
106,051
30
src/datasets/features/features.py
9
5
def encode_example(self, example): example = cast_to_python_objects(example) return encode_nested_exa
Clean up remaining Main Classes docstrings (#5349) clean up docstrings
encode_example
c78559cacbb0ca6e0bc8bfc313cc0359f8c23ead
datasets
features.py
8
3
https://github.com/huggingface/datasets.git
1
21
0
9
35
Python
{ "docstring": "\n Encode example into a format for Arrow.\n\n Args:\n example (`dict[str, Any]`):\n Data in a Dataset row.\n\n Returns:\n `dict[str, Any]`\n ", "language": "en", "n_whitespaces": 85, "n_words": 19, "vocab_size": 17 }
def encode_example(self, example): example = cast_to_python_objects(example) return encode_nested_example(self, example)
80,920
271,980
333
keras/engine/training_v1.py
117
11
def _add_unique_metric_name(self, metric_name, metric_fn, output_index): # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # "accuracy") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. if not getattr(metric_fn
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_add_unique_metric_name
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_v1.py
14
13
https://github.com/keras-team/keras.git
4
75
0
80
127
Python
{ "docstring": "Makes the metric name unique.\n\n If there are multiple outputs for which the metrics are calculated, the\n metric names have to be made unique by appending an integer.\n\n Args:\n metric_name: Metric name that corresponds to the metric specified by the\n user. For example: 'acc'.\n metric_fn: The Metric object.\n output_index: The index of the model output for which the metric name is\n being added.\n\n Returns:\n string, name of the model's unique metric name\n ", "language": "en", "n_whitespaces": 171, "n_words": 72, "vocab_size": 48 }
def _add_unique_metric_name(self, metric_name, metric_fn, output_index): # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # "accuracy") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. if not getattr(metric_fn, "_from_serialized", False): metric_name = "%s_%s" % ( self.output_names[output_index], metric_name, ) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = "%s_%d" % (base_metric_name, j) j += 1 return metric_name
@not_implemented_for("undirected")
41,793
176,253
783
networkx/algorithms/components/strongly_connected.py
126
23
def strongly_connected_components(G): preorder = {} lowlink = {} scc_found = set() scc_queue = [] i = 0 # Preorder counter neighbors = {v: iter(G[v]) for v in G} for source in G: if source not in scc_found: queue = [source] while queue: v = queue[-1] if v not in preorder: i = i + 1 preorder[v] = i done = True for w in neighbors[v]: if w not in preorder: queue.append(w) done = False break if done: lowlink[v] = preorder[v] for w in G[v]: if w not in scc_found: if preorder[w] > preorder[v]: lowlink[v] = min([lowlink[v], lowlink[w]]) else:
Fixing Tarjan's strongly connected components algorithm implementation to have O(|E|+|V|) time complexity instead of O(|V|^3). (#5288) Prevent unnecessary traversal of edges multiple times
strongly_connected_components
77c49c16e10693dbe566d20601b28dd2b1e8df03
networkx
strongly_connected.py
25
39
https://github.com/networkx/networkx.git
15
257
1
66
413
Python
{ "docstring": "Generate nodes in strongly connected components of graph.\n\n Parameters\n ----------\n G : NetworkX Graph\n A directed graph.\n\n Returns\n -------\n comp : generator of sets\n A generator of sets of nodes, one for each strongly connected\n component of G.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is undirected.\n\n Examples\n --------\n Generate a sorted list of strongly connected components, largest first.\n\n >>> G = nx.cycle_graph(4, create_using=nx.DiGraph())\n >>> nx.add_cycle(G, [10, 11, 12])\n >>> [\n ... len(c)\n ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True)\n ... ]\n [4, 3]\n\n If you only want the largest component, it's more efficient to\n use max instead of sort.\n\n >>> largest = max(nx.strongly_connected_components(G), key=len)\n\n See Also\n --------\n connected_components\n weakly_connected_components\n kosaraju_strongly_connected_components\n\n Notes\n -----\n Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_.\n Nonrecursive version of algorithm.\n\n References\n ----------\n .. [1] Depth-first search and linear graph algorithms, R. Tarjan\n SIAM Journal of Computing 1(2):146-160, (1972).\n\n .. [2] On finding the strongly connected components in a directed graph.\n E. Nuutila and E. Soisalon-Soinen\n Information Processing Letters 49(1): 9-14, (1994)..\n\n ", "language": "en", "n_whitespaces": 324, "n_words": 162, "vocab_size": 118 }
def strongly_connected_components(G): preorder = {} lowlink = {} scc_found = set() scc_queue = [] i = 0 # Preorder counter neighbors = {v: iter(G[v]) for v in G} for source in G: if source not in scc_found: queue = [source] while queue: v = queue[-1] if v not in preorder: i = i + 1 preorder[v] = i done = True for w in neighbors[v]: if w not in preorder: queue.append(w) done = False break if done: lowlink[v] = preorder[v] for w in G[v]: if w not in scc_found: if preorder[w] > preorder[v]: lowlink[v] = min([lowlink[v], lowlink[w]]) else: lowlink[v] = min([lowlink[v], preorder[w]]) queue.pop() if lowlink[v] == preorder[v]: scc = {v} while scc_queue and preorder[scc_queue[-1]] > preorder[v]: k = scc_queue.pop() scc.add(k) scc_found.update(scc) yield scc else: scc_queue.append(v) @not_implemented_for("undirected")
75,771
259,437
524
sklearn/linear_model/_glm/glm.py
172
33
def score(self, X, y, sample_weight=None): # TODO: Adapt link to User Guide in the docstring, once # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. # # Note, default score defined in RegressorMixin is R^2 score. # TODO: make D^2 a score function in module metrics (and thereby get # input validation and so on) raw_prediction = self._linear_predictor(X) # validates X # required by losses y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order="C") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) base_loss = self._linear_loss.base_loss if not base_loss.in_y_true_range(y): raise ValueError( "Some value(s) of y are out of the valid range of the loss" f" {self._base_loss.__name__}." ) # Note that constant_to_optimal_zero is already multiplied by sample_weight. constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y)) if sample_weight is not None: constant *= sample_weight.shape[0] / np.sum(sample_weight) # Missing factor of 2 in deviance cancels out. deviance = base_loss( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1, ) y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) deviance_null = base_loss(
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
score
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
scikit-learn
glm.py
14
28
https://github.com/scikit-learn/scikit-learn.git
4
209
0
115
340
Python
{ "docstring": "Compute D^2, the percentage of deviance explained.\n\n D^2 is a generalization of the coefficient of determination R^2.\n R^2 uses squared error and D^2 uses the deviance of this GLM, see the\n :ref:`User Guide <regression_metrics>`.\n\n D^2 is defined as\n :math:`D^2 = 1-\\\\frac{D(y_{true},y_{pred})}{D_{null}}`,\n :math:`D_{null}` is the null deviance, i.e. the deviance of a model\n with intercept alone, which corresponds to :math:`y_{pred} = \\\\bar{y}`.\n The mean :math:`\\\\bar{y}` is averaged by sample_weight.\n Best possible score is 1.0 and it can be negative (because the model\n can be arbitrarily worse).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,)\n True values of target.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n D^2 of self.predict(X) w.r.t. y.\n ", "language": "en", "n_whitespaces": 304, "n_words": 127, "vocab_size": 89 }
def score(self, X, y, sample_weight=None): # TODO: Adapt link to User Guide in the docstring, once # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. # # Note, default score defined in RegressorMixin is R^2 score. # TODO: make D^2 a score function in module metrics (and thereby get # input validation and so on) raw_prediction = self._linear_predictor(X) # validates X # required by losses y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order="C") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) base_loss = self._linear_loss.base_loss if not base_loss.in_y_true_range(y): raise ValueError( "Some value(s) of y are out of the valid range of the loss" f" {self._base_loss.__name__}." ) # Note that constant_to_optimal_zero is already multiplied by sample_weight. constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y)) if sample_weight is not None: constant *= sample_weight.shape[0] / np.sum(sample_weight) # Missing factor of 2 in deviance cancels out. deviance = base_loss( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1, ) y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) deviance_null = base_loss( y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1, ) return 1 - (deviance + constant) / (deviance_null + constant)
1,607
9,407
317
reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py
198
34
def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 # Check weight shape.
initialize ostec
upsample_conv_2d
7375ee364e0df2a417f92593e09557f1b2a3575a
insightface
upfirdn_2d.py
16
48
https://github.com/deepinsight/insightface.git
4
387
0
110
602
Python
{ "docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations.\n The fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.\n Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n The default is `[1] * factor`, which corresponds to nearest-neighbor\n upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or\n `[N, H * factor, W * factor, C]`, and same datatype as `x`.\n ", "language": "en", "n_whitespaces": 358, "n_words": 158, "vocab_size": 114 }
def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 # Check weight shape. w = tf.convert_to_tensor(w) assert w.shape.rank == 4 convH = w.shape[0].value convW = w.shape[1].value inC = _shape(w, 2) outC = _shape(w, 3) assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor ** 2)) p = (k.shape[0] - factor) - (convW - 1) # Determine data dimensions. if data_format == 'NCHW': stride = [1, 1, factor, factor] output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW] num_groups = _shape(x, 1) // inC else: stride = [1, factor, factor, 1] output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC] num_groups = _shape(x, 3) // inC # Transpose weights. w = tf.reshape(w, [convH, convW, inC, num_groups, -1]) w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2]) w = tf.reshape(w, [convH, convW, -1, num_groups * inC]) # Execute. x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format) return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl) #----------------------------------------------------------------------------
1,218
7,491
19
ludwig/contribs/aim.py
5
7
def normalize_config(config): return json.loads(json.dumps(config, cls=NumpyEn
Fixes to serialization, and update to allow set repo location. (#2367) * Updating AimCallback to add init for optional repo. * Fixed numpy serialization for config objects. * Removed print statements, added logging for progress tracker.
normalize_config
7ec0cd13cf5e77d6fe68acbbeef9a7c694fc83c2
ludwig
aim.py
10
2
https://github.com/ludwig-ai/ludwig.git
1
22
0
5
37
Python
{ "docstring": "Convert to json string and back again to remove numpy types.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def normalize_config(config): return json.loads(json.dumps(config, cls=NumpyEncoder))
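An illustrative round-trip of the pattern above; the NumpyEncoder shown here is a stand-in sketch, not necessarily the encoder Ludwig ships:
import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    # Minimal stand-in: convert numpy scalars/arrays to plain Python types.
    def default(self, obj):
        if isinstance(obj, np.generic):
            return obj.item()
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

config = {"epochs": np.int64(10), "dims": np.array([64, 32])}
print(json.loads(json.dumps(config, cls=NumpyEncoder)))  # {'epochs': 10, 'dims': [64, 32]}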
46,467
191,243
487
thumbor/utils.py
203
42
def ensure_srgb(img, srgb_profile=None): img_info = dict(img.info) icc = img_info.pop("icc_profile", None) if not icc: return img if ImageCms is None: raise RuntimeError("ImageCms is required for color profile utilities") if srgb_profile is not None: srgb_profile = ImageCms.ImageCmsProfile(srgb_profile) else: srgb_profile = DEFAULT_SRGB_PROFILE buf = BytesIO(icc) try: orig_profile = ImageCms.ImageCmsProfile(buf) color_space = orig_profile.profile.xcolor_space except (AttributeError, OSError, TypeError, ValueError): return None finally: buf.close() if color_space == "RGB ": logger.debug("Returning img (RGB)") return img if color_space not in ("GRAY", "CMYK
feat: Support AVIF format encoding (#1476) * feat: Support AVIF format encoding * Increase test coverage * test coverage for remaining uncovered lines * Add simple integration test * Add "filters:format(avif)" integration test * Gracefully handle AVIF encoding when codec unavailable * Don't pass quality="keep" to AVIF save * Fix no-member pylint error
ensure_srgb
1d9deef4e99f52a08eed9aa973572c4567754f5a
thumbor
utils.py
12
51
https://github.com/thumbor/thumbor.git
10
268
0
126
452
Python
{ "docstring": "\n Ensures that an image either has no ICC profile (and so is implicitly\n sRGB) or has an sRGB color profile. If the image is sRGB, it is returned\n unchanged. If it has a CMYK or Gray color profile, this function will\n return an image converted to sRGB. Any color profiles in other color\n spaces will return None.\n ", "language": "en", "n_whitespaces": 76, "n_words": 57, "vocab_size": 41 }
def ensure_srgb(img, srgb_profile=None): img_info = dict(img.info) icc = img_info.pop("icc_profile", None) if not icc: return img if ImageCms is None: raise RuntimeError("ImageCms is required for color profile utilities") if srgb_profile is not None: srgb_profile = ImageCms.ImageCmsProfile(srgb_profile) else: srgb_profile = DEFAULT_SRGB_PROFILE buf = BytesIO(icc) try: orig_profile = ImageCms.ImageCmsProfile(buf) color_space = orig_profile.profile.xcolor_space except (AttributeError, OSError, TypeError, ValueError): return None finally: buf.close() if color_space == "RGB ": logger.debug("Returning img (RGB)") return img if color_space not in ("GRAY", "CMYK"): # Other color spaces are rare, but best not to try to convert them. # Upstream understands a None return as meaning it should not # use it for the target encoder. logger.debug( "Cannot convert to sRGB; color space = %s", (color_space.strip()), ) return None # Probably not possible to have an animated image with CMYK or GRAY icc # profile, but best leave it alone if we have one if getattr(img, "is_animated", False): return None if color_space == "GRAY": pil_mode = "L" else: pil_mode = "CMYK" logger.debug("Converting from %s to sRGB", color_space) transform = ImageCms.ImageCmsTransform( orig_profile, srgb_profile, pil_mode, "RGBA", intent=ImageCms.INTENT_RELATIVE_COLORIMETRIC, flags=TRANSFORM_FLAGS, ) src_im = Image.new(pil_mode, img.size, "white") src_im.paste(img) dst_im = Image.new("RGBA", img.size, "white") dst_im.info = img_info dst_im = transform.apply(src_im, dst_im) dst_im = dst_im.convert("RGB") dst_im.info = img_info return dst_im
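A short caller sketch for ensure_srgb; the file name is hypothetical and the None fallback mirrors the docstring's contract for unsupported color spaces:
from PIL import Image

img = Image.open("photo.jpg")   # hypothetical CMYK or Gray image carrying an ICC profile
converted = ensure_srgb(img)
if converted is None:           # rare color space or animated image: keep the original
    converted = img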
80,550
270,733
93
keras/engine/base_layer.py
41
7
def _cast_single_input(self, x): if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_obje
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_cast_single_input
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
base_layer.py
10
5
https://github.com/keras-team/keras.git
2
31
0
35
54
Python
{ "docstring": "Cast a single Tensor or TensorSpec to the compute dtype.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def _cast_single_input(self, x): if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_object) else: return x # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property.
4,915
25,698
78
saleor/csv/utils/export.py
23
11
def queryset_in_batches(queryset): start_pk = 0 while True: qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE] pks = list(qs.values_list("pk", flat=True)) if not pks: break yield pks
Feature/gift cards post mvp (#7977) * Add giftCardBulkCreate mutation * Extend OrderFilter with giftCardUsed and giftCardBought fields * Allow exporting gift cards * Update the name of the email template for export * Add exportGiftCards muttaion * Add used gift card filter * Export only unused gift cards * Block mutations for expired gift cards (#8115) * Block mutations for expired gift cards * Block only resending and activating expired gift cards * Add celery schedule task for deactivate expired cards (#8100) * Add gift card section to invoice (#8148) * Add filtering on gift card events (#8090) * Add filtering on gift card events * Filter gift card events by orders instead of order_id * Update populatedb with gift card data (#8016) * Generate gift cards with events in populate db * Set product types kinds and add placeholder for gift card product * Add dedicated gift card product images * Change order of order emails (#8168) * Drop duplicated kind field from producttype in populatedb (#8224) * Change gift card display_code field to last_4 (#8445) * Change gift card display_code field to last_4 * Change last4 to last4CodeChars * Fix github test env action configuration * Drop filtering gift cards by tag * Fix export gift card tags test * Re-add gift card tags query (#8412) * Update populatedb with gift card data (#8016) * Generate gift cards with events in populate db * Set product types kinds and add placeholder for gift card product * Add dedicated gift card product images * Add giftCardTags model * Add giftCardTags query Co-authored-by: Iga Karbowiak <40886528+IKarbowiak@users.noreply.github.com> Co-authored-by: IKarbowiak <iga.karbowiak@mirumee.com> * Do not create EXPIRY_DATE_UPDATED gift card event when expiry date is not changed (#8882) Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>
queryset_in_batches
f5a45de4a22fecacfcd5b2cd18c07e5cf95ce27c
saleor
export.py
13
9
https://github.com/saleor/saleor.git
3
55
0
18
94
Python
{ "docstring": "Slice a queryset into batches.\n\n Input queryset should be sorted be pk.\n ", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 10 }
def queryset_in_batches(queryset): start_pk = 0 while True: qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE] pks = list(qs.values_list("pk", flat=True)) if not pks: break yield pks start_pk = pks[-1]
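An illustrative caller for queryset_in_batches; Product is a hypothetical Django model and the queryset is ordered by pk as the docstring requires:
queryset = Product.objects.order_by("pk")
for pks in queryset_in_batches(queryset):
    # Each iteration touches at most BATCH_SIZE rows, keeping writes small.
    Product.objects.filter(pk__in=pks).update(is_published=True)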
14,353
66,831
56
erpnext/patches/v13_0/update_returned_qty_in_pr_dn.py
81
19
def execute(): frappe.reload_doc("stock", "doctype", "purchase_receipt") frappe.reload_doc("stock", "doctype", "purchase_receipt_item") frappe.reload_doc("stock", "doctype", "delivery_note") frappe.reload_doc("stock", "doctype", "delivery_note_item") frappe.reload_doc("stock", "doctype", "stock_settings") def update_from_return_docs(doctype): for return_doc in frappe.get_all( doctype, filters={"is_return": 1, "docstatus":
style: format code with black
execute
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
update_returned_qty_in_pr_dn.py
15
14
https://github.com/frappe/erpnext.git
2
77
0
63
297
Python
{ "docstring": " update `tabPurchase Receipt Item`\n\t\tset received_stock_qty = received_qty * conversion_factor\n\t\twhere docstatus = 1 ", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def execute(): frappe.reload_doc("stock", "doctype", "purchase_receipt") frappe.reload_doc("stock", "doctype", "purchase_receipt_item") frappe.reload_doc("stock", "doctype", "delivery_note") frappe.reload_doc("stock", "doctype", "delivery_note_item") frappe.reload_doc("stock", "doctype", "stock_settings") def update_from_return_docs(doctype): for return_doc in frappe.get_all( doctype, filters={"is_return": 1, "docstatus": 1, "return_against": ("!=", "")} ): # Update original receipt/delivery document from return return_doc = frappe.get_cached_doc(doctype, return_doc.name) try: return_doc.update_prevdoc_status() except OverAllowanceError: frappe.db.rollback() continue return_against = frappe.get_doc(doctype, return_doc.return_against) return_against.update_billing_status() frappe.db.commit() # Set received qty in stock uom in PR, as returned qty is checked against it frappe.db.sql( ) for doctype in ("Purchase Receipt", "Delivery Note"): update_from_return_docs(doctype)
85,618
286,204
379
openbb_terminal/cryptocurrency/discovery/discovery_controller.py
55
27
def call_dex(self, other_args): parser = argparse.ArgumentParser( prog="dex", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", nargs="+", help="Sort by given column. Default: Daily Volume [$]", default="Daily Volume [
Combining commands and renaming others (#3000) * Combining commands and renaming others Combining commands with different sources but the same functionality. I also removed indications of source from command names * Fix tests and hugo * Test fixes Co-authored-by: james <jmaslek11@gmail.com>
call_dex
38a53e5f43bccb716e6a6494605f97293077a679
OpenBBTerminal
discovery_controller.py
13
36
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
126
0
47
205
Python
{ "docstring": "Process dex command\n Shows top decentralized exchanges [Source: https://dappradar.com/]\n Accepts --sort {Name,Daily Users,Daily Volume [$]}\n to sort by column\n ", "language": "en", "n_whitespaces": 63, "n_words": 19, "vocab_size": 19 }
def call_dex(self, other_args): parser = argparse.ArgumentParser( prog="dex", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", nargs="+", help="Sort by given column. Default: Daily Volume [$]", default="Daily Volume [$]", ) ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: dappradar_view.display_top_dexes( sortby=" ".join(ns_parser.sortby), limit=ns_parser.limit, export=ns_parser.export, )
12,562
61,419
257
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
86
16
def get_backend_for_dir(self, location): # type: (str) -> Optional[VersionControl] vcs_backends = {} for vcs_backend in self._registry.values(): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue logger.debug('Determine that %s uses VCS: %s', location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: return None # Choose the VCS in the inner-most directory. Since all repository # roots found here would be eith
upd; format
get_backend_for_dir
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
versioncontrol.py
10
13
https://github.com/jindongwang/transferlearning.git
4
75
0
67
126
Python
{ "docstring": "\n Return a VersionControl object if a repository of that type is found\n at the given directory.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
def get_backend_for_dir(self, location): # type: (str) -> Optional[VersionControl] vcs_backends = {} for vcs_backend in self._registry.values(): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue logger.debug('Determine that %s uses VCS: %s', location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: return None # Choose the VCS in the inner-most directory. Since all repository # roots found here would be either `location` or one of its # parents, the longest path should have the most path components, # i.e. the backend representing the inner-most repository. inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path]
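A tiny sketch of the "inner-most repository wins" selection used above; the paths are made up:
repo_roots = {"/home/user": "hg", "/home/user/project": "git"}
inner_most = max(repo_roots, key=len)   # longest root = deepest directory
print(inner_most)                       # /home/user/project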
8,173
44,123
177
airflow/www/security.py
54
17
def has_access(self, action_name, resource_name, user=None) -> bool: if not user: user = g.user if
Remove `:type` lines now sphinx-autoapi supports typehints (#20951) * Remove `:type` lines now sphinx-autoapi supports typehints Since we have no updated sphinx-autoapi to a more recent version it supports showing type hints in the documentation, so we don't need to have the type hints _and_ the `:type` lines -- which is good, as the ones in the doc strings are easy to get out of date! The following settings have been set: `autodoc_typehints = 'description'` -- show types in description (where previous `:type` used to show up) `autodoc_typehints_description_target = 'documented'` -- only link to types that are documented. (Without this we have some missing return types that aren't documented, and aren't linked to in our current python API docs, so this caused a build failure) `autodoc_typehints_format = 'short'` -- Shorten type hints where possible, i.e. `StringIO` instead of `io.StringIO` * Add argument type names to local spelling dictionary Now that we are using the type hints in the docs, sphinxcontrib-spelling picks them up as words to be checked, so we have to ignore them. I've chosen to add the provider specific ones to local dictionary files rather than the global, as for example, `mgmt` is an error in most places, but not in some of the Azure provider.
has_access
602abe8394fafe7de54df7e73af56de848cdf617
airflow
security.py
13
25
https://github.com/apache/airflow.git
6
96
0
41
150
Python
{ "docstring": "\n Verify whether a given user could perform a certain action\n (e.g can_read, can_write) on the given resource.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n ", "language": "en", "n_whitespaces": 105, "n_words": 48, "vocab_size": 30 }
def has_access(self, action_name, resource_name, user=None) -> bool: if not user: user = g.user if user.is_anonymous: user.roles = self.get_user_roles(user) has_access = self._has_access(user, action_name, resource_name) # FAB built-in view access method. Won't work for AllDag access. if self.is_dag_resource(resource_name): if action_name == permissions.ACTION_CAN_READ: has_access |= self.can_read_dag(resource_name, user) elif action_name == permissions.ACTION_CAN_EDIT: has_access |= self.can_edit_dag(resource_name, user) return has_access
26,290
118,557
30
lib/streamlit/forward_msg_cache.py
8
5
def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
has_refs
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
forward_msg_cache.py
9
6
https://github.com/streamlit/streamlit.git
1
17
0
8
30
Python
{ "docstring": "True if this Entry has references from any AppSession.\n\n If not, it can be removed from the cache.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0
47,191
195,092
117
projects/director/director_agent.py
32
15
def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices): val_id_shape = batch.
Added director agent and safety experiment commands. (#4602) * Added director agent and safety. * ran autoformat.sh
_reshape_to_record_metrics
2ef5586ed0d644abe18cd3ff45ef9fa01981e87c
ParlAI
director_agent.py
10
11
https://github.com/facebookresearch/ParlAI.git
1
79
0
25
116
Python
{ "docstring": "\n MultitaskAgent shuffles and combines examples from both classifier and the\n generator tasks in a single batch. We compute losses only for those exs in the\n batch resulting in losses and num_target_tokens vectors that are smaller than\n the.\n\n This method reshapes the losses and num_target_tokens vectors back to the batch size. This is needed to record local metrics as the metrics need to be of batch size.\n\n Args:\n batch: batch being processed in this iteration.\n losses: classifier or generator loss vector (shape: b' X 1), where b' <= b.\n num_target_tokens: number of tokens in each examples for classification or generation tasks. (shape: b' X 1), where b' <= b.\n indices: indices of (either classification or generation) exs for which the loss was computed.\n\n Returns:\n A tuple of reshaped losses and num_target_tokens, both of shape: b X 1.\n ", "language": "en", "n_whitespaces": 248, "n_words": 136, "vocab_size": 85 }
def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices): val_id_shape = batch.valid_indices.shape reshaped_losses = torch.zeros( val_id_shape, device=losses.device, dtype=losses.dtype ) reshaped_num_target_tokens = torch.zeros( val_id_shape, device=num_target_tokens.device, dtype=num_target_tokens.dtype ) reshaped_losses[indices] = losses reshaped_num_target_tokens[indices] = num_target_tokens return (reshaped_losses, reshaped_num_target_tokens)
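The reshaping above is a scatter by index; a plain NumPy sketch of the same idea (shapes and values are made up):
import numpy as np

batch_size = 6
indices = np.array([0, 2, 5])          # positions of the exs this loss covers
losses = np.array([0.7, 1.2, 0.3])     # shape (3,), i.e. b' < b
reshaped = np.zeros(batch_size)
reshaped[indices] = losses             # back to shape (b,), zeros elsewhere
print(reshaped)                        # [0.7 0.  1.2 0.  0.  0.3]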
48,805
198,126
65
sympy/physics/continuum_mechanics/truss.py
22
7
def add_support(self, location, type): if location not in s
Truss class initialized and documentation added
add_support
158f441d4fae4bd406597a41ba8af142e5eeb593
sympy
truss.py
11
5
https://github.com/sympy/sympy.git
2
33
0
22
55
Python
{ "docstring": "\n This method adds a pinned or roller support at a particular node\n\n Parameters\n ==========\n\n location: String or Symbol\n Label of the Node at which support is added.\n\n type: String\n Type of the support being provided at the node.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> from sympy import symbols\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.add_support('A', 'pinned')\n >>> t.supports\n {'A': 'pinned', 'B': 'none'}\n ", "language": "en", "n_whitespaces": 206, "n_words": 71, "vocab_size": 52 }
def add_support(self, location, type): if location not in self._node_labels: raise ValueError("Support must be added on a known node") else: self._supports[location] = type
54,789
217,444
132
python3.10.4/Lib/ftplib.py
43
18
def makeport(self): sock = socket.create_server(("", 0), family=self.af, backlog=1) port = sock.getsockname()[1] # Get proper port host =
add python 3.10.4 for windows
makeport
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
ftplib.py
11
11
https://github.com/XX-net/XX-Net.git
3
99
0
30
159
Python
{ "docstring": "Create a new socket and send a PORT command for it.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def makeport(self): sock = socket.create_server(("", 0), family=self.af, backlog=1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) return sock
@pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ])
117,470
320,967
69
tests/unit/javascript/test_greasemonkey.py
30
10
def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == "qutebrowser test userscript") @pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0)
greasemonkey: Don't implicitly load scripts Needed for #7245 and also seems like cleaner code.
test_all
21419c9ef5a90ea36a27afaf2503a57f8f9f8536
qutebrowser
test_greasemonkey.py
11
5
https://github.com/qutebrowser/qutebrowser.git
1
32
1
25
109
Python
{ "docstring": "Test that a script gets read from file, parsed and returned.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == "qutebrowser test userscript") @pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ])
77,443
263,700
130
bootloader/waflib/Utils.py
59
24
def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): global msysroot if not msysroot: msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
split_path_msys
64ccb7aea824fbec57f7ed1bbe483ec486183c13
pyinstaller
Utils.py
16
8
https://github.com/pyinstaller/pyinstaller.git
5
88
0
40
247
Python
{ "docstring": "\nSplits a path by / or \\\\; do not confuse this function with with ``os.path.split``\n\n:type path: string\n:param path: path to split\n:return: list of string\n", "language": "en", "n_whitespaces": 28, "n_words": 27, "vocab_size": 23 }
def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): global msysroot if not msysroot: msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ =
16,631
77,104
19
wagtail/images/utils.py
10
10
def find_image_duplicates(image, user, permission_policy): instances = permi
Add duplicate detection to multiple image upload view Add utility function to find an image's potential duplicates Add logic to detect duplicates on multiple images upload view Add template shown when a user is prompted to confirm a duplicate upload Add client-side logic to confirm a duplicate upload Add/update styles Add tests for duplicate image uploads Index Image file_hash field Ensure that a user can choose an image from duplicates returned by find_image_duplicates Use CSS classes instead of HTML elements to hide edit form on duplicate upload Add ImagesPermissionPolicy helper to retrieve the permission policy dynamically This allows test cases that override the base image model to pick up the corresponding permission policy, should they need it. Remove usage of sibling selector Use wagtail image templatetag to generate image Renamed ImagesPermissionPolicy to ImagesPermissionPolicyGetter Fail loudly when setting permission policy and a wromg image model is provided Add decorator to disconnect a signal's receiver during a test execution and use it in get_image_model tests Improve warning message on duplicate upload in multiple upload view Show matching form when confirming a duplicate upload
find_image_duplicates
c136f461bc052cef362991458e1bd1fca37a3da9
wagtail
utils.py
11
3
https://github.com/wagtail/wagtail.git
1
40
0
10
65
Python
{ "docstring": "\n Finds all the duplicates of a given image.\n To keep things simple, two images are considered to be duplicates if they have the same `file_hash` value.\n This function also ensures that the `user` can choose one of the duplicate images returned (if any).\n ", "language": "en", "n_whitespaces": 56, "n_words": 43, "vocab_size": 37 }
def find_image_duplicates(image, user, permission_policy): instances = permission_policy.instances_user_has_permission_for(user, "choose") return instances.exclude(pk=image.pk).filter(file_hash=image.file_hash)
3,427
20,562
43
pipenv/patched/notpip/_vendor/pyparsing/core.py
21
10
def _trim_arity(func, maxargs=2): global _trim_arity_call_line if f
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_trim_arity
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
core.py
10
19
https://github.com/pypa/pipenv.git
3
100
0
20
56
Python
{ "docstring": "decorator to trim function calls to match the arity of the target", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
def _trim_arity(func, maxargs=2): global _trim_arity_call_line if func in _single_arg_builtins: return lambda s, l, t: func(t) limit = 0 found_arity = False
85,794
286,407
728
openbb_terminal/cryptocurrency/overview/overview_controller.py
101
36
def call_exmarkets(self, other_args): parser = argparse.ArgumentParser( prog="exmarkets", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-e", "--exchange", help="Identifier of exchange e.g for Binance Exchange -> binance", dest="exchange", default="binance", type=str, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="display N number records", default=10, ) parser.add_argument( "-s", "--sortby", dest="sortby", type=str, help="Sort by given column. Default: reported_volume_24h_share", default="reported_volume_24h_share", choices=coinpaprika_model.EXMARKETS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=False, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help=, default=False, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-e") ns_pa
More Fixes to Crypto + key sort (#3244) * fix #3095 - autocomplete and command working + key sort * fix #3056 * fix [Bug] bugs #3048 * fix [Bug] bug #3017 * sort -> sortby, not ascend, tests * fix my goof ups Co-authored-by: james <jmaslek11@gmail.com>
call_exmarkets
09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995
OpenBBTerminal
overview_controller.py
12
69
https://github.com/OpenBB-finance/OpenBBTerminal.git
4
241
0
85
388
Python
{ "docstring": "Process exmarkets commandGet all exchange markets found for given exchange\n You can display only N number of records with --limit parameter.\n You can sort data by pair, base_currency_name, quote_currency_name, market_url, category,\n reported_volume_24h_share, trust_score --sortby parameter and also with --descend flag to sort descending.\n You can use additional flag --urls to see urls for each market\n Displays:\n exchange_id, pair, base_currency_name, quote_currency_name, market_url,\n category, reported_volume_24h_share, trust_score,Flag to show urls. If you will use that flag you will see only:\n exchange, pair, trust_score, market_url columns", "language": "en", "n_whitespaces": 209, "n_words": 82, "vocab_size": 59 }
def call_exmarkets(self, other_args): parser = argparse.ArgumentParser( prog="exmarkets", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-e", "--exchange", help="Identifier of exchange e.g for Binance Exchange -> binance", dest="exchange", default="binance", type=str, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="display N number records", default=10, ) parser.add_argument( "-s", "--sortby", dest="sortby", type=str, help="Sort by given column. Default: reported_volume_24h_share", default="reported_volume_24h_share", choices=coinpaprika_model.EXMARKETS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=False, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help=, default=False, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-e") ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: coinpaprika_view.display_exchange_markets( exchange=ns_parser.exchange, limit=ns_parser.limit, export=ns_parser.export, sortby=ns_parser.sortby, ascend=not ns_parser.descend, links=ns_parser.urls, )
91,872
292,803
27
tests/components/lcn/test_cover.py
15
11
async def test_unload_config_entry(hass, entry, lcn_connection): await hass.config_entries.async_unload(entry.entry_id) assert hass.states.get("cover.cover_outputs").state == STATE_UNAVAILABLE assert hass.s
Add tests for LCN cover platform (#64832)
test_unload_config_entry
684f01f4664ad490a314ae983194c0f439445a16
core
test_cover.py
10
4
https://github.com/home-assistant/core.git
1
47
0
12
80
Python
{ "docstring": "Test the cover is removed when the config entry is unloaded.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
async def test_unload_config_entry(hass, entry, lcn_connection): await hass.config_entries.async_unload(entry.entry_id) assert hass.states.get("cover.cover_outputs").state == STATE_UNAVAILABLE assert hass.states.get("cover.cover_relays").state == STATE_UNAVAILABLE
36,511
156,029
35
dask/array/core.py
14
8
def topk(self, k, axis=-1, split_every=None): from dask.array.reductions import topk return topk(self, k, axis=axis, split_every
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
topk
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
core.py
8
3
https://github.com/dask/dask.git
1
40
0
12
58
Python
{ "docstring": "The top k elements of an array.\n\n See :func:`dask.array.topk` for docstring.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
def topk(self, k, axis=-1, split_every=None): from dask.array.reductions import topk return topk(self, k, axis=axis, split_every=split_every)
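A short usage sketch for the method above on a small chunked array:
import numpy as np
import dask.array as da

x = da.from_array(np.array([5, 1, 9, 3, 7]), chunks=2)
print(x.topk(2).compute())   # [9 7], the two largest values, computed lazily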
68,398
240,287
49
packages/python/plotly/plotly/graph_objs/_figure.py
17
8
def for_each_ternary(self, fn, selector=None, row=None, col=None) -> "Figure": for obj in self.select_ternaries(selector=selector, row=row, col=col): fn(obj) r
type annotations for chainable Figure methods
for_each_ternary
c95b4fa4388f29e50b6966e45c94c5980013a01d
plotly.py
_figure.py
9
32
https://github.com/plotly/plotly.py.git
2
48
0
17
73
Python
{ "docstring": "\n Apply a function to all ternary objects that satisfy the\n specified selection criteria\n\n Parameters\n ----------\n fn:\n Function that inputs a single ternary object.\n selector: dict, function, or None (default None)\n Dict to use as selection criteria.\n ternary objects will be selected if they contain\n properties corresponding to all of the dictionary's keys, with\n values that exactly match the supplied values. If None\n (the default), all ternary objects are selected. If a\n function, it must be a function accepting a single argument and\n returning a boolean. The function will be called on each\n ternary and those for which the function returned True will\n be in the selection.\n row, col: int or None (default None)\n Subplot row and column index of ternary objects to select.\n To select ternary objects by row and column, the Figure\n must have been created using plotly.subplots.make_subplots.\n If None (the default), all ternary objects are selected.\n Returns\n -------\n self\n Returns the Figure object that the method was called on\n ", "language": "en", "n_whitespaces": 404, "n_words": 161, "vocab_size": 95 }
def for_each_ternary(self, fn, selector=None, row=None, col=None) -> "Figure": for obj in self.select_ternaries(selector=selector, row=row, col=col): fn(obj) return self
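A minimal usage sketch, assuming a figure built with make_subplots and ternary-type subplots:
from plotly.subplots import make_subplots

fig = make_subplots(rows=1, cols=2,
                    specs=[[{"type": "ternary"}, {"type": "ternary"}]])
# Apply the same update to every ternary object in the figure.
fig.for_each_ternary(lambda t: t.update(bgcolor="whitesmoke"))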
13,507
63,798
69
.venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py
19
8
def call(self, *args, **kwargs): warnings.warn(
upd; format
call
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
__init__.py
9
6
https://github.com/jindongwang/transferlearning.git
1
34
0
19
58
Python
{ "docstring": "Use ``__call__`` instead because this method is deprecated.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def call(self, *args, **kwargs): warnings.warn( "'call()' method is deprecated. " + "Use '__call__()' instead", DeprecationWarning, ) return self.__call__(*args, **kwargs)
79,913
269,121
156
keras/distribute/distributed_training_utils_v1.py
86
19
def validate_per_replica_inputs(distribution_strategy, x): # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = tf.nest.flatten(x) x_values_list = [] for x in per_replica_list: # At this point x should contain only tensors. x_values = distribution_strategy.unwrap(x) for value in x_values: if not tf.is_tensor(value): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(value))) if not tf.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are
Rework a test to avoid instantiating DistributedValues directly. PiperOrigin-RevId: 438824819
validate_per_replica_inputs
2d7dc6080f0824200e317f255e3290da60e0f98a
keras
distributed_training_utils_v1.py
17
14
https://github.com/keras-team/keras.git
5
94
0
64
159
Python
{ "docstring": "Validates PerReplica dataset input list.\n\n Args:\n distribution_strategy: The current DistributionStrategy used to call\n `fit`, `evaluate` and `predict`.\n x: A list of PerReplica objects that represent the input or\n target values.\n\n Returns:\n List containing the first element of each of the PerReplica objects in\n the input list.\n\n Raises:\n ValueError: If any of the objects in the `per_replica_list` is not a tensor.\n\n ", "language": "en", "n_whitespaces": 89, "n_words": 60, "vocab_size": 44 }
def validate_per_replica_inputs(distribution_strategy, x): # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = tf.nest.flatten(x) x_values_list = [] for x in per_replica_list: # At this point x should contain only tensors. x_values = distribution_strategy.unwrap(x) for value in x_values: if not tf.is_tensor(value): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(value))) if not tf.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are the same. validate_all_tensor_shapes(x, x_values) validate_all_tensor_types(x, x_values) x_values_list.append(x_values[0]) return x_values_list
112,213
313,595
116
homeassistant/components/lifx/light.py
33
12
def get_mac_addr(self): if ( self.bulb.host_firmware_version and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW ): octets = [int(octet, 16) for octet in self.mac_addr.split(":")] octets[5] = (octets[5] + 1) % 256 return ":".join(f"{octet:02x}" for octet in octets) return self.ma
Refactor LIFX discovery to prevent duplicate discovery response handling (#72213) * Partially revert #70458 and allow duplicate LIFX discoveries Signed-off-by: Avi Miller <me@dje.li> * Only process one discovery at a time * Revert all LIFX duplicate/inflight discovery checks Also remember LIFX Switches and do as little processing for them as possible. Signed-off-by: Avi Miller <me@dje.li> * Bump aiolifx version to support the latest LIFX devices LIFX added 22 new product definitions to their public product list at the end of January and those new products are defined in aiolifx v0.8.1, so bump the dependency version. Also switched to testing for relays instead of maintaining a seperate list of switch product IDs. Fixes #72894. Signed-off-by: Avi Miller <me@dje.li> * Refactor LIFX discovery to better handle duplicate responses Signed-off-by: Avi Miller <me@dje.li> * Update clear_inflight_discovery with review suggestion Signed-off-by: Avi Miller <me@dje.li> * Move the existing entity check to before the asyncio lock Signed-off-by: Avi Miller <me@dje.li> * Bail out of discovery early and if an entity was created Also ensure that the entity always has a unique ID even if the bulb was not successfully discovered. Signed-off-by: Avi Miller <me@dje.li> Co-authored-by: J. Nick Koston <nick@koston.org>
get_mac_addr
a0974e0c7297537149985f93544dd6f8ed8cfded
core
light.py
13
9
https://github.com/home-assistant/core.git
5
78
0
28
131
Python
{ "docstring": "Increment the last byte of the mac address by one for FW>3.70.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
def get_mac_addr(self): if ( self.bulb.host_firmware_version and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW ): octets = [int(octet, 16) for octet in self.mac_addr.split(":")] octets[5] = (octets[5] + 1) % 256 return ":".join(f"{octet:02x}" for octet in octets) return self.mac_addr
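The last-octet arithmetic above, spelled out on a made-up address (note the modulo wrap at ff):
mac = "d0:73:d5:01:02:ff"
octets = [int(octet, 16) for octet in mac.split(":")]
octets[5] = (octets[5] + 1) % 256
print(":".join(f"{octet:02x}" for octet in octets))   # d0:73:d5:01:02:00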
43,384
181,595
19
tests/driver_tests.py
10
2
def test_positive_integer_or_none_4(): assert positive_integer_or_none('none') is None
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
test_positive_integer_or_none_4
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
driver_tests.py
9
3
https://github.com/EpistasisLab/tpot.git
1
19
0
7
38
Python
{ "docstring": "Assert that the TPOT CLI interface's positive_integer_or_none parsing return None when value is string 'None' or 'none'.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 17 }
def test_positive_integer_or_none_4(): assert positive_integer_or_none('none') is None assert positive_integer_or_none('None') is None
56,941
223,511
361
python3.10.4/Lib/email/_header_value_parser.py
112
26
def get_local_part(value): local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( "expected local-part but found '{}'".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get
add python 3.10.4 for windows
get_local_part
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_header_value_parser.py
15
35
https://github.com/XX-net/XX-Net.git
13
222
0
71
377
Python
{ "docstring": " local-part = dot-atom / quoted-string / obs-local-part\n\n ", "language": "en", "n_whitespaces": 11, "n_words": 7, "vocab_size": 6 }
def get_local_part(value): local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( "expected local-part but found '{}'".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( "local-part is not dot-atom, quoted-string, or obs-local-part")) else: local_part.defects.append(errors.ObsoleteHeaderDefect( "local-part is not a dot-atom (contains CFWS)")) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( "local-part contains non-ASCII characters)")) return local_part, value
5,984
32,803
238
tests/test_feature_extraction_common.py
57
20
def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(feature_extract_te
Add VideoMAE (#17821) * First draft * Add VideoMAEForVideoClassification * Improve conversion script * Add VideoMAEForPreTraining * Add VideoMAEFeatureExtractor * Improve VideoMAEFeatureExtractor * Improve docs * Add first draft of model tests * Improve VideoMAEForPreTraining * Fix base_model_prefix * Make model take pixel_values of shape (B, T, C, H, W) * Add loss computation of VideoMAEForPreTraining * Improve tests * Improve model testsé * Make all tests pass * Add VideoMAE to main README * Add tests for VideoMAEFeatureExtractor * Add integration test * Improve conversion script * Rename patch embedding class * Remove VideoMAELayer from init * Update design of patch embeddings * Improve comments * Improve conversion script * Improve conversion script * Add conversion of pretrained model * Add loss verification of pretrained model * Add loss verification of unnormalized targets * Add integration test for pretraining model * Apply suggestions from code review * Fix bug to make feature extractor resize only shorter edge * Address more comments * Improve normalization of videos * Add doc examples * Move constants to dedicated script * Remove scripts * Transfer checkpoints, fix docs * Update script * Update image mean and std * Fix doc tests * Set return_tensors to NumPy by default * Revert the previous change Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
prepare_video_inputs
f9a0008d2d3082a665f711b24f5314e4a8205fab
transformers
test_feature_extraction_common.py
16
19
https://github.com/huggingface/transformers.git
4
111
0
49
169
Python
{ "docstring": "This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if\n one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True.\n\n One can specify whether the videos are of the same resolution or not.\n ", "language": "en", "n_whitespaces": 60, "n_words": 51, "vocab_size": 30 }
def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(feature_extract_tester.batch_size): if equal_resolution: width = height = feature_extract_tester.max_resolution else: width, height = np.random.choice( np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2 ) video = prepare_video( feature_extract_tester=feature_extract_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs
45,468
186,372
191
certbot-apache/certbot_apache/_internal/configurator.py
38
18
def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path) if header_path: # "Existing Header directive for virtualhost" pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.parser.aug.get(matc
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
_verify_no_matching_http_header
eeca208c8f57304590ac1af80b496e61021aaa45
certbot
configurator.py
17
9
https://github.com/certbot/certbot.git
4
79
0
32
130
Python
{ "docstring": "Checks to see if there is an existing Header directive that\n contains the string header_substring.\n\n :param ssl_vhost: vhost to check\n :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n :param header_substring: string that uniquely identifies a header.\n e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.\n :type str\n\n :returns: boolean\n :rtype: (bool)\n\n :raises errors.PluginEnhancementAlreadyPresent When header\n header_substring exists\n\n ", "language": "en", "n_whitespaces": 139, "n_words": 46, "vocab_size": 41 }
def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path) if header_path: # "Existing Header directive for virtualhost" pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.parser.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( "Existing %s header" % header_substring)
@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)
26,809
120,281
99
jax/_src/numpy/lax_numpy.py
67
30
def indices(dimensions, dtype=int32, sparse=False): dimensions = tuple( core.concrete_or_error(operator.index, d, "dimensions argument of jnp.indices") for d in dimensions) N = len(dimensions) output = [] s = dimensio
replace int with operator.index part2 This change align the behavior of `ravel_multi_index`, `split` and `indices` to their `numpy` counterparts. Also ensure size argument of `nonzero` should be integer. The changes with `*space` are only simplification
indices
667d63aa2d4fbf7c9da73aab0e24c5c4c33cb5ba
jax
lax_numpy.py
15
15
https://github.com/google/jax.git
6
141
1
50
237
Python
{ "docstring": "\\\nJax adds the optional `total_repeat_length` parameter which specifies the total\nnumber of repeat, and defaults to sum(repeats). It must be specified for repeat\nto be compilable. If `sum(repeats)` is larger than the specified\n`total_repeat_length` the remaining values will be discarded. In the case of\n`sum(repeats)` being smaller than the specified target length, the final value\nwill be repeated.\n", "language": "en", "n_whitespaces": 52, "n_words": 59, "vocab_size": 42 }
def indices(dimensions, dtype=int32, sparse=False): dimensions = tuple( core.concrete_or_error(operator.index, d, "dimensions argument of jnp.indices") for d in dimensions) N = len(dimensions) output = [] s = dimensions for i, dim in enumerate(dimensions): idx = lax.iota(dtype, dim) if sparse: s = (1,)*i + (dim,) + (1,)*(N - i - 1) output.append(lax.broadcast_in_dim(idx, s, (i,))) if sparse: return tuple(output) return stack(output, 0) if output else array([], dtype=dtype) _TOTAL_REPEAT_LENGTH_DOC = @_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)
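jnp.indices mirrors the NumPy semantics, so a NumPy sketch shows the dense vs. sparse shapes returned by the function above:
import numpy as np

dense = np.indices((2, 3))                    # shape (2, 2, 3): one coordinate grid per dimension
rows, cols = np.indices((2, 3), sparse=True)  # broadcastable pieces
print(dense.shape, rows.shape, cols.shape)    # (2, 2, 3) (2, 1) (1, 3)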
73,887
251,914
27
test/mitmproxy/proxy/layers/test_tcp.py
15
8
def test_open_connection(tctx): assert Playbook(tcp.TCPLayer(tctx,
make it black!
test_open_connection
b3587b52b25077f68116b9852b041d33e7fc6601
mitmproxy
test_tcp.py
10
4
https://github.com/mitmproxy/mitmproxy.git
1
48
0
11
74
Python
{ "docstring": "\n If there is no server connection yet, establish one,\n because the server may send data first.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 15 }
def test_open_connection(tctx): assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server) tctx.server.timestamp_start = 1624544785 assert Playbook(tcp.TCPLayer(tctx, True)) << None
77,903
264,900
74
netbox/dcim/api/serializers.py
20
11
def get_connected_endpoints(self, obj): endpoints = obj.connected_endpoints if endpoints: serializer = get_serializer_for_model(endpoints[0], prefix='Nested') context = {'request': self.context['request']}
Update connected_endpoint serializer field to support multiple objects
get_connected_endpoints
4c51dbba809b6b199a96da30f32f4dd3cd6ea6ed
netbox
serializers.py
12
6
https://github.com/netbox-community/netbox.git
2
56
0
18
92
Python
{ "docstring": "\n Return the appropriate serializer for the type of connected object.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
def get_connected_endpoints(self, obj): endpoints = obj.connected_endpoints if endpoints: serializer = get_serializer_for_model(endpoints[0], prefix='Nested') context = {'request': self.context['request']} return serializer(endpoints, many=True, context=context).data
41,930
176,485
60
networkx/algorithms/tree/tests/test_operations.py
25
11
def test_basic(self): trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] actual = nx.join(trees) expected = nx.full_rary_tree(2, 2**3 - 1)
Update black (#5438) * CI: sync up black dev requirements version with precommit * Run black Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com>
test_basic
f6755ffa00211b523c6c0bec5398bc6c3c43c8b1
networkx
test_operations.py
12
5
https://github.com/networkx/networkx.git
2
64
0
22
99
Python
{ "docstring": "Tests for joining multiple subtrees at a root node.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_basic(self): trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] actual = nx.join(trees) expected = nx.full_rary_tree(2, 2**3 - 1) assert nx.is_isomorphic(actual, expected)
51,805
206,949
251
tests/admin_changelist/tests.py
77
36
def test_result_list_html(self): new_parent = Parent.objects.create(name="parent") new_child = Child.objects.create(name="name", parent=new_parent) request = self.factory.get("/child/") request.user = self.superuser m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}
Refs #33476 -- Reformatted code with Black.
test_result_list_html
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
11
22
https://github.com/django/django.git
1
150
0
59
251
Python
{ "docstring": "\n Inclusion tag result_list generates a table when with default\n ModelAdmin settings.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
def test_result_list_html(self): new_parent = Parent.objects.create(name="parent") new_child = Child.objects.create(name="name", parent=new_parent) request = self.factory.get("/child/") request.user = self.superuser m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}" ) context = Context({"cl": cl, "opts": Child._meta}) table_output = template.render(context) link = reverse("admin:admin_changelist_child_change", args=(new_child.id,)) row_html = build_tbody_html( new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent ) self.assertNotEqual( table_output.find(row_html), -1, "Failed to find expected row element: %s" % table_output, )
16,376
75,179
176
wagtail/images/tests/test_admin_views.py
37
22
def test_delete_uploaded_image(self): # Send request response = self.client.post( reverse( "wagtailimages:delete_upload_multiple", args=(self.uploaded_image.id,) ) )
Reformat with black
test_delete_uploaded_image
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_admin_views.py
14
13
https://github.com/wagtail/wagtail.git
1
97
0
29
166
Python
{ "docstring": "\n This tests that a POST request to the delete view deletes the UploadedImage\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
def test_delete_uploaded_image(self): # Send request response = self.client.post( reverse( "wagtailimages:delete_upload_multiple", args=(self.uploaded_image.id,) ) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-Type"], "application/json") # Make sure the image is deleted self.assertFalse( UploadedImage.objects.filter(id=self.uploaded_image.id).exists() ) # Check JSON response_json = json.loads(response.content.decode()) self.assertTrue(response_json["success"])
20,625
101,204
120
lib/align/aligned_face.py
30
12
def matrix(self) -> np.ndarray:
lib.align.aligned_face updates - Typing - Legacy support for pre-aligned faces - Coverage support for pre-aligned faces - Standardized retrieval of sub-crops
matrix
a2de4a97985dc62db3b140a924aeac2be733abf8
faceswap
aligned_face.py
12
11
https://github.com/deepfakes/faceswap.git
2
89
0
26
144
Python
{ "docstring": " :class:`numpy.ndarray`: The 3x2 transformation matrix for extracting and aligning the\n core face area out of the original frame, with no padding or sizing applied. The returned\n matrix is offset for the given :attr:`centering`. ", "language": "en", "n_whitespaces": 48, "n_words": 33, "vocab_size": 28 }
def matrix(self) -> np.ndarray: if not np.any(self._matrices[self._centering]): matrix = self._matrices["legacy"].copy() matrix[:, 2] -= self.pose.offset[self._centering] self._matrices[self._centering] = matrix logger.trace("original matrix: %s, new matrix: %s", # type: ignore self._matrices["legacy"], matrix) return self._matrices[self._centering]
78,755
267,137
364
lib/ansible/parsing/plugin_docs.py
100
21
def read_docstub(filename): in_documentation = False capturing = False indent_detection = '' doc_stub = [] with open(filename, 'r') as t_module_data: for line in t_module_data: if in_documentation:
expand ansible-doc coverage (#74963) * Expand ansible-doc to tests/filters and fix existing issues enable filter/test docs if in single file or companion yaml add docs for several filters/tests plugins allow .yml companion for docs for other plugins, must be colocated verify plugins are valid (not modules, cannot) fix 'per collection' filtering limit old style deprecation (_ prefix) to builtin/legacy start move to pathlib for saner path handling moved some funcitons, kept backwards compat shims with deprecation notice Co-authored-by: Abhijeet Kasurde <akasurde@redhat.com> Co-authored-by: Felix Fontein <felix@fontein.de> Co-authored-by: Sandra McCann <samccann@redhat.com>
read_docstub
b439e41a915ccec0ccbabecc966919ea406db74e
ansible
plugin_docs.py
23
21
https://github.com/ansible/ansible.git
11
162
0
63
287
Python
{ "docstring": "\n Quickly find short_description using string methods instead of node parsing.\n This does not return a full set of documentation strings and is intended for\n operations like ansible-doc -l.\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 27 }
def read_docstub(filename):
    in_documentation = False
    capturing = False
    indent_detection = ''
    doc_stub = []

    with open(filename, 'r') as t_module_data:
        for line in t_module_data:
            if in_documentation:
                # start capturing the stub until indentation returns
                if capturing and line.startswith(indent_detection):
                    doc_stub.append(line)

                elif capturing and not line.startswith(indent_detection):
                    break

                elif line.lstrip().startswith('short_description:'):
                    capturing = True
                    # Detect that the short_description continues on the next line if it's indented more
                    # than short_description itself.
                    indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1)
                    doc_stub.append(line)

            elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line):
                in_documentation = True

    short_description = r''.join(doc_stub).strip().rstrip('.')
    data = AnsibleLoader(short_description, file_name=filename).get_single_data()

    return data
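A minimal usage sketch for the read_docstub record above. The import path is inferred from the record's path field (lib/ansible/parsing/plugin_docs.py), and 'my_module.py' is a hypothetical module path containing a DOCUMENTATION block, not a value taken from the record:

    # Illustrative only: 'my_module.py' is a placeholder path.
    from ansible.parsing.plugin_docs import read_docstub

    data = read_docstub('my_module.py')
    if data:
        # Only the short_description stub is parsed, which is what keeps
        # listing-style operations (e.g. `ansible-doc -l`) cheap.
        print(data.get('short_description'))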
25,535
115,740
71
mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py
27
12
def test_02_train_predictor(self):
    query = f
    response = self.handler.native_query(query)
    self.assertTrue(response.type == R
lw handler tests
test_02_train_predictor
91e73cdd2402a12373379b85ef1934d8ecfa364e
mindsdb
test_lightwood_handler.py
9
8
https://github.com/mindsdb/mindsdb.git
1
31
0
14
66
Python
{ "docstring": "\n CREATE PREDICTOR {self.test_model_name_1}\n FROM {PG_HANDLER_NAME} (SELECT * FROM demo_data.home_rentals limit 50)\n PREDICT rental_price\n ", "language": "en", "n_whitespaces": 54, "n_words": 13, "vocab_size": 12 }
def test_02_train_predictor(self):
    query = f
    response = self.handler.native_query(query)
    self.assertTrue(response.type == RESPONSE_TYPE.OK)

# def test_03_retrain_predictor(self):
#     query = f"RETRAIN {self.test_model_name_1}"
#     response = self.handler.native_query(query)
#     self.assertTrue(response.type == RESPONSE_TYPE.OK)
36,541
156,079
68
dask/core.py
32
9
def get_dependencies(dsk, key=None, task=no_default, as_list=False):
    if key is not None:
        arg = dsk[key]
    elif task is not no_default:
        arg = task
    else:
        raise ValueError("Provide either key or task")
    return keys_in_tasks(dsk, [arg], as_list=as_list)
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
get_dependencies
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
core.py
11
8
https://github.com/dask/dask.git
3
59
0
26
92
Python
{ "docstring": "Get the immediate tasks on which this task depends\n\n Examples\n --------\n >>> inc = lambda x: x + 1\n >>> add = lambda x, y: x + y\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 'a': (add, (inc, 'x'), 1)}\n\n >>> get_dependencies(dsk, 'x')\n set()\n\n >>> get_dependencies(dsk, 'y')\n {'x'}\n\n >>> get_dependencies(dsk, 'z') # doctest: +SKIP\n {'x', 'y'}\n\n >>> get_dependencies(dsk, 'w') # Only direct dependencies\n {'z'}\n\n >>> get_dependencies(dsk, 'a') # Ignore non-keys\n {'x'}\n\n >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly\n {'x'}\n ", "language": "en", "n_whitespaces": 190, "n_words": 92, "vocab_size": 61 }
def get_dependencies(dsk, key=None, task=no_default, as_list=False):
    if key is not None:
        arg = dsk[key]
    elif task is not no_default:
        arg = task
    else:
        raise ValueError("Provide either key or task")
    return keys_in_tasks(dsk, [arg], as_list=as_list)
33,589
146,016
87
python/ray/ml/tests/test_checkpoints.py
24
13
def test_dict_checkpoint_fs(self):
    checkpoint = self._prepare_dict_checkpoint()

    # Convert into fs c
[ml] Add Ray ML / AIR checkpoint implementation (#22691) This PR splits up the changes in #22393 and introduces an implementation of the ML Checkpoint interface used by Ray Tune. This means, the TuneCheckpoint class implements the to/from_[bytes|dict|directory|object_ref|uri] conversion functions, as well as more high-level functions to transition between the different TuneCheckpoint classes. It also includes test cases for Tune's main conversion modes, i.e. dict - intermediate - dict and fs - intermediate - fs. These changes will be the basis for refactoring the tune interface to use TuneCheckpoint objects instead of TrialCheckpoints (externally) and instead of paths/objects (internally).
test_dict_checkpoint_fs
b267be475863a66e9feedb2be5f0a30a2ed8c493
ray
test_checkpoints.py
8
7
https://github.com/ray-project/ray.git
1
50
0
18
87
Python
{ "docstring": "Test conversion from dict to FS checkpoint and back.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_dict_checkpoint_fs(self):
    checkpoint = self._prepare_dict_checkpoint()

    # Convert into fs checkpoint
    path = checkpoint.to_directory()
    self.assertIsInstance(path, str)

    # Create from path
    checkpoint = Checkpoint.from_directory(path)
    self.assertTrue(checkpoint._local_path)
    self._assert_dict_checkpoint(checkpoint)
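A hedged sketch of the dict-to-directory-to-dict round trip that this record's test exercises. The import path and the from_dict/to_dict calls are assumptions drawn from the record's path field (python/ray/ml/...) and the commit message's to/from_[bytes|dict|directory|object_ref|uri] list; they are not verified against the actual API:

    # Sketch under stated assumptions; not the test's own helper methods.
    from ray.ml.checkpoint import Checkpoint

    original = Checkpoint.from_dict({"metric": 1})   # dict-backed checkpoint
    path = original.to_directory()                   # write the checkpoint out to a directory
    restored = Checkpoint.from_directory(path)       # reload as a directory-backed checkpoint
    print(restored.to_dict())                        # expected to contain {"metric": 1}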