Dataset schema (one row per extracted function; column name, dtype, and value range or string-length range as reported by the dataset viewer):

| column | dtype | range / lengths |
|---|---|---|
| ast_errors | string | lengths 0 to 3.2k |
| d_id | int64 | 44 to 121k |
| id | int64 | 70 to 338k |
| n_whitespaces | int64 | 3 to 14k |
| path | string | lengths 8 to 134 |
| n_words | int64 | 4 to 4.82k |
| n_identifiers | int64 | 1 to 131 |
| random_cut | string | lengths 16 to 15.8k |
| commit_message | string | lengths 2 to 15.3k |
| fun_name | string | lengths 1 to 84 |
| commit_id | string | lengths 40 to 40 |
| repo | string | lengths 3 to 28 |
| file_name | string | lengths 5 to 79 |
| ast_levels | int64 | 6 to 31 |
| nloc | int64 | 1 to 548 |
| url | string | lengths 31 to 59 |
| complexity | int64 | 1 to 66 |
| token_counts | int64 | 6 to 2.13k |
| n_ast_errors | int64 | 0 to 28 |
| vocab_size | int64 | 4 to 1.11k |
| n_ast_nodes | int64 | 15 to 19.2k |
| language | string (1 class) | "Python" |
| documentation | dict | nested docstring record (text, language, size statistics) |
| code | string | lengths 101 to 62.2k |

The data rows below list these fields in the same order, separated by `|`.
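The column summary above follows the Hugging Face dataset-viewer notation (`stringlengths`, `stringclasses`, `int64`). As a minimal sketch of how a dataset with this schema could be loaded and inspected with the `datasets` library (the dataset identifier used here is a placeholder, not the real name):

```python
# Minimal sketch: load a dataset with the schema above and inspect one record.
# "owner/python-commit-functions" is a placeholder id, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("owner/python-commit-functions", split="train")

row = ds[0]
print(row["repo"], row["file_name"], row["fun_name"])       # source repo, file and function
print(row["commit_id"], row["url"])                         # 40-char SHA and repository URL
print(row["nloc"], row["complexity"], row["token_counts"])  # per-function metrics
print(row["documentation"]["docstring"][:120])              # nested dict holding the docstring
print(row["code"][:120])                                    # full function source
```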
23,469 | 109,189 | 201 | lib/matplotlib/tri/triinterpolate.py | 89 | 20 | def _roll_vectorized(M, roll_indices, axis):
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
else: # 1
for ir in range(r) | Clean up code in tri | _roll_vectorized | c739787b88f6bf36f5ac5603b84773287bcd98b7 | matplotlib | triinterpolate.py | 18 | 20 | https://github.com/matplotlib/matplotlib.git | 6 | 177 | 0 | 51 | 266 | Python | {
"docstring": "\n Roll an array of matrices along *axis* (0: rows, 1: columns) according to\n an array of indices *roll_indices*.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 18,
"vocab_size": 15
} | def _roll_vectorized(M, roll_indices, axis):
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
else: # 1
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
return M_roll
|
|
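As an illustrative aside (not part of the record above): the `_roll_vectorized` helper in this row rolls each matrix in a stack by its own per-matrix shift. A small check, assuming the function above is defined in the current namespace, shows it matches `np.roll` applied matrix by matrix:

```python
# Illustrative check of _roll_vectorized for axis=0: each matrix M[k] has its
# rows rolled by roll_indices[k], i.e. np.roll applied per matrix in the stack.
import numpy as np

n, r, c = 4, 3, 2
M = np.arange(n * r * c, dtype=float).reshape(n, r, c)
roll_indices = np.array([0, 1, 2, 1], dtype=np.int32)

M_roll = _roll_vectorized(M, roll_indices, axis=0)  # function from the record above
for k in range(n):
    assert np.array_equal(M_roll[k], np.roll(M[k], roll_indices[k], axis=0))
```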
999 | 6,463 | 256 | tests/integration_tests/test_visualization_api.py | 44 | 25 | def test_calibration_1_vs_all_vis_api(experiment_to_use):
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = os.path.join(tmpvizdir, f"*.{viz_output}")
visualize.calibration_1_vs_all(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[6],
labels_limit=0,
model_namess=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format= | Added lightweight preprocessor for categorical features (#1761)
* Added lightweight preprocessor for categorical features
* Fix visualization tests.
* Get the number of classes from metadata instead of figuring it out on the fly from ground truth labels.
Co-authored-by: Justin Zhao <justinxzhao@gmail.com> | test_calibration_1_vs_all_vis_api | f277e3bff51842c93f99187605dfaf19b5790b29 | ludwig | test_visualization_api.py | 14 | 20 | https://github.com/ludwig-ai/ludwig.git | 2 | 110 | 0 | 40 | 178 | Python | {
"docstring": "Ensure pdf and png figures can be saved via visualization API call.\n\n :param experiment_to_use: Object containing trained model and results to\n test visualization\n :return: None\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 25,
"vocab_size": 23
} | def test_calibration_1_vs_all_vis_api(experiment_to_use):
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = os.path.join(tmpvizdir, f"*.{viz_output}")
visualize.calibration_1_vs_all(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[6],
labels_limit=0,
model_namess=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 7 == len(figure_cnt)
|
|
71,968 | 247,872 | 800 | tests/rest/admin/test_server_notice.py | 240 | 42 | def test_send_server_notice_delete_room(self) -> None:
# user has no room memberships
self._check_invite_and_join_status(self.other_user, 0, 0)
# send first message
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={
"user_id": self.other_user,
"content": {"msgtype": "m.text", "body": "test msg one"},
},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
first_room_id = invited_rooms[0].room_id
# user joins the room and is member now
self.helper.join(
room=first_room_id, user=self.other_user, tok=self.other_user_token
)
self._check_invite_and_join_status(self.other_user, 0, 1)
# get messages
messages = self._sync_and_get_messages(first_room_id, self.other_user_token)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0]["content"]["body"], "test msg one")
self.assertEqual(messages[0]["sender"], "@notices:test")
# shut down and purge room
self.get_success(
self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user)
)
self.get_success(self.pagination_handler.purge_room(first_room_id))
# user is not member anymore
self._check_invite_and_join_status(self.other_user, 0, 0)
# It doesn't really matter what API we use here, we just want to assert
# that the room doesn't exist.
summary = self.get_success(self.store.get_room_summary(first_room_id))
# The summary should be empty since the room doesn't exist.
self.assertEqual(summary, {})
# invalidate cache of server notices room_ids
# if server tries to send to a cached room_id it gives an error
self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
# send second message
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={
"user_id": self.other_user,
"content": {"msgtype": "m.text", "body": "test msg two"},
},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
| Remove redundant `get_success` calls in test code (#12346)
There are a bunch of places we call get_success on an immediate value, which is unnecessary. Let's rip them out, and remove the redundant functionality in get_success and friends. | test_send_server_notice_delete_room | 33ebee47e4e96a2b6fdf72091769e59034dc550f | synapse | test_server_notice.py | 14 | 55 | https://github.com/matrix-org/synapse.git | 1 | 443 | 0 | 122 | 715 | Python | {
"docstring": "\n Tests that the user get server notice in a new room\n after the first server notice room was deleted.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 15
} | def test_send_server_notice_delete_room(self) -> None:
# user has no room memberships
self._check_invite_and_join_status(self.other_user, 0, 0)
# send first message
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={
"user_id": self.other_user,
"content": {"msgtype": "m.text", "body": "test msg one"},
},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
first_room_id = invited_rooms[0].room_id
# user joins the room and is member now
self.helper.join(
room=first_room_id, user=self.other_user, tok=self.other_user_token
)
self._check_invite_and_join_status(self.other_user, 0, 1)
# get messages
messages = self._sync_and_get_messages(first_room_id, self.other_user_token)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0]["content"]["body"], "test msg one")
self.assertEqual(messages[0]["sender"], "@notices:test")
# shut down and purge room
self.get_success(
self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user)
)
self.get_success(self.pagination_handler.purge_room(first_room_id))
# user is not member anymore
self._check_invite_and_join_status(self.other_user, 0, 0)
# It doesn't really matter what API we use here, we just want to assert
# that the room doesn't exist.
summary = self.get_success(self.store.get_room_summary(first_room_id))
# The summary should be empty since the room doesn't exist.
self.assertEqual(summary, {})
# invalidate cache of server notices room_ids
# if server tries to send to a cached room_id it gives an error
self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
# send second message
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={
"user_id": self.other_user,
"content": {"msgtype": "m.text", "body": "test msg two"},
},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
second_room_id = invited_rooms[0].room_id
# user joins the room and is member now
self.helper.join(
room=second_room_id, user=self.other_user, tok=self.other_user_token
)
self._check_invite_and_join_status(self.other_user, 0, 1)
# get message
messages = self._sync_and_get_messages(second_room_id, self.other_user_token)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0]["content"]["body"], "test msg two")
self.assertEqual(messages[0]["sender"], "@notices:test")
# second room has new ID
self.assertNotEqual(first_room_id, second_room_id)
|
|
47,232 | 195,244 | 46 | projects/bb3/agents/utils.py | 14 | 6 | def is_request_failed_response(resp):
return len(
resp.get('failures', [])
) > 0 or APIUtils.METASEQ_FAIL_MESSAGE_TEXT in resp.get('text', '')
| Patch 8322 (#4709)
* add dafetymix teacher
* safety_mix teacher
* safety_mix teacher pos and neg teachers
* add tests for teacher
* add license info
* improvement
* add task list
* add task list and lint
* add init.py
* adding some patch to director
* seeker changes
* th
* 3
* jing
* changes
* z and r
* remove .opts
* fix docs
* add contrractions
* lint
Co-authored-by: Dexter Ju <da.ju.fr@gmail.com>
Co-authored-by: Jing Xu <jingxu23@fb.com> | is_request_failed_response | b1acb681207559da56a787ba96e16f0e23697d92 | ParlAI | utils.py | 12 | 4 | https://github.com/facebookresearch/ParlAI.git | 2 | 34 | 0 | 14 | 61 | Python | {
"docstring": "\n Whether the requests to Metaseq worker have failed.\n\n It checks this based on the existences of the failure reasons as they get\n accumulated in `_make_request` functionn calls.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"vocab_size": 25
} | def is_request_failed_response(resp):
return len(
resp.get('failures', [])
) > 0 or APIUtils.METASEQ_FAIL_MESSAGE_TEXT in resp.get('text', '')
|
|
23,337 | 108,776 | 96 | lib/matplotlib/patches.py | 36 | 9 | def draw(self, renderer):
if not self.get_visibl | Remove ineffective exclusion of Arcs without parent Axes.
The `if not hasattr(self, 'axes'): raise RuntimeError(...)` check was
ineffectual, as artists now always have an Axes attribute, which can
just be None for some artists. In fact, small Arcs are drawn just fine
without a parent Axes; e.g.
```
from pylab import *
from matplotlib.patches import *
fig = figure()
fig.add_artist(Ellipse((.2, .2), .1, .3, angle=45)) # for comparison
fig.add_artist(Arc((.2, .2), .1, .3, angle=45, theta1=0, theta2=45))
```
works just fine. Remove the check, and adjust the docs accordingly.
On the other hand, large arcs *did* previously fail,
but that occurred a bit further down, when computing
`transforms.BboxTransformTo(self.axes.bbox)` (`self.axes` is None -->
AttributeError). Fix that by using the figure bbox in that case (as the
point is to limit the drawing to the unclipped area, which is the whole
figure for Arcs without a parent Axes). | draw | cf995d1304bfa7f660e7158b5121a46e54f869f2 | matplotlib | patches.py | 9 | 50 | https://github.com/matplotlib/matplotlib.git | 11 | 404 | 0 | 31 | 74 | Python | {
"docstring": "\n Draw the arc to the given *renderer*.\n\n Notes\n -----\n Ellipses are normally drawn using an approximation that uses\n eight cubic Bezier splines. The error of this approximation\n is 1.89818e-6, according to this unverified source:\n\n Lancaster, Don. *Approximating a Circle or an Ellipse Using\n Four Bezier Cubic Splines.*\n\n https://www.tinaja.com/glib/ellipse4.pdf\n\n There is a use case where very large ellipses must be drawn\n with very high accuracy, and it is too expensive to render the\n entire ellipse with enough segments (either splines or line\n segments). Therefore, in the case where either radius of the\n ellipse is large enough that the error of the spline\n approximation will be visible (greater than one pixel offset\n from the ideal), a different technique is used.\n\n In that case, only the visible parts of the ellipse are drawn,\n with each visible arc using a fixed number of spline segments\n (8). The algorithm proceeds as follows:\n\n 1. The points where the ellipse intersects the axes (or figure)\n bounding box are located. (This is done by performing an inverse\n transformation on the bbox such that it is relative to the unit\n circle -- this makes the intersection calculation much easier than\n doing rotated ellipse intersection directly.)\n\n This uses the \"line intersecting a circle\" algorithm from:\n\n Vince, John. *Geometry for Computer Graphics: Formulae,\n Examples & Proofs.* London: Springer-Verlag, 2005.\n\n 2. The angles of each of the intersection points are calculated.\n\n 3. Proceeding counterclockwise starting in the positive\n x-direction, each of the visible arc-segments between the\n pairs of vertices are drawn using the Bezier arc\n approximation technique implemented in `.Path.arc`.\n ",
"language": "en",
"n_whitespaces": 541,
"n_words": 258,
"vocab_size": 160
} | def draw(self, renderer):
if not self.get_visible():
return
self._recompute_transform()
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
# If the width and height of ellipse are not equal, take into account
# stretching when calculating angles to draw between |
|
76,944 | 261,701 | 519 | sklearn/utils/_param_validation.py | 160 | 18 | def validate_parameter_constraints(parameter_constraints, params, caller_name):
for param_name, param_val in params.items():
# We allow parameters to not have a constraint so that third party estimators
# can inherit from sklearn estimators without having to necessarily use the
# validation tools.
if param_name not in parameter_constraints:
continue
constraints = parameter_constraints[param_name]
if constraints == "no_validation":
continue
constraints = [make_constraint(constraint) for constraint in constraints]
for constraint in constraints:
if constraint.is_satisfied_by(param_val):
# this constraint is satisfied, no need to check further.
break
else:
| MAINT Make param validation more lenient towards downstream dependencies (#25088) | validate_parameter_constraints | 5e25f8e06dcf853d4079dbefa40e3e9558a1d976 | scikit-learn | _param_validation.py | 25 | 32 | https://github.com/scikit-learn/scikit-learn.git | 11 | 137 | 0 | 105 | 261 | Python | {
"docstring": "Validate types and values of given parameters.\n\n Parameters\n ----------\n parameter_constraints : dict or {\"no_validation\"}\n If \"no_validation\", validation is skipped for this parameter.\n\n If a dict, it must be a dictionary `param_name: list of constraints`.\n A parameter is valid if it satisfies one of the constraints from the list.\n Constraints can be:\n - an Interval object, representing a continuous or discrete range of numbers\n - the string \"array-like\"\n - the string \"sparse matrix\"\n - the string \"random_state\"\n - callable\n - None, meaning that None is a valid value for the parameter\n - any type, meaning that any instance of this type is valid\n - an Options object, representing a set of elements of a given type\n - a StrOptions object, representing a set of strings\n - the string \"boolean\"\n - the string \"verbose\"\n - the string \"cv_object\"\n - the string \"missing_values\"\n - a HasMethods object, representing method(s) an object must have\n - a Hidden object, representing a constraint not meant to be exposed to the user\n\n params : dict\n A dictionary `param_name: param_value`. The parameters to validate against the\n constraints.\n\n caller_name : str\n The name of the estimator or function or method that called this function.\n ",
"language": "en",
"n_whitespaces": 367,
"n_words": 195,
"vocab_size": 103
} | def validate_parameter_constraints(parameter_constraints, params, caller_name):
for param_name, param_val in params.items():
# We allow parameters to not have a constraint so that third party estimators
# can inherit from sklearn estimators without having to necessarily use the
# validation tools.
if param_name not in parameter_constraints:
continue
constraints = parameter_constraints[param_name]
if constraints == "no_validation":
continue
constraints = [make_constraint(constraint) for constraint in constraints]
for constraint in constraints:
if constraint.is_satisfied_by(param_val):
# this constraint is satisfied, no need to check further.
break
else:
# No constraint is satisfied, raise with an informative message.
# Ignore constraints that we don't want to expose in the error message,
# i.e. options that are for internal purpose or not officially supported.
constraints = [
constraint for constraint in constraints if not constraint.hidden
]
if len(constraints) == 1:
constraints_str = f"{constraints[0]}"
else:
constraints_str = (
f"{', '.join([str(c) for c in constraints[:-1]])} or"
f" {constraints[-1]}"
)
raise ValueError(
f"The {param_name!r} parameter of {caller_name} must be"
f" {constraints_str}. Got {param_val!r} instead."
)
|
|
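As a hedged illustration of the constraints dictionary this validator consumes, using constraint kinds named in the docstring above (`Interval`, `StrOptions` and the validator itself live in the private `sklearn.utils._param_validation` module, so treat this as a sketch rather than public API):

```python
# Sketch: a parameter_constraints dict pairing each parameter with the list of
# specifications it may satisfy, then validated against concrete values.
# Assumes Interval, StrOptions and validate_parameter_constraints are imported
# from sklearn.utils._param_validation (a private module).
from numbers import Integral, Real

parameter_constraints = {
    "n_components": [Interval(Integral, 1, None, closed="left"), None],
    "solver": [StrOptions({"svd", "eigen"})],
    "tol": [Interval(Real, 0, None, closed="left")],
    "verbose": ["verbose"],
}

validate_parameter_constraints(
    parameter_constraints,
    params={"n_components": 2, "solver": "svd", "tol": 1e-4, "verbose": 0},
    caller_name="SomeEstimator",  # hypothetical caller name used in error messages
)  # passes silently; an out-of-range or wrong-typed value raises ValueError
```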
77,461 | 263,844 | 162 | PyInstaller/building/build_main.py | 92 | 13 | def _get_module_collection_mode(mode_dict, name):
mode = 'pyc' # Default mode
# No settings available - return default.
if not mode_dict:
return mode
# Search the parent modules/pa | building & hooks: implement module collection mode setting
Implement a mechanism for controlling the collection mode of
modules and packages, with granularity ranging from top-level
packages to individual sub-modules. Therefore, the hooks can
now specify whether the hooked package should be collected as
byte-compiled .pyc modules into embedded PYZ archive (the
default behavior), or as source .py files collected as external
data files (without corresponding modules in the PYZ archive).
The latter option should let us avoid unnecessary .pyc module
collection when the source files are required by the code, or
work around the situations where having a .pyc module in
PYZ archive causes issues due to FrozenImporter's incompatibility
with sys.path manipulation that some packages attempt to perform.
This feature adds a new optional global hook variable, called
`module_collection_mode`. The value can be either a string
("py" or "pyc") or a dictionary of module names and setting
strings.
In the case of a string, the setting affects the hooked module
or a package, and is applied recursively to all sub-packages and
sub-modules, unless another hook overrides it.
The dictionary setting allows a hook to specify different settings
for the package and it subpackages, or even different settings
for other packages.
A corresponding `set_module_collection_mode` method has been
added to the `hook_api` object for adjusting the collection
mode from within the `hook()` function.
The `Analysis` object can now also be passed a dictionary via
an optional `module_collection_mode` argument; the corresponding
settings are applied last, which allows advanced users to both
supplement and override the settings made by the hooks. | _get_module_collection_mode | 5b2ab7067ba954bd7950a79ed31e5ee177ff3f43 | pyinstaller | build_main.py | 13 | 11 | https://github.com/pyinstaller/pyinstaller.git | 4 | 71 | 0 | 64 | 125 | Python | {
"docstring": "\n Determine the module/package collection mode for the given module name , based on the provided collection\n mode settings dictionary.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 19,
"vocab_size": 15
} | def _get_module_collection_mode(mode_dict, name):
mode = 'pyc' # Default mode
# No settings available - return default.
if not mode_dict:
return mode
# Search the parent modules/packages in top-down fashion, and take the last given setting. This ensures that
# a setting given for the top-level package is recursively propagated to all its subpackages and submodules,
# but also allows individual sub-modules to override the setting again.
name_parts = name.split('.')
for i in range(len(name_parts)):
modlevel = ".".join(name_parts[:i + 1])
modlevel_mode = mode_dict.get(modlevel, None)
if modlevel_mode is not None:
mode = modlevel_mode
return mode
|
|
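As an illustrative sketch of the hook mechanism described in the commit message above (package names are placeholders; the keys and values follow the description of `module_collection_mode`, not a verified real hook):

```python
# Sketch of a PyInstaller hook file (e.g. hook-mypkg.py) using the global
# module_collection_mode variable described above. "mypkg" is a placeholder name.
module_collection_mode = {
    "mypkg": "py",                 # collect the package (recursively) as source .py files
    "mypkg.compiled_part": "pyc",  # but keep this sub-package byte-compiled in the PYZ
}

# Per the commit message, the same mapping can also be passed to Analysis in a
# .spec file via its optional module_collection_mode argument, which is applied
# last and can override hook-provided settings.
```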
81,781 | 276,928 | 46 | keras/utils/kernelized_utils.py | 25 | 13 | def exact_gaussian_kernel(x, y, stddev):
r
x_aligned, y_aligned = _align_matrices(x, y) | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | exact_gaussian_kernel | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | kernelized_utils.py | 11 | 29 | https://github.com/keras-team/keras.git | 1 | 56 | 0 | 23 | 85 | Python | {
"docstring": "Computes exact Gaussian kernel value(s) for tensors x and y and stddev.\n\n The Gaussian kernel for vectors u, v is defined as follows:\n K(u, v) = exp(-||u-v||^2 / (2* stddev^2))\n where the norm is the l2-norm. x, y can be either vectors or matrices. If they\n are vectors, they must have the same dimension. If they are matrices, they\n must have the same number of columns. In the latter case, the method returns\n (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and\n v is a row from y.\n\n Args:\n x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].\n y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].\n stddev: The width of the Gaussian kernel.\n\n Returns:\n A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix\n of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for\n all (u,v) pairs where u, v are rows from x and y respectively.\n\n Raises:\n ValueError: if the shapes of x, y are not compatible.\n ",
"language": "en",
"n_whitespaces": 273,
"n_words": 196,
"vocab_size": 107
} | def exact_gaussian_kernel(x, y, stddev):
r
x_aligned, y_aligned = _align_matrices(x, y)
diff_squared_l2_norm = tf.reduce_sum(
tf.math.squared_difference(x_aligned, y_aligned), 2
)
return tf.exp(-diff_squared_l2_norm / (2 * stddev * stddev))
|
|
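As a stand-alone numeric illustration of the kernel formula quoted in the docstring above; this is a plain NumPy re-implementation of K(u, v) = exp(-||u-v||^2 / (2*stddev^2)), not the Keras helper itself:

```python
# Sketch: the Gaussian (RBF) kernel matrix from the docstring's formula,
# computed directly with NumPy for x of shape (m, dim) and y of shape (n, dim).
import numpy as np

def gaussian_kernel_matrix(x, y, stddev):
    # K[i, j] = exp(-||x_i - y_j||^2 / (2 * stddev**2))
    diff = x[:, None, :] - y[None, :, :]    # shape (m, n, dim)
    sq_norm = np.sum(diff * diff, axis=-1)  # shape (m, n)
    return np.exp(-sq_norm / (2.0 * stddev * stddev))

x = np.array([[0.0, 0.0], [1.0, 0.0]])
y = np.array([[0.0, 0.0], [0.0, 2.0], [3.0, 4.0]])
print(gaussian_kernel_matrix(x, y, stddev=1.0))  # (2, 3) kernel matrix
```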
78,221 | 265,867 | 190 | netbox/extras/models/models.py | 42 | 27 | def enqueue_job(cls, func, name, obj_type, user, schedule_at=None, *args, **kwargs):
job_result: JobResult = cls.objects.create(
name=name,
obj_type=obj_type,
user=user,
job_id=uuid.uuid4()
)
queue = django_rq.get_queue("default")
if schedule_at:
job_result.status = JobResultStatusChoices.STATUS_SCHEDULED
job_result.scheduled_time = schedule_at
job_result.save()
queue.enqueue_at(schedule_at, func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)
else:
queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)
return job_result
| Job scheduling review changes | enqueue_job | ed2f7f12369fe0b8f2dc2a5910840c928126a1b8 | netbox | models.py | 14 | 16 | https://github.com/netbox-community/netbox.git | 2 | 132 | 0 | 35 | 199 | Python | {
"docstring": "\n Create a JobResult instance and enqueue a job using the given callable\n\n func: The callable object to be enqueued for execution\n name: Name for the JobResult instance\n obj_type: ContentType to link to the JobResult instance obj_type\n user: User object to link to the JobResult instance\n schedule_at: Schedule the job to be executed at the passed date and time\n args: additional args passed to the callable\n kwargs: additional kargs passed to the callable\n ",
"language": "en",
"n_whitespaces": 136,
"n_words": 72,
"vocab_size": 39
} | def enqueue_job(cls, func, name, obj_type, user, schedule_at=None, *args, **kwargs):
job_result: JobResult = cls.objects.create(
name=name,
obj_type=obj_type,
user=user,
job_id=uuid.uuid4()
)
queue = django_rq.get_queue("default")
if schedule_at:
job_result.status = JobResultStatusChoices.STATUS_SCHEDULED
job_result.scheduled_time = schedule_at
job_result.save()
queue.enqueue_at(schedule_at, func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)
else:
queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)
return job_result
|
|
44,160 | 183,357 | 73 | src/textual/dom.py | 27 | 8 | def text_style(self) -> Style:
# TODO: Feels like there may be opportunity for caching here.
style = Style()
for node in reversed(self.an | auto sizing | text_style | efd4273a4ca8282b677e43f4732013e60926753b | textual | dom.py | 10 | 14 | https://github.com/Textualize/textual.git | 2 | 32 | 0 | 24 | 55 | Python | {
"docstring": "Get the text style object.\n\n A widget's style is influenced by its parent. For instance if a widgets background has an alpha,\n then its parent's background color will show through. Additionally, widgets will inherit their\n parent's text style (i.e. bold, italic etc).\n\n Returns:\n Style: Rich Style object.\n ",
"language": "en",
"n_whitespaces": 93,
"n_words": 47,
"vocab_size": 38
} | def text_style(self) -> Style:
# TODO: Feels like there may be opportunity for caching here.
style = Style()
for node in reversed(self.ancestors):
style += node.styles.text_style
return style
|
|
50,401 | 203,482 | 94 | django/contrib/admin/sites.py | 34 | 13 | def get_app_list(self, request):
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically. | Refs #33476 -- Reformatted code with Black. | get_app_list | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | sites.py | 14 | 6 | https://github.com/django/django.git | 2 | 64 | 0 | 28 | 110 | Python | {
"docstring": "\n Return a sorted list of all the installed apps that have been\n registered in this site.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 16
} | def get_app_list(self, request):
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x["name"].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app["models"].sort(key=lambda x: x["name"])
return app_list
|
|
56,635 | 222,558 | 431 | python3.10.4/Lib/distutils/ccompiler.py | 241 | 6 | def find_library_file (self, dirs, lib, debug=0):
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extensio | add python 3.10.4 for windows | find_library_file | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | ccompiler.py | 6 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 16 | 0 | 128 | 57 | Python | {
"docstring": "Search the specified list of directories for a static or shared\n library file 'lib' and return the full path to that file. If\n 'debug' true, look for a debugging version (if that makes sense on\n the current platform). Return None if 'lib' wasn't found in any of\n the specified directories.\n ",
"language": "en",
"n_whitespaces": 87,
"n_words": 50,
"vocab_size": 41
} | def find_library_file (self, dirs, lib, debug=0):
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
|
|
71,988 | 247,900 | 200 | tests/storage/databases/main/test_lock.py | 74 | 13 | def test_simple_lock(self):
# First to acquire this lock, so it should complete
lock = self.get_success(self.store.try_acquire_lock("name", "key"))
assert lock is not None
# Enter the context manager
self.get_success(lock.__aenter__())
# Attempting to acquire the lock again fails.
lock2 = self.get_success(self.store.try_acquire_lock("name", "key"))
self.assertIsNone(lock2)
# Calling `is_still_valid` reports true.
self.assertTrue(self.get_success(lock.is_still_valid()))
# Drop the lock
self.get_success(lock.__aexit__(None, None, None))
# We can now acquire the lock again.
lock3 = self.get_success(self.store.try_acquire_lock("name", "key"))
assert lock3 is not None
self.get_success(lock3.__aenter__())
self.get_success(lock3.__aexit__(None, None, None))
| Add type hints for `tests/unittest.py`. (#12347)
In particular, add type hints for get_success and friends, which are then helpful in a bunch of places. | test_simple_lock | f0b03186d96305fd44d74a89bf4230beec0c5c31 | synapse | test_lock.py | 11 | 12 | https://github.com/matrix-org/synapse.git | 1 | 138 | 0 | 46 | 236 | Python | {
"docstring": "Test that we can take out a lock and that while we hold it nobody\n else can take it out.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 20,
"vocab_size": 15
} | def test_simple_lock(self):
# First to acquire this lock, so it should complete
lock = self.get_success(self.store.try_acquire_lock("name", "key"))
assert lock is not None
# Enter the context manager
self.get_success(lock.__aenter__())
# Attempting to acquire the lock again fails.
lock2 = self.get_success(self.store.try_acquire_lock("name", "key"))
self.assertIsNone(lock2)
# Calling `is_still_valid` reports true.
self.assertTrue(self.get_success(lock.is_still_valid()))
# Drop the lock
self.get_success(lock.__aexit__(None, None, None))
# We can now acquire the lock again.
lock3 = self.get_success(self.store.try_acquire_lock("name", "key"))
assert lock3 is not None
self.get_success(lock3.__aenter__())
self.get_success(lock3.__aexit__(None, None, None))
|
|
23,536 | 109,341 | 69 | lib/matplotlib/font_manager.py | 19 | 8 | def set_family(self, family):
if family is None:
family | Get rcParams from mpl | set_family | 438d30b227b1fef7e8733578f851e76a8e360f24 | matplotlib | font_manager.py | 10 | 6 | https://github.com/matplotlib/matplotlib.git | 3 | 39 | 0 | 13 | 65 | Python | {
"docstring": "\n Change the font family. May be either an alias (generic name\n is CSS parlance), such as: 'serif', 'sans-serif', 'cursive',\n 'fantasy', or 'monospace', a real font name or a list of real\n font names. Real font names are not supported when\n :rc:`text.usetex` is `True`. Default: :rc:`font.family`\n ",
"language": "en",
"n_whitespaces": 90,
"n_words": 45,
"vocab_size": 37
} | def set_family(self, family):
if family is None:
family = mpl.rcParams['font.family']
if isinstance(family, str):
family = [family]
self._family = family
|
|
24,992 | 113,656 | 26 | nni/compression/pytorch/quantization/qat_quantizer.py | 17 | 4 | def update_ema(biased_ema, value, decay):
| [Compression] remove pruning v1 & refactor directory (#5228) | update_ema | d68c786ff81bad19c04619d6a999ff34aaa724e7 | nni | qat_quantizer.py | 10 | 3 | https://github.com/microsoft/nni.git | 1 | 25 | 0 | 14 | 40 | Python | {
"docstring": "\n calculate biased stat and unbiased stat in each step using exponential moving average method\n\n Parameters\n ----------\n biased_ema : float\n previous stat value\n value : float\n current stat value\n decay : float\n the weight of previous stat value, larger means smoother curve\n\n Returns\n -------\n float, float\n ",
"language": "en",
"n_whitespaces": 97,
"n_words": 45,
"vocab_size": 33
} | def update_ema(biased_ema, value, decay):
biased_ema = biased_ema * decay + (1 - decay) * value
return biased_ema
|
|
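As an aside on the exponential moving average above: the docstring mentions both a biased and an unbiased statistic, and the standard debiasing step (the same correction used by Adam) is sketched below as an assumption, not as NNI's exact implementation:

```python
# Sketch: pairing update_ema with the usual bias correction, so early steps are
# not pulled toward the zero initialisation. Assumes update_ema from the record above.
def debias_ema(biased_ema, decay, step):
    return biased_ema / (1.0 - decay ** step)

ema = 0.0
for step, value in enumerate([1.0, 2.0, 3.0], start=1):
    ema = update_ema(ema, value, decay=0.9)
    print(step, ema, debias_ema(ema, decay=0.9, step=step))
```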
47,456 | 195,869 | 318 | sympy/ntheory/partitions_.py | 158 | 38 | def npartitions(n, verbose=False):
n = int(n)
if n < 0:
return 0
if n <= 5:
return [1, 1, 2, 3, 5, 7][n]
if '_factor' not in globals():
_pre()
# Estimate number of bits in p(n). This formula could be tidied
pbits = int((
math.pi*(2*n/3.)**0.5 -
math.log(4*n))/math.log(10) + 1) * \
math.log(10, 2)
prec = p = int(pbits*1.1 + 100)
s = fzero
M = max(6, int(0.24*n**0.5 + 4))
if M > 10**5:
raise ValueError("Input too big") # Corresponds to n > 1.7e11
sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p)
sqrt8 = mpf_sqrt(from_int(8), p)
for q in range(1, M):
a = _a(n, q, p)
d = _d(n, q, p, sq23pi, sqrt8)
s = mpf_add(s, mpf_mul(a, d), prec)
if verbose:
print("step", q, "of", M, to_str(a, 10), to_str(d, 10))
# On average, the terms decrease rapidly in magnitude.
# Dynamically reducing the precision greatly improves
# performance.
p = bitcount(abs(to_int(d))) + 50
return int(to_int(mpf_add(s, fhalf, prec)))
_ | Improved documentation formatting | npartitions | cda8dfe6f45dc5ed394c2f5cda706cd6c729f713 | sympy | partitions_.py | 18 | 27 | https://github.com/sympy/sympy.git | 7 | 298 | 0 | 116 | 450 | Python | {
"docstring": "\n Calculate the partition function P(n), i.e. the number of ways that\n n can be written as a sum of positive integers.\n\n P(n) is computed using the Hardy-Ramanujan-Rademacher formula [1]_.\n\n\n The correctness of this implementation has been tested through $10^10$.\n\n Examples\n ========\n\n >>> from sympy.ntheory import npartitions\n >>> npartitions(25)\n 1958\n\n References\n ==========\n\n .. [1] http://mathworld.wolfram.com/PartitionFunctionP.html\n\n ",
"language": "en",
"n_whitespaces": 94,
"n_words": 54,
"vocab_size": 49
} | def npartitions(n, verbose=False):
n = int(n)
if n < 0:
return 0
if n <= 5:
return [1, 1, 2, 3, 5, 7][n]
if '_factor' not in globals():
_pre()
# Estimate number of bits in p(n). This formula could be tidied
pbits = int((
math.pi*(2*n/3.)**0.5 -
math.log(4*n))/math.log(10) + 1) * \
math.log(10, 2)
prec = p = int(pbits*1.1 + 100)
s = fzero
M = max(6, int(0.24*n**0.5 + 4))
if M > 10**5:
raise ValueError("Input too big") # Corresponds to n > 1.7e11
sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p)
sqrt8 = mpf_sqrt(from_int(8), p)
for q in range(1, M):
a = _a(n, q, p)
d = _d(n, q, p, sq23pi, sqrt8)
s = mpf_add(s, mpf_mul(a, d), prec)
if verbose:
print("step", q, "of", M, to_str(a, 10), to_str(d, 10))
# On average, the terms decrease rapidly in magnitude.
# Dynamically reducing the precision greatly improves
# performance.
p = bitcount(abs(to_int(d))) + 50
return int(to_int(mpf_add(s, fhalf, prec)))
__all__ = ['npartitions']
|
|
79,642 | 268,742 | 45 | test/lib/ansible_test/_internal/host_profiles.py | 17 | 13 | def build_sleep_command(self) -> list[str]:
doc | ansible-test - Improve container management. (#78550)
See changelogs/fragments/ansible-test-container-management.yml for details. | build_sleep_command | cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc | ansible | host_profiles.py | 11 | 15 | https://github.com/ansible/ansible.git | 1 | 46 | 0 | 16 | 90 | Python | {
"docstring": "\n Build and return the command to put the container to sleep.\n\n The sleep duration below was selected to:\n\n - Allow enough time to perform necessary operations in the container before waking it.\n - Make the delay obvious if the wake command doesn't run or succeed.\n - Avoid hanging indefinitely or for an unreasonably long time.\n\n NOTE: The container must have a POSIX-compliant default shell \"sh\" with a non-builtin \"sleep\" command.\n ",
"language": "en",
"n_whitespaces": 126,
"n_words": 70,
"vocab_size": 56
} | def build_sleep_command(self) -> list[str]:
docker_pull(self.args, self.config.image)
inspect = docker_image_inspect(self.args, self.config.image)
return ['sh', '-c', f'sleep 60; exec {shlex.join(inspect.cmd)}']
|
|
30,874 | 136,321 | 456 | rllib/evaluation/tests/test_envs_that_crash.py | 94 | 23 | def test_crash_only_one_worker_during_sampling_but_ignore(self):
config = (
pg.PGConfig()
.rollouts(
num_rollout_workers=2,
num_envs_per_worker=3,
# Ignore worker failures (continue with worker #2).
ignore_worker_failures=True,
)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=80%.
"p_crash": 0.8,
# Only crash on worker with index 1.
"crash_on_worker_indices": [1],
# Make sure nothing happens during pre-checks.
"skip_env_checking": True,
},
)
.debugging(worker_cls=ForwardHealthCheckToEnvWorker)
)
# Pre-checking disables, so building the Algorithm is save.
algo = config.build()
# Expect some errors being logged here, but in gener | [RLlib] Fault tolerant and elastic WorkerSets used across RLlib's algorithms (for sampling and evaluation). (#30118) | test_crash_only_one_worker_during_sampling_but_ignore | 76cb42c578adf19a70a6b4401098a7a21e0d3b29 | ray | test_envs_that_crash.py | 15 | 22 | https://github.com/ray-project/ray.git | 1 | 98 | 0 | 78 | 164 | Python | {
"docstring": "Expect some sub-envs to fail (and not recover), but ignore.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_crash_only_one_worker_during_sampling_but_ignore(self):
config = (
pg.PGConfig()
.rollouts(
num_rollout_workers=2,
num_envs_per_worker=3,
# Ignore worker failures (continue with worker #2).
ignore_worker_failures=True,
)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=80%.
"p_crash": 0.8,
# Only crash on worker with index 1.
"crash_on_worker_indices": [1],
# Make sure nothing happens during pre-checks.
"skip_env_checking": True,
},
)
.debugging(worker_cls=ForwardHealthCheckToEnvWorker)
)
# Pre-checking disables, so building the Algorithm is save.
algo = config.build()
# Expect some errors being logged here, but in general, should continue
# as we ignore worker failures.
algo.train()
# One worker has been removed -> Only one left.
self.assertEqual(algo.workers.num_healthy_remote_workers(), 1)
algo.stop()
|
|
75,660 | 259,225 | 77 | sklearn/preprocessing/tests/test_encoders.py | 45 | 17 | def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop):
X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 | ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment regarding drop
* BUG Fixes method name
* DOC Clearer docstring
* TST Adds more tests
* FIX Fixes mege
* CLN More pythonic
* CLN Address comments
* STY Flake8
* CLN Address comments
* DOC Fix
* MRG
* WIP
* ENH Address comments
* STY Fix
* ENH Use functiion call instead of property
* ENH Adds counts feature
* CLN Rename variables
* DOC More details
* CLN Remove unneeded line
* CLN Less lines is less complicated
* CLN Less diffs
* CLN Improves readiabilty
* BUG Fix
* CLN Address comments
* TST Fix
* CLN Address comments
* CLN Address comments
* CLN Move docstring to userguide
* DOC Better wrapping
* TST Adds test to handle_unknown='error'
* ENH Spelling error in docstring
* BUG Fixes counter with nan values
* BUG Removes unneeded test
* BUG Fixes issue
* ENH Sync with main
* DOC Correct settings
* DOC Adds docstring
* DOC Immprove user guide
* DOC Move to 1.0
* DOC Update docs
* TST Remove test
* DOC Update docstring
* STY Linting
* DOC Address comments
* ENH Neater code
* DOC Update explaination for auto
* Update sklearn/preprocessing/_encoders.py
Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com>
* TST Uses docstring instead of comments
* TST Remove call to fit
* TST Spelling error
* ENH Adds support for drop + infrequent categories
* ENH Adds infrequent_if_exist option
* DOC Address comments for user guide
* DOC Address comments for whats_new
* DOC Update docstring based on comments
* CLN Update test with suggestions
* ENH Adds computed property infrequent_categories_
* DOC Adds where the infrequent column is located
* TST Adds more test for infrequent_categories_
* DOC Adds docstring for _compute_drop_idx
* CLN Moves _convert_to_infrequent_idx into its own method
* TST Increases test coverage
* TST Adds failing test
* CLN Careful consideration of dropped and inverse_transform
* STY Linting
* DOC Adds docstrinb about dropping infrequent
* DOC Uses only
* DOC Numpydoc
* TST Includes test for get_feature_names_out
* DOC Move whats new
* DOC Address docstring comments
* DOC Docstring changes
* TST Better comments
* TST Adds check for handle_unknown='ignore' for infrequent
* CLN Make _infrequent_indices private
* CLN Change min_frequency default to None
* DOC Adds comments
* ENH adds support for max_categories=1
* ENH Describe lexicon ordering for ties
* DOC Better docstring
* STY Fix
* CLN Error when explicity dropping an infrequent category
* STY Grammar
Co-authored-by: Joel Nothman <joel.nothman@gmail.com>
Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | test_ohe_infrequent_three_levels_drop_infrequent_errors | 7f0006c8aad1a09621ad19c3db19c3ff0555a183 | scikit-learn | test_encoders.py | 16 | 8 | https://github.com/scikit-learn/scikit-learn.git | 1 | 82 | 0 | 38 | 149 | Python | {
"docstring": "Test three levels and dropping the infrequent category.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop):
X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
ohe = OneHotEncoder(
handle_unknown="infrequent_if_exist", sparse=False, max_categories=3, drop=drop
)
msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent"
with pytest.raises(ValueError, match=msg):
ohe.fit(X_train)
|
|
120,552 | 334,185 | 79 | utils/check_repo.py | 30 | 4 | def is_a_private_model(model):
if model in PRIVATE_MODELS:
| upload some cleaning tools | is_a_private_model | 95f4256fc905b6e29e5ea0f245dcf88f72a9ddd1 | diffusers | check_repo.py | 8 | 10 | https://github.com/huggingface/diffusers.git | 5 | 45 | 0 | 20 | 82 | Python | {
"docstring": "Returns True if the model should not be in the main init.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def is_a_private_model(model):
if model in PRIVATE_MODELS:
return True
# Wrapper, Encoder and Decoder are all privates
if model.endswith("Wrapper"):
return True
if model.endswith("Encoder"):
return True
if model.endswith("Decoder"):
return True
return False
|
|
@freeze_time("2019-01-10 10:00:00-08:00") | 93,329 | 294,292 | 78 | tests/components/tod/test_binary_sensor.py | 33 | 10 | async def test_midnight_turnover_before_midnight_outside_period(hass):
config = {
"binary_sensor": [
{"platform": "tod", "name": "Night", "after": "22:00", "before": "5:00"}
]
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
state = hass.states.get("bina | Update Times of the Day tests to use freezegun (#68327) | test_midnight_turnover_before_midnight_outside_period | 23a630e0bcbd2aec6a598a19ebaf2929eba97e5b | core | test_binary_sensor.py | 12 | 10 | https://github.com/home-assistant/core.git | 1 | 62 | 1 | 31 | 131 | Python | {
"docstring": "Test midnight turnover setting before midnight outside period.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | async def test_midnight_turnover_before_midnight_outside_period(hass):
config = {
"binary_sensor": [
{"platform": "tod", "name": "Night", "after": "22:00", "before": "5:00"}
]
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.night")
assert state.state == STATE_OFF
@freeze_time("2019-01-10 10:00:00-08:00") |
29,327 | 130,601 | 94 | python/ray/data/impl/block_list.py | 28 | 4 | def _check_if_cleared(self) -> None:
if self._blocks is None:
raise ValueError(
"This Dataset's blocks have been moved, which means that you "
"can no longer use this Dataset."
)
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _check_if_cleared | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | block_list.py | 11 | 7 | https://github.com/ray-project/ray.git | 2 | 21 | 0 | 27 | 41 | Python | {
"docstring": "Raise an error if this BlockList has been previously cleared.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _check_if_cleared(self) -> None:
if self._blocks is None:
raise ValueError(
"This Dataset's blocks have been moved, which means that you "
"can no longer use this Dataset."
)
|
|
71,954 | 247,834 | 155 | tests/rest/client/test_sync.py | 38 | 17 | def test_join_leave(self) -> None:
channel = self.make_request("GET", "/sync", access_token=self.tok)
self.assertEqual(channel.code, 200, channel.result)
self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"])
self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"])
self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok)
self.helper.leave(self.included_room_id, self.user_id, tok=self.tok)
channel = self.make_request(
"GET",
"/sync?since=" + channel.json_body["next_batch"],
access_token=self.tok,
)
| Add a configuration to exclude rooms from sync response (#12310) | test_join_leave | 437a8ed9efdf8f1aefa092d0761076da3ae78100 | synapse | test_sync.py | 11 | 18 | https://github.com/matrix-org/synapse.git | 1 | 188 | 0 | 27 | 301 | Python | {
"docstring": "Tests that rooms are correctly excluded from the 'join' and 'leave' sections of\n sync responses.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 15
} | def test_join_leave(self) -> None:
channel = self.make_request("GET", "/sync", access_token=self.tok)
self.assertEqual(channel.code, 200, channel.result)
self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"])
self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"])
self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok)
self.helper.leave(self.included_room_id, self.user_id, tok=self.tok)
channel = self.make_request(
"GET",
"/sync?since=" + channel.json_body["next_batch"],
access_token=self.tok,
)
self.assertEqual(channel.code, 200, channel.result)
self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["leave"])
self.assertIn(self.included_room_id, channel.json_body["rooms"]["leave"])
|
|
15,992 | 73,213 | 38 | wagtail/contrib/modeladmin/tests/test_page_modeladmin.py | 10 | 7 | def test_short_description_is_used_as_field_label(self):
| Reformat with black | test_short_description_is_used_as_field_label | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_page_modeladmin.py | 9 | 4 | https://github.com/wagtail/wagtail.git | 1 | 32 | 0 | 10 | 59 | Python | {
"docstring": "\n A custom field has been added to the inspect view's `inspect_view_fields` and since\n this field has a `short_description` we expect it to be used as the field's label,\n and not use the name of the function.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 36,
"vocab_size": 29
} | def test_short_description_is_used_as_field_label(self):
response = self.client.get("/admin/modeladmintest/author/inspect/1/")
self.assertContains(response, "Birth information")
self.assertNotContains(response, "author_birth_string")
|
|
14,584 | 67,630 | 103 | erpnext/stock/doctype/item/item.py | 127 | 14 | def check_stock_uom_with_bin(item, stock_uom):
if stock_uom == frappe.db.get_value("Item", item, "stock_uom"):
return
ref_uom = frappe.db.get_value("Stock Ledger Entry", {"item_code": item}, "stock_uom")
if ref_uom:
if cstr(ref_uom) != cstr(stock_uom):
frappe.throw(
_(
"Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM."
).format(item)
)
bin_list = frappe.db.sql(
,
(item, stock_uom),
| style: format code with black | check_stock_uom_with_bin | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | item.py | 16 | 27 | https://github.com/frappe/erpnext.git | 5 | 122 | 0 | 84 | 204 | Python | {
"docstring": "\n\t\t\tselect * from tabBin where item_code = %s\n\t\t\t\tand (reserved_qty > 0 or ordered_qty > 0 or indented_qty > 0 or planned_qty > 0)\n\t\t\t\tand stock_uom != %s\n\t\t\tupdate tabBin set stock_uom=%s where item_code=%s",
"language": "en",
"n_whitespaces": 30,
"n_words": 34,
"vocab_size": 23
} | def check_stock_uom_with_bin(item, stock_uom):
if stock_uom == frappe.db.get_value("Item", item, "stock_uom"):
return
ref_uom = frappe.db.get_value("Stock Ledger Entry", {"item_code": item}, "stock_uom")
if ref_uom:
if cstr(ref_uom) != cstr(stock_uom):
frappe.throw(
_(
"Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM."
).format(item)
)
bin_list = frappe.db.sql(
,
(item, stock_uom),
as_dict=1,
)
if bin_list:
frappe.throw(
_(
"Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You need to either cancel the linked documents or create a new Item."
).format(item)
)
# No SLE or documents against item. Bin UOM can be changed safely.
frappe.db.sql(, (stock_uom, item))
|
|
@click.command()
@click.option(
"--with-docs/--without-docs",
default=False,
help="Synchronize and repair embedded documentation. This " "is disabled by default.",
)
@configuration | 18,700 | 90,853 | 66 | src/sentry/runner/commands/repair.py | 33 | 16 | def fix_group_counters():
from django.db import connection
click.echo("Correcting Group.num_comments counter")
cursor = connection.cursor()
cursor.execute(
,
[ActivityType.NOTE.value],
)
@click.command()
@click.option(
"--with-docs/--without-docs",
default=False,
help="Synchronize and repa | ref(models): `ActivityType` (#34978)
## Objective:
We want to separate enum logic from Model logic. This breaks a lot of circular dependencies. | fix_group_counters | b9f5a910dc841b85f58d46266ec049ae5a7fd305 | sentry | repair.py | 10 | 13 | https://github.com/getsentry/sentry.git | 1 | 38 | 1 | 32 | 109 | Python | {
"docstring": "\n UPDATE sentry_groupedmessage SET num_comments = (\n SELECT COUNT(*) from sentry_activity\n WHERE type = %s and group_id = sentry_groupedmessage.id\n )\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 19,
"vocab_size": 17
} | def fix_group_counters():
from django.db import connection
click.echo("Correcting Group.num_comments counter")
cursor = connection.cursor()
cursor.execute(
,
[ActivityType.NOTE.value],
)
@click.command()
@click.option(
"--with-docs/--without-docs",
default=False,
help="Synchronize and repair embedded documentation. This " "is disabled by default.",
)
@configuration |
18,769 | 91,564 | 1,158 | src/sentry/search/events/filter.py | 307 | 53 | def get_filter(query=None, params=None, parser_config_overrides=None):
# NOTE: this function assumes project permissions check already happened
| fix(metric_alerts): Prevent date fields from being used as filters in metric alerts (#35762)
Since metric alerts are relative alerts based on the current time and the timestamp, it doesn't make
sense to filter on dates within them. There were also cases where people would use relative syntax
like `-24h`, which would convert to 24 hours before the current time and then store that absolute
value in the query. Accepting these filters is confusing because it implies that the relative date
would be applied to the alert.
Not sure if overrides is the best way to do this, but it's simple enough to implement this way. | get_filter | 0d29073264ceea2dd8b1528f98bedb6e6771d383 | sentry | filter.py | 19 | 87 | https://github.com/getsentry/sentry.git | 32 | 568 | 0 | 185 | 980 | Python | {
"docstring": "\n Returns an eventstore filter given the search text provided by the user and\n URL params\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 15,
"vocab_size": 14
} | def get_filter(query=None, params=None, parser_config_overrides=None):
# NOTE: this function assumes project permissions check already happened
parsed_terms = []
if query is not None:
try:
parsed_terms = parse_search_query(
query, params=params, config_overrides=parser_config_overrides
)
except ParseError as e:
raise InvalidSearchQuery(f"Parse error: {e.expr.name} (column {e.column():d})")
kwargs = {
"start": None,
"end": None,
"conditions": [],
"having": [],
"user_id": None,
"organization_id": None,
"team_id": [],
"project_ids": [],
"group_ids": [],
"condition_aggregates": [],
"aliases": params.get("aliases", {}) if params is not None else {},
}
projects_to_filter = []
if any(
isinstance(term, ParenExpression) or SearchBoolean.is_operator(term)
for term in parsed_terms
):
(
condition,
having,
found_projects_to_filter,
group_ids,
) = convert_search_boolean_to_snuba_query(parsed_terms, params)
if condition:
and_conditions = flatten_condition_tree(condition, SNUBA_AND)
for func in and_conditions:
kwargs["conditions"].append(convert_function_to_condition(func))
if having:
kwargs["condition_aggregates"] = [
term.key.name for term in parsed_terms if isinstance(term, AggregateFilter)
]
and_having = flatten_condition_tree(having, SNUBA_AND)
for func in and_having:
kwargs["having"].append(convert_function_to_condition(func))
if found_projects_to_filter:
projects_to_filter = list(set(found_projects_to_filter))
if group_ids is not None:
kwargs["group_ids"].extend(list(set(group_ids)))
else:
projects_to_filter = set()
for term in parsed_terms:
if isinstance(term, SearchFilter):
conditions, found_projects_to_filter, group_ids = format_search_filter(term, params)
if len(conditions) > 0:
kwargs["conditions"].extend(conditions)
if found_projects_to_filter:
projects_to_filter.update(found_projects_to_filter)
if group_ids is not None:
kwargs["group_ids"].extend(group_ids)
elif isinstance(term, AggregateFilter):
converted_filter = convert_aggregate_filter_to_snuba_query(term, params)
kwargs["condition_aggregates"].append(term.key.name)
if converted_filter:
kwargs["having"].append(converted_filter)
projects_to_filter = list(projects_to_filter)
# Keys included as url params take precedent if same key is included in search
# They are also considered safe and to have had access rules applied unlike conditions
# from the query string.
if params:
for key in ("start", "end"):
kwargs[key] = params.get(key, None)
if "user_id" in params:
kwargs["user_id"] = params["user_id"]
if "organization_id" in params:
kwargs["organization_id"] = params["organization_id"]
if "team_id" in params:
kwargs["team_id"] = params["team_id"]
# OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids
if "project_id" in params:
if projects_to_filter:
kwargs["project_ids"] = projects_to_filter
else:
kwargs["project_ids"] = params["project_id"]
if "environment" in params:
term = SearchFilter(SearchKey("environment"), "=", SearchValue(params["environment"]))
kwargs["conditions"].append(convert_search_filter_to_snuba_query(term))
if "group_ids" in params:
kwargs["group_ids"] = to_list(params["group_ids"])
# Deprecated alias, use `group_ids` instead
if ISSUE_ID_ALIAS in params:
kwargs["group_ids"] = to_list(params["issue.id"])
return eventstore.Filter(**kwargs)
|
|
4,094 | 21,956 | 139 | pipenv/patched/pip/_vendor/chardet/universaldetector.py | 36 | 14 | def reset(self):
self.result = {"en | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | reset | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | universaldetector.py | 10 | 13 | https://github.com/pypa/pipenv.git | 4 | 89 | 0 | 28 | 147 | Python | {
"docstring": "\n Reset the UniversalDetector and all of its probers back to their\n initial states. This is called by ``__init__``, so you only need to\n call this directly in between analyses of different documents.\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 32,
"vocab_size": 30
} | def reset(self):
self.result = {"encoding": None, "confidence": 0.0, "language": None}
self.done = False
self._got_data = False
self._has_win_bytes = False
self._input_state = InputState.PURE_ASCII
self._last_char = b""
if self._esc_charset_prober:
self._esc_charset_prober.reset()
if self._utf1632_prober:
self._utf1632_prober.reset()
for prober in self._charset_probers:
prober.reset()
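Assuming the standalone chardet package (of which this is pip's vendored copy) is installed, a typical multi-document loop that relies on reset() between inputs looks like this; the file names are hypothetical.

from chardet.universaldetector import UniversalDetector

detector = UniversalDetector()
for path in ["a.txt", "b.txt"]:
    detector.reset()                              # drop state left over from the previous document
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(4096), b""):
            detector.feed(chunk)
            if detector.done:                     # enough evidence gathered, stop early
                break
    detector.close()
    print(path, detector.result)                  # {'encoding': ..., 'confidence': ..., 'language': ...}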
|
|
117,128 | 320,322 | 294 | src/paperless/tests/test_settings.py | 30 | 7 | def test_redis_socket_parsing(self):
for input, expected in [
(None, ("redis://localhost:6379", "redis://localhost:6379")),
(
"redis+socket:///run/redis/redis.sock",
(
"redis+socket:///run/redis/redis.sock",
"unix:///run/redis/redis.sock",
),
),
(
"unix:///run/redis/redis.sock",
(
"redis+socket:///run/redis/redis.sock",
"unix:///run/redis/redis.sock",
),
),
]:
result = _parse_redis_url(input)
self.assertTupleEqua | Adds a layer to translate between differing formats of socket based Redis URLs | test_redis_socket_parsing | 01d070b882ef9027bef9a046852c2060119edd5d | paperless-ngx | test_settings.py | 10 | 20 | https://github.com/paperless-ngx/paperless-ngx.git | 2 | 62 | 0 | 20 | 103 | Python | {
"docstring": "\n GIVEN:\n - Various Redis connection URI formats\n WHEN:\n - The URI is parsed\n THEN:\n - Socket based URIs are translated\n - Non-socket URIs are unchanged\n - None provided uses default\n ",
"language": "en",
"n_whitespaces": 114,
"n_words": 30,
"vocab_size": 23
} | def test_redis_socket_parsing(self):
for input, expected in [
(None, ("redis://localhost:6379", "redis://localhost:6379")),
(
"redis+socket:///run/redis/redis.sock",
(
"redis+socket:///run/redis/redis.sock",
"unix:///run/redis/redis.sock",
),
),
(
"unix:///run/redis/redis.sock",
(
"redis+socket:///run/redis/redis.sock",
"unix:///run/redis/redis.sock",
),
),
]:
result = _parse_redis_url(input)
self.assertTupleEqual(expected, result)
|
|
73,612 | 251,156 | 79 | mitmproxy/http.py | 35 | 12 | def path_components(self) -> tuple[str, ...]:
path = urllib.parse.urlparse(self.url).path
# This needs to be a tuple so that it's immutable.
# Otherwise, this would fail silently:
# request.path_comp | `pyupgrade --py39-plus **/*.py` | path_components | e83ec8390ad6be6a86cfcfc57bce14cb8861bf32 | mitmproxy | http.py | 11 | 7 | https://github.com/mitmproxy/mitmproxy.git | 3 | 48 | 0 | 33 | 81 | Python | {
"docstring": "\n The URL's path components as a tuple of strings.\n Components are unquoted.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 12
} | def path_components(self) -> tuple[str, ...]:
path = urllib.parse.urlparse(self.url).path
# This needs to be a tuple so that it's immutable.
# Otherwise, this would fail silently:
# request.path_components.append("foo")
return tuple(url.unquote(i) for i in path.split("/") if i)
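The same split, filter and unquote behaviour can be reproduced with just the standard library; a quick sketch with a made-up URL:

import urllib.parse

url = "https://example.com/a%20b//c/?q=1"
path = urllib.parse.urlparse(url).path
components = tuple(urllib.parse.unquote(p) for p in path.split("/") if p)
print(components)   # ('a b', 'c') - empty segments from '//' and the trailing '/' are dropped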
|
|
76,769 | 261,349 | 164 | sklearn/utils/__init__.py | 68 | 15 | def gen_batches(n, batch_size, *, min_batch_size=0):
if not isinstance(batch_size, numbers.Integral):
raise TypeError(
"gen_batches got batch_size=%s, must be an integer" % batch_size
)
if batch_size <= 0:
raise ValueError("gen_batches got batch_size=%s, must be positive" % batch_size)
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
if end + min_batch_size > n:
continue
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
| DOC Ensure that gen_batches passes numpydoc validation (#24609) | gen_batches | 0003ee0492e420783fd9aa665903d9d489736369 | scikit-learn | __init__.py | 11 | 16 | https://github.com/scikit-learn/scikit-learn.git | 6 | 91 | 0 | 46 | 155 | Python | {
"docstring": "Generator to create slices containing `batch_size` elements from 0 to `n`.\n\n The last slice may contain less than `batch_size` elements, when\n `batch_size` does not divide `n`.\n\n Parameters\n ----------\n n : int\n Size of the sequence.\n batch_size : int\n Number of elements in each batch.\n min_batch_size : int, default=0\n Minimum number of elements in each batch.\n\n Yields\n ------\n slice of `batch_size` elements\n\n See Also\n --------\n gen_even_slices: Generator to create n_packs slices going up to n.\n\n Examples\n --------\n >>> from sklearn.utils import gen_batches\n >>> list(gen_batches(7, 3))\n [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]\n >>> list(gen_batches(6, 3))\n [slice(0, 3, None), slice(3, 6, None)]\n >>> list(gen_batches(2, 3))\n [slice(0, 2, None)]\n >>> list(gen_batches(7, 3, min_batch_size=0))\n [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]\n >>> list(gen_batches(7, 3, min_batch_size=2))\n [slice(0, 3, None), slice(3, 7, None)]\n ",
"language": "en",
"n_whitespaces": 233,
"n_words": 131,
"vocab_size": 71
} | def gen_batches(n, batch_size, *, min_batch_size=0):
if not isinstance(batch_size, numbers.Integral):
raise TypeError(
"gen_batches got batch_size=%s, must be an integer" % batch_size
)
if batch_size <= 0:
raise ValueError("gen_batches got batch_size=%s, must be positive" % batch_size)
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
if end + min_batch_size > n:
continue
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
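A small usage sketch on top of the doctests above, batching a toy array:

import numpy as np
from sklearn.utils import gen_batches

X = np.arange(10).reshape(10, 1)              # 10 toy samples
for batch in gen_batches(X.shape[0], batch_size=4):
    print(X[batch].ravel())
# [0 1 2 3]
# [4 5 6 7]
# [8 9]      the final slice may hold fewer than batch_size elements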
|
|
70,917 | 245,868 | 917 | tests/test_models/test_dense_heads/test_condinst_head.py | 228 | 55 | def test_condinst_maskhead_loss(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
condinst_bboxhead = CondInstBboxHead(
num_classes=4,
in_channels=1,
feat_channels=1,
stacked_convs=1,
norm_cfg=None)
mask_feature_head = _fake_mask_feature_head()
condinst_maskhead = CondInstMaskHead(
mask_feature_head=mask_feature_head,
loss_mask=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
eps=5e-6,
loss_weight=1.0))
# Fcos head expects a multiple levels of features per image
feats = []
for i in range(len(condinst_bboxhead.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
feats = t | [Feature]: Support Condinst (#9223)
* [Feature]: support condinst for instance segmentation
* update
* update
* update
* fix config name and add test unit
* fix squeeze error
* add README and chang mask to poly | test_condinst_maskhead_loss | 79c8295801acedee0cbdbf128a01b9fe162646b0 | mmdetection | test_condinst_head.py | 16 | 56 | https://github.com/open-mmlab/mmdetection.git | 2 | 412 | 0 | 134 | 641 | Python | {
"docstring": "Tests condinst maskhead loss when truth is empty and non-empty.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_condinst_maskhead_loss(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
condinst_bboxhead = CondInstBboxHead(
num_classes=4,
in_channels=1,
feat_channels=1,
stacked_convs=1,
norm_cfg=None)
mask_feature_head = _fake_mask_feature_head()
condinst_maskhead = CondInstMaskHead(
mask_feature_head=mask_feature_head,
loss_mask=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
eps=5e-6,
loss_weight=1.0))
# Fcos head expects a multiple levels of features per image
feats = []
for i in range(len(condinst_bboxhead.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
feats = tuple(feats)
cls_scores, bbox_preds, centernesses, param_preds =\
condinst_bboxhead.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
_ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
centernesses, param_preds,
[gt_instances], img_metas)
# When truth is empty then all mask loss
# should be zero for random inputs
positive_infos = condinst_bboxhead.get_positive_infos()
mask_outs = condinst_maskhead.forward(feats, positive_infos)
empty_gt_mask_losses = condinst_maskhead.loss_by_feat(
*mask_outs, [gt_instances], img_metas, positive_infos)
loss_mask = empty_gt_mask_losses['loss_mask']
self.assertEqual(loss_mask, 0, 'mask loss should be zero')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
_ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
centernesses, param_preds,
[gt_instances], img_metas)
positive_infos = condinst_bboxhead.get_positive_infos()
mask_outs = condinst_maskhead.forward(feats, positive_infos)
one_gt_mask_losses = condinst_maskhead.loss_by_feat(
*mask_outs, [gt_instances], img_metas, positive_infos)
loss_mask = one_gt_mask_losses['loss_mask']
self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')
|
|
43,431 | 181,643 | 23 | tests/feature_transformers_tests.py | 11 | 10 | def test_ContinuousSelector_2():
cs = ContinuousSelector(threshold=5, svd_solver='randomized')
X_transformed = cs.transform(iris_da | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_ContinuousSelector_2 | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | feature_transformers_tests.py | 10 | 4 | https://github.com/EpistasisLab/tpot.git | 1 | 43 | 0 | 10 | 71 | Python | {
"docstring": "Assert that ContinuousSelector works as expected with threshold=5.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_ContinuousSelector_2():
cs = ContinuousSelector(threshold=5, svd_solver='randomized')
X_transformed = cs.transform(iris_data[0:16, :])
assert_equal(X_transformed.shape[1],3)
|
|
52,111 | 207,807 | 1,100 | tests/admin_views/tests.py | 342 | 62 | def test_add_view(self):
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
# make sure the view removes test cookie
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.get(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.get(reverse("admin:logout"))
# Add user may login and POST to add view, then redirect to admin root
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
change_list_li | Refs #33476 -- Reformatted code with Black. | test_add_view | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 13 | 85 | https://github.com/django/django.git | 1 | 718 | 0 | 189 | 1,201 | Python | {
"docstring": "Test add view restricts access and actually adds items.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_add_view(self):
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
# make sure the view removes test cookie
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.get(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.get(reverse("admin:logout"))
# Add user may login and POST to add view, then redirect to admin root
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
change_list_link = '› <a href="%s">Articles</a>' % reverse(
"admin:admin_views_article_changelist"
)
self.assertNotContains(
addpage,
change_list_link,
msg_prefix="User restricted to add permission is given link to change list view in breadcrumbs.",
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "Greetings from a created object")
self.client.get(reverse("admin:logout"))
# The addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.adduser.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.get_change_message(), "Added.")
# Super can add too, but is redirected to the change list view
self.client.force_login(self.superuser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
self.assertContains(
addpage,
change_list_link,
msg_prefix="Unrestricted user is not given link to change list view in breadcrumbs.",
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, reverse("admin:admin_views_article_changelist"))
self.assertEqual(Article.objects.count(), 5)
self.client.get(reverse("admin:logout"))
# 8509 - if a normal user is already logged in, it is possible
# to change user into the superuser without error
self.client.force_login(self.joepublicuser)
# Check and make sure that if user expires, data still persists
self.client.force_login(self.superuser)
# make sure the view removes test cookie
self.assertIs(self.client.session.test_cookie_worked(), False)
|
|
78,490 | 266,595 | 315 | lib/ansible/galaxy/collection/__init__.py | 76 | 34 | def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
# type: (Candidate, str, ConcreteArtifactsManager) -> None
b_artifact_path = (
artifacts_manager.get_artifact_path if collection.is_concrete_artifact
else artifacts_manager.get_galaxy_artifact_path
)(collection)
collection_path = os.path.join(path, collection.namespace, collection.name)
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
display.display(
u"Installing '{coll!s}' to '{path!s}'".
format(coll=to_text(collection), path=collection_path),
)
if os.path.exists(b_collection_path):
shutil.rmtree(b_collection_path)
if collection.is_dir:
install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)
else:
install_artifact(
b_artifact_path,
b_collection_path,
artifacts_manager._b_working_directory,
collection.signatures,
artifacts_manager.keyring
)
if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
write_source_metadata(
collection,
b_collection_path, | ansible-galaxy - add signature verification of the MANIFEST.json (#76681)
* ansible-galaxy collection install|verify:
- Support verifying the origin of the MANIFEST.json when the Galaxy server has provided signatures.
- Allow supplemental signatures to use during verification on the CLI/requirements file.
* ansible-galaxy collection install:
- Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured.
- Store Galaxy server metadata alongside installed collections for provenance. This is used by 'ansible-galaxy collection verify --offline'.
* Add unit tests for method that gets signatures from a Galaxy server
* Add integration tests for user-provided signature sources
- Test CLI option combinations
- Test installing collections with valid/invalid signature sources
- Test disabling GPG verification when installing collections
- Test verifying collections with valid/invalid signature sources
* Make signature verification advisory-by-default if signatures are provided by the Galaxy server
- Make the default keyring None
- Warn if the keyring is None but the Galaxy server provided signatures
- Error if the keyring is None but the user supplied signatures
- Error if the keyring is not None but is invalid
* changelog
* add ansible-galaxy user documentation for new options
Co-authored-by: Matt Martz <matt@sivel.net>
Co-authored-by: Sviatoslav Sydorenko <wk.cvs.github@sydorenko.org.ua>
Co-authored-by: Martin Krizek <martin.krizek@gmail.com>
Co-authored-by: Sandra McCann <samccann@redhat.com>
Co-authored-by: Andy Mott <amott@redhat.com>
Co-authored-by: John R Barker <john@johnrbarker.com> | install | 43e55db20821a1341d21ffa1e4e7e6185b244105 | ansible | __init__.py | 13 | 33 | https://github.com/ansible/ansible.git | 6 | 170 | 0 | 62 | 262 | Python | {
"docstring": "Install a collection under a given path.\n\n :param collection: Collection to be installed.\n :param path: Collection dirs layout path.\n :param artifacts_manager: Artifacts manager.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 23,
"vocab_size": 18
} | def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
# type: (Candidate, str, ConcreteArtifactsManager) -> None
b_artifact_path = (
artifacts_manager.get_artifact_path if collection.is_concrete_artifact
else artifacts_manager.get_galaxy_artifact_path
)(collection)
collection_path = os.path.join(path, collection.namespace, collection.name)
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
display.display(
u"Installing '{coll!s}' to '{path!s}'".
format(coll=to_text(collection), path=collection_path),
)
if os.path.exists(b_collection_path):
shutil.rmtree(b_collection_path)
if collection.is_dir:
install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)
else:
install_artifact(
b_artifact_path,
b_collection_path,
artifacts_manager._b_working_directory,
collection.signatures,
artifacts_manager.keyring
)
if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
write_source_metadata(
collection,
b_collection_path,
artifacts_manager
)
display.display(
'{coll!s} was installed successfully'.
format(coll=to_text(collection)),
)
|
|
52,353 | 208,492 | 1,014 | IPython/core/magics/code.py | 214 | 45 | def edit(self, parameter_s='',last_call=['','']):
opts,args = self.parse_options(parameter_s,'prxn:')
try:
filename, lineno, is_temp = self._find_edit_target(self.shell,
args, opts, last_call)
except MacroToEdit as e:
self._edit_macro(args, e.args[0])
return
except InteractivelyDefined as e:
print("Editing In[%i]" % e.index)
args = str(e.index)
filename, lineno, is_temp = self._find_edit_target(self.shell,
args, opts, last_call)
if filename is None:
# nothing was found, warnings have already been issued,
# just give up.
return
if is_temp:
self._knowntemps.add(filename)
elif (filename in self._knowntemps):
is_temp = True
# do actual editing here
print('Editing...', end=' ')
sys.stdout.flush()
filepath = Path(filename)
try:
# Quote filenames that may have spaces in them when opening
# the editor
quoted = filename = str(filepath.absolute())
if " " in quoted:
quoted = "'%s'" % quoted
self.shell.hook | Fix EncodingWarning on Python 3.10 | edit | 23276ac4770f380ce1d5808950dd412a35594af1 | ipython | code.py | 17 | 54 | https://github.com/ipython/ipython.git | 15 | 364 | 0 | 142 | 634 | Python | {
"docstring": "Bring up an editor and execute the resulting code.\n\n Usage:\n %edit [options] [args]\n\n %edit runs IPython's editor hook. The default version of this hook is\n set to call the editor specified by your $EDITOR environment variable.\n If this isn't found, it will default to vi under Linux/Unix and to\n notepad under Windows. See the end of this docstring for how to change\n the editor hook.\n\n You can also set the value of this editor via the\n ``TerminalInteractiveShell.editor`` option in your configuration file.\n This is useful if you wish to use a different editor from your typical\n default with IPython (and for Windows users who typically don't set\n environment variables).\n\n This command allows you to conveniently edit multi-line code right in\n your IPython session.\n\n If called without arguments, %edit opens up an empty editor with a\n temporary file and will execute the contents of this file when you\n close it (don't forget to save it!).\n\n\n Options:\n\n -n <number>: open the editor at a specified line number. By default,\n the IPython editor hook uses the unix syntax 'editor +N filename', but\n you can configure this by providing your own modified hook if your\n favorite editor supports line-number specifications with a different\n syntax.\n\n -p: this will call the editor with the same data as the previous time\n it was used, regardless of how long ago (in your current session) it\n was.\n\n -r: use 'raw' input. This option only applies to input taken from the\n user's history. By default, the 'processed' history is used, so that\n magics are loaded in their transformed version to valid Python. If\n this option is given, the raw input as typed as the command line is\n used instead. When you exit the editor, it will be executed by\n IPython's own processor.\n\n -x: do not execute the edited code immediately upon exit. This is\n mainly useful if you are editing programs which need to be called with\n command line arguments, which you can then do using %run.\n\n\n Arguments:\n\n If arguments are given, the following possibilities exist:\n\n - If the argument is a filename, IPython will load that into the\n editor. It will execute its contents with execfile() when you exit,\n loading any code in the file into your interactive namespace.\n\n - The arguments are ranges of input history, e.g. \"7 ~1/4-6\".\n The syntax is the same as in the %history magic.\n\n - If the argument is a string variable, its contents are loaded\n into the editor. You can thus edit any string which contains\n python code (including the result of previous edits).\n\n - If the argument is the name of an object (other than a string),\n IPython will try to locate the file where it was defined and open the\n editor at the point where it is defined. You can use `%edit function`\n to load an editor exactly at the point where 'function' is defined,\n edit it and have the file be executed automatically.\n\n - If the object is a macro (see %macro for details), this opens up your\n specified editor with a temporary file containing the macro's data.\n Upon exit, the macro is reloaded with the contents of the file.\n\n Note: opening at an exact line is only supported under Unix, and some\n editors (like kedit and gedit up to Gnome 2.8) do not understand the\n '+NUMBER' parameter necessary for this feature. Good editors like\n (X)Emacs, vi, jed, pico and joe all do.\n\n After executing your code, %edit will return as output the code you\n typed in the editor (except when it was an existing file). 
This way\n you can reload the code in further invocations of %edit as a variable,\n via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of\n the output.\n\n Note that %edit is also available through the alias %ed.\n\n This is an example of creating a simple function inside the editor and\n then modifying it. First, start up the editor::\n\n In [1]: edit\n Editing... done. Executing edited code...\n Out[1]: 'def foo():\\\\n print \"foo() was defined in an editing\n session\"\\\\n'\n\n We can then call the function foo()::\n\n In [2]: foo()\n foo() was defined in an editing session\n\n Now we edit foo. IPython automatically loads the editor with the\n (temporary) file where foo() was previously defined::\n\n In [3]: edit foo\n Editing... done. Executing edited code...\n\n And if we call foo() again we get the modified version::\n\n In [4]: foo()\n foo() has now been changed!\n\n Here is an example of how to edit a code snippet successive\n times. First we call the editor::\n\n In [5]: edit\n Editing... done. Executing edited code...\n hello\n Out[5]: \"print 'hello'\\\\n\"\n\n Now we call it again with the previous output (stored in _)::\n\n In [6]: edit _\n Editing... done. Executing edited code...\n hello world\n Out[6]: \"print 'hello world'\\\\n\"\n\n Now we call it with the output #8 (stored in _8, also as Out[8])::\n\n In [7]: edit _8\n Editing... done. Executing edited code...\n hello again\n Out[7]: \"print 'hello again'\\\\n\"\n\n\n Changing the default editor hook:\n\n If you wish to write your own editor hook, you can put it in a\n configuration file which you load at startup time. The default hook\n is defined in the IPython.core.hooks module, and you can use that as a\n starting example for further modifications. That file also has\n general instructions on how to set a new hook for use once you've\n defined it.",
"language": "en",
"n_whitespaces": 1675,
"n_words": 882,
"vocab_size": 385
} | def edit(self, parameter_s='',last_call=['','']):
opts,args = self.parse_options(parameter_s,'prxn:')
try:
filename, lineno, is_temp = self._find_edit_target(self.shell,
args, opts, last_call)
except MacroToEdit as e:
self._edit_macro(args, e.args[0])
return
except InteractivelyDefined as e:
print("Editing In[%i]" % e.index)
args = str(e.index)
filename, lineno, is_temp = self._find_edit_target(self.shell,
args, opts, last_call)
if filename is None:
# nothing was found, warnings have already been issued,
# just give up.
return
if is_temp:
self._knowntemps.add(filename)
elif (filename in self._knowntemps):
is_temp = True
# do actual editing here
print('Editing...', end=' ')
sys.stdout.flush()
filepath = Path(filename)
try:
# Quote filenames that may have spaces in them when opening
# the editor
quoted = filename = str(filepath.absolute())
if " " in quoted:
quoted = "'%s'" % quoted
self.shell.hooks.editor(quoted, lineno)
except TryNext:
warn('Could not open editor')
return
# XXX TODO: should this be generalized for all string vars?
# For now, this is special-cased to blocks created by cpaste
if args.strip() == "pasted_block":
self.shell.user_ns["pasted_block"] = filepath.read_text(encoding='utf-8')
if 'x' in opts: # -x prevents actual execution
print()
else:
print('done. Executing edited code...')
with preserve_keys(self.shell.user_ns, '__file__'):
if not is_temp:
self.shell.user_ns['__file__'] = filename
if 'r' in opts: # Untranslated IPython code
source = filepath.read_text(encoding='utf-8')
self.shell.run_cell(source, store_history=False)
else:
self.shell.safe_execfile(filename, self.shell.user_ns,
self.shell.user_ns)
if is_temp:
try:
return filepath.read_text(encoding='utf-8')
except IOError as msg:
if Path(msg.filename) == filepath:
warn('File not found. Did you forget to save?')
return
else:
self.shell.showtraceback()
|
|
45,438 | 186,299 | 64 | src/textual/strip.py | 25 | 7 | def cell_length(self) -> int:
# Done on demand and cached, as this is an O(n) operation | adds Strip primitive | cell_length | 6f82ad9c4a2e17812a68d3c76d7eae89aee3a515 | textual | strip.py | 11 | 5 | https://github.com/Textualize/textual.git | 2 | 31 | 0 | 22 | 53 | Python | {
"docstring": "Get the number of cells required to render this object.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def cell_length(self) -> int:
# Done on demand and cached, as this is an O(n) operation
if self._cell_length is None:
self._cell_length = Segment.get_line_length(self._segments)
return self._cell_length
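The compute-once-then-cache idiom above can also be expressed with functools; this is a generic sketch, not Textual's actual Strip class, and the width rule is deliberately simplified.

from functools import cached_property

class Line:
    """Toy stand-in for a strip of segments; the width is computed lazily and cached."""

    def __init__(self, text: str) -> None:
        self.text = text

    @cached_property
    def cell_length(self) -> int:
        # Stands in for the O(n) Segment.get_line_length() scan; real terminal
        # width needs wcwidth-style handling of wide and zero-width characters.
        return len(self.text)

line = Line("hello world")
print(line.cell_length)   # 11, computed on first access and cached afterwards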
|
|
117,445 | 320,933 | 22 | tests/unit/mainwindow/test_messageview.py | 10 | 10 | def test_show_message_twice(view):
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
assert len(view._messages) == 1
| Add a MessageInfo data class
Preparation for #7246 | test_show_message_twice | 5616a99eff34f7074641d1391ed77d6b4b743529 | qutebrowser | test_messageview.py | 11 | 4 | https://github.com/qutebrowser/qutebrowser.git | 1 | 49 | 0 | 8 | 83 | Python | {
"docstring": "Show the same message twice -> only one should be shown.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_show_message_twice(view):
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
assert len(view._messages) == 1
|
|
17,401 | 82,438 | 15 | cms/utils/plugins.py | 9 | 8 | def create_default_plugins(request, placeholders, template, lang):
from cms.api import add_plug | ci: Added codespell (#7355)
Co-authored-by: Christian Clauss <cclauss@me.com>
* ci: codespell config taken from #7292 | create_default_plugins | c1290c9ff89cb00caa5469129fd527e9d82cd820 | django-cms | plugins.py | 6 | 11 | https://github.com/django-cms/django-cms.git | 4 | 87 | 0 | 9 | 28 | Python | {
"docstring": "\n Create all default plugins for the given ``placeholders`` if they have\n a \"default_plugins\" configuration value in settings.\n return all plugins, children, grandchildren (etc.) created\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 24,
"vocab_size": 23
} | def create_default_plugins(request, placeholders, template, lang):
from cms.api import add_plugin
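For context, the "default_plugins" data this helper consumes is declared per placeholder in the CMS_PLACEHOLDER_CONF setting; the snippet below is a representative shape recalled from the django CMS documentation, not taken from this repository.

CMS_PLACEHOLDER_CONF = {
    "content": {                      # placeholder name
        "default_plugins": [
            {
                "plugin_type": "TextPlugin",
                "values": {"body": "<p>Replace this placeholder text.</p>"},
            },
        ],
    },
}

Each entry names a plugin class and the field values used to create it the first time the empty placeholder is rendered.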
|
|
69,908 | 242,745 | 658 | src/PIL/Jpeg2KImagePlugin.py | 198 | 30 | def _parse_jp2_header(fp):
# Find the JP2 header box
reader = BoxReader(fp)
header = None
mimetype = None
while reader.has_next_box():
tbox = reader.next_box_type()
if tbox == b"jp2h":
header = reader.read_boxes()
break
elif tbox == b"ftyp":
if reader.read_fields(">4s")[0] == b"jpx ":
mimetype = "image/jpx"
size = None
mode = None
bpc = None
nc = None
dpi = None # 2-tuple of DPI info, or None
while header.has_next_box():
tbox = header.next_box_type()
if tbox == b"ihdr":
height, width, nc, bpc = header.read_fields(">IIHB")
| Remove redundant parentheses | _parse_jp2_header | ee85e387bab535e2339b9d3cd1ab87c61d23af15 | Pillow | Jpeg2KImagePlugin.py | 18 | 46 | https://github.com/python-pillow/Pillow.git | 20 | 285 | 0 | 101 | 476 | Python | {
"docstring": "Parse the JP2 header box to extract size, component count,\n color space information, and optionally DPI information,\n returning a (size, mode, mimetype, dpi) tuple.",
"language": "en",
"n_whitespaces": 29,
"n_words": 24,
"vocab_size": 23
} | def _parse_jp2_header(fp):
# Find the JP2 header box
reader = BoxReader(fp)
header = None
mimetype = None
while reader.has_next_box():
tbox = reader.next_box_type()
if tbox == b"jp2h":
header = reader.read_boxes()
break
elif tbox == b"ftyp":
if reader.read_fields(">4s")[0] == b"jpx ":
mimetype = "image/jpx"
size = None
mode = None
bpc = None
nc = None
dpi = None # 2-tuple of DPI info, or None
while header.has_next_box():
tbox = header.next_box_type()
if tbox == b"ihdr":
height, width, nc, bpc = header.read_fields(">IIHB")
size = (width, height)
if nc == 1 and (bpc & 0x7F) > 8:
mode = "I;16"
elif nc == 1:
mode = "L"
elif nc == 2:
mode = "LA"
elif nc == 3:
mode = "RGB"
elif nc == 4:
mode = "RGBA"
elif tbox == b"res ":
res = header.read_boxes()
while res.has_next_box():
tres = res.next_box_type()
if tres == b"resc":
vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB")
hres = _res_to_dpi(hrcn, hrcd, hrce)
vres = _res_to_dpi(vrcn, vrcd, vrce)
if hres is not None and vres is not None:
dpi = (hres, vres)
break
if size is None or mode is None:
raise SyntaxError("Malformed JP2 header")
return size, mode, mimetype, dpi
##
# Image plugin for JPEG2000 images.
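For orientation, a JP2 file is a flat sequence of length-prefixed boxes; walking the top level with struct, independently of Pillow's BoxReader, can be sketched as below (the input file name is hypothetical).

import struct

def iter_jp2_boxes(data: bytes):
    # Each box: 4-byte big-endian length, 4-byte type tag, then the payload.
    # A length of 1 means an 8-byte extended length follows the tag; 0 means "runs to the end".
    pos = 0
    while pos + 8 <= len(data):
        lbox, tbox = struct.unpack_from(">I4s", data, pos)
        header = 8
        if lbox == 1:
            (lbox,) = struct.unpack_from(">Q", data, pos + 8)
            header = 16
        elif lbox == 0:
            lbox = len(data) - pos
        if lbox < header:             # malformed length, stop instead of looping forever
            break
        yield tbox, data[pos + header:pos + lbox]
        pos += lbox

with open("example.jp2", "rb") as fh:
    for tag, payload in iter_jp2_boxes(fh.read()):
        print(tag, len(payload))      # e.g. b'jP  ', b'ftyp', b'jp2h', b'jp2c'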
|
|
72,125 | 248,145 | 774 | tests/storage/databases/main/test_events_worker.py | 78 | 26 | def _populate_events(self) -> None:
self.get_success(
self.store.db_pool.simple_upsert(
"rooms",
{"room_id": self.room_id},
{"room_version": RoomVersions.V4.identifier},
)
)
self.event_ids: List[str] = []
for idx in range(20):
event_json = {
"type": f"test {idx}",
"room_id": self.room_id,
}
event = make_event_from_dict(event_json, room_version=RoomVersions.V4)
event_id = event.event_id
self.get_success(
self.store.db_pool.simple_upsert(
"events",
{"event_id": event_id},
{
"event_id": event_id,
"room_id": self.room_id,
"topological_ordering": idx,
"stream_ordering": idx,
"type": event.type,
"processed": True,
"outlier": False,
},
)
)
self.get_success(
self.store.db_pool.simple_upsert(
"event_json",
{"event_id": event_id},
{
"room_id": self.room_id,
| Add a consistency check on events read from the database (#12620)
I've seen a few errors which can only plausibly be explained by the calculated
event id for an event being different from the ID of the event in the
database. It should be cheap to check this, so let's do so and raise an
exception. | _populate_events | 96e0cdbc5af0563ee805ec4e588e1df14899af66 | synapse | test_events_worker.py | 15 | 54 | https://github.com/matrix-org/synapse.git | 2 | 208 | 0 | 55 | 351 | Python | {
"docstring": "Ensure that there are test events in the database.\n\n When testing with the in-memory SQLite database, all the events are lost during\n the simulated outage.\n\n To ensure consistency between `room_id`s and `event_id`s before and after the\n outage, rows are built and inserted manually.\n\n Upserts are used to handle the non-SQLite case where events are not lost.\n ",
"language": "en",
"n_whitespaces": 98,
"n_words": 56,
"vocab_size": 43
} | def _populate_events(self) -> None:
self.get_success(
self.store.db_pool.simple_upsert(
"rooms",
{"room_id": self.room_id},
{"room_version": RoomVersions.V4.identifier},
)
)
self.event_ids: List[str] = []
for idx in range(20):
event_json = {
"type": f"test {idx}",
"room_id": self.room_id,
}
event = make_event_from_dict(event_json, room_version=RoomVersions.V4)
event_id = event.event_id
self.get_success(
self.store.db_pool.simple_upsert(
"events",
{"event_id": event_id},
{
"event_id": event_id,
"room_id": self.room_id,
"topological_ordering": idx,
"stream_ordering": idx,
"type": event.type,
"processed": True,
"outlier": False,
},
)
)
self.get_success(
self.store.db_pool.simple_upsert(
"event_json",
{"event_id": event_id},
{
"room_id": self.room_id,
"json": json.dumps(event_json),
"internal_metadata": "{}",
"format_version": EventFormatVersions.V3,
},
)
)
self.event_ids.append(event_id)
|
|
@instrumented_task(
name="sentry.tasks.weekly_reports.schedule_organizations",
queue="reports.prepare",
max_retries=5,
acks_late=True,
) | 18,455 | 88,840 | 41 | src/sentry/tasks/weekly_reports.py | 26 | 12 | def check_if_ctx_is_empty(ctx):
return all(check_if_project_is_empty(project_ctx) for project_ctx in ctx.projects.values | fix(weekly-email): skip organization report if report is empty (#41620)
This PR adds logic to skip sending the organization weekly report if
there is no context for any of the projects | check_if_ctx_is_empty | 2ab29056f48c587870e0897a4feaef9ac7fd3b53 | sentry | weekly_reports.py | 11 | 2 | https://github.com/getsentry/sentry.git | 2 | 24 | 1 | 26 | 74 | Python | {
"docstring": "\n Check if the context is empty. If it is, we don't want to send an email.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 16,
"vocab_size": 16
} | def check_if_ctx_is_empty(ctx):
return all(check_if_project_is_empty(project_ctx) for project_ctx in ctx.projects.values())
# The entry point. This task is scheduled to run every week.
@instrumented_task(
name="sentry.tasks.weekly_reports.schedule_organizations",
queue="reports.prepare",
max_retries=5,
acks_late=True,
) |
57,070 | 223,797 | 84 | python3.10.4/Lib/email/message.py | 23 | 9 | def __delitem__(self, name):
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheade | add python 3.10.4 for windows | __delitem__ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | message.py | 12 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 52 | 0 | 20 | 85 | Python | {
"docstring": "Delete all occurrences of a header, if present.\n\n Does not raise an exception if the header is missing.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 18,
"vocab_size": 17
} | def __delitem__(self, name):
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheaders
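Observable behaviour of this method through the public Message API; deletion is case-insensitive, removes every occurrence, and never raises for a missing header.

from email.message import Message

msg = Message()
msg["Received"] = "from a.example by b.example"
msg["Received"] = "from b.example by c.example"   # header fields may legitimately repeat
msg["Subject"] = "hello"

del msg["RECEIVED"]       # case-insensitive, removes all occurrences
del msg["X-Missing"]      # absent header: silently ignored

print(msg.keys())         # ['Subject']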
|
|
32,804 | 142,807 | 106 | python/ray/tune/execution/placement_groups.py | 30 | 13 | def update_status(self):
self.cleanup()
ready = True
while ready:
# Use a loop as `ready` might return futures one by one
ready, _ = ray.wait(list(self._staging_futures.keys()), timeout=0)
| [tune/structure] Introduce execution package (#26015)
Execution-specific packages are moved to tune.execution.
Co-authored-by: Xiaowei Jiang <xwjiang2010@gmail.com> | update_status | 0959f44b6fc217a4f2766ed46a721eb79b067b2c | ray | placement_groups.py | 15 | 7 | https://github.com/ray-project/ray.git | 3 | 51 | 0 | 27 | 86 | Python | {
"docstring": "Update placement group status.\n\n Moves ready placement groups from `self._staging` to\n `self._ready`.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 12,
"vocab_size": 11
} | def update_status(self):
self.cleanup()
ready = True
while ready:
# Use a loop as `ready` might return futures one by one
ready, _ = ray.wait(list(self._staging_futures.keys()), timeout=0)
for ready_fut in ready:
self.handle_ready_future(ready_fut)
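The same poll-and-drain pattern with ray.wait(), reduced to a self-contained sketch. This assumes a local Ray installation; the original polls with timeout=0, while a small timeout is used here to avoid a busy loop.

import ray

ray.init()

@ray.remote
def work(i):
    return i * i

pending = [work.remote(i) for i in range(5)]
results = []
while pending:
    ready, pending = ray.wait(pending, timeout=0.5)
    for ref in ready:                  # ready futures may arrive one at a time, as noted above
        results.append(ray.get(ref))
print(sorted(results))                 # [0, 1, 4, 9, 16]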
|
|
13,542 | 63,968 | 72 | erpnext/patches/v13_0/trim_whitespace_from_serial_nos.py | 101 | 32 | def execute():
broken_sles = frappe.db.sql(,
(
" %", # leading whitespace
"% ", # trailing whitespace
"%\n %", # leading whitespace on newline
"% \n%", # trailing whitespace on newline
),
as_dict=True,
)
frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sles)
if not broken_sles:
return
broken_serial_nos = set()
for sle in broken_sles:
serial_no_list = get_serial_nos(sle.serial_no)
correct_sr_no = "\n".join(serial_no_list)
if correct_sr_no == s | fix(patch): serial no whitespace trimming
old data can contain trailing/leading whitespace which doesn't work well
with code to find last SLE for serial no. | execute | 0faa116f9799f6d921ce8868a8f8eac1756ae008 | erpnext | trim_whitespace_from_serial_nos.py | 13 | 46 | https://github.com/frappe/erpnext.git | 8 | 198 | 0 | 70 | 335 | Python | {
"docstring": "\n\t\t\tselect name, serial_no\n\t\t\tfrom `tabStock Ledger Entry`\n\t\t\twhere\n\t\t\t\tis_cancelled = 0\n\t\t\t\tand (serial_no like %s or serial_no like %s or serial_no like %s or serial_no like %s)\n\t\t\t\n\t\t\t\t\t\t\tselect name\n\t\t\t\t\t\t\tfrom `tabSerial No`\n\t\t\t\t\t\t\twhere status='Active'\n\t\t\t\t\t\t\t\tand coalesce(purchase_document_type, '') = ''\n\t\t\t\t\t\t\t\tand name in %s ",
"language": "en",
"n_whitespaces": 34,
"n_words": 43,
"vocab_size": 25
} | def execute():
broken_sles = frappe.db.sql(,
(
" %", # leading whitespace
"% ", # trailing whitespace
"%\n %", # leading whitespace on newline
"% \n%", # trailing whitespace on newline
),
as_dict=True,
)
frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sles)
if not broken_sles:
return
broken_serial_nos = set()
for sle in broken_sles:
serial_no_list = get_serial_nos(sle.serial_no)
correct_sr_no = "\n".join(serial_no_list)
if correct_sr_no == sle.serial_no:
continue
frappe.db.set_value("Stock Ledger Entry", sle.name, "serial_no", correct_sr_no, update_modified=False)
broken_serial_nos.update(serial_no_list)
if not broken_serial_nos:
return
broken_sr_no_records = [sr[0] for sr in frappe.db.sql(, (list(broken_serial_nos),)
)]
frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sr_no_records)
patch_savepoint = "serial_no_patch"
for serial_no in broken_sr_no_records:
try:
frappe.db.savepoint(patch_savepoint)
sn = frappe.get_doc("Serial No", serial_no)
sn.update_serial_no_reference()
sn.db_update()
except Exception:
frappe.db.rollback(save_point=patch_savepoint)
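The core normalisation the patch applies can be illustrated without Frappe: strip stray whitespace around each newline-separated serial number and drop empty entries, roughly what get_serial_nos() does before the values are rejoined.

def normalize_serial_nos(raw: str) -> str:
    return "\n".join(part.strip() for part in raw.split("\n") if part.strip())

broken = " SN-001 \nSN-002\n SN-003"
print(repr(normalize_serial_nos(broken)))   # 'SN-001\nSN-002\nSN-003'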
|
|
72,178 | 248,247 | 91 | tests/config/test_cache.py | 20 | 11 | def test_individual_caches_from_environ(self):
config = {}
self.config._environ = {
"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2",
"SYNAPSE_NOT_CACHE": "BLAH",
}
self.config.read_config(config, config_dir_path="", data_dir_path="")
self.config.resize_all_caches()
self.assertEqual(dict(self.config.cache_factors), {"something_or_oth | Reload cache factors from disk on SIGHUP (#12673) | test_individual_caches_from_environ | d38d242411b8910dfacde1e61fd3a0ec5cbcaa66 | synapse | test_cache.py | 11 | 9 | https://github.com/matrix-org/synapse.git | 1 | 70 | 0 | 19 | 121 | Python | {
"docstring": "\n Individual cache factors will be loaded from the environment.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def test_individual_caches_from_environ(self):
config = {}
self.config._environ = {
"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2",
"SYNAPSE_NOT_CACHE": "BLAH",
}
self.config.read_config(config, config_dir_path="", data_dir_path="")
self.config.resize_all_caches()
self.assertEqual(dict(self.config.cache_factors), {"something_or_other": 2.0})
|
|
7,690 | 42,688 | 216 | kubernetes_tests/test_kubernetes_pod_operator.py | 43 | 27 | def test_already_checked_on_success(self):
pod_name = "t | Use "remote" pod when patching KPO pod as "checked" (#23676)
When patching as "checked", we have to use the current version of the pod otherwise we may get an error when trying to patch it, e.g.:
```
Operation cannot be fulfilled on pods \"test-kubernetes-pod-db9eedb7885c40099dd40cd4edc62415\": the object has been modified; please apply your changes to the latest version and try again"
```
This error would not cause a failure of the task, since errors in `cleanup` are suppressed. However, it would fail to patch.
I believe one scenario when the pod may be updated is when retrieving xcom, since the sidecar is terminated after extracting the value.
Concerning some changes in the tests re the "already_checked" label, it was added to a few "expected pods" recently, when we changed it to patch even in the case of a successful pod.
Since we are changing the "patch" code to patch with the latest read on the pod that we have (i.e. using the `remote_pod` variable), and no longer the pod object stored on `k.pod`, the label no longer shows up in those tests (that's because in k.pod isn't actually a read of the remote pod, but just happens to get mutated in the patch function before it is used to actually patch the pod).
Further, since the `remote_pod` is a local variable, we can't observe it in tests. So we have to read the pod using k8s api. _But_, our "find pod" function excludes "already checked" pods! So we have to make this configurable.
So, now we have a proper integration test for the "already_checked" behavior (there was already a unit test). | test_already_checked_on_success | 6bbe015905bd2709e621455d9f71a78b374d1337 | airflow | test_kubernetes_pod_operator.py | 12 | 19 | https://github.com/apache/airflow.git | 1 | 131 | 0 | 37 | 221 | Python | {
"docstring": "\n When ``is_delete_operator_pod=False``, pod should have 'already_checked'\n label, whether pod is successful or not.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 13,
"vocab_size": 12
} | def test_already_checked_on_success(self):
pod_name = "test-" + str(random.randint(0, 1000000))
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo 10"],
labels={"foo": "bar"},
name=pod_name,
task_id="task" + self.get_current_task_name(),
in_cluster=False,
do_xcom_push=False,
is_delete_operator_pod=False,
)
context = create_context(k)
k.execute(context)
actual_pod = k.find_pod('default', context, exclude_checked=False)
actual_pod = self.api_client.sanitize_for_serialization(actual_pod)
assert actual_pod['metadata']['labels']['already_checked'] == 'True'
|
|
13,860 | 65,338 | 23 | erpnext/accounts/report/sales_register/sales_register.py | 40 | 20 | def get_invoice_cc_wh_map(invoice_list):
si_items = frappe.db.sql(
% ", ".join(["%s"] * len(invoice_list)),
tuple(inv.name fo | style: format code with black | get_invoice_cc_wh_map | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | sales_register.py | 17 | 20 | https://github.com/frappe/erpnext.git | 5 | 124 | 0 | 31 | 201 | Python | {
"docstring": "select parent, cost_center, warehouse\n\t\tfrom `tabSales Invoice Item` where parent in (%s)\n\t\tand (ifnull(cost_center, '') != '' or ifnull(warehouse, '') != '')",
"language": "en",
"n_whitespaces": 19,
"n_words": 22,
"vocab_size": 19
} | def get_invoice_cc_wh_map(invoice_list):
si_items = frappe.db.sql(
% ", ".join(["%s"] * len(invoice_list)),
tuple(inv.name for inv in invoice_list),
as_dict=1,
)
invoice_cc_wh_map = {}
for d in si_items:
if d.cost_center:
invoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault("cost_center", []).append(
d.cost_center
)
if d.warehouse:
invoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault("warehouse", []).append(
d.warehouse
)
return invoice_cc_wh_map
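Outside Frappe, the same grouping idiom can be sketched with plain dicts and setdefault; the rows below are made up.

rows = [
    {"parent": "SINV-0001", "cost_center": "Main - C", "warehouse": "Stores - C"},
    {"parent": "SINV-0001", "cost_center": "Main - C", "warehouse": None},
    {"parent": "SINV-0002", "cost_center": None,       "warehouse": "Stores - C"},
]

invoice_map = {}
for row in rows:
    entry = invoice_map.setdefault(row["parent"], {})
    if row["cost_center"]:
        entry.setdefault("cost_center", []).append(row["cost_center"])
    if row["warehouse"]:
        entry.setdefault("warehouse", []).append(row["warehouse"])

print(invoice_map)
# {'SINV-0001': {'cost_center': ['Main - C', 'Main - C'], 'warehouse': ['Stores - C']},
#  'SINV-0002': {'warehouse': ['Stores - C']}}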
|
|
27,449 | 123,800 | 175 | lib/core/common.py | 59 | 24 | def parseSqliteTableSchema(value):
retVal = False
value = extractRegexResult(r"(?s)\((?P<result>.+)\)", value)
if value:
table = {}
columns = OrderedDict()
value = re.sub(r"\(.+?\)", "", value).strip()
for match in re.finditer(r"(?:\A|,)\s*(([\"'`]).+?\2|\w+)(?:\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\b)?", decodeStringEscape(value), re.I):
column = match.group(1).strip(match.group(2) or "")
if re.search(r"(?i)\A(CONSTRAINT|PRIMARY|UNIQUE|CHECK|FOREIGN)\b", column.strip()):
continue
retVal = Tr | Improving SQLite table schema parsing (#2678) | parseSqliteTableSchema | 86ac3025edb83ce49d563b6787df4fc6ca305ce6 | sqlmap | common.py | 15 | 16 | https://github.com/sqlmapproject/sqlmap.git | 6 | 146 | 0 | 43 | 260 | Python | {
"docstring": "\n Parses table column names and types from specified SQLite table schema\n\n >>> kb.data.cachedColumns = {}\n >>> parseSqliteTableSchema(\"CREATE TABLE users(\\\\n\\\\t\\\\tid INTEGER,\\\\n\\\\t\\\\tname TEXT\\\\n);\")\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('id', 'INTEGER'), ('name', 'TEXT'))\n True\n >>> parseSqliteTableSchema(\"CREATE TABLE dummy(`foo bar` BIGINT, \\\\\"foo\\\\\" VARCHAR, 'bar' TEXT)\");\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('foo bar', 'BIGINT'), ('foo', 'VARCHAR'), ('bar', 'TEXT'))\n True\n >>> parseSqliteTableSchema(\"CREATE TABLE suppliers(\\\\n\\\\tsupplier_id INTEGER PRIMARY KEY DESC,\\\\n\\\\tname TEXT NOT NULL\\\\n);\");\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('supplier_id', 'INTEGER'), ('name', 'TEXT'))\n True\n >>> parseSqliteTableSchema(\"CREATE TABLE country_languages (\\\\n\\\\tcountry_id INTEGER NOT NULL,\\\\n\\\\tlanguage_id INTEGER NOT NULL,\\\\n\\\\tPRIMARY KEY (country_id, language_id),\\\\n\\\\tFOREIGN KEY (country_id) REFERENCES countries (country_id) ON DELETE CASCADE ON UPDATE NO ACTION,\\\\tFOREIGN KEY (language_id) REFERENCES languages (language_id) ON DELETE CASCADE ON UPDATE NO ACTION);\");\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('country_id', 'INTEGER'), ('language_id', 'INTEGER'))\n True\n ",
"language": "en",
"n_whitespaces": 177,
"n_words": 119,
"vocab_size": 69
} | def parseSqliteTableSchema(value):
retVal = False
value = extractRegexResult(r"(?s)\((?P<result>.+)\)", value)
if value:
table = {}
columns = OrderedDict()
value = re.sub(r"\(.+?\)", "", value).strip()
for match in re.finditer(r"(?:\A|,)\s*(([\"'`]).+?\2|\w+)(?:\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\b)?", decodeStringEscape(value), re.I):
column = match.group(1).strip(match.group(2) or "")
if re.search(r"(?i)\A(CONSTRAINT|PRIMARY|UNIQUE|CHECK|FOREIGN)\b", column.strip()):
continue
retVal = True
columns[column] = match.group(3) or "TEXT"
table[safeSQLIdentificatorNaming(conf.tbl, True)] = columns
kb.data.cachedColumns[conf.db] = table
return retVal
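sqlmap parses the raw CREATE TABLE text because it usually only has the dumped schema string; when a live SQLite handle is available, the same column/type map comes straight from PRAGMA table_info:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, age INT)")

# each row is (cid, name, type, notnull, dflt_value, pk)
columns = {name: coltype for _, name, coltype, *_ in con.execute("PRAGMA table_info(users)")}
print(columns)   # {'id': 'INTEGER', 'name': 'TEXT', 'age': 'INT'}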
|
|
56,955 | 223,532 | 334 | python3.10.4/Lib/email/_header_value_parser.py | 64 | 16 | def get_phrase(value):
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
| add python 3.10.4 for windows | get_phrase | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _header_value_parser.py | 20 | 26 | https://github.com/XX-net/XX-Net.git | 7 | 149 | 0 | 41 | 253 | Python | {
"docstring": " phrase = 1*word / obs-phrase\n obs-phrase = word *(word / \".\" / CFWS)\n\n This means a phrase can be a sequence of words, periods, and CFWS in any\n order as long as it starts with at least one word. If anything other than\n words is detected, an ObsoleteHeaderDefect is added to the token's defect\n list. We also accept a phrase that starts with CFWS followed by a dot;\n this is registered as an InvalidHeaderDefect, since it is not supported by\n even the obsolete grammar.\n\n ",
"language": "en",
"n_whitespaces": 115,
"n_words": 84,
"vocab_size": 63
} | def get_phrase(value):
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
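A quick illustration against the same private parser module (CPython's email._header_value_parser, so exact token details may vary across versions):

from email._header_value_parser import get_phrase

phrase, rest = get_phrase("John Q. Public <jqp@example.com>")
print(str(phrase))         # the display-name text consumed as a phrase
print(rest)                # what is left for the next parser, starting at '<'
print(phrase.all_defects)  # the bare '.' after 'Q' is recorded as an obsolete-syntax defect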
|
|
98,219 | 299,285 | 311 | tests/components/cast/test_media_player.py | 172 | 33 | async def test_group_media_states(hass, mz_mock):
entity_id = "media_player.speaker"
reg = er.async_get(hass)
info = get_fake_chromecast_info()
chromecast, _ = await async_setup_media_player_cast(hass, info)
_, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(
chromecast, mz_mock
)
connection_status = MagicMock()
connection_status.status = "CONNECTED"
conn_status_cb(connection_status)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert state.name == "Speaker"
assert state.state == "off"
assert entity_id == reg.async_get_entity_id("media_player", "cast", str(info.uuid))
group_media_status = MagicMock(images=None)
player_media_status = MagicMock(images=None)
# Player has no state, group is buffering -> Should report 'buffering'
group_media_status.player_state = "BUFFERING"
group_media_status_cb(str(FakeGroupUUID), group_media_status)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "buffering"
# Player has no state, group is playing -> Should report 'playing'
group_media_status.player_state = "PLAYING"
| Add state buffering to media_player and use it in cast (#70802) | test_group_media_states | 66551e6fcbd063e53c13adc8a6462b8e00ce1450 | core | test_media_player.py | 11 | 41 | https://github.com/home-assistant/core.git | 1 | 275 | 0 | 76 | 474 | Python | {
"docstring": "Test media states are read from group if entity has no state.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | async def test_group_media_states(hass, mz_mock):
entity_id = "media_player.speaker"
reg = er.async_get(hass)
info = get_fake_chromecast_info()
chromecast, _ = await async_setup_media_player_cast(hass, info)
_, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(
chromecast, mz_mock
)
connection_status = MagicMock()
connection_status.status = "CONNECTED"
conn_status_cb(connection_status)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert state.name == "Speaker"
assert state.state == "off"
assert entity_id == reg.async_get_entity_id("media_player", "cast", str(info.uuid))
group_media_status = MagicMock(images=None)
player_media_status = MagicMock(images=None)
# Player has no state, group is buffering -> Should report 'buffering'
group_media_status.player_state = "BUFFERING"
group_media_status_cb(str(FakeGroupUUID), group_media_status)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "buffering"
# Player has no state, group is playing -> Should report 'playing'
group_media_status.player_state = "PLAYING"
group_media_status_cb(str(FakeGroupUUID), group_media_status)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "playing"
# Player is paused, group is playing -> Should report 'paused'
player_media_status.player_state = None
player_media_status.player_is_paused = True
media_status_cb(player_media_status)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "paused"
# Player is in unknown state, group is playing -> Should report 'playing'
player_media_status.player_state = "UNKNOWN"
media_status_cb(player_media_status)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "playing"
|
|
15,578 | 70,904 | 59 | wagtail/core/tests/test_blocks.py | 17 | 14 | def test_deserialize(self):
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.to_python(christmas_page.id), christmas_page)
# None should deserialize to None
sel | Improve asserts in wagtail.
These improvements were based on flake8-assertive, which compiled an extensive
list of patterns to replace with more precise assertions. This should make
the error messages better in case of failures. | test_deserialize | a0ef2477a68f2deb83cdc9a0bb709cb644be028b | wagtail | test_blocks.py | 10 | 5 | https://github.com/wagtail/wagtail.git | 1 | 51 | 0 | 15 | 88 | Python | {
"docstring": "The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def test_deserialize(self):
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.to_python(christmas_page.id), christmas_page)
# None should deserialize to None
self.assertIsNone(block.to_python(None))
|
|
80,124 | 269,492 | 53 | keras/backend.py | 31 | 6 | def _is_current_explicit_device(device_type):
device_type | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _is_current_explicit_device | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 10 | 6 | https://github.com/keras-team/keras.git | 3 | 48 | 0 | 26 | 85 | Python | {
"docstring": "Check if the current device is explicitly set on the device type specified.\n\n Args:\n device_type: A string containing `GPU` or `CPU` (case-insensitive).\n\n Returns:\n A boolean indicating if the current device scope is explicitly set on the\n device type.\n\n Raises:\n ValueError: If the `device_type` string indicates an unsupported device.\n ",
"language": "en",
"n_whitespaces": 88,
"n_words": 48,
"vocab_size": 33
} | def _is_current_explicit_device(device_type):
device_type = device_type.upper()
if device_type not in ["CPU", "GPU"]:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
|
|
78,610 | 266,828 | 212 | lib/ansible/module_utils/common/parameters.py | 72 | 20 | def _return_datastructure_name(obj):
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in _return_datastructure_name(element[1]):
| module_utils - Fix type hinting issues. | _return_datastructure_name | 12865139472f0a2fa95b94983dcedb4d57e93b10 | ansible | parameters.py | 14 | 19 | https://github.com/ansible/ansible.git | 12 | 136 | 0 | 49 | 222 | Python | {
"docstring": " Return native stringified values from datastructures.\n\n For use with removing sensitive values pre-jsonification.",
"language": "en",
"n_whitespaces": 16,
"n_words": 13,
"vocab_size": 12
} | def _return_datastructure_name(obj):
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in _return_datastructure_name(element[1]):
yield subelement
elif is_iterable(obj):
for element in obj:
for subelement in _return_datastructure_name(element):
yield subelement
elif obj is None or isinstance(obj, bool):
# This must come before int because bools are also ints
return
elif isinstance(obj, tuple(list(integer_types) + [float])):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s' % (type(obj)))
|
|
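A standalone stand-in for the ansible traversal above, added only to illustrate what the generator yields; ansible's to_native/is_iterable helpers are replaced with plain-Python assumptions, so this is a sketch rather than the real module code.

def iter_leaf_strings(obj):
    # plain-Python approximation of _return_datastructure_name's branch order
    if isinstance(obj, (str, bytes)):
        if obj:
            yield obj if isinstance(obj, str) else obj.decode("utf-8", "replace")
    elif isinstance(obj, dict):
        for value in obj.values():
            yield from iter_leaf_strings(value)
    elif isinstance(obj, (list, tuple, set)):
        for element in obj:
            yield from iter_leaf_strings(element)
    elif obj is None or isinstance(obj, bool):
        return  # bools are skipped before the numeric branch, matching the original
    elif isinstance(obj, (int, float)):
        yield repr(obj)

assert list(iter_leaf_strings({"user": "bob", "tags": ["db", 22]})) == ["bob", "db", "22"]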
35,130 | 151,751 | 84 | freqtrade/rpc/api_server/ws/message_stream.py | 30 | 8 | async def __aiter__(self):
waiter = self._waiter
while True:
# Shield the future from being cancelled by a task waiting on it
message, t | log warning if channel too far behind, add docstrings to message stream | __aiter__ | afc00bc30a94abd64fee000535e66287fd91595f | freqtrade | message_stream.py | 11 | 5 | https://github.com/freqtrade/freqtrade.git | 2 | 31 | 0 | 27 | 55 | Python | {
"docstring": "\n Iterate over the messages in the message stream\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 7
} | async def __aiter__(self):
waiter = self._waiter
while True:
# Shield the future from being cancelled by a task waiting on it
message, ts, waiter = await asyncio.shield(waiter)
yield message, ts
|
|
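A minimal consumption sketch for the async iterator in the freqtrade record above; the stream object itself is assumed to exist, and only the async-for pattern over the (message, timestamp) pairs is shown.

async def consume(stream):
    # each iteration receives the (message, ts) tuple yielded by __aiter__ above
    async for message, ts in stream:
        print(ts, message)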
23,593 | 109,448 | 22 | lib/matplotlib/axes/_base.py | 8 | 3 | def get_gridspec(self):
| Merge SubplotBase into AxesBase. | get_gridspec | c73f4c455514cf5422d27bf38c93250de8316b21 | matplotlib | _base.py | 9 | 2 | https://github.com/matplotlib/matplotlib.git | 2 | 20 | 0 | 8 | 34 | Python | {
"docstring": "Return the `.GridSpec` associated with the subplot, or None.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def get_gridspec(self):
return self._subplotspec.get_gridspec() if self._subplotspec else None
|
|
9,622 | 48,856 | 20 | modules/image/classification/esnet_x0_5_imagenet/model.py | 11 | 9 | def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs):
mo | add clas modules | ESNet_x0_5 | 33c9d1a8e4d7fd0a023819e27ba6c3819cda6b4b | PaddleHub | model.py | 11 | 3 | https://github.com/PaddlePaddle/PaddleHub.git | 1 | 37 | 0 | 10 | 56 | Python | {
"docstring": "\n ESNet_x0_5\n Args:\n pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.\n If str, means the path of the pretrained model.\n use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.\n Returns:\n model: nn.Layer. Specific `ESNet_x0_5` model depends on args.\n ",
"language": "en",
"n_whitespaces": 93,
"n_words": 40,
"vocab_size": 35
} | def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs):
model = ESNet(scale=0.5, stages_pattern=MODEL_STAGES_PATTERN["ESNet"], **kwargs)
return model
|
|
6,820 | 37,515 | 13 | src/transformers/testing_utils.py | 7 | 5 | def require_rjieba(test_case):
| Update all require decorators to use skipUnless when possible (#16999) | require_rjieba | 57e6464ac9a31156f1c93e59107323e6ec01309e | transformers | testing_utils.py | 10 | 2 | https://github.com/huggingface/transformers.git | 1 | 20 | 0 | 7 | 37 | Python | {
"docstring": "\n Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 15,
"vocab_size": 15
} | def require_rjieba(test_case):
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
|
|
23,495 | 109,257 | 54 | lib/matplotlib/axes/_base.py | 25 | 8 | def get_yaxis(self):
return self.yaxis
get_xgridlines = _axis_method_wrapper("xaxis", "get_gridlines")
get_xticklines = _axis_method_wrapper("xaxis", "g | Add discouraged admonitions
The [*Discouraged*] prefix in the summary line is added in analogy to
the [*Deprecated*] prefix we add automatically. We do this so that
these "labels" are prominently visible also in summary overviews of
the functions in the docs.
Since we rarely discourage whole functions, for now I just do this
manually. | get_yaxis | 5af97515b3823b2efa1961253a11e2d77df88637 | matplotlib | _base.py | 7 | 2 | https://github.com/matplotlib/matplotlib.git | 1 | 10 | 0 | 18 | 84 | Python | {
"docstring": "\n [*Discouraged*] Return the YAxis instance.\n\n .. admonition:: Discouraged\n\n The use of this function is discouraged. You should instead\n directly access the attribute ``ax.yaxis``.\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 23,
"vocab_size": 22
} | def get_yaxis(self):
return self.yaxis
get_xgridlines = _axis_method_wrapper("xaxis", "get_gridlines")
get_xticklines = _axis_method_wrapper("xaxis", "get_ticklines")
get_ygridlines = _axis_method_wrapper("yaxis", "get_gridlines")
get_yticklines = _axis_method_wrapper("yaxis", "get_ticklines")
# Adding and tracking artists
|
|
80,141 | 269,510 | 16 | keras/backend.py | 7 | 2 | def disable_tf_random_generator():
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | disable_tf_random_generator | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 6 | 3 | https://github.com/keras-team/keras.git | 1 | 10 | 0 | 6 | 20 | Python | {
"docstring": "Disable the `tf.random.Generator` as the RNG for Keras.\n\n See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more\n details.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 13,
"vocab_size": 11
} | def disable_tf_random_generator():
global _USE_GENERATOR_FOR_RNG
_USE_GENERATOR_FOR_RNG = False
|
|
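A hedged usage sketch for the toggle in the keras record above, using the tf.keras re-export named in its docstring; availability in any particular TensorFlow release is an assumption.

import tensorflow as tf

tf.keras.backend.experimental.disable_tf_random_generator()
assert not tf.keras.backend.experimental.is_tf_random_generator_enabled()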
@skip_pyarrow
@pytest.mark.parametrize(
"date_string",
["32/32/2019", "02/30/2019", "13/13/2019", "13/2019", "a3/11/2018", "10/11/2o17"],
) | 40,744 | 171,994 | 187 | pandas/tests/io/parser/test_parse_dates.py | 54 | 28 | def test_parse_timezone(all_parsers):
# see gh-22256
parser = all_parsers
data =
result = parser.read_csv(StringIO(data), parse_dates=["dt"])
dti = DatetimeIndex(
list(
date_range(
start="2018-01-04 09:01:00",
end="2018-01-04 09:05:00",
freq="1min",
tz=timezone(timedelta(minutes=540)),
)
),
freq=None,
)
expected_data = {"dt": dti, "val": [23350, 23400, 23400, 23400, 23400]}
expected = DataFrame(expected_data)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"d | API: default to stdlib timezone objects for fixed-offsets (#49677)
* API: default to stdlib timezone objects for fixed-offsets
* update docstrings
* flesh out whatsnew
* handle strings
* skip on windows | test_parse_timezone | 5a372d892a8c45a2442ab1e744aea3241d2c26a8 | pandas | test_parse_dates.py | 18 | 23 | https://github.com/pandas-dev/pandas.git | 1 | 103 | 1 | 45 | 215 | Python | {
"docstring": "dt,val\n 2018-01-04 09:01:00+09:00,23350\n 2018-01-04 09:02:00+09:00,23400\n 2018-01-04 09:03:00+09:00,23400\n 2018-01-04 09:04:00+09:00,23400\n 2018-01-04 09:05:00+09:00,23400",
"language": "en",
"n_whitespaces": 75,
"n_words": 11,
"vocab_size": 7
} | def test_parse_timezone(all_parsers):
# see gh-22256
parser = all_parsers
data =
result = parser.read_csv(StringIO(data), parse_dates=["dt"])
dti = DatetimeIndex(
list(
date_range(
start="2018-01-04 09:01:00",
end="2018-01-04 09:05:00",
freq="1min",
tz=timezone(timedelta(minutes=540)),
)
),
freq=None,
)
expected_data = {"dt": dti, "val": [23350, 23400, 23400, 23400, 23400]}
expected = DataFrame(expected_data)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"date_string",
["32/32/2019", "02/30/2019", "13/13/2019", "13/2019", "a3/11/2018", "10/11/2o17"],
) |
73,690 | 251,340 | 70 | mitmproxy/connection.py | 19 | 8 | def alpn_proto_negotiated(self) -> Optional[bytes]: # pragma: no cover
warnings.warn(
"Connection.alpn_proto_negotiated is deprecated, use Connection.alpn instead.",
DeprecationWarnin | make it black! | alpn_proto_negotiated | b3587b52b25077f68116b9852b041d33e7fc6601 | mitmproxy | connection.py | 8 | 7 | https://github.com/mitmproxy/mitmproxy.git | 1 | 24 | 0 | 19 | 42 | Python | {
"docstring": "*Deprecated:* An outdated alias for Connection.alpn.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def alpn_proto_negotiated(self) -> Optional[bytes]: # pragma: no cover
warnings.warn(
"Connection.alpn_proto_negotiated is deprecated, use Connection.alpn instead.",
DeprecationWarning,
)
return self.alpn
|
|
113,101 | 314,495 | 158 | homeassistant/components/velux/cover.py | 36 | 17 | def device_class(self) -> CoverDeviceClass:
if isinstance(self.node, Awning):
return CoverDeviceClass.AWNING
if isinstance | Adjust CoverEntity property type hints in components (#73943)
* Adjust CoverEntity property type hints in components
* Revert changes to rflink
* Revert changes to wilight | device_class | 10dc38e0ec27f7bef990ee431459342f9c3c52b4 | core | cover.py | 8 | 15 | https://github.com/home-assistant/core.git | 7 | 96 | 0 | 19 | 148 | Python | {
"docstring": "Define this cover as either awning, blind, garage, gate, shutter or window.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def device_class(self) -> CoverDeviceClass:
if isinstance(self.node, Awning):
return CoverDeviceClass.AWNING
if isinstance(self.node, Blind):
return CoverDeviceClass.BLIND
if isinstance(self.node, GarageDoor):
return CoverDeviceClass.GARAGE
if isinstance(self.node, Gate):
return CoverDeviceClass.GATE
if isinstance(self.node, RollerShutter):
return CoverDeviceClass.SHUTTER
if isinstance(self.node, Window):
return CoverDeviceClass.WINDOW
return CoverDeviceClass.WINDOW
|
|
84,774 | 284,522 | 148 | openbb_terminal/economy/investingcom_model.py | 32 | 13 | def get_yieldcurve(country) -> pd.DataFrame:
data = investpy.bonds.get_bonds | Feature/yieldcurve (#1734)
* Adds yield curves
* Adds yield curves for several countries
* Adds yield curves for several countries
* Adds yield curves for several countries
* Adds yield curves for several countries
* Adds yield curves for several countries
* ycrv plots by default
* Limits source choices and renames raw columns
* Fix test
* Fix test
* lint
Co-authored-by: Jeroen Bouma <jer.bouma@gmail.com>
Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>
Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
Co-authored-by: James Maslek <jmaslek11@gmail.com> | get_yieldcurve | 4f692f26e6f3935b7454e8b448838c8a87b98f23 | OpenBBTerminal | investingcom_model.py | 11 | 23 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 85 | 0 | 31 | 152 | Python | {
"docstring": "Get country yield curve [Source: Investing.com]\n\n Returns\n -------\n pd.DataFrame\n Country yield curve\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 12,
"vocab_size": 10
} | def get_yieldcurve(country) -> pd.DataFrame:
data = investpy.bonds.get_bonds_overview(country)
data.drop(columns=data.columns[0], axis=1, inplace=True)
data.rename(
columns={
"name": "Tenor",
"last": "Current",
"last_close": "Previous",
"high": "High",
"low": "Low",
"change": "Change",
"change_percentage": "% Change",
},
inplace=True,
)
return data
|
|
90,659 | 291,554 | 246 | tests/components/alexa/test_capabilities.py | 53 | 7 | async def test_report_humidifier_humidity_state(hass):
hass.states.async_set(
"humidifier.dry",
"on",
{
"friendly_name": "Humidifier dry",
"supported_features": 0,
"humidity": 25,
"min_humidity": 20,
"max_humidity": 90,
},
)
| Add humidifier support for Alexa (#81329) | test_report_humidifier_humidity_state | 5d4c4a1293b00e864f6ad202fbc565388d613e71 | core | test_capabilities.py | 10 | 27 | https://github.com/home-assistant/core.git | 1 | 112 | 0 | 33 | 204 | Python | {
"docstring": "Test PercentageController, PowerLevelController reports humidifier humidity correctly.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | async def test_report_humidifier_humidity_state(hass):
hass.states.async_set(
"humidifier.dry",
"on",
{
"friendly_name": "Humidifier dry",
"supported_features": 0,
"humidity": 25,
"min_humidity": 20,
"max_humidity": 90,
},
)
hass.states.async_set(
"humidifier.wet",
"on",
{
"friendly_name": "Humidifier wet",
"supported_features": 0,
"humidity": 80,
"min_humidity": 20,
"max_humidity": 90,
},
)
properties = await reported_properties(hass, "humidifier.dry")
properties.assert_equal("Alexa.RangeController", "rangeValue", 25)
properties = await reported_properties(hass, "humidifier.wet")
properties.assert_equal("Alexa.RangeController", "rangeValue", 80)
|
|
77,612 | 264,125 | 48 | netbox/extras/models/customfields.py | 16 | 7 | def serialize(self, value):
if self.type == CustomFieldTypeChoices.TYPE_OBJECT and value is not None:
return value.pk
| Initial work on #7006 | serialize | fa1e28e860c4bdb3e585a968bd248a2ac666e1f6 | netbox | customfields.py | 8 | 4 | https://github.com/netbox-community/netbox.git | 3 | 28 | 0 | 14 | 45 | Python | {
"docstring": "\n Prepare a value for storage as JSON data.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def serialize(self, value):
if self.type == CustomFieldTypeChoices.TYPE_OBJECT and value is not None:
return value.pk
return value
|
|
19,791 | 100,290 | 246 | tools/manual/detected_faces.py | 59 | 34 | def revert_to_saved(self, frame_index):
if frame_index not in self._updated_frame_indices:
logger.debug("Alignments not amended. Returning")
return
logger.verbose("Reverting alignments for frame_index %s", frame | Bugfixes
- Sort - Fix rare help-text parsing bug
- Manual - Fix issue where frame count is incorrect when een > 1 used on extract | revert_to_saved | 23d92c1f0d83ce1cdcc51480cfe37af074a981b3 | faceswap | detected_faces.py | 11 | 22 | https://github.com/deepfakes/faceswap.git | 5 | 172 | 0 | 47 | 278 | Python | {
"docstring": " Revert the frame's alignments to their saved version for the given frame index.\n\n Parameters\n ----------\n frame_index: int\n The frame that should have their faces reverted to their saved version\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 29,
"vocab_size": 22
} | def revert_to_saved(self, frame_index):
if frame_index not in self._updated_frame_indices:
logger.debug("Alignments not amended. Returning")
return
logger.verbose("Reverting alignments for frame_index %s", frame_index)
print(frame_index)
print(len(self._sorted_frame_names))
alignments = self._alignments.data[self._sorted_frame_names[frame_index]]["faces"]
faces = self._frame_faces[frame_index]
reset_grid = self._add_remove_faces(alignments, faces)
for detected_face, face in zip(faces, alignments):
detected_face.from_alignment(face, with_thumb=True)
detected_face.load_aligned(None, force=True)
_ = detected_face.aligned.average_distance # cache the distances
self._updated_frame_indices.remove(frame_index)
if not self._updated_frame_indices:
self._tk_unsaved.set(False)
if reset_grid:
self._tk_face_count_changed.set(True)
else:
self._tk_edited.set(True)
self._globals.tk_update.set(True)
|
|
45,621 | 186,787 | 224 | tools/finish_release.py | 102 | 18 | def parse_args(args):
# Use the file's docstring for the help text and don't let argparse reformat it.
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--css', type=str, required=True, help='hostname of code signing server')
group = parser.add_mutually_exclusive_group()
# We use 'store_false' and a destination related to the other type of
# artifact to cause the flag bein | Add Signed Windows Installer Workflow (#9076)
* Add Code Signing action for Windows Installer
* Clean up variable names and input
* Amend and add to documentation per PR guidelines
* Update tools/finish_release.py
Co-authored-by: Brad Warren <bmw@users.noreply.github.com>
* Update tools/finish_release.py
Amend typo
Co-authored-by: Brad Warren <bmw@users.noreply.github.com>
* Amend release script for better work flow
- SCP commands to upload and download unsigned & signed installers from CSS
* Collapse spaces
* Update tools/finish_release.py
Co-authored-by: Brad Warren <bmw@users.noreply.github.com>
* Create new windows signer function
* Update Windows Installer Script
- Update change log
- add new function for signing and document
- @TODO Streamline SSH session
* Remove Azure and Github release methods
- Methods moved to CSS
- Reduced to a ssh function that triggers the process on a CSS
* Amend Changelog and Remove Unneeded Deps
* Update tools/finish_release.py
Co-authored-by: Brad Warren <bmw@users.noreply.github.com>
* Add Version Fetch Function
- For the purpose of snap releases
- Add back package to dev extras for function
* Change path in ssh command
* Amend release script
* Amend the ssh command for CSS
* Update tools/finish_release.py
Co-authored-by: Brad Warren <bmw@users.noreply.github.com>
* Update script with proper path and subprocess call
* Update ssh command
* Correct typo in path
* Fix typo in path
* Update certbot/CHANGELOG.md
Co-authored-by: ohemorange <ebportnoy@gmail.com>
* Remove missed conflict text
Co-authored-by: Brad Warren <bmw@users.noreply.github.com>
Co-authored-by: ohemorange <ebportnoy@gmail.com> | parse_args | 6e1696ba32ef8c1162bb0cd85df5a22113952828 | certbot | finish_release.py | 10 | 10 | https://github.com/certbot/certbot.git | 1 | 90 | 0 | 71 | 160 | Python | {
"docstring": "Parse command line arguments.\n\n :param args: command line arguments with the program name removed. This is\n usually taken from sys.argv[1:].\n :type args: `list` of `str`\n\n :returns: parsed arguments\n :rtype: argparse.Namespace\n\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 30,
"vocab_size": 26
} | def parse_args(args):
# Use the file's docstring for the help text and don't let argparse reformat it.
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--css', type=str, required=True, help='hostname of code signing server')
group = parser.add_mutually_exclusive_group()
# We use 'store_false' and a destination related to the other type of
# artifact to cause the flag being set to disable publishing of the other
# artifact. This makes using the parsed arguments later on a little simpler
# and cleaner.
group.add_argument('--snaps-only', action='store_false', dest='publish_windows',
help='Skip publishing other artifacts and only publish the snaps')
group.add_argument('--windows-only', action='store_false', dest='publish_snaps',
help='Skip publishing other artifacts and only publish the Windows installer')
return parser.parse_args(args)
|
|
48,545 | 197,437 | 62 | sympy/physics/vector/fieldfunctions.py | 37 | 8 | def is_conservative(field):
# Field is conservative irrespective of frame
# Take the first frame | Some pep8 cleanup of sympy.physics.vector. | is_conservative | 9a3ffc6781bd44c47cf49e128ef154389c32876a | sympy | fieldfunctions.py | 11 | 5 | https://github.com/sympy/sympy.git | 2 | 45 | 0 | 28 | 77 | Python | {
"docstring": "\n Checks if a field is conservative.\n\n Parameters\n ==========\n\n field : Vector\n The field to check for conservative property\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy.physics.vector import is_conservative\n >>> R = ReferenceFrame('R')\n >>> is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z)\n True\n >>> is_conservative(R[2] * R.y)\n False\n\n ",
"language": "en",
"n_whitespaces": 96,
"n_words": 46,
"vocab_size": 36
} | def is_conservative(field):
# Field is conservative irrespective of frame
# Take the first frame in the result of the separate method of Vector
if field == Vector(0):
return True
frame = list(field.separate())[0]
return curl(field, frame).simplify() == Vector(0)
|
|
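A small extra check for the sympy record above, not taken from it: the gradient of any scalar field is conservative, so feeding a gradient back into is_conservative should return True.

from sympy.physics.vector import ReferenceFrame, gradient, is_conservative

R = ReferenceFrame('R')
grad_field = gradient(R[0]**2 + R[1]*R[2], R)   # 2*R[0]*R.x + R[2]*R.y + R[1]*R.z
assert is_conservative(grad_field)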
56,852 | 223,020 | 213 | python3.10.4/Lib/distutils/tests/support.py | 77 | 19 | def fixup_build_ext(cmd):
if os.name == 'nt':
cmd.debug = sys.executable.endswith('_d.exe')
elif sysconfig.get_config_var('Py_ENABLE_SHARED'):
# To further add to the shared builds fun on Unix, we can't just add
# library_dirs to the Extension() instance because that doesn't get
# plumbed through to the final compiler command.
runshared = sysconfig.get_config_var('RUNSHARED')
if runshared is None:
cmd.library_dirs = ['.']
else:
| add python 3.10.4 for windows | fixup_build_ext | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | support.py | 20 | 13 | https://github.com/XX-net/XX-Net.git | 7 | 102 | 0 | 57 | 182 | Python | {
"docstring": "Function needed to make build_ext tests pass.\n\n When Python was built with --enable-shared on Unix, -L. is not enough to\n find libpython<blah>.so, because regrtest runs in a tempdir, not in the\n source directory where the .so lives.\n\n When Python was built with in debug mode on Windows, build_ext commands\n need their debug attribute set, and it is not done automatically for\n some reason.\n\n This function handles both of these things. Example use:\n\n cmd = build_ext(dist)\n support.fixup_build_ext(cmd)\n cmd.ensure_finalized()\n\n Unlike most other Unix platforms, Mac OS X embeds absolute paths\n to shared libraries into executables, so the fixup is not needed there.\n ",
"language": "en",
"n_whitespaces": 152,
"n_words": 100,
"vocab_size": 80
} | def fixup_build_ext(cmd):
if os.name == 'nt':
cmd.debug = sys.executable.endswith('_d.exe')
elif sysconfig.get_config_var('Py_ENABLE_SHARED'):
# To further add to the shared builds fun on Unix, we can't just add
# library_dirs to the Extension() instance because that doesn't get
# plumbed through to the final compiler command.
runshared = sysconfig.get_config_var('RUNSHARED')
if runshared is None:
cmd.library_dirs = ['.']
else:
if sys.platform == 'darwin':
cmd.library_dirs = []
else:
name, equals, value = runshared.partition('=')
cmd.library_dirs = [d for d in value.split(os.pathsep) if d]
|
|
14,564 | 67,581 | 6 | erpnext/stock/doctype/batch/batch.py | 9 | 7 | def batch_uses_naming_series():
use_naming_series | style: format code with black | batch_uses_naming_series | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | batch.py | 11 | 3 | https://github.com/frappe/erpnext.git | 1 | 25 | 0 | 9 | 47 | Python | {
"docstring": "\n\tVerify if the Batch is to be named using a naming series\n\t:return: bool\n\t",
"language": "en",
"n_whitespaces": 12,
"n_words": 14,
"vocab_size": 14
} | def batch_uses_naming_series():
use_naming_series = cint(frappe.db.get_single_value("Stock Settings", "use_naming_series"))
return bool(use_naming_series)
|
|
2,296 | 12,444 | 449 | jina/parsers/orchestrate/runtimes/remote.py | 110 | 15 | def mixin_http_gateway_parser(parser=None):
gp = add_arg_group(parser, title='HTTP Gateway')
gp.add_argument(
'--title',
type=str,
help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',
)
gp.add_argument(
| refactor: remove unnecessary code (#4865)
* refactor: remove unnecessary code
* refactor: remove unnecessary code
* fix: #4866
* refactor: grpc compression arg
* style: fix overload and cli autocomplete
* fix: #4864
Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> | mixin_http_gateway_parser | 1b05b842d7a2c851b5de2150591198ad0d9987dc | jina | remote.py | 10 | 80 | https://github.com/jina-ai/jina.git | 1 | 204 | 0 | 57 | 346 | Python | {
"docstring": "Add the options to rest server\n\n :param parser: the parser\n \n If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.\n \n If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface.\n\n Any executor that has `@requests(on=...)` bind with those values will receive data requests.\n \n A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.\n \nDictionary of kwargs arguments that will be passed to Uvicorn server when starting the server\n\nMore details can be found in Uvicorn docs: https://www.uvicorn.org/settings/\n\n\n Dictionary of kwargs arguments that will be passed to the grpc server when starting the server # todo update\n \n the path to the certificate file\n \n the path to the key file\n ",
"language": "en",
"n_whitespaces": 211,
"n_words": 118,
"vocab_size": 75
} | def mixin_http_gateway_parser(parser=None):
gp = add_arg_group(parser, title='HTTP Gateway')
gp.add_argument(
'--title',
type=str,
help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',
)
gp.add_argument(
'--description',
type=str,
help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.',
)
gp.add_argument(
'--cors',
action='store_true',
default=False,
help=,
)
gp.add_argument(
'--no-debug-endpoints',
action='store_true',
default=False,
help='If set, `/status` `/post` endpoints are removed from HTTP interface. ',
)
gp.add_argument(
'--no-crud-endpoints',
action='store_true',
default=False,
help=,
)
gp.add_argument(
'--expose-endpoints',
type=str,
help=,
)
gp.add_argument(
'--uvicorn-kwargs',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help=,
)
gp.add_argument(
'--grpc-server-kwargs',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help=,
)
gp.add_argument(
'--ssl-certfile',
type=str,
help=,
dest='ssl_certfile',
)
gp.add_argument(
'--ssl-keyfile',
type=str,
help=,
dest='ssl_keyfile',
)
|
|
2,104 | 11,747 | 56 | tests/unit/helloworld/multimodal/test_executors.py | 28 | 19 | def test_image_crafter_index(encoder_doc_array, tmpdir):
create_test_img(path=str(tmpdir), file_name='1.jpg')
with Flow().add(uses=ImageCrafter) as f:
res = f.index(inputs=encoder_doc_array)
as | test: fix tests failing after new docarray patch (#4449)
* test: fix tests failing after new docarray patch
* test: fix failing tests | test_image_crafter_index | 217a11bb8dc613ed1136b8b541a68e6d53ca4fc1 | jina | test_executors.py | 12 | 8 | https://github.com/jina-ai/jina.git | 1 | 71 | 0 | 24 | 120 | Python | {
"docstring": "In this test, we input one ``DocumentArray`` with one ``Document``,\n and the `craft` method in the ``ImageCrafter`` returns chunks.\n In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`.\n So the 2 chunks should left only 1 chunk.\n And the tensor value of the ``Document`` is not empty once we finished crafting since\n we converted image uri/datauri to tensor.\n ",
"language": "en",
"n_whitespaces": 80,
"n_words": 62,
"vocab_size": 49
} | def test_image_crafter_index(encoder_doc_array, tmpdir):
create_test_img(path=str(tmpdir), file_name='1.jpg')
with Flow().add(uses=ImageCrafter) as f:
res = f.index(inputs=encoder_doc_array)
assert len(res) == 1
doc = res[0]
assert doc.mime_type == 'image/jpeg'
assert doc.tensor is not None
|
|
54,609 | 216,492 | 173 | salt/fileserver/roots.py | 58 | 13 | def find_file(path, saltenv="base", **kwargs):
actual_saltenv = saltenv
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
path = os.path.normpath(path)
fnd = {"path": "", "rel": ""}
if os.path.isabs(path):
return fnd
if saltenv not in __opts__["file_roots"]:
if "__env__" in __opts__["file_roots"]:
log.debug(
"salt environment '%s' maps to __env__ file_roots directory", saltenv
)
| add __env__ substitution inside file and pillar root paths | find_file | 52e1d0b8116c86777c85cb6c3d940e2c04a518c4 | salt | roots.py | 12 | 38 | https://github.com/saltstack/salt.git | 13 | 256 | 0 | 40 | 170 | Python | {
"docstring": "\n Search the environment for the relative path.\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 6
} | def find_file(path, saltenv="base", **kwargs):
actual_saltenv = saltenv
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
path = os.path.normpath(path)
fnd = {"path": "", "rel": ""}
if os.path.isabs(path):
return fnd
if saltenv not in __opts__["file_roots"]:
if "__env__" in __opts__["file_roots"]:
log.debug(
"salt environment '%s' maps to __env__ file_roots directory", saltenv
)
saltenv = "__env__"
else:
return fnd
|
|
75,226 | 258,357 | 20 | haystack/nodes/prompt/prompt_node.py | 6 | 7 | def get_prompt_templates(cls) -> List[PromptTemplate]:
return list(cls.prompt_templates.values())
| feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)
Co-authored-by: ZanSara <sarazanzo94@gmail.com> | get_prompt_templates | 9ebf164cfdfb320503b7161493420c1b0ec577a3 | haystack | prompt_node.py | 10 | 6 | https://github.com/deepset-ai/haystack.git | 1 | 22 | 0 | 6 | 38 | Python | {
"docstring": "\n Returns the list of supported prompt templates.\n :return: List of supported prompt templates.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 13,
"vocab_size": 9
} | def get_prompt_templates(cls) -> List[PromptTemplate]:
return list(cls.prompt_templates.values())
|
|
26,187 | 118,187 | 602 | tests/integration_tests/flows/test_company_independent.py | 81 | 21 | def test_views(self, postgres_db):
query =
for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
self.sql_via_http(
query.format(f'test_view_{char}', char),
company_id=cid,
expected_resp_type=RESPONSE_TYPE.OK
)
tables = self.get_tables_in('mindsdb', cid)
self.assert_list(
tables, {
'models',
'models_versions',
f'test_view_{char}'
}
)
for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
response = self.sql_via_http(
f"select * from mindsdb.test_view_{char}",
company_id=cid,
expected_resp_type=RESPONSE_TYPE.TABLE
)
assert len(response['data']) == 50
response = self.sql_via_http(
f"DROP VIEW mindsdb.test_view_{char}",
company_id=cid,
expected_resp_type=RESPONSE_TYPE.OK
)
tables = self.get_tables_in('mindsdb', cid)
self.assert_list(
tables, {
'models',
'models_versions'
}
)
self.sql_via_http(
f"select * from mindsdb.test_view_{char}",
co | fix tests | test_views | b96825c643cb2ce062d80868a5b7824d99bca07f | mindsdb | test_company_independent.py | 13 | 54 | https://github.com/mindsdb/mindsdb.git | 3 | 200 | 0 | 43 | 309 | Python | {
"docstring": "\n CREATE VIEW mindsdb.{}\n FROM test_integration_{} (\n select * from rentals limit 50\n )\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 13,
"vocab_size": 13
} | def test_views(self, postgres_db):
query =
for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
self.sql_via_http(
query.format(f'test_view_{char}', char),
company_id=cid,
expected_resp_type=RESPONSE_TYPE.OK
)
tables = self.get_tables_in('mindsdb', cid)
self.assert_list(
tables, {
'models',
'models_versions',
f'test_view_{char}'
}
)
for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
response = self.sql_via_http(
f"select * from mindsdb.test_view_{char}",
company_id=cid,
expected_resp_type=RESPONSE_TYPE.TABLE
)
assert len(response['data']) == 50
response = self.sql_via_http(
f"DROP VIEW mindsdb.test_view_{char}",
company_id=cid,
expected_resp_type=RESPONSE_TYPE.OK
)
tables = self.get_tables_in('mindsdb', cid)
self.assert_list(
tables, {
'models',
'models_versions'
}
)
self.sql_via_http(
f"select * from mindsdb.test_view_{char}",
company_id=cid,
expected_resp_type=RESPONSE_TYPE.ERROR
)
|
|
23,286 | 108,653 | 430 | lib/matplotlib/testing/compare.py | 155 | 30 | def convert(filename, cache):
path = Path(filename)
if not path.exists():
raise IOError(f"{path} does not exist")
if path.suffix[1:] not in converter:
import pytest
pytest.skip(f"Don't know how to convert {path.suffix} files to png")
newpath = path.parent / f"{path.stem}_{path.suffix[1:]}.png"
# Only convert the file if the destination doesn't already exist or
# is out of date.
if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime:
cache_dir = _get_cache_path() if cache else None
if cache_dir is not None:
_register_conversion_cache_cleaner_once()
hash_value = get_file_hash(path)
cached_path = cache_dir / (hash_value + newpath.suffix)
if cached_path.exists():
_log.debug("For %s: reusing cached conversion.", filename)
shutil.copyfile(cached_path, newpath)
return str(newpath)
_log.debug("For %s: converting to png.", filename)
convert = converter[path.suffix[1:]]
if path.suffix == ".svg":
contents = path.read_text()
if 'style="font:' in contents:
# for svg.fonttype = none, we explicitly patch the font search
| Support not embedding glyphs in svg mathtests. | convert | 740235060519c8330e6e733a10d8795e40e19b54 | matplotlib | compare.py | 14 | 29 | https://github.com/matplotlib/matplotlib.git | 11 | 219 | 0 | 104 | 397 | Python | {
"docstring": "\n Convert the named file to png; return the name of the created file.\n\n If *cache* is True, the result of the conversion is cached in\n `matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a\n hash of the exact contents of the input file. Old cache entries are\n automatically deleted as needed to keep the size of the cache capped to\n twice the size of all baseline images.\n ",
"language": "en",
"n_whitespaces": 91,
"n_words": 67,
"vocab_size": 46
} | def convert(filename, cache):
path = Path(filename)
if not path.exists():
raise IOError(f"{path} does not exist")
if path.suffix[1:] not in converter:
import pytest
pytest.skip(f"Don't know how to convert {path.suffix} files to png")
newpath = path.parent / f"{path.stem}_{path.suffix[1:]}.png"
# Only convert the file if the destination doesn't already exist or
# is out of date.
if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime:
cache_dir = _get_cache_path() if cache else None
if cache_dir is not None:
_register_conversion_cache_cleaner_once()
hash_value = get_file_hash(path)
cached_path = cache_dir / (hash_value + newpath.suffix)
if cached_path.exists():
_log.debug("For %s: reusing cached conversion.", filename)
shutil.copyfile(cached_path, newpath)
return str(newpath)
_log.debug("For %s: converting to png.", filename)
convert = converter[path.suffix[1:]]
if path.suffix == ".svg":
contents = path.read_text()
if 'style="font:' in contents:
# for svg.fonttype = none, we explicitly patch the font search
# path so that fonts shipped by Matplotlib are found.
convert = _svg_with_matplotlib_fonts_converter
convert(path, newpath)
if cache_dir is not None:
_log.debug("For %s: caching conversion result.", filename)
shutil.copyfile(newpath, cached_path)
return str(newpath)
|
|
69,796 | 242,234 | 985 | src/PIL/Image.py | 245 | 35 | def resize(self, size, resample=None, box=None, reducing_gap=None):
if resample is None:
type_special = ";" in self.mode
resample = Resampling.NEAREST if type_special else Resampling.BICUBIC
elif resample not in (
Resampling.NEAREST,
Resampling.BILINEAR,
Resampling.BICUBIC,
Resampling.LANCZOS,
Resampling.BOX,
Resampling.HAMMING,
):
message = f"Unknown resampling filter ({resample})."
filters = [
f"{filter[1]} ({filter[0]})"
for filter in (
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
(Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
(Resampling.BOX, "Image.Resampling.BOX"),
(Resampling.HAMMING, "Image.Resampling.HAMMING"),
)
]
raise ValueError(
message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
)
if reducing_gap is not None and reducing_gap < 1.0:
raise ValueError("reducing_gap must be 1.0 o | Added enums | resize | f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14 | Pillow | Image.py | 18 | 60 | https://github.com/python-pillow/Pillow.git | 20 | 520 | 0 | 134 | 816 | Python | {
"docstring": "\n Returns a resized copy of this image.\n\n :param size: The requested size in pixels, as a 2-tuple:\n (width, height).\n :param resample: An optional resampling filter. This can be\n one of :py:data:`PIL.Image.Resampling.NEAREST`,\n :py:data:`PIL.Image.Resampling.BOX`,\n :py:data:`PIL.Image.Resampling.BILINEAR`,\n :py:data:`PIL.Image.Resampling.HAMMING`,\n :py:data:`PIL.Image.Resampling.BICUBIC` or\n :py:data:`PIL.Image.Resampling.LANCZOS`.\n If the image has mode \"1\" or \"P\", it is always set to\n :py:data:`PIL.Image.Resampling.NEAREST`.\n If the image mode specifies a number of bits, such as \"I;16\", then the\n default filter is :py:data:`PIL.Image.Resampling.NEAREST`.\n Otherwise, the default filter is\n :py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`.\n :param box: An optional 4-tuple of floats providing\n the source image region to be scaled.\n The values must be within (0, 0, width, height) rectangle.\n If omitted or None, the entire source is used.\n :param reducing_gap: Apply optimization by resizing the image\n in two steps. First, reducing the image by integer times\n using :py:meth:`~PIL.Image.Image.reduce`.\n Second, resizing using regular resampling. The last step\n changes size no less than by ``reducing_gap`` times.\n ``reducing_gap`` may be None (no first step is performed)\n or should be greater than 1.0. The bigger ``reducing_gap``,\n the closer the result to the fair resampling.\n The smaller ``reducing_gap``, the faster resizing.\n With ``reducing_gap`` greater or equal to 3.0, the result is\n indistinguishable from fair resampling in most cases.\n The default value is None (no optimization).\n :returns: An :py:class:`~PIL.Image.Image` object.\n ",
"language": "en",
"n_whitespaces": 528,
"n_words": 207,
"vocab_size": 130
} | def resize(self, size, resample=None, box=None, reducing_gap=None):
if resample is None:
type_special = ";" in self.mode
resample = Resampling.NEAREST if type_special else Resampling.BICUBIC
elif resample not in (
Resampling.NEAREST,
Resampling.BILINEAR,
Resampling.BICUBIC,
Resampling.LANCZOS,
Resampling.BOX,
Resampling.HAMMING,
):
message = f"Unknown resampling filter ({resample})."
filters = [
f"{filter[1]} ({filter[0]})"
for filter in (
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
(Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
(Resampling.BOX, "Image.Resampling.BOX"),
(Resampling.HAMMING, "Image.Resampling.HAMMING"),
)
]
raise ValueError(
message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
)
if reducing_gap is not None and reducing_gap < 1.0:
raise ValueError("reducing_gap must be 1.0 or greater")
size = tuple(size)
if box is None:
box = (0, 0) + self.size
else:
box = tuple(box)
if self.size == size and box == (0, 0) + self.size:
return self.copy()
if self.mode in ("1", "P"):
resample = Resampling.NEAREST
if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
im = im.resize(size, resample, box)
return im.convert(self.mode)
self.load()
if reducing_gap is not None and resample != Resampling.NEAREST:
factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
if factor_x > 1 or factor_y > 1:
reduce_box = self._get_safe_box(size, resample, box)
factor = (factor_x, factor_y)
if callable(self.reduce):
self = self.reduce(factor, box=reduce_box)
else:
self = Image.reduce(self, factor, box=reduce_box)
box = (
(box[0] - reduce_box[0]) / factor_x,
(box[1] - reduce_box[1]) / factor_y,
(box[2] - reduce_box[0]) / factor_x,
(box[3] - reduce_box[1]) / factor_y,
)
return self._new(self.im.resize(size, resample, box))
|
|
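A hedged usage sketch for the resize API in the Pillow record above; the image and target size are made up, and it assumes a Pillow version that ships the Image.Resampling enums this commit introduces.

from PIL import Image

with Image.new("RGB", (640, 480)) as im:
    # explicit filter plus reducing_gap, which trades accuracy for speed on large downscales
    thumb = im.resize((160, 120), resample=Image.Resampling.BILINEAR, reducing_gap=2.0)
    assert thumb.size == (160, 120)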
21,491 | 102,176 | 49 | tools/test/test_gen_backend_stubs.py | 24 | 6 | def test_unrecognized_key(self) -> None:
yaml_str =
output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
self.assertExpectedInline(output_error, ) # noqa: B950
# if use_out_as_primary is provided, it must | Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950
This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.
Test Plan: Imported from OSS
Reviewed By: albanD
Differential Revision: D33113545
Pulled By: bdhirsh
fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288 | test_unrecognized_key | bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d | pytorch | test_gen_backend_stubs.py | 8 | 9 | https://github.com/pytorch/pytorch.git | 1 | 26 | 0 | 22 | 49 | Python | {
"docstring": "\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- abs\ninvalid_key: invalid_val contains unexpected keys: invalid_key. Only the following keys are supported: backend, cpp_namespace, extra_headers, supported, autograd, full_codegen",
"language": "en",
"n_whitespaces": 20,
"n_words": 26,
"vocab_size": 25
} | def test_unrecognized_key(self) -> None:
yaml_str =
output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
self.assertExpectedInline(output_error, ) # noqa: B950
# if use_out_as_primary is provided, it must be a bool |
|
38,648 | 160,521 | 31 | numpy/f2py/capi_maps.py | 19 | 4 | def f2cexpr(expr):
| ENH: Support character string arrays
TST: added test for issue #18684
ENH: f2py opens files with correct encoding, fixes #635
TST: added test for issue #6308
TST: added test for issue #4519
TST: added test for issue #3425
ENH: Implement user-defined hooks support for post-processing f2py data structure. Implement character BC hook.
ENH: Add support for detecting utf-16 and utf-32 encodings. | f2cexpr | d4e11c7a2eb64861275facb076d47ccd135fa28c | numpy | capi_maps.py | 9 | 3 | https://github.com/numpy/numpy.git | 1 | 21 | 0 | 18 | 38 | Python | {
"docstring": "Rewrite Fortran expression as f2py supported C expression.\n\n Due to the lack of a proper expression parser in f2py, this\n function uses a heuristic approach that assumes that Fortran\n arithmetic expressions are valid C arithmetic expressions when\n mapping Fortran function calls to the corresponding C function/CPP\n macros calls.\n\n ",
"language": "en",
"n_whitespaces": 66,
"n_words": 48,
"vocab_size": 36
} | def f2cexpr(expr):
# TODO: support Fortran `len` function with optional kind parameter
expr = re.sub(r'\blen\b', 'f2py_slen', expr)
return expr
|
|
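A standalone illustration of the word-boundary substitution in the numpy record above; the helper name here is hypothetical, only the regex behaviour is demonstrated.

import re

def rewrite_len(expr):
    return re.sub(r'\blen\b', 'f2py_slen', expr)

assert rewrite_len("len(s) + 1") == "f2py_slen(s) + 1"
assert rewrite_len("strlen(s)") == "strlen(s)"   # not a whole-word match, left untouched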
21,327 | 101,950 | 328 | lib/gui/wrapper.py | 108 | 26 | def build_args(self, category, command=None, generate=False):
logger.debug("Build cli arguments: (category: %s, command: %s, generate: %s)",
category, command, generate)
command = self.command if not command else command
script = f"{category}.py"
pathexecscript = os.path.join(self.pathscript, script)
args = [sys.executable] if generate else [sys.executable, "-u"]
args.extend([pathexecscript, command]) | Typing - lib.gui.display_command | build_args | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | wrapper.py | 17 | 21 | https://github.com/deepfakes/faceswap.git | 12 | 183 | 0 | 70 | 309 | Python | {
"docstring": " Build the faceswap command and arguments list.\n\n If training, pass the model folder and name to the training\n :class:`lib.gui.analysis.Session` for the GUI.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 18
} | def build_args(self, category, command=None, generate=False):
logger.debug("Build cli arguments: (category: %s, command: %s, generate: %s)",
category, command, generate)
command = self.command if not command else command
script = f"{category}.py"
pathexecscript = os.path.join(self.pathscript, script)
args = [sys.executable] if generate else [sys.executable, "-u"]
args.extend([pathexecscript, command])
cli_opts = get_config().cli_opts
for cliopt in cli_opts.gen_cli_arguments(command):
args.extend(cliopt)
if command == "train" and not generate:
self._get_training_session_info(cliopt)
if not generate:
args.append("-gui") # Indicate to Faceswap that we are running the GUI
if generate:
# Delimit args with spaces
args = [f'"{arg}"' if " " in arg and not arg.startswith(("[", "("))
and not arg.endswith(("]", ")")) else arg
for arg in args]
logger.debug("Built cli arguments: (%s)", args)
return args
|
|
29,251 | 130,365 | 114 | python/ray/autoscaler/_private/aliyun/utils.py | 28 | 11 | def describe_v_switches(self, vpc_id=None):
request = DescribeVSwitchesRequest()
if vpc_id | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | describe_v_switches | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | utils.py | 12 | 10 | https://github.com/ray-project/ray.git | 3 | 63 | 0 | 21 | 110 | Python | {
"docstring": "Queries one or more VSwitches.\n\n :param vpc_id: The ID of the VPC to which the VSwitch belongs.\n :return: VSwitch list.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 20,
"vocab_size": 18
} | def describe_v_switches(self, vpc_id=None):
request = DescribeVSwitchesRequest()
if vpc_id is not None:
request.set_VpcId(vpc_id)
response = self._send_request(request)
if response is not None:
return response.get("VSwitches").get("VSwitch")
else:
logging.error("Describe VSwitches Failed.")
return None
|
|
22,482 | 106,863 | 611 | py/visdom/__init__.py | 183 | 25 | def line(self, Y, X=None, win=None, env=None, opts=None, update=None, name=None):
if update is not None:
if update == "re | apply black py to all python files | line | 5b8b7f267cfaf76a2a39a727ef31a62b3909a093 | visdom | __init__.py | 16 | 40 | https://github.com/fossasia/visdom.git | 12 | 389 | 0 | 101 | 601 | Python | {
"docstring": "\n This function draws a line plot. It takes in an `N` or `NxM` tensor\n `Y` that specifies the values of the `M` lines (that connect `N` points)\n to plot. It also takes an optional `X` tensor that specifies the\n corresponding x-axis values; `X` can be an `N` tensor (in which case all\n lines will share the same x-axis values) or have the same size as `Y`.\n\n `update` can be used to efficiently update the data of an existing line.\n Use 'append' to append data, 'replace' to use new data, and 'remove' to\n delete the trace that is specified in `name`. If updating a\n single trace, use `name` to specify the name of the trace to be updated.\n Update data that is all NaN is ignored (can be used for masking update).\n Using `update='append'` will create a plot if it doesn't exist\n and append to the existing plot otherwise.\n\n The following `opts` are supported:\n\n - `opts.fillarea` : fill area below line (`boolean`)\n - `opts.markers` : show markers (`boolean`; default = `false`)\n - `opts.markersymbol`: marker symbol (`string`; default = `'dot'`)\n - `opts.markersize` : marker size (`number`; default = `'10'`)\n - `opts.linecolor` : line colors (`np.array`; default = None)\n - `opts.dash` : line dash type (`np.array`; default = None)\n - `opts.legend` : `list` or `tuple` containing legend names\n\n If `update` is specified, the figure will be updated without\n creating a new plot -- this can be used for efficient updating.\n ",
"language": "en",
"n_whitespaces": 421,
"n_words": 237,
"vocab_size": 140
} | def line(self, Y, X=None, win=None, env=None, opts=None, update=None, name=None):
if update is not None:
if update == "remove":
return self.scatter(
X=None,
Y=None,
opts=opts,
win=win,
env=env,
update=update,
name=name,
)
else:
assert X is not None, "must specify x-values for line update"
assert Y.ndim == 1 or Y.ndim == 2, "Y should have 1 or 2 dim"
assert Y.shape[-1] > 0, "must plot one line at least"
if X is not None:
assert X.ndim == 1 or X.ndim == 2, "X should have 1 or 2 dim"
else:
X = np.linspace(0, 1, Y.shape[0])
if Y.ndim == 2 and X.ndim == 1:
X = np.tile(X, (Y.shape[1], 1)).transpose()
assert X.shape == Y.shape, "X and Y should be the same shape"
opts = {} if opts is None else opts
opts["markers"] = opts.get("markers", False)
opts["fillarea"] = opts.get("fillarea", False)
opts["mode"] = "lines+markers" if opts.get("markers") else "lines"
_title2str(opts)
_assert_opts(opts)
if Y.ndim == 1:
linedata = np.column_stack((X, Y))
else:
linedata = np.column_stack((X.ravel(order="F"), Y.ravel(order="F")))
labels = None
if Y.ndim == 2:
labels = np.arange(1, Y.shape[1] + 1)
labels = np.tile(labels, (Y.shape[0], 1)).ravel(order="F")
return self.scatter(
X=linedata, Y=labels, opts=opts, win=win, env=env, update=update, name=name
)
|
|
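A hedged usage sketch for the line() call documented in the visdom record above; it assumes a running Visdom server and made-up data, and exercises the update='append' path described in the docstring.

import numpy as np
from visdom import Visdom

viz = Visdom()
win = viz.line(Y=np.array([1.0, 3.0, 2.0]), X=np.array([0, 1, 2]),
               opts={"title": "demo", "markers": True})
# append one more point to the same window, per the `update` semantics above
viz.line(Y=np.array([4.0]), X=np.array([3]), win=win, update="append")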
8,910 | 46,519 | 87 | tests/providers/databricks/operators/test_databricks_repos.py | 20 | 18 | def test_delete_with_id(self, db_mock_class):
op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id="123")
db_mock = db_mock_class.return_value
db_mock.delete_repo.re | More operators for Databricks Repos (#22422) | test_delete_with_id | 352d7f72dd1e21f1522d69b71917142430548d66 | airflow | test_databricks_repos.py | 10 | 9 | https://github.com/apache/airflow.git | 1 | 64 | 0 | 18 | 105 | Python | {
"docstring": "\n Test the execute function using Repo ID.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def test_delete_with_id(self, db_mock_class):
op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id="123")
db_mock = db_mock_class.return_value
db_mock.delete_repo.return_value = None
op.execute(None)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay
)
db_mock.delete_repo.assert_called_once_with('123')
|
|
18,096 | 86,306 | 182 | tests/sentry/models/test_groupsnooze.py | 30 | 21 | def test_user_rate_reached(self):
for i in range(5):
group = self.store_event(
data={
"fingerprint": ["group1"],
"timestamp": iso_format(before_now(minutes=5 + i)),
"tags": {"sentry:user": i},
| ref(perf issues): Enable ignore in a time period (#39120)
Enable ignoring a performance issue in a time period e.g. ignore this
until it happens 10x / hr or ignore until 10 users experience it in an
hour. | test_user_rate_reached | d745edbd591063f2c3241cd1960c361834058823 | sentry | test_groupsnooze.py | 20 | 12 | https://github.com/getsentry/sentry.git | 2 | 94 | 0 | 29 | 153 | Python | {
"docstring": "Test that ignoring an error issue until it's hit by 10 users in an hour works.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 15
} | def test_user_rate_reached(self):
for i in range(5):
group = self.store_event(
data={
"fingerprint": ["group1"],
"timestamp": iso_format(before_now(minutes=5 + i)),
"tags": {"sentry:user": i},
},
project_id=self.project.id,
).group
snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60)
assert not snooze.is_valid(test_rates=True)
|
|
56,631 | 222,552 | 860 | python3.10.4/Lib/distutils/archive_util.py | 203 | 34 | def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
zip_filename = base_name + ".zip"
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
# If zipfile module is not available, try spawning an external
# 'zip' command.
if zipfile is None:
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
try:
spawn(["zip", zipoptions, zip_filename, base_dir],
dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise DistutilsExecError(("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename)
else:
log.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
try:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
except RuntimeError:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_STORED)
| add python 3.10.4 for windows | make_zipfile | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | archive_util.py | 22 | 41 | https://github.com/XX-net/XX-Net.git | 11 | 290 | 0 | 134 | 638 | Python | {
"docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises DistutilsExecError. Returns the name of the output zip\n file.\n ",
"language": "en",
"n_whitespaces": 78,
"n_words": 57,
"vocab_size": 47
} | def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
zip_filename = base_name + ".zip"
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
# If zipfile module is not available, try spawning an external
# 'zip' command.
if zipfile is None:
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
try:
spawn(["zip", zipoptions, zip_filename, base_dir],
dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise DistutilsExecError(("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename)
else:
log.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
try:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
except RuntimeError:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_STORED)
with zip:
if base_dir != os.curdir:
path = os.path.normpath(os.path.join(base_dir, ''))
zip.write(path, path)
log.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in dirnames:
path = os.path.normpath(os.path.join(dirpath, name, ''))
zip.write(path, path)
log.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
log.info("adding '%s'", path)
return zip_filename
ARCHIVE_FORMATS = {
'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (make_zipfile, [],"ZIP file")
}
|
|
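A hypothetical invocation of the archiver in the distutils record above; the paths are made up (base_dir must already exist) and the import path matches the 3.10-era stdlib file shown here.

from distutils.archive_util import make_zipfile

archive = make_zipfile("release/myproject-1.0", "build/lib", verbose=1)
# returns "release/myproject-1.0.zip", containing everything under build/lib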
71,413 | 246,929 | 224 | tests/rest/client/test_upgrade_room.py | 56 | 16 | def test_power_levels_user_default(self):
# The other | Replace assertEquals and friends with non-deprecated versions. (#12092) | test_power_levels_user_default | 02d708568b476f2f7716000b35c0adfa4cbd31b3 | synapse | test_upgrade_room.py | 10 | 17 | https://github.com/matrix-org/synapse.git | 1 | 104 | 0 | 39 | 165 | Python | {
"docstring": "\n Another user can upgrade the room if the default power level for users is increased.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 15,
"vocab_size": 14
} | def test_power_levels_user_default(self):
# The other user doesn't have the proper power level.
channel = self._upgrade_room(self.other_token)
self.assertEqual(403, channel.code, channel.result)
# Increase the power levels so that this user can upgrade.
power_levels = self.helper.get_state(
self.room_id,
"m.room.power_levels",
tok=self.creator_token,
)
power_levels["users_default"] = 100
self.helper.send_state(
self.room_id,
"m.room.power_levels",
body=power_levels,
tok=self.creator_token,
)
# The upgrade should succeed!
channel = self._upgrade_room(self.other_token)
self.assertEqual(200, channel.code, channel.result)
|
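The test above works by editing the room's m.room.power_levels state between two upgrade attempts. A rough sketch of the event content being manipulated (user IDs and values are illustrative, not taken from the record):

# Shape of a typical m.room.power_levels content dict (illustrative values).
power_levels = {
    "users": {"@creator:test": 100},  # explicit per-user levels
    "users_default": 0,               # level every other member gets
    "events_default": 0,
}
# Raising the default is what lets an otherwise unprivileged member pass
# the power-level check that guards the room upgrade.
power_levels["users_default"] = 100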
|
14,461 | 67,267 | 8 | erpnext/regional/report/uae_vat_201/uae_vat_201.py | 22 | 11 | def get_exempt_total(filters):
conditions = get_conditions(filters)
try:
return (
frappe.db.sql(
.format(
where_conditions=conditions
),
filters,
)[0][0]
| style: format code with black | get_exempt_total | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | uae_vat_201.py | 16 | 24 | https://github.com/frappe/erpnext.git | 3 | 52 | 0 | 20 | 84 | Python | {
"docstring": "Returns the sum of each Sales Invoice Item Amount which is Vat Exempt.\n\t\t\tselect\n\t\t\t\tsum(i.base_amount) as total\n\t\t\tfrom\n\t\t\t\t`tabSales Invoice Item` i inner join `tabSales Invoice` s\n\t\t\ton\n\t\t\t\ti.parent = s.name\n\t\t\twhere\n\t\t\t\ts.docstatus = 1 and i.is_exempt = 1\n\t\t\t\t{where_conditions} ;\n\t\t\t",
"language": "en",
"n_whitespaces": 32,
"n_words": 41,
"vocab_size": 36
} | def get_exempt_total(filters):
conditions = get_conditions(filters)
try:
return (
			frappe.db.sql(
				"""
			select
				sum(i.base_amount) as total
			from
				`tabSales Invoice Item` i inner join `tabSales Invoice` s
			on
				i.parent = s.name
			where
				s.docstatus = 1 and i.is_exempt = 1
				{where_conditions} ;
			""".format(
where_conditions=conditions
),
filters,
)[0][0]
or 0
)
except (IndexError, TypeError):
return 0
|
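With the query string restored from the record's documentation field, a hedged usage sketch of the helper — the filter keys below are placeholders, since the real ones come from get_conditions(), which is outside this record:

# Hypothetical report filters; the real keys are whatever get_conditions() expects.
filters = {"company": "Example LLC", "from_date": "2022-01-01", "to_date": "2022-03-31"}
exempt_total = get_exempt_total(filters)  # falls back to 0 when nothing matches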
|
52,673 | 209,428 | 176 | scapy/layers/kerberos.py | 86 | 18 | def encrypt(self, key, text, confounder=None, key_usage_number=None):
if key_usage_number is None:
key_usage_number = self.get_usage()[0]
self.cipher = key.encrypt(key_usage_number, text, confounder=confounder)
EncryptionKey = lambda **kwargs: ASN1F_SEQUENCE(
Int32("keytype", 0, explicit_tag=0x0),
ASN1F_STRING("keyvalue", "", explicit_tag=0x1),
**kwargs
)
Kerberos | Kerberos update (#3688)
* Kerberos over TCP
* Kerberos: add FAST & PKCA
* Many user-friendly improvements
* RFC3961 crypto
* Summary, Sessions, Examples, Bugs
* More tests, _n_fold edge case
* Ignore potatoe (kerberos tests) from codespell | encrypt | b26f2283379d3bba48d575c1fffd1c3cdeaf64c2 | scapy | kerberos.py | 11 | 4 | https://github.com/secdev/scapy.git | 2 | 49 | 0 | 76 | 274 | Python | {
"docstring": "\n Encrypt text and set it into cipher.\n\n :param key: the key to use for encryption\n :param text: the bytes value to encode\n :param confounder: (optional) specify the confounder bytes. Random otherwise\n :param key_usage_number: (optional) specify the key usage number.\n Guessed otherwise\n ",
"language": "en",
"n_whitespaces": 116,
"n_words": 41,
"vocab_size": 30
} | def encrypt(self, key, text, confounder=None, key_usage_number=None):
if key_usage_number is None:
key_usage_number = self.get_usage()[0]
self.cipher = key.encrypt(key_usage_number, text, confounder=confounder)
EncryptionKey = lambda **kwargs: ASN1F_SEQUENCE(
Int32("keytype", 0, explicit_tag=0x0),
ASN1F_STRING("keyvalue", "", explicit_tag=0x1),
**kwargs
)
KerberosFlags = ASN1F_FLAGS
_PADATA_TYPES = {
1: "PA-TGS-REQ",
2: "PA-ENC-TIMESTAMP",
3: "PA-PW-SALT",
11: "PA-ETYPE-INFO",
14: "PA-PK-AS-REQ-OLD",
15: "PA-PK-AS-REP-OLD",
16: "PA-PK-AS-REQ",
17: "PA-PK-AS-REP",
19: "PA-ETYPE-INFO2",
20: "PA-SVR-REFERRAL-INFO",
128: "PA-PAC-REQUEST",
133: "PA-FX-COOKIE",
134: "PA-AUTHENTICATION-SET",
135: "PA-AUTH-SET-SELECTED",
136: "PA-FX-FAST",
137: "PA-FX-ERROR",
165: "PA-SUPPORTED-ENCTYPES",
167: "PA-PAC-OPTIONS",
}
_PADATA_CLASSES = {
# Filled elsewhere in this file
}
# RFC4120
|
|
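The method leaves all cryptography to the key object and only resolves a default usage number before storing the ciphertext. A self-contained toy sketch of that delegation (the classes below are illustrative stand-ins, not scapy's RFC 3961 implementation):

class ToyKey:
    # Stand-in for an RFC 3961 key: the "encryption" is a trivial XOR, just
    # enough to show how the usage number and confounder are threaded through.
    def encrypt(self, key_usage_number, text, confounder=None):
        data = (confounder or b"") + text
        return bytes(b ^ (key_usage_number & 0xFF) for b in data)

class ToyEncryptedData:
    def get_usage(self):
        return (1,)  # first element: the key-usage number for this message type (illustrative)

    def encrypt(self, key, text, confounder=None, key_usage_number=None):
        if key_usage_number is None:
            key_usage_number = self.get_usage()[0]
        self.cipher = key.encrypt(key_usage_number, text, confounder=confounder)

msg = ToyEncryptedData()
msg.encrypt(ToyKey(), b"secret")
print(msg.cipher)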
29,238 | 130,352 | 114 | python/ray/autoscaler/_private/aliyun/utils.py | 29 | 15 | def create_v_switch(self, vpc_id, zone_id, cidr_block):
request = CreateVSwitchRequest()
request.set_ZoneId(zone_ | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | create_v_switch | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | utils.py | 11 | 11 | https://github.com/ray-project/ray.git | 2 | 68 | 0 | 26 | 114 | Python | {
"docstring": "Create vSwitches to divide the VPC into one or more subnets\n\n :param vpc_id: The ID of the VPC to which the VSwitch belongs.\n :param zone_id: The ID of the zone to which\n the target VSwitch belongs.\n :param cidr_block: The CIDR block of the VSwitch.\n :return:\n ",
"language": "en",
"n_whitespaces": 103,
"n_words": 45,
"vocab_size": 27
} | def create_v_switch(self, vpc_id, zone_id, cidr_block):
request = CreateVSwitchRequest()
request.set_ZoneId(zone_id)
request.set_VpcId(vpc_id)
request.set_CidrBlock(cidr_block)
response = self._send_request(request)
if response is not None:
return response.get("VSwitchId")
else:
logging.error("create_v_switch vpc_id %s failed.", vpc_id)
return None
|
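A hedged usage sketch of the helper above; the IDs and CIDR are placeholders, and client stands in for whatever Aliyun wrapper instance exposes create_v_switch:

# Illustrative call; IDs and CIDR are placeholders and `client` is an assumed
# instance of the surrounding Aliyun helper class.
v_switch_id = client.create_v_switch(
    vpc_id="vpc-xxxxxxxx",
    zone_id="cn-hangzhou-b",
    cidr_block="10.0.0.0/24",  # must fall inside the VPC's own CIDR block
)
if v_switch_id is None:
    raise RuntimeError("vSwitch creation failed, see the error log")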
|
3,579 | 20,835 | 24 | pipenv/patched/notpip/_vendor/rich/style.py | 10 | 5 | def transparent_background(self) -> bool:
return self.bgcolor is None or self.bgcolor.is_default
| check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | transparent_background | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | style.py | 8 | 3 | https://github.com/pypa/pipenv.git | 2 | 20 | 0 | 10 | 34 | Python | {
"docstring": "Check if the style specified a transparent background.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def transparent_background(self) -> bool:
return self.bgcolor is None or self.bgcolor.is_default
|
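A quick usage sketch of the property, assuming the upstream rich package's Style constructor (which accepts a bgcolor keyword):

from rich.style import Style

assert Style().transparent_background                    # no background colour set
assert not Style(bgcolor="red").transparent_background
assert Style(bgcolor="default").transparent_background   # "default" counts as transparent too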
|
4,448 | 22,757 | 118 | primelib/primelib.py | 61 | 9 | def isPerfectNumber(number):
# precondition
assert isinstance(number, int) and (
number > 1
), "'number' must been an int and >= 1"
divisors = getDivisors(number)
# precondition
ass | refactor: clean code
Signed-off-by: slowy07 <slowy.arfy@gmail.com> | isPerfectNumber | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | Python | primelib.py | 13 | 11 | https://github.com/geekcomputers/Python.git | 4 | 73 | 0 | 47 | 120 | Python | {
"docstring": "\n input: positive integer 'number' > 1\n returns true if 'number' is a perfect number otherwise false.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 15
} | def isPerfectNumber(number):
# precondition
assert isinstance(number, int) and (
number > 1
), "'number' must been an int and >= 1"
divisors = getDivisors(number)
# precondition
assert (
isinstance(divisors, list)
and (divisors[0] == 1)
and (divisors[len(divisors) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1]) == number
# ------------------------------------------------------------
|
|
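A worked example of the perfect-number check: 28 qualifies because its proper divisors sum back to it, while 12 does not:

# 28 is perfect: its proper divisors 1, 2, 4, 7, 14 sum back to 28.
assert isPerfectNumber(28)
# 12 is not: 1 + 2 + 3 + 4 + 6 == 16 != 12.
assert not isPerfectNumber(12)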
@_api.deprecated(
'3.6',
pending=True,
alternative="``matplotlib.colormaps.unregister_cmap(name)``"
) | 23,578 | 109,426 | 108 | lib/matplotlib/cm.py | 56 | 17 | def _get_cmap(name=None, lut=None):
if name is None:
name = mpl.rcParams['image.cmap']
if isinstance(name, colors.Colormap):
retu | API: Add pending deprecation to mpl.cm top level functions
- matplotlib.cm.get_cmap
- matplotlib.cm.register_cmap
- matplotlib.cm.unregister_cmap
- matplotlib.pyplot.register_cmap
in preference for working with the ColormapRegistry on the top level module.
Co-authored-by: Greg Lucas <greg.m.lucas@gmail.com> | _get_cmap | bc4b0295161db92fe7232eb46ddb97eba287287d | matplotlib | cm.py | 11 | 10 | https://github.com/matplotlib/matplotlib.git | 4 | 72 | 1 | 43 | 175 | Python | {
"docstring": "\n Get a colormap instance, defaulting to rc values if *name* is None.\n\n Colormaps added with :func:`register_cmap` take precedence over\n built-in colormaps.\n\n Parameters\n ----------\n name : `matplotlib.colors.Colormap` or str or None, default: None\n If a `.Colormap` instance, it will be returned. Otherwise, the name of\n a colormap known to Matplotlib, which will be resampled by *lut*. The\n default, None, means :rc:`image.cmap`.\n lut : int or None, default: None\n If *name* is not already a Colormap instance and *lut* is not None, the\n colormap will be resampled to have *lut* entries in the lookup table.\n\n Returns\n -------\n Colormap\n ",
"language": "en",
"n_whitespaces": 165,
"n_words": 96,
"vocab_size": 65
} | def _get_cmap(name=None, lut=None):
if name is None:
name = mpl.rcParams['image.cmap']
if isinstance(name, colors.Colormap):
return name
_api.check_in_list(sorted(_colormaps), name=name)
if lut is None:
return _colormaps[name]
else:
return _colormaps[name].resampled(lut)
# do it in two steps like this so we can have an un-deprecated version in
# pyplot.
get_cmap = _api.deprecated(
'3.6', pending=True, alternative="``matplotlib.colormaps[name]``"
)(_get_cmap)
@_api.deprecated(
'3.6',
pending=True,
alternative="``matplotlib.colormaps.unregister_cmap(name)``"
) |
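The deprecation points callers at the colormap registry instead; a short sketch of the replacement spellings, assuming Matplotlib 3.6+ where matplotlib.colormaps is available:

import matplotlib

cmap = matplotlib.colormaps["viridis"]                    # preferred registry lookup
cmap16 = matplotlib.colormaps["viridis"].resampled(16)    # replaces get_cmap("viridis", lut=16)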
56,438 | 221,578 | 127 | python3.10.4/Lib/concurrent/futures/_base.py | 25 | 10 | def cancel(self):
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = | add python 3.10.4 for windows | cancel | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _base.py | 10 | 10 | https://github.com/XX-net/XX-Net.git | 3 | 56 | 0 | 18 | 93 | Python | {
"docstring": "Cancel the future if possible.\n\n Returns True if the future was cancelled, False otherwise. A future\n cannot be cancelled if it is running or has already completed.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 27,
"vocab_size": 22
} | def cancel(self):
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
|
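Cancellation only succeeds while a future is still pending, as the state check above shows. A small sketch that exercises both outcomes (the sleep lengths are only there to make the outcome deterministic enough for illustration):

import time
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=1) as pool:
    running = pool.submit(time.sleep, 1)   # the single worker picks this up
    pending = pool.submit(time.sleep, 1)   # queued behind the first task
    time.sleep(0.1)                        # let the worker actually start the first task
    print(running.cancel())  # False - already RUNNING
    print(pending.cancel())  # True  - still PENDING, becomes CANCELLED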
|
5,990 | 32,823 | 100 | src/transformers/utils/hub.py | 46 | 20 | def get_hub_metadata(url, token=None):
if token is None:
token = HfFolder.get_token()
| Use new huggingface_hub tools for download models (#18438)
* Draft new cached_file
* Initial draft for config and model
* Small fixes
* Fix first batch of tests
* Look in cache when internet is down
* Fix last tests
* Bad black, not fixing all quality errors
* Make diff less
* Implement change for TF and Flax models
* Add tokenizer and feature extractor
* For compatibility with main
* Add utils to move the cache and auto-do it at first use.
* Quality
* Deal with empty commit shas
* Deal with empty etag
* Address review comments | get_hub_metadata | 5cd40323684c183c30b34758aea1e877996a7ac9 | transformers | hub.py | 11 | 14 | https://github.com/huggingface/transformers.git | 4 | 119 | 0 | 33 | 200 | Python | {
"docstring": "\n Returns the commit hash and associated etag for a given url.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 11
} | def get_hub_metadata(url, token=None):
if token is None:
token = HfFolder.get_token()
headers = {"user-agent": http_user_agent()}
headers["authorization"] = f"Bearer {token}"
r = huggingface_hub.file_download._request_with_retry(
method="HEAD", url=url, headers=headers, allow_redirects=False
)
huggingface_hub.file_download._raise_for_status(r)
commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT)
etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get("ETag")
if etag is not None:
etag = huggingface_hub.file_download._normalize_etag(etag)
return etag, commit_hash
|
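A hedged usage sketch of the helper; the URL below is a typical Hub resolve URL used purely as a placeholder, and the token falls back to the local huggingface-cli login cache as in the code above:

# Placeholder Hub resolve URL; any hf.co .../resolve/<revision>/<file> URL works the same way.
url = "https://huggingface.co/bert-base-uncased/resolve/main/config.json"
etag, commit_hash = get_hub_metadata(url)
# etag identifies the exact blob, commit_hash pins the repository revision it came from.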
|
70,416 | 244,523 | 35 | mmdet/models/dense_heads/anchor_free_head.py | 10 | 6 | def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False):
return self.aug_test_bboxes(
aug_batch_feats, aug_batch_img_metas, rescal | Refactor interface of base dense free head and fcos head | aug_test | 015f8a9bafe808fbe3db673d629f126a804a9207 | mmdetection | anchor_free_head.py | 8 | 3 | https://github.com/open-mmlab/mmdetection.git | 1 | 27 | 0 | 8 | 40 | Python | {
"docstring": "Test function with test time augmentation.\n\n Args:\n aug_batch_feats (list[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains features for all images in the batch.\n aug_batch_img_metas (list[list[dict]]): the outer list indicates\n test-time augs (multiscale, flip, etc.) and the inner list\n indicates images in a batch. each dict has image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[ndarray]: bbox results of each class\n ",
"language": "en",
"n_whitespaces": 215,
"n_words": 75,
"vocab_size": 56
} | def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False):
return self.aug_test_bboxes(
aug_batch_feats, aug_batch_img_metas, rescale=rescale)
|
|
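The docstring's nesting convention (outer list per augmentation, inner list per image in the batch) is easiest to see on a concrete value; the meta keys below are typical mmdetection fields, not taken from this record:

# Two test-time augmentations (original + horizontal flip) of a one-image batch.
aug_batch_img_metas = [
    [{"img_shape": (800, 1333, 3), "scale_factor": 1.0, "flip": False}],
    [{"img_shape": (800, 1333, 3), "scale_factor": 1.0, "flip": True}],
]
# aug_batch_feats follows the same outer structure, one feature list per augmentation.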
29,328 | 130,602 | 128 | python/ray/data/impl/block_list.py | 39 | 18 | def ensure_schema_for_first_block(self) -> Optional[Union["pyarrow.Schema", type]]:
get_schema = cached_remote_fn(_get_schema)
try:
block = next(self.iter_blocks())
except (StopIteration, ValueError):
# Dataset is empty (no blocks) or was manually | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | ensure_schema_for_first_block | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | block_list.py | 12 | 13 | https://github.com/ray-project/ray.git | 2 | 68 | 0 | 32 | 113 | Python | {
"docstring": "Ensure that the schema is set for the first block.\n\n Returns None if the block list is empty.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 18,
"vocab_size": 15
} | def ensure_schema_for_first_block(self) -> Optional[Union["pyarrow.Schema", type]]:
get_schema = cached_remote_fn(_get_schema)
try:
block = next(self.iter_blocks())
except (StopIteration, ValueError):
# Dataset is empty (no blocks) or was manually cleared.
return None
schema = ray.get(get_schema.remote(block))
# Set the schema.
self._metadata[0].schema = schema
return schema
|
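The helper's core pattern is "pull one block, compute its schema in a remote task, cache it on the metadata". A minimal sketch of the remote-schema step with plain Ray and an Arrow table (the task name is illustrative, not the Datasets-internal _get_schema):

import ray
import pyarrow as pa

ray.init()

@ray.remote
def schema_of(block):
    # For an Arrow-backed block the schema is simply the table's schema.
    return block.schema

block_ref = ray.put(pa.table({"x": [1, 2, 3]}))
schema = ray.get(schema_of.remote(block_ref))
print(schema)  # x: int64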
|
@pytest.fixture(params=["string", "pathlike", "buffer"]) | 40,362 | 168,984 | 25 | pandas/tests/io/formats/test_format.py | 14 | 8 | def get_local_am_pm():
am_local = time(1).strftime("%p")
pm_local = time(13).strftime("%p")
return am_local, pm_local
@pytest.fixture(params=["string", "pa | BUG: Fixed Unicode decoding error in `Period.strftime` when a locale-specific directive is used (#46405)
* Added test representative of #46319. Should fail on CI
* Added a gha worker with non utf 8 zh_CN encoding
* Attempt to fix the encoding so that locale works
* Added the fix, but not using it for now, until CI is able to reproduce the issue.
* Crazy idea: maybe simply removing the .utf8 modifier will use the right encoding !
* Hopefully fixing the locale not available error
* Now simply generating the locale, not updating the ubuntu one
* Trying to install the locale without enabling it
* Stupid mistake
* Testing the optional locale generator condition
* Put back all runners
* Added whatsnew
* Now using the fix
* As per code review: moved locale-switching fixture `overridden_locale` to conftest
* Flake8
* Added comments on the runner
* Added a non-utf8 locale in the `it_IT` runner. Added the zh_CN.utf8 locale in the tests
* Improved readability of fixture `overridden_locale` as per code review
* Added two comments on default encoding
* Fixed #46319 by adding a new `char_to_string_locale` function in the `tslibs.util` module, able to decode char* using the current locale.
* As per code review: modified the test to contain non-utf8 chars. Fixed the resulting issue.
* Split the test in two for clarity
* Fixed test and flake8 error.
* Updated whatsnew to ref #46468 . Updated test name
* Removing wrong whatsnew bullet
* Nitpick on whatsnew as per code review
* Fixed build error rst directive
* Names incorrectly reverted in last merge commit
* Fixed test_localization so that #46595 can be demonstrated on windows targets (even if today these do not run on windows targets, see #46597)
* Fixed `tm.set_locale` context manager, it could error and leak when category LC_ALL was used. Fixed #46595
* Removed the fixture as per code review, and added corresponding parametrization in tests.
* Dummy mod to trigger CI again
* reverted dummy mod
* Attempt to fix the remaining error on the numpy worker
* Fixed issue in `_from_ordinal`
* Added asserts to try to understand
* Reverted debugging asserts and applied fix for numpy repeat from #47670.
* Fixed the last issue on numpy dev: a TypeError message had changed
* Code review: Removed `EXTRA_LOC`
* Code review: removed commented line
* Code review: reverted out of scope change
* Code review: reverted out of scope change
* Fixed unused import
* Fixed revert mistake
* Moved whatsnew to 1.6.0
* Update pandas/tests/io/parser/test_quoting.py
Co-authored-by: Sylvain MARIE <sylvain.marie@se.com> | get_local_am_pm | ae6dc976d334e791b3e215cf6e63a267675cccbe | pandas | test_format.py | 10 | 4 | https://github.com/pandas-dev/pandas.git | 1 | 31 | 1 | 12 | 86 | Python | {
"docstring": "Return the AM and PM strings returned by strftime in current locale.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def get_local_am_pm():
am_local = time(1).strftime("%p")
pm_local = time(13).strftime("%p")
return am_local, pm_local
@pytest.fixture(params=["string", "pathlike", "buffer"]) |
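The fixture simply asks strftime for the current locale's AM/PM designators; a quick sketch of what that returns (values vary by locale — e.g. 上午/下午 under zh_CN, and some locales return empty strings):

from datetime import time

am, pm = time(1).strftime("%p"), time(13).strftime("%p")
print(am, pm)  # typically "AM PM" in an English locale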
7,309 | 40,010 | 49 | dash/_validate.py | 12 | 7 | def validate_pages_layout(module, page):
try:
getattr(page, "layout")
except AttributeError:
raise exceptions.NoLayoutException(
f
)
| update 2 after review | validate_pages_layout | 9d622aca0ce4d2d6a3cbc56079c6978b46219a98 | dash | _validate.py | 14 | 10 | https://github.com/plotly/dash.git | 2 | 26 | 0 | 12 | 53 | Python | {
"docstring": "\n No layout found in {module + \".py\"}\n A variable or a function named \"layout\" is required.\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 16,
"vocab_size": 16
} | def validate_pages_layout(module, page):
try:
getattr(page, "layout")
except AttributeError:
raise exceptions.NoLayoutException(
                f"""
                No layout found in {module + ".py"}
                A variable or a function named "layout" is required.
                """
)
|
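The validation only requires the page module to expose something named layout; a minimal page-module sketch that satisfies it (assuming Dash 2.x, where dash.html is importable):

# pages/home.py - either form satisfies the check above.
from dash import html

layout = html.Div("Hello")   # a module-level variable ...

# ... or a function named "layout" works as well:
# def layout(**query_params):
#     return html.Div("Hello")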
|
29,435 | 130,950 | 673 | python/ray/serve/tests/test_autoscaling_policy.py | 278 | 22 | def test_upscale_downscale_delay():
upscale_delay_s = 30.0
downscale_delay_s = 600.0
config = AutoscalingConfig(
min_replicas=1,
max_replicas=2,
target_num_ongoing_requests_per_replica=1,
upscale_delay_s=30.0,
downscale_delay_s=600.0,
)
policy = BasicAutoscalingPolicy(config)
upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_PERIOD_S)
downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_PERIOD_S)
overload_requests = [100]
# We should scale up only after enough consecutive scale-up decisions.
for i in range(upscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 1, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 2
no_requests = [0, 0]
# We should scale down only after enough consecutive scale-down decisions.
for i in range(downscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 2, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 1
# Get some scale-up decisions, but not enough to trigger a scale up.
for i in range(int(upscale_wait_periods / 2)):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 1, i
# Interrupt with a scale-down decision.
policy.get_decision_num_replicas(
current_num_ongoing_requests=[0], curr_target_num_replicas=1
)
# The counter should be reset, so it should require `upscale_wait_periods`
# more periods before we actually scale up.
for i in range(upscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 1, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_r | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_upscale_downscale_delay | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_autoscaling_policy.py | 11 | 67 | https://github.com/ray-project/ray.git | 7 | 358 | 0 | 92 | 534 | Python | {
"docstring": "Unit test for upscale_delay_s and downscale_delay_s.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def test_upscale_downscale_delay():
upscale_delay_s = 30.0
downscale_delay_s = 600.0
config = AutoscalingConfig(
min_replicas=1,
max_replicas=2,
target_num_ongoing_requests_per_replica=1,
upscale_delay_s=30.0,
downscale_delay_s=600.0,
)
policy = BasicAutoscalingPolicy(config)
upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_PERIOD_S)
downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_PERIOD_S)
overload_requests = [100]
# We should scale up only after enough consecutive scale-up decisions.
for i in range(upscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 1, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 2
no_requests = [0, 0]
# We should scale down only after enough consecutive scale-down decisions.
for i in range(downscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 2, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 1
# Get some scale-up decisions, but not enough to trigger a scale up.
for i in range(int(upscale_wait_periods / 2)):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 1, i
# Interrupt with a scale-down decision.
policy.get_decision_num_replicas(
current_num_ongoing_requests=[0], curr_target_num_replicas=1
)
# The counter should be reset, so it should require `upscale_wait_periods`
# more periods before we actually scale up.
for i in range(upscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 1, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1
)
assert new_num_replicas == 2
# Get some scale-down decisions, but not enough to trigger a scale down.
for i in range(int(downscale_wait_periods / 2)):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 2, i
# Interrupt with a scale-up decision.
policy.get_decision_num_replicas(
current_num_ongoing_requests=[100, 100], curr_target_num_replicas=2
)
# The counter should be reset so it should require `downscale_wait_periods`
# more periods before we actually scale down.
for i in range(downscale_wait_periods):
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 2, i
new_num_replicas = policy.get_decision_num_replicas(
current_num_ongoing_requests=no_requests, curr_target_num_replicas=2
)
assert new_num_replicas == 1
|
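The two delays are turned into a number of consecutive control-loop decisions, which is what the loops in the test count. A worked arithmetic sketch — the control-loop period is an assumed value here, since the constant is defined outside this record:

CONTROL_LOOP_PERIOD_S = 0.1  # assumed value, for illustration only
upscale_wait_periods = int(30.0 / CONTROL_LOOP_PERIOD_S)     # 300 consecutive scale-up decisions
downscale_wait_periods = int(600.0 / CONTROL_LOOP_PERIOD_S)  # 6000 consecutive scale-down decisions
# Any opposing decision resets the counter, which is exactly what the
# "interrupt" blocks in the test above exercise.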