Column summary (name, type, min, max):

  ast_errors       stringlengths   0      3.2k
  d_id             int64           44     121k
  id               int64           70     338k
  n_whitespaces    int64           3      14k
  path             stringlengths   8      134
  n_words          int64           4      4.82k
  n_identifiers    int64           1      131
  random_cut       stringlengths   16     15.8k
  commit_message   stringlengths   2      15.3k
  fun_name         stringlengths   1      84
  commit_id        stringlengths   40     40
  repo             stringlengths   3      28
  file_name        stringlengths   5      79
  ast_levels       int64           6      31
  nloc             int64           1      548
  url              stringlengths   31     59
  complexity       int64           1      66
  token_counts     int64           6      2.13k
  n_ast_errors     int64           0      28
  vocab_size       int64           4      1.11k
  n_ast_nodes      int64           15     19.2k
  language         stringclasses   1 value
  documentation    dict
  code             stringlengths   101    62.2k
72,982
249,542
132
tests/storage/test_event_federation.py
27
8
def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo: room_id = "!backfill-room-test:some-host" depth_map: Dict[str, int] = {
Only try to backfill event if we haven't tried before recently (#13635) Only try to backfill event if we haven't tried before recently (exponential backoff). No need to keep trying the same backfill point that fails over and over. Fix https://github.com/matrix-org/synapse/issues/13622 Fix https://github.com/matrix-org/synapse/issues/8451 Follow-up to https://github.com/matrix-org/synapse/pull/13589 Part of https://github.com/matrix-org/synapse/issues/13356
_setup_room_for_insertion_backfill_tests
ac1a31740b6d0dfda4d57a25762aaddfde981caf
synapse
test_event_federation.py
9
27
https://github.com/matrix-org/synapse.git
1
81
0
26
88
Python
{ "docstring": "\n Sets up a room with various insertion event backward extremities to test\n backfill functions against.\n\n Returns:\n _BackfillSetupInfo including the `room_id` to test against and\n `depth_map` of events in the room\n ", "language": "en", "n_whitespaces": 81, "n_words": 30, "vocab_size": 26 }
def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo: room_id = "!backfill-room-test:some-host" depth_map: Dict[str, int] = { "1": 1, "2": 2, "insertion_eventA": 3, "3": 4, "insertion_eventB": 5, "4": 6, "5": 7, }
28,435
127,405
196
python/ray/serve/experimental/gradio_visualize_graph.py
59
13
def postprocessing(data): if type_to_string(type(data)) == "torch.Tensor": try: import torch from torchvision import transforms # By default Torch tensors are displayed as images. To display them as JSON, # the user can simply convert them to numpy arrays. transformer = transforms.ToPILImage() return transformer(torch.squeeze(data)) except ModuleNotFoundError: logger.warning( "Module `torchvision` isn't installed,
[serve] Add additional features to DAG visualization with Gradio (#28246)
postprocessing
203253321d34543aa25483803ebc21e3903679b6
ray
gradio_visualize_graph.py
13
13
https://github.com/ray-project/ray.git
3
55
0
50
101
Python
{ "docstring": "Add support for types that are not supported by Gradio.\n\n Some data types like PyTorch tensors, cannot be processed and displayed through\n Gradio. Thus we extend support to these data types by transforming them into a form\n that Gradio can process and display.\n ", "language": "en", "n_whitespaces": 55, "n_words": 43, "vocab_size": 35 }
def postprocessing(data): if type_to_string(type(data)) == "torch.Tensor": try: import torch from torchvision import transforms # By default Torch tensors are displayed as images. To display them as JSON, # the user can simply convert them to numpy arrays. transformer = transforms.ToPILImage() return transformer(torch.squeeze(data)) except ModuleNotFoundError: logger.warning( "Module `torchvision` isn't installed, unable to process torch tensor." ) return data return data
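For context, a self-contained sketch of the same guard-and-fall-back pattern shown in this record (the helper name is made up here, torch and torchvision are treated as optional, and this is not the Ray Serve API itself):

import logging

logger = logging.getLogger(__name__)

def to_displayable(data):
    # Only attempt the conversion for torch tensors; return other values unchanged.
    if type(data).__module__ == "torch" and type(data).__name__ == "Tensor":
        try:
            import torch
            from torchvision import transforms

            # Torch tensors are rendered as PIL images by default.
            return transforms.ToPILImage()(torch.squeeze(data))
        except ModuleNotFoundError:
            logger.warning(
                "Module `torchvision` isn't installed, unable to process torch tensor."
            )
    return data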
14,186
66,430
9
erpnext/manufacturing/doctype/work_order/test_work_order.py
18
10
def get_scrap_item_details(bom_no): scrap_items = {} for item in frappe.db.sql( , bom_no, as_dict=1, ): scrap_items[item.item_code] = item.stock_qty return scrap_items
style: format code with black
get_scrap_item_details
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
test_work_order.py
10
10
https://github.com/frappe/erpnext.git
2
40
0
16
62
Python
{ "docstring": "select item_code, stock_qty from `tabBOM Scrap Item`\n\t\twhere parent = %s", "language": "en", "n_whitespaces": 9, "n_words": 11, "vocab_size": 11 }
def get_scrap_item_details(bom_no): scrap_items = {} for item in frappe.db.sql( , bom_no, as_dict=1, ): scrap_items[item.item_code] = item.stock_qty return scrap_items
9,020
46,854
23
airflow/models/dag.py
9
11
def get_is_active(self, session=NEW_SESSION) -> Optional[None]:
API: Fix deprecation warning due to using query.value (#22775) When using sqlalchemy 1.4, there's a deprecation warning at the task logging: SADeprecationWarning: Query.value() is deprecated and will be removed in a future release. Please use Query.with_entities() in combination with Query.scalar() (deprecated since: 1.4) This PR fixes it
get_is_active
921ccedf7f90f15e8d18c27a77b29d232be3c8cb
airflow
dag.py
12
3
https://github.com/apache/airflow.git
1
39
0
9
63
Python
{ "docstring": "Returns a boolean indicating whether this DAG is active", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def get_is_active(self, session=NEW_SESSION) -> Optional[None]: return session.query(DagModel.is_active).filter(DagModel.dag_id == self.dag_id).scalar()
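To illustrate the pattern this commit message describes, here is a minimal sketch, assuming SQLAlchemy 1.4+ is installed; the model and data are stand-ins for illustration, not Airflow's real table. It shows a single-column scalar query in place of the deprecated Query.value():

from sqlalchemy import Boolean, Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class DagModel(Base):
    # Illustrative stand-in for Airflow's DagModel, not the real table definition.
    __tablename__ = "dag"
    dag_id = Column(String, primary_key=True)
    is_active = Column(Boolean, default=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(DagModel(dag_id="example_dag", is_active=True))
    session.commit()
    # Select one column and fetch it as a scalar instead of calling Query.value().
    is_active = (
        session.query(DagModel.is_active)
        .filter(DagModel.dag_id == "example_dag")
        .scalar()
    )
    print(is_active)  # True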
57,470
225,565
99
mkdocs/structure/pages.py
36
17
def is_homepage(self) -> bool: return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html') previous_page: Optional[Page] next_page: Optional[Page] parent: Optional[Section] children: None = None is_section: bool = False is_page: bool = True is_link: bool =
Relative links end with slash even for homepage links (#3022) Fixes #3015
is_homepage
32359f3e93f5ca7778b9f7c3d6d92f49a629c84c
mkdocs
pages.py
9
3
https://github.com/mkdocs/mkdocs.git
3
30
0
27
143
Python
{ "docstring": "Evaluates to `True` for the homepage of the site and `False` for all other pages.The [page][mkdocs.structure.pages.Page] object for the previous page or `None`.\n The value will be `None` if the current page is the first item in the site navigation\n or if the current page is not included in the navigation at all.The [page][mkdocs.structure.pages.Page] object for the next page or `None`.\n The value will be `None` if the current page is the last item in the site navigation\n or if the current page is not included in the navigation at all.The immediate parent of the page in the site navigation. `None` if the\n page is at the top level.Pages do not contain children and the attribute is always `None`.Indicates that the navigation object is a \"section\" object. Always `False` for page objects.Indicates that the navigation object is a \"page\" object. Always `True` for page objects.Indicates that the navigation object is a \"link\" object. Always `False` for page objects.", "language": "en", "n_whitespaces": 172, "n_words": 158, "vocab_size": 57 }
def is_homepage(self) -> bool: return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html') previous_page: Optional[Page] next_page: Optional[Page] parent: Optional[Section] children: None = None is_section: bool = False is_page: bool = True is_link: bool = False
19,835
100,340
774
lib/gui/utils.py
154
31
def _filetypes(self): all_files = ("All files", "*.*") filetypes = dict( default=(all_files,), alignments=[("Faceswap Alignments", "*.fsa"), all_files], config_project=[("Faceswap Project files", "*.fsw"), all_files], config_task=[("Faceswap Task files", "*.fst"), all_files], config_all=[("Faceswap Project and Task files", "*.fst *.fsw"), all_files], csv=[("Comma separated values", "*.csv"), all_files], image=[("Bitmap", "*.bmp"), ("JPG", "*.jpeg *.jpg"), ("PNG", "*.png"), ("TIFF", "*.tif *.tiff"), all_files], ini=[("Faceswap config files", "*.ini"), all_files],
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
_filetypes
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
utils.py
18
40
https://github.com/deepfakes/faceswap.git
8
337
0
116
586
Python
{ "docstring": " dict: The accepted extensions for each file type for opening/saving ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 9 }
def _filetypes(self): all_files = ("All files", "*.*") filetypes = dict( default=(all_files,), alignments=[("Faceswap Alignments", "*.fsa"), all_files], config_project=[("Faceswap Project files", "*.fsw"), all_files], config_task=[("Faceswap Task files", "*.fst"), all_files], config_all=[("Faceswap Project and Task files", "*.fst *.fsw"), all_files], csv=[("Comma separated values", "*.csv"), all_files], image=[("Bitmap", "*.bmp"), ("JPG", "*.jpeg *.jpg"), ("PNG", "*.png"), ("TIFF", "*.tif *.tiff"), all_files], ini=[("Faceswap config files", "*.ini"), all_files], json=[("JSON file", "*.json"), all_files], model=[("Keras model files", "*.h5"), all_files], state=[("State files", "*.json"), all_files], log=[("Log files", "*.log"), all_files], video=[("Audio Video Interleave", "*.avi"), ("Flash Video", "*.flv"), ("Matroska", "*.mkv"), ("MOV", "*.mov"), ("MP4", "*.mp4"), ("MPEG", "*.mpeg *.mpg *.ts *.vob"), ("WebM", "*.webm"), ("Windows Media Video", "*.wmv"), all_files]) # Add in multi-select options and upper case extensions for Linux for key in filetypes: if platform.system() == "Linux": filetypes[key] = [item if item[0] == "All files" else (item[0], f"{item[1]} {item[1].upper()}") for item in filetypes[key]] if len(filetypes[key]) > 2: multi = [f"{key.title()} Files"] multi.append(" ".join([ftype[1] for ftype in filetypes[key] if ftype[0] != "All files"])) filetypes[key].insert(0, tuple(multi)) return filetypes
56,035
220,528
118
python3.10.4/Lib/asyncio/futures.py
32
12
def result(self): if self._state == _CANCELLED: exc = self._make_cancelled_error() raise exc if self._state != _FINISHED: raise exceptions.InvalidStateError('Result is not ready.')
add python 3.10.4 for windows
result
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
futures.py
10
10
https://github.com/XX-net/XX-Net.git
4
57
0
22
95
Python
{ "docstring": "Return the result this future represents.\n\n If the future has been cancelled, raises CancelledError. If the\n future's result isn't yet available, raises InvalidStateError. If\n the future is done and has an exception set, this exception is raised.\n ", "language": "en", "n_whitespaces": 67, "n_words": 37, "vocab_size": 24 }
def result(self): if self._state == _CANCELLED: exc = self._make_cancelled_error() raise exc if self._state != _FINISHED: raise exceptions.InvalidStateError('Result is not ready.') self.__log_traceback = False if self._exception is not None: raise self._exception return self._result
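The docstring above lists the possible outcomes of Future.result(); a standard-library-only sketch that exercises each of them (no extra dependencies assumed):

import asyncio

async def main():
    loop = asyncio.get_running_loop()

    pending = loop.create_future()
    try:
        pending.result()  # result not set yet
    except asyncio.InvalidStateError as exc:
        print("pending:", exc)

    pending.set_result(42)
    print("done:", pending.result())  # 42

    failed = loop.create_future()
    failed.set_exception(RuntimeError("boom"))
    try:
        failed.result()  # re-raises the stored exception
    except RuntimeError as exc:
        print("exception set:", exc)

    cancelled = loop.create_future()
    cancelled.cancel()
    try:
        cancelled.result()
    except asyncio.CancelledError:
        print("cancelled futures raise CancelledError")

asyncio.run(main())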
22,075
105,108
19
src/datasets/formatting/dataset_wrappers/torch_iterable_dataset.py
10
5
def _set_fsspec_for_multiprocess() -> None: fsspec.asyn.iothread[0] = None fsspec.asyn.
Support DataLoader with num_workers > 0 in streaming mode (#4375) * make TorchIterableDataset work in parallel - make it picklable - paralellize over the shards when num_workers is passed * start writing some tests * fix streaming extension and fsspec issues in subprocesses * fix some tests * fix more tests * fix import * fix and add tests * fix patch (handle successive patches and builtins) * revert unnecessary change to enriched_web_blg * style * use open locally to fix win permission errors * keep file opened in read_csv * fix compression for read_csv * consistency of read_csv: don't infer compression for file-like objects * stringify Path objects * comments + raise error if sharding is ambiguous * minor * Update src/datasets/iterable_dataset.py Co-authored-by: Mario Šaško <mariosasko777@gmail.com> Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
_set_fsspec_for_multiprocess
ab7d3045ac9154e9c1c2602d0869130defdc6dc7
datasets
torch_iterable_dataset.py
9
9
https://github.com/huggingface/datasets.git
1
27
0
8
45
Python
{ "docstring": "\n Clear reference to the loop and thread.\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\n Only required for fsspec >= 0.9.0\n See https://github.com/fsspec/gcsfs/issues/379\n ", "language": "en", "n_whitespaces": 42, "n_words": 26, "vocab_size": 25 }
def _set_fsspec_for_multiprocess() -> None: fsspec.asyn.iothread[0] = None fsspec.asyn.loop[0] = None
76,501
260,798
137
sklearn/utils/extmath.py
47
19
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): out = np.cumsum(arr, axis=axis, dtype=np.float64) expected = np.sum(arr, axis=axis, dtype=np.float64) if not np.all( np.isclose(
DOC ensure sklearn/utils/extmath/stable_cumsum passes numpydoc (#24348)
stable_cumsum
45756377c748d84aa52f66950b8d9eeefc31456c
scikit-learn
extmath.py
13
14
https://github.com/scikit-learn/scikit-learn.git
2
108
0
40
157
Python
{ "docstring": "Use high precision for cumsum and check that final value matches sum.\n\n Warns if the final cumulative sum does not match the sum (up to the chosen\n tolerance).\n\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat.\n axis : int, default=None\n Axis along which the cumulative sum is computed.\n The default (None) is to compute the cumsum over the flattened array.\n rtol : float, default=1e-05\n Relative tolerance, see ``np.allclose``.\n atol : float, default=1e-08\n Absolute tolerance, see ``np.allclose``.\n\n Returns\n -------\n out : ndarray\n Array with the cumulative sums along the chosen axis.\n ", "language": "en", "n_whitespaces": 171, "n_words": 93, "vocab_size": 68 }
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): out = np.cumsum(arr, axis=axis, dtype=np.float64) expected = np.sum(arr, axis=axis, dtype=np.float64) if not np.all( np.isclose( out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True ) ): warnings.warn( "cumsum was found to be unstable: " "its last element does not correspond to sum", RuntimeWarning, ) return out
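A small NumPy-only illustration of the check the docstring describes (the input values are arbitrary; only NumPy is assumed): accumulate in float64 and compare the last cumulative value to the direct sum.

import numpy as np

# float32 input where 0.1 is not exactly representable, so accumulation error is visible
arr = np.full(100_000, 0.1, dtype=np.float32)

out = np.cumsum(arr, axis=None, dtype=np.float64)    # high-precision cumulative sum
expected = np.sum(arr, axis=None, dtype=np.float64)  # high-precision reference sum

# The final cumulative value should match the reference within the tolerances.
print(np.isclose(out[-1], expected, rtol=1e-05, atol=1e-08))  # True
print(out[-1], expected)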
7,960
43,407
751
airflow/www/views.py
208
57
def confirm(self): args = request.args dag_id = args.get('dag_id') task_id = args.get('task_id') dag_run_id = args.get('dag_run_id') state = args.get('state') origin = args.get('origin') if 'map_index' not in args: map_indexes: Optional[List[int]] = None else: map_indexes = args.getlist('map_index', type=int) upstream = to_boolean(args.get('upstream')) downstream = to_boolean(args.get('downstream')) future = to_boolean(args.get('future')) past = to_boolean(args.get('past')) origin = origin or url_for('Airflow.index') dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: msg = f'DAG {dag_id} not found' return redirect_or_json(origin, msg, status='error', status_code=404) try: task = dag.get_task(task_id) except airflow.exceptions.TaskNotFound: msg = f"Task {task_id} not found" return redirect_or_json(origin, msg, status='error', status_code=404) task.dag = dag if state not in ( 'success', 'failed', ): msg = f"Invalid state {state}, must be either 'success' or 'failed'" return redirect_or_json(origin, msg
Upgrade FAB to 4.1.1 (#24399) * Upgrade FAB to 4.1.1 The Flask Application Builder have been updated recently to support a number of newer dependencies. This PR is the attempt to migrate FAB to newer version. This includes: * update setup.py and setup.cfg upper and lower bounds to account for proper version of dependencies that FAB < 4.0.0 was blocking from upgrade * added typed Flask application retrieval with a custom application fields available for MyPy typing checks. * fix typing to account for typing hints added in multiple upgraded libraries optional values and content of request returned as Mapping * switch to PyJWT 2.* by using non-deprecated "required" claim as list rather than separate fields * add possibiliyt to install providers without constraints so that we could avoid errors on conflicting constraints when upgrade-to-newer-dependencies is used * add pre-commit to check that 2.4+ only get_airflow_app is not used in providers * avoid Bad Request in case the request sent to Flask 2.0 is not JSon content type * switch imports of internal classes to direct packages where classes are available rather than from "airflow.models" to satisfy MyPY * synchronize changes of FAB Security Manager 4.1.1 with our copy of the Security Manager. * add error handling for a few "None" cases detected by MyPY * corrected test cases that were broken by immutability of Flask 2 objects and better escaping done by Flask 2 * updated test cases to account for redirection to "path" rather than full URL by Flask2 Fixes: #22397 * fixup! Upgrade FAB to 4.1.1
confirm
e2f19505bf3622935480e80bee55bf5b6d80097b
airflow
views.py
13
61
https://github.com/apache/airflow.git
12
430
0
129
729
Python
{ "docstring": "Show confirmation page for marking tasks as success or failed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def confirm(self): args = request.args dag_id = args.get('dag_id') task_id = args.get('task_id') dag_run_id = args.get('dag_run_id') state = args.get('state') origin = args.get('origin') if 'map_index' not in args: map_indexes: Optional[List[int]] = None else: map_indexes = args.getlist('map_index', type=int) upstream = to_boolean(args.get('upstream')) downstream = to_boolean(args.get('downstream')) future = to_boolean(args.get('future')) past = to_boolean(args.get('past')) origin = origin or url_for('Airflow.index') dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: msg = f'DAG {dag_id} not found' return redirect_or_json(origin, msg, status='error', status_code=404) try: task = dag.get_task(task_id) except airflow.exceptions.TaskNotFound: msg = f"Task {task_id} not found" return redirect_or_json(origin, msg, status='error', status_code=404) task.dag = dag if state not in ( 'success', 'failed', ): msg = f"Invalid state {state}, must be either 'success' or 'failed'" return redirect_or_json(origin, msg, status='error', status_code=400) latest_execution_date = dag.get_latest_execution_date() if not latest_execution_date: msg = f"Cannot mark tasks as {state}, seem that dag {dag_id} has never run" return redirect_or_json(origin, msg, status='error', status_code=400) if map_indexes is None: tasks: Union[List[Operator], List[Tuple[Operator, int]]] = [task] else: tasks = [(task, map_index) for map_index in map_indexes] to_be_altered = set_state( tasks=tasks, run_id=dag_run_id, upstream=upstream, downstream=downstream, future=future, past=past, state=state, commit=False, ) if request.headers.get('Accept') == 'application/json': details = [str(t) for t in to_be_altered] return htmlsafe_json_dumps(details, separators=(',', ':')) details = "\n".join(str(t) for t in to_be_altered) response = self.render_template( "airflow/confirm.html", endpoint=url_for(f'Airflow.{state}'), message=f"Task instances you are about to mark as {state}:", details=details, ) return response
24,583
112,125
54
nni/retiarii/oneshot/pytorch/base_lightning.py
15
10
def resample(self) -> Dict[str, Any]:
Valuechoice oneshot lightning (#4602)
resample
14d2966b9e91ae16dcc39de8f41017a75cec8ff9
nni
base_lightning.py
12
13
https://github.com/microsoft/nni.git
2
39
0
14
63
Python
{ "docstring": "Trigger the resample for each ``nas_module``.\n Sometimes (e.g., in differentiable cases), it does nothing.\n\n Returns\n -------\n dict\n Sampled architecture.\n ", "language": "en", "n_whitespaces": 65, "n_words": 19, "vocab_size": 19 }
def resample(self) -> Dict[str, Any]: result = {} for module in self.nas_modules: result.update(module.resample(memo=result)) return result
24,402
111,419
28
spacy/tests/doc/test_json_doc_conversion.py
9
11
def test_json_to_doc_attribute_consistency(doc): doc_json = doc.to_json() doc_json["tokens"][1].pop("morph") with pytest.raises(ValueError): Doc(doc.vocab).from_js
Add Doc.from_json() (#10688) * Implement Doc.from_json: rough draft. * Implement Doc.from_json: first draft with tests. * Implement Doc.from_json: added documentation on website for Doc.to_json(), Doc.from_json(). * Implement Doc.from_json: formatting changes. * Implement Doc.to_json(): reverting unrelated formatting changes. * Implement Doc.to_json(): fixing entity and span conversion. Moving fixture and doc <-> json conversion tests into single file. * Implement Doc.from_json(): replaced entity/span converters with doc.char_span() calls. * Implement Doc.from_json(): handling sentence boundaries in spans. * Implementing Doc.from_json(): added parser-free sentence boundaries transfer. * Implementing Doc.from_json(): added parser-free sentence boundaries transfer. * Implementing Doc.from_json(): incorporated various PR feedback. * Renaming fixture for document without dependencies. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implementing Doc.from_json(): using two sent_starts instead of one. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implementing Doc.from_json(): doc_without_dependency_parser() -> doc_without_deps. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implementing Doc.from_json(): incorporating various PR feedback. Rebased on latest master. * Implementing Doc.from_json(): refactored Doc.from_json() to work with annotation IDs instead of their string representations. * Implement Doc.from_json(): reverting unwanted formatting/rebasing changes. * Implement Doc.from_json(): added check for char_span() calculation for entities. * Update spacy/tokens/doc.pyx Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): minor refactoring, additional check for token attribute consistency with corresponding test. * Implement Doc.from_json(): removed redundancy in annotation type key naming. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): Simplifying setting annotation values. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement doc.from_json(): renaming annot_types to token_attrs. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): adjustments for renaming of annot_types to token_attrs. * Implement Doc.from_json(): removing default categories. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): simplifying lexeme initialization. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): simplifying lexeme initialization. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): refactoring to only have keys for present annotations. * Implement Doc.from_json(): fix check for tokens' HEAD attributes. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): refactoring Doc.from_json(). * Implement Doc.from_json(): fixing span_group retrieval. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): fixing span retrieval. * Implement Doc.from_json(): added schema for Doc JSON format. Minor refactoring in Doc.from_json(). * Implement Doc.from_json(): added comment regarding Token and Span extension support. * Implement Doc.from_json(): renaming inconsistent_props to partial_attrs.. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): adjusting error message. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): extending E1038 message. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): added params to E1038 raises. * Implement Doc.from_json(): combined attribute collection with partial attributes check. * Implement Doc.from_json(): added optional schema validation. * Implement Doc.from_json(): fixed optional fields in schema, tests. * Implement Doc.from_json(): removed redundant None check for DEP. * Implement Doc.from_json(): added passing of schema validatoin message to E1037.. * Implement Doc.from_json(): removing redundant error E1040. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): changing message for E1037. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): adjusted website docs and docstring of Doc.from_json(). * Update spacy/tests/doc/test_json_doc_conversion.py * Implement Doc.from_json(): docstring update. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): docstring update. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): website docs update. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): docstring formatting. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): docstring formatting. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): fixing Doc reference in website docs. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): reformatted website/docs/api/doc.md. * Implement Doc.from_json(): bumped IDs of new errors to avoid merge conflicts. * Implement Doc.from_json(): fixing bug in tests. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): fix setting of sentence starts for docs without DEP. * Implement Doc.from_json(): add check for valid char spans when manually setting sentence boundaries. Refactor sentence boundary setting slightly. Move error message for lack of support for partial token annotations to errors.py. * Implement Doc.from_json(): simplify token sentence start manipulation. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Combine related error messages * Update spacy/tests/doc/test_json_doc_conversion.py Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
test_json_to_doc_attribute_consistency
8387ce4c01db48d92ac5638e18316c0f1fc8861e
spaCy
test_json_doc_conversion.py
12
5
https://github.com/explosion/spaCy.git
1
44
0
9
80
Python
{ "docstring": "Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
def test_json_to_doc_attribute_consistency(doc): doc_json = doc.to_json() doc_json["tokens"][1].pop("morph") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json)
52,092
207,767
114
tests/admin_views/tests.py
37
8
def test_index_css_classes(self):
Refs #33476 -- Reformatted code with Black.
test_index_css_classes
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
13
9
https://github.com/django/django.git
1
87
0
19
156
Python
{ "docstring": "\n CSS class names are used for each app and model on the admin index\n pages (#17050).\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
def test_index_css_classes(self): # General index page response = self.client.get(reverse("admin:index")) self.assertContains(response, '<div class="app-admin_views module') self.assertContains(response, '<tr class="model-actor">') self.assertContains(response, '<tr class="model-album">') # App index page response = self.client.get(reverse("admin:app_list", args=("admin_views",))) self.assertContains(response, '<div class="app-admin_views module') self.assertContains(response, '<tr class="model-actor">') self.assertContains(response, '<tr class="model-album">')
51,985
207,495
36
tests/admin_views/test_actions.py
8
9
def test_action_column_class(self): response = self.client.get(reverse("admin:admin_views_subscriber_changelist")) self.assertIsNotNone(response.context["act
Refs #33476 -- Reformatted code with Black.
test_action_column_class
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test_actions.py
11
4
https://github.com/django/django.git
1
38
0
8
69
Python
{ "docstring": "The checkbox column class is present in the response.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_action_column_class(self): response = self.client.get(reverse("admin:admin_views_subscriber_changelist")) self.assertIsNotNone(response.context["action_form"]) self.assertContains(response, "action-checkbox-column")
17,168
81,176
22
awx/conf/settings.py
8
7
def hashkey(cls, *args, **kwargs): return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
add lock to cachetools usage * We observed daphne giving tracebacks when accessing logging settings. Originally, configure tower in tower settings was no a suspect because daphne is not multi-process. We've had issues with configure tower in tower settings and multi-process before. We later learned that Daphne is multi-threaded. Configure tower in tower was back to being a suspect. We constructed a minimal reproducer to show that multiple threads accessing settings can cause the same traceback that we saw in daphne. See https://gist.github.com/chrismeyersfsu/7aa4bdcf76e435efd617cb078c64d413 for that recreator. These fixes stop the recreation.
hashkey
21972c91dd2b52cd206bf71ea038ab0e1f478b32
awx
settings.py
10
2
https://github.com/ansible/awx.git
1
28
0
7
52
Python
{ "docstring": "\n Usage of @cachetools.cached has changed to @cachetools.cachedmethod\n The previous cachetools decorator called the hash function and passed in (self, key).\n The new cachtools decorator calls the hash function with just (key).\n Ideally, we would continue to pass self, however, the cachetools decorator interface\n does not allow us to.\n\n This hashkey function is to maintain that the key generated looks like\n ('<SettingsWrapper>', key). The thought is that maybe it is important to namespace\n our cache to the SettingsWrapper scope in case some other usage of this cache exists.\n I can not think of how any other system could and would use our private cache, but\n for safety sake we are ensuring the key schema does not change.\n ", "language": "en", "n_whitespaces": 194, "n_words": 116, "vocab_size": 82 }
def hashkey(cls, *args, **kwargs): return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
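A short sketch of the key-namespacing idea described in the docstring; it assumes the third-party cachetools package is installed, and the class name here is illustrative rather than the real AWX wrapper:

from cachetools import keys

class SettingsWrapper:
    @classmethod
    def hashkey(cls, *args, **kwargs):
        # Prefix every key with the class name so cache entries stay namespaced,
        # e.g. ('<SettingsWrapper>', 'SOME_SETTING').
        return keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)

print(SettingsWrapper.hashkey("SOME_SETTING"))
# -> ('<SettingsWrapper>', 'SOME_SETTING')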
24,457
111,625
102
nni/experiment/config/base.py
31
17
def load(cls, path): with open(path) as yaml_file: data = yaml.safe_load(yaml_file) if not isinstance(data, dict): raise TypeError(f'Conent of config file {path} is not a dict/object') utils.set_base_path(Path(path).parent) config = cls(**data) utils.uns
Some string changes around experiment module (#4442)
load
3f6a8274a97bf003b5eadc05faa324162b7f4123
nni
base.py
11
9
https://github.com/microsoft/nni.git
2
64
0
27
114
Python
{ "docstring": "\n Load a YAML config file from file system.\n\n Since YAML is a superset of JSON, it can also load JSON files.\n\n This method raises exception if:\n\n - The file is not available\n - The file content is not valid YAML\n - Top level value of the YAML is not object\n - The YAML contains not supported fields\n\n It does not raise exception when the YAML misses fields or contains bad fields.\n\n Parameters\n ----------\n path : PathLike\n Path of the config file.\n\n Returns\n -------\n cls\n An object of ConfigBase subclass.\n ", "language": "en", "n_whitespaces": 217, "n_words": 89, "vocab_size": 58 }
def load(cls, path): with open(path) as yaml_file: data = yaml.safe_load(yaml_file) if not isinstance(data, dict): raise TypeError(f'Conent of config file {path} is not a dict/object') utils.set_base_path(Path(path).parent) config = cls(**data) utils.unset_base_path() return config
@frappe.whitelist()
14,111
66,151
22
erpnext/hr/doctype/leave_allocation/leave_allocation.py
34
14
def get_leave_allocation_for_period(employee, leave_type, from_date, to_date): leave_allocated = 0 leave_allocations = frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "employee": employee, "leave_type": leave_type}, as_dict=1, ) if leave_allocations: for leave_alloc in leave_allocations: leave_allocated += leave_alloc.total_leaves_allocated return leave_allocated @frappe.whitelist()
style: format code with black
get_leave_allocation_for_period
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
leave_allocation.py
11
19
https://github.com/frappe/erpnext.git
3
62
1
29
109
Python
{ "docstring": "\n\t\tselect employee, leave_type, from_date, to_date, total_leaves_allocated\n\t\tfrom `tabLeave Allocation`\n\t\twhere employee=%(employee)s and leave_type=%(leave_type)s\n\t\t\tand docstatus=1\n\t\t\tand (from_date between %(from_date)s and %(to_date)s\n\t\t\t\tor to_date between %(from_date)s and %(to_date)s\n\t\t\t\tor (from_date < %(from_date)s and to_date > %(to_date)s))\n\t", "language": "en", "n_whitespaces": 28, "n_words": 35, "vocab_size": 23 }
def get_leave_allocation_for_period(employee, leave_type, from_date, to_date): leave_allocated = 0 leave_allocations = frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "employee": employee, "leave_type": leave_type}, as_dict=1, ) if leave_allocations: for leave_alloc in leave_allocations: leave_allocated += leave_alloc.total_leaves_allocated return leave_allocated @frappe.whitelist()
51,957
207,414
397
tests/admin_utils/test_logentry.py
92
34
def test_proxy_model_content_type_is_used_for_log_entries(self): proxy_content_type = ContentType.objects.get_for_model( ArticleProxy, for_concrete_model=False ) post_data = { "site": self.site.pk, "title": "Foo", "hist": "Bar", "created_0": "2015-12-25", "created_1": "00:00", } changelist_url = reverse("admin:admin_utils_articleproxy_changelist") # add proxy_add_url = reverse("admin:admin_utils_articleproxy_add") response = self.client.post(proxy_add_url, post_data) self.assertRedirects(response, changelist_url) proxy_addition_log = LogEntry.objects.latest("id") self.assertEqual(proxy_addition_log.action_flag, ADDITION) self.assertEqual(proxy_addition_log.content_type, proxy_content_type) # change article_id = proxy_addition_log.object_id proxy_change_url = reverse( "admin:admin_utils_articleproxy_change", args=(article_id,) ) post_data["title"] = "New" response = self.client.post(proxy_change_url, post_data) self.assertRedirects(response, changelist_url) proxy_change_log = LogEntry.objects.latest("id") self.assertEqual(proxy_change_log.action_flag, CHANGE) self.assertEqual(proxy_change_log.content_type, proxy_content_type) # delete proxy_delete_url = reverse( "admin:admin_utils_articleproxy_delete", args=(arti
Refs #33476 -- Reformatted code with Black.
test_proxy_model_content_type_is_used_for_log_entries
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test_logentry.py
11
36
https://github.com/django/django.git
1
251
0
62
424
Python
{ "docstring": "\n Log entries for proxy models should have the proxy model's contenttype\n (#21084).\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
def test_proxy_model_content_type_is_used_for_log_entries(self): proxy_content_type = ContentType.objects.get_for_model( ArticleProxy, for_concrete_model=False ) post_data = { "site": self.site.pk, "title": "Foo", "hist": "Bar", "created_0": "2015-12-25", "created_1": "00:00", } changelist_url = reverse("admin:admin_utils_articleproxy_changelist") # add proxy_add_url = reverse("admin:admin_utils_articleproxy_add") response = self.client.post(proxy_add_url, post_data) self.assertRedirects(response, changelist_url) proxy_addition_log = LogEntry.objects.latest("id") self.assertEqual(proxy_addition_log.action_flag, ADDITION) self.assertEqual(proxy_addition_log.content_type, proxy_content_type) # change article_id = proxy_addition_log.object_id proxy_change_url = reverse( "admin:admin_utils_articleproxy_change", args=(article_id,) ) post_data["title"] = "New" response = self.client.post(proxy_change_url, post_data) self.assertRedirects(response, changelist_url) proxy_change_log = LogEntry.objects.latest("id") self.assertEqual(proxy_change_log.action_flag, CHANGE) self.assertEqual(proxy_change_log.content_type, proxy_content_type) # delete proxy_delete_url = reverse( "admin:admin_utils_articleproxy_delete", args=(article_id,) ) response = self.client.post(proxy_delete_url, {"post": "yes"}) self.assertRedirects(response, changelist_url) proxy_delete_log = LogEntry.objects.latest("id") self.assertEqual(proxy_delete_log.action_flag, DELETION) self.assertEqual(proxy_delete_log.content_type, proxy_content_type)
36,605
156,222
156
dask/utils.py
42
12
def typename(typ, short=False) -> str: if not isinstance(typ, type): return typename(type(typ)) try: if not typ.__module__ or typ.__module__ == "builtins": return typ.
Add mild typing to common utils functions (#8848)
typename
261bf174931580230717abca93fe172e166cc1e8
dask
utils.py
16
28
https://github.com/dask/dask.git
6
88
0
29
150
Python
{ "docstring": "\n Return the name of a type\n\n Examples\n --------\n >>> typename(int)\n 'int'\n\n >>> from dask.core import literal\n >>> typename(literal)\n 'dask.core.literal'\n >>> typename(literal, short=True)\n 'dask.literal'\n ", "language": "en", "n_whitespaces": 57, "n_words": 23, "vocab_size": 20 }
def typename(typ, short=False) -> str: if not isinstance(typ, type): return typename(type(typ)) try: if not typ.__module__ or typ.__module__ == "builtins": return typ.__name__ else: if short: module, *_ = typ.__module__.split(".") else: module = typ.__module__ return module + "." + typ.__name__ except AttributeError: return str(typ)
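A quick usage check against the docstring's examples, assuming the typename function shown above is in scope (only standard-library types are used):

from email.mime.text import MIMEText

print(typename(int))                   # 'int' (builtins are returned bare)
print(typename(MIMEText))              # 'email.mime.text.MIMEText'
print(typename(MIMEText, short=True))  # 'email.MIMEText' (only the top-level package is kept)
print(typename(MIMEText("hi")))        # instances are resolved to their type first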
50,801
204,587
396
django/core/management/__init__.py
114
24
def fetch_command(self, subcommand): # Get commands outside of try block to prevent swallowing exceptions commands = get_commands() try: app_name = commands[subcommand] except KeyError: if os.environ.get("DJANGO_SETTINGS_MODULE"): # If `subcommand` is missing due to misconfigured settings, the # following line will retrigger an ImproperlyConfigured exception # (get_commands() swallows the original one) so the user is # informed about it. settings.INSTALLED_APPS elif not settings.configured: sys.stderr.write("No Django settings specified.\n") possible_matches = get_close_matches(subcommand, commands) sys.stderr.write("Unknown command: %r" % subcommand) if possible_matches: sys.stderr.write(". Did you mean %s?" % possible_matches[0]) sys.stderr.write("\nType '%s help' for usage.\n" % self
Refs #33476 -- Reformatted code with Black.
fetch_command
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
__init__.py
14
20
https://github.com/django/django.git
6
126
0
89
223
Python
{ "docstring": "\n Try to fetch the given subcommand, printing a message with the\n appropriate command called from the command line (usually\n \"django-admin\" or \"manage.py\") if it can't be found.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 24 }
def fetch_command(self, subcommand): # Get commands outside of try block to prevent swallowing exceptions commands = get_commands() try: app_name = commands[subcommand] except KeyError: if os.environ.get("DJANGO_SETTINGS_MODULE"): # If `subcommand` is missing due to misconfigured settings, the # following line will retrigger an ImproperlyConfigured exception # (get_commands() swallows the original one) so the user is # informed about it. settings.INSTALLED_APPS elif not settings.configured: sys.stderr.write("No Django settings specified.\n") possible_matches = get_close_matches(subcommand, commands) sys.stderr.write("Unknown command: %r" % subcommand) if possible_matches: sys.stderr.write(". Did you mean %s?" % possible_matches[0]) sys.stderr.write("\nType '%s help' for usage.\n" % self.prog_name) sys.exit(1) if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. klass = app_name else: klass = load_command_class(app_name, subcommand) return klass
42,643
178,270
382
label_studio/core/storage.py
63
28
def url(self, name): name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) blob_params = self.get_object_parameters(name) no_signed_url = ( blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth) if not self.custom_endpoint and no_signed_url: return blob.public_url elif no_signed_url: out = '{storage_base_url}/{quoted_name}'.format( storage_base_url=self.custom_endpoint, quoted_name=_quote(name, safe=b"/~"), ) return out elif not self.custom_endpoint: out2 = blob.generate_signed_url( expiration=self.expiration, version="v4", **self._get_signing_kwargs() ) return out2 else: out3 = blob.generate_signed_url( bucket_bound_hostname=self.custom_endpoint, expir
fix: DEV-3911: Move persistent storages to OS (#3377) * fix: DEV-3911: Move persistent storages to OS * Fix * Add deps * Back header * Move DownloadStorageData handler * Update all urls json * Fix import * add nginx config * Fix GSC storage Co-authored-by: Sergei Ivashchenko <triklozoid@gmail.com> Co-authored-by: Sergey Zhuk <sergey.zhuk@heartex.com>
url
92314e4a9c431c407533e4a064481acf3c5983ab
label-studio
storage.py
16
29
https://github.com/heartexlabs/label-studio.git
6
164
0
41
266
Python
{ "docstring": "\n Return public url or a signed url for the Blob.\n This DOES NOT check for existance of Blob - that makes codes too slow\n for many use cases.\n Overridden to force the use of the IAM signBlob API.\n See https://github.com/googleapis/python-storage/blob/519074112775c19742522158f612b467cf590219/google/cloud/storage/_signing.py#L628 # NOQA\n ", "language": "en", "n_whitespaces": 86, "n_words": 42, "vocab_size": 35 }
def url(self, name): name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) blob_params = self.get_object_parameters(name) no_signed_url = ( blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth) if not self.custom_endpoint and no_signed_url: return blob.public_url elif no_signed_url: out = '{storage_base_url}/{quoted_name}'.format( storage_base_url=self.custom_endpoint, quoted_name=_quote(name, safe=b"/~"), ) return out elif not self.custom_endpoint: out2 = blob.generate_signed_url( expiration=self.expiration, version="v4", **self._get_signing_kwargs() ) return out2 else: out3 = blob.generate_signed_url( bucket_bound_hostname=self.custom_endpoint, expiration=self.expiration, version="v4", **self._get_signing_kwargs() ) return out3
76,591
260,960
242
sklearn/utils/validation.py
104
23
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all): if isclass(estimator): raise TypeError("{} is a class, not an instance.".format(estimator)) if msg is None: msg = ( "This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this estimator." ) if not hasattr(estimator, "fit"): raise TypeError("%s is not an estimator instance." % (estimator)) if attributes is not None: if not isinstance(attributes, (list, tuple)): attributes = [attributes] fitted = all_or_any([hasattr(estimator, attr) for attr in attributes]) elif hasattr(estimator, "__sklearn_is_fitted__"): fitted = estimator.__sklearn_is_fitted__() else: fitted = [ v for v in vars(estimator) if v.endswith("_") and not v.startswith("__") ] if not fitted: raise NotFittedError(msg % {"name": type(estimator).__name__})
DOC Ensures that check_is_fitted passes numpydoc validation (#24454)
check_is_fitted
b850a9417d4777931e2894fd8155b73dc87973b9
scikit-learn
validation.py
16
22
https://github.com/scikit-learn/scikit-learn.git
12
170
0
69
284
Python
{ "docstring": "Perform is_fitted validation for estimator.\n\n Checks if the estimator is fitted by verifying the presence of\n fitted attributes (ending with a trailing underscore) and otherwise\n raises a NotFittedError with the given message.\n\n If an estimator does not set any attributes with a trailing underscore, it\n can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the\n estimator is fitted or not.\n\n Parameters\n ----------\n estimator : estimator instance\n Estimator instance for which the check is performed.\n\n attributes : str, list or tuple of str, default=None\n Attribute name(s) given as string or a list/tuple of strings\n Eg.: ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n\n If `None`, `estimator` is considered fitted if there exist an\n attribute that ends with a underscore and does not start with double\n underscore.\n\n msg : str, default=None\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this\n estimator.\"\n\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n\n all_or_any : callable, {all, any}, default=all\n Specify whether all or any of the given attributes must exist.\n\n Raises\n ------\n TypeError\n If the estimator is a class or not an estimator instance\n\n NotFittedError\n If the attributes are not found.\n ", "language": "en", "n_whitespaces": 369, "n_words": 213, "vocab_size": 127 }
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all): if isclass(estimator): raise TypeError("{} is a class, not an instance.".format(estimator)) if msg is None: msg = ( "This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this estimator." ) if not hasattr(estimator, "fit"): raise TypeError("%s is not an estimator instance." % (estimator)) if attributes is not None: if not isinstance(attributes, (list, tuple)): attributes = [attributes] fitted = all_or_any([hasattr(estimator, attr) for attr in attributes]) elif hasattr(estimator, "__sklearn_is_fitted__"): fitted = estimator.__sklearn_is_fitted__() else: fitted = [ v for v in vars(estimator) if v.endswith("_") and not v.startswith("__") ] if not fitted: raise NotFittedError(msg % {"name": type(estimator).__name__})
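A minimal demonstration of the behaviour the docstring documents, assuming scikit-learn is installed; the estimator and toy data are arbitrary choices for illustration:

from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_is_fitted

est = LogisticRegression()
try:
    check_is_fitted(est)                     # no fitted attributes yet
except NotFittedError as exc:
    print("not fitted:", exc)

est.fit([[0.0], [1.0]], [0, 1])              # tiny toy problem
check_is_fitted(est, attributes=["coef_"])   # passes silently once coef_ exists
print("fitted")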
120,634
334,468
261
models/vision/glide/modeling_glide.py
113
37
def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None): if model_kwargs is None: model_kwargs = {} B, C = x.shape[:2] assert t.shape == (B,) model_output = model(x, t, transformer_out) assert model_output.shape == (B, C * 2, *x.shape[2:]) model_output, model_var_values = torch.split(model_output, C, dim=1) min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape) max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape) # The model_var_values is [-1, 1] for [min_var, max_var]. frac = (model_var_values + 1) / 2 model_log_variance = frac * max_log + (1 - frac) * min_log model_variance = torch.exp(model_log_variance) pred_xstart = self._predict_xstart_from_eps(x
Classifier-free guidance scheduler + GLIDe pipeline
p_mean_variance
1e21f061601dda0aa9740e88bfce68bf4aac4acd
diffusers
modeling_glide.py
12
19
https://github.com/huggingface/diffusers.git
3
243
0
77
356
Python
{ "docstring": "\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n ", "language": "en", "n_whitespaces": 276, "n_words": 116, "vocab_size": 76 }
def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None): if model_kwargs is None: model_kwargs = {} B, C = x.shape[:2] assert t.shape == (B,) model_output = model(x, t, transformer_out) assert model_output.shape == (B, C * 2, *x.shape[2:]) model_output, model_var_values = torch.split(model_output, C, dim=1) min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape) max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape) # The model_var_values is [-1, 1] for [min_var, max_var]. frac = (model_var_values + 1) / 2 model_log_variance = frac * max_log + (1 - frac) * min_log model_variance = torch.exp(model_log_variance) pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) if clip_denoised: pred_xstart = pred_xstart.clamp(-1, 1) model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t) assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape return model_mean, model_variance, model_log_variance, pred_xstart
76,372
260,606
24
sklearn/utils/tests/test_estimator_html_repr.py
12
9
def test_invalid_parameters_in_stacking(): stacker = StackingClassifier(estimators
FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015) Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
test_invalid_parameters_in_stacking
84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca
scikit-learn
test_estimator_html_repr.py
10
4
https://github.com/scikit-learn/scikit-learn.git
1
32
0
10
56
Python
{ "docstring": "Invalidate stacking configuration uses default repr.\n\n Non-regression test for #24009.\n ", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
def test_invalid_parameters_in_stacking(): stacker = StackingClassifier(estimators=[]) html_output = estimator_html_repr(stacker) assert html.escape(str(stacker)) in html_output
14,485
67,302
44
erpnext/regional/south_africa/setup.py
59
13
def add_permissions(): for doctype in ("South Africa VAT Settings", "South Africa VAT Account"): add_permission(doctype, "All", 0) for role in ("Accounts Manager", "Accounts User", "System Manager"): add_permission(doctype, role, 0) update_permission_property(doctype, role, 0, "write", 1) update_permission_property(doctype, role, 0, "create", 1) if not frappe.db.get_value("Custom Role",
style: format code with black
add_permissions
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
setup.py
19
15
https://github.com/frappe/erpnext.git
4
128
0
46
215
Python
{ "docstring": "Add Permissions for South Africa VAT Settings and South Africa VAT Account\n\tand VAT Audit Report", "language": "en", "n_whitespaces": 14, "n_words": 16, "vocab_size": 11 }
def add_permissions(): for doctype in ("South Africa VAT Settings", "South Africa VAT Account"): add_permission(doctype, "All", 0) for role in ("Accounts Manager", "Accounts User", "System Manager"): add_permission(doctype, role, 0) update_permission_property(doctype, role, 0, "write", 1) update_permission_property(doctype, role, 0, "create", 1) if not frappe.db.get_value("Custom Role", dict(report="VAT Audit Report")): frappe.get_doc( dict( doctype="Custom Role", report="VAT Audit Report", roles=[dict(role="Accounts User"), dict(role="Accounts Manager"), dict(role="Auditor")], ) ).insert()
50,370
203,426
44
django/contrib/admin/options.py
16
10
def has_delete_permission(self, request, obj=None): opts = self.opts codename = get_permission_codename("delete", opts) retu
Refs #33476 -- Reformatted code with Black.
has_delete_permission
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
options.py
10
4
https://github.com/django/django.git
1
42
0
15
69
Python
{ "docstring": "\n Return True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to delete the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to delete *any* object of the given type.\n ", "language": "en", "n_whitespaces": 129, "n_words": 72, "vocab_size": 42 }
def has_delete_permission(self, request, obj=None): opts = self.opts codename = get_permission_codename("delete", opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename))
23,829
109,922
75
lib/mpl_toolkits/mplot3d/art3d.py
26
17
def set_3d_properties(self, zs=0, zdir='z'): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._ve
Improve mpl_toolkit documentation
set_3d_properties
df6f95703b60348e01603f98a439b133da2938a0
matplotlib
art3d.py
10
7
https://github.com/matplotlib/matplotlib.git
1
72
0
20
116
Python
{ "docstring": "\n Set the *z* position and direction of the line.\n\n Parameters\n ----------\n zs : float or array of floats\n The location along the *zdir* axis in 3D space to position the\n line.\n zdir : {'x', 'y', 'z'}\n Plane to plot line orthogonal to. Default: 'z'.\n See `.get_dir_vector` for a description of the values.\n ", "language": "en", "n_whitespaces": 139, "n_words": 52, "vocab_size": 42 }
def set_3d_properties(self, zs=0, zdir='z'): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._verts3d = juggle_axes(xs, ys, zs, zdir) self.stale = True
21,305
101,926
29
lib/gui/project.py
8
5
def clear_tasks(self): logger.debug("Clearing stored tasks") self._tasks =
Typing - lib.gui.display_command
clear_tasks
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
faceswap
project.py
8
3
https://github.com/deepfakes/faceswap.git
1
18
0
8
35
Python
{ "docstring": " Clears all of the stored tasks.\n\n This is required when loading a task stored in a legacy project file, and is only to be\n called by :class:`Project` when a project has been loaded which is in fact a task.\n ", "language": "en", "n_whitespaces": 61, "n_words": 39, "vocab_size": 30 }
def clear_tasks(self): logger.debug("Clearing stored tasks") self._tasks = {}
14,117
66,160
28
erpnext/hr/doctype/leave_application/leave_application.py
39
15
def add_department_leaves(events, start, end, employee, company): department = frappe.db.get_value("Emplo
style: format code with black
add_department_leaves
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
leave_application.py
10
11
https://github.com/frappe/erpnext.git
2
71
0
33
113
Python
{ "docstring": "select name from tabEmployee where department=%s\n\t\tand company=%s", "language": "en", "n_whitespaces": 6, "n_words": 8, "vocab_size": 8 }
def add_department_leaves(events, start, end, employee, company): department = frappe.db.get_value("Employee", employee, "department") if not department: return # department leaves department_employees = frappe.db.sql_list( , (department, company), ) filter_conditions = ' and employee in ("%s")' % '", "'.join(department_employees) add_leaves(events, start, end, filter_conditions=filter_conditions)
29,216
130,291
58
python/ray/_private/thirdparty/pathspec/util.py
19
9
def is_file(self, follow_links=None): if follow_links is None: follow_links = True node_stat = self._stat if follow_links else self._lstat
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
is_file
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
util.py
8
5
https://github.com/ray-project/ray.git
3
38
0
15
61
Python
{ "docstring": "\n Get whether the entry is a regular file.\n\n *follow_links* (:class:`bool` or :data:`None`) is whether to follow\n symbolic links. If this is :data:`True`, a symlink to a regular file\n will result in :data:`True`. Default is :data:`None` for :data:`True`.\n\n Returns whether the entry is a regular file (:class:`bool`).\n ", "language": "en", "n_whitespaces": 89, "n_words": 46, "vocab_size": 30 }
def is_file(self, follow_links=None): if follow_links is None: follow_links = True node_stat = self._stat if follow_links else self._lstat return stat.S_ISREG(node_stat.st_mode)
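The follow_links distinction described above comes down to stat versus lstat; a standard-library sketch of the same check (a POSIX system is assumed, since it creates a symlink):

import os
import stat
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, "file.txt")
    link = os.path.join(tmp, "link.txt")
    open(target, "w").close()
    os.symlink(target, link)

    # follow_links=True -> os.stat follows the symlink to the regular file
    print(stat.S_ISREG(os.stat(link).st_mode))   # True
    # follow_links=False -> os.lstat reports on the symlink itself
    print(stat.S_ISREG(os.lstat(link).st_mode))  # False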
51,888
207,170
119
tests/admin_inlines/tests.py
29
7
def test_tabular_model_form_meta_readonly_field(self): response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add")) self.assertCont
Refs #33476 -- Reformatted code with Black.
test_tabular_model_form_meta_readonly_field
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
11
10
https://github.com/django/django.git
1
39
0
24
75
Python
{ "docstring": "\n Tabular inlines use ModelForm.Meta.help_texts and labels for read-only\n fields.\n ", "language": "en", "n_whitespaces": 31, "n_words": 9, "vocab_size": 9 }
def test_tabular_model_form_meta_readonly_field(self): response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add")) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text from ModelForm.Meta)" ' 'title="Help text from ModelForm.Meta">', ) self.assertContains(response, "Label from ModelForm.Meta")
17,033
80,220
483
wagtail/snippets/tests/test_locking.py
121
18
def test_edit_get_unlocked_no_lock_permission(self): # Use edit permission only self.set_permissions(["change"]) # Get the edit page response = self.client.get(self.get_url("edit")) html = response.content.decode() lock_url = self.get_url("lock") # Should not show lock message self.assertNotContains( response, "<b>'I&#x27;m a lockable snippet!' was locked</b>", ) # Should show unlocked information in the side panel self.assertContains( response, f"Anyone can edit this {self.model_name}.", ) # Should not show info to lock the object in the side panel self.assertNotContains( response, "Lock it to prevent others from editing.", ) # Should show Save action menu item self.assertContains( response, f"<em>{self.save_button_label}</em>", html=True, ) # Should not show Locked action menu item self.assertTagInHTML( '<button type="submit" disabled>Locked</button>',
Add tests for locking snippets
test_edit_get_unlocked_no_lock_permission
10dbbddaf35607e4257f50dd960520a1268dd225
wagtail
test_locking.py
11
34
https://github.com/wagtail/wagtail.git
1
123
0
70
225
Python
{ "docstring": "A user cannot lock an object without the lock permission.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def test_edit_get_unlocked_no_lock_permission(self): # Use edit permission only self.set_permissions(["change"]) # Get the edit page response = self.client.get(self.get_url("edit")) html = response.content.decode() lock_url = self.get_url("lock") # Should not show lock message self.assertNotContains( response, "<b>'I&#x27;m a lockable snippet!' was locked</b>", ) # Should show unlocked information in the side panel self.assertContains( response, f"Anyone can edit this {self.model_name}.", ) # Should not show info to lock the object in the side panel self.assertNotContains( response, "Lock it to prevent others from editing.", ) # Should show Save action menu item self.assertContains( response, f"<em>{self.save_button_label}</em>", html=True, ) # Should not show Locked action menu item self.assertTagInHTML( '<button type="submit" disabled>Locked</button>', html, count=0, allow_extra_attrs=True, ) # Should not show the lock button self.assertTagInHTML( f'<button type="button" data-url="{lock_url}" data-action-lock-unlock>Lock</button>', html, count=0, allow_extra_attrs=True, )
19,047
94,186
751
src/sentry/models/counter.py
184
23
def increment_project_counter(project, delta=1, using="default"): if delta <= 0: raise ValueError("There is only one way, and that's up.") sample_rate = options.get("store.projectcounter-modern-upsert-sample-rate") modern_upsert = sample_rate and random.random() <= sample_rate # To prevent the statement_timeout leaking into the session we need to use # set local which can be used only within a transaction with transaction.atomic(using=using): cur = connections[using].cursor() try: statement_timeout = None if settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT: # WARNING: This is not a proper fix and should be removed once # we have better way of generating next_short_id. cur.execute("show statement_timeout") statement_timeout = cur.fetchone()[0] cur.execute( "set local statement_timeout = %s", [settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT], ) if modern_upsert: # Our postgres wrapper thing does not allow for named arguments cur.execute( "insert into sentry_projectcounter (project_id, value) " "values (%s, %s) " "on conflict (project_id) do update " "set value = sentry_projectcounter.value + %s "
fix(counter): Fix minor linting violation (#37392)
increment_project_counter
7f0e298ca45cd41f0e6df3968a6c0c2923a7b831
sentry
counter.py
16
39
https://github.com/getsentry/sentry.git
7
179
0
127
312
Python
{ "docstring": "This method primarily exists so that south code can use it.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def increment_project_counter(project, delta=1, using="default"): if delta <= 0: raise ValueError("There is only one way, and that's up.") sample_rate = options.get("store.projectcounter-modern-upsert-sample-rate") modern_upsert = sample_rate and random.random() <= sample_rate # To prevent the statement_timeout leaking into the session we need to use # set local which can be used only within a transaction with transaction.atomic(using=using): cur = connections[using].cursor() try: statement_timeout = None if settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT: # WARNING: This is not a proper fix and should be removed once # we have better way of generating next_short_id. cur.execute("show statement_timeout") statement_timeout = cur.fetchone()[0] cur.execute( "set local statement_timeout = %s", [settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT], ) if modern_upsert: # Our postgres wrapper thing does not allow for named arguments cur.execute( "insert into sentry_projectcounter (project_id, value) " "values (%s, %s) " "on conflict (project_id) do update " "set value = sentry_projectcounter.value + %s " "returning value", [project.id, delta, delta], ) else: cur.execute( "select sentry_increment_project_counter(%s, %s)", [project.id, delta], ) project_counter = cur.fetchone()[0] if statement_timeout is not None: cur.execute( "set local statement_timeout = %s", [statement_timeout], ) return project_counter finally: cur.close() # this must be idempotent because it seems to execute twice # (at least during test runs)
73,160
249,830
21
tests/storage/test_id_generators.py
7
4
def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
Reintroduce #14376, with bugfix for monoliths (#14468) * Add tests for StreamIdGenerator * Drive-by: annotate all defs * Revert "Revert "Remove slaved id tracker (#14376)" (#14463)" This reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in turn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. This restores the latter. * Fix StreamIdGenerator not handling unpersisted IDs Spotted by @erikjohnston. Closes #14456. * Changelog Co-authored-by: Nick Mills-Barrett <nick@fizzadar.com> Co-authored-by: Erik Johnston <erik@matrix.org>
test_multiple_gen_nexts_closed_in_different_order
115f0eb2334b13665e5c112bd87f95ea393c9047
synapse
test_id_generators.py
8
6
https://github.com/matrix-org/synapse.git
1
26
0
7
28
Python
{ "docstring": "Check that we handle overlapping calls to gen_next, even when their IDs\n created and persisted in different orders.", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 18 }
def test_multiple_gen_nexts_closed_in_different_order(self) -> None: id_gen = self._create_id_generator()
11,712
57,811
49
src/prefect/cli/deployment.py
34
18
def str_presenter(dumper, data): if len(data.splitlines()) > 1: # check for multiline string return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") return dumper.represent_scalar("tag:yaml.org,2002:str", data) yaml.add_representer(str, str_presen
Working YAML generation with lots of bells and whistles
str_presenter
36d9870433a22fff3944fa07f8e2feeb1b622bd9
prefect
deployment.py
11
4
https://github.com/PrefectHQ/prefect.git
2
42
0
30
135
Python
{ "docstring": "\n configures yaml for dumping multiline strings\n Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data\n ", "language": "en", "n_whitespaces": 18, "n_words": 8, "vocab_size": 8 }
def str_presenter(dumper, data): if len(data.splitlines()) > 1: # check for multiline string return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") return dumper.represent_scalar("tag:yaml.org,2002:str", data) yaml.add_representer(str, str_presenter) yaml.representer.SafeRepresenter.add_representer(str, str_presenter) deployment_app = PrefectTyper( name="deployment", help="Commands for working with deployments." ) app.add_typer(deployment_app)
5,201
29,120
123
saleor/core/auth_backend.py
44
16
def _get_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous or obj is not None: return set() perm_cache_name = "_effective_permissions_cache" if not getattr(user_obj, perm_cache_name, None): perms = getattr(self, f"_get_{from_name}_permissions")(user_obj) perms = perms.values_list("content_type__app_label", "codename").order_by() setattr(user_obj, perm_cache_name, {f"{c
Replace Interpolation With Fstring (#11016) * Replace Interpolation With Fstring * Fix out of bound lines. * Revert to lazy formatting for log messages. Also fix failing flake8. * Fix minor code smells and typo. * Make street_address to one line. * Fix test cases. * Fix lints.
_get_permissions
92a0c6c9f4324aa8f65a9b3e3a319604660a92a8
saleor
auth_backend.py
13
9
https://github.com/saleor/saleor.git
6
95
0
34
163
Python
{ "docstring": "Return the permissions of `user_obj` from `from_name`.\n\n `from_name` can be either \"group\" or \"user\" to return permissions from\n `_get_group_permissions` or `_get_user_permissions` respectively.\n ", "language": "en", "n_whitespaces": 43, "n_words": 22, "vocab_size": 19 }
def _get_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous or obj is not None: return set() perm_cache_name = "_effective_permissions_cache" if not getattr(user_obj, perm_cache_name, None): perms = getattr(self, f"_get_{from_name}_permissions")(user_obj) perms = perms.values_list("content_type__app_label", "codename").order_by() setattr(user_obj, perm_cache_name, {f"{ct}.{name}" for ct, name in perms}) return getattr(user_obj, perm_cache_name)
81,576
276,145
91
keras/saving/saved_model/saved_model_test.py
28
12
def test_trainable_layers(self): mo
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
test_trainable_layers
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
saved_model_test.py
10
8
https://github.com/keras-team/keras.git
1
80
0
24
130
Python
{ "docstring": "Tests that trainable status of individual layers is preserved.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_trainable_layers(self): model = model = self._get_model() # Set the last layer to *not* be trainable. model.layers[-1].trainable = False self._train_model(model, use_dataset=True) loaded = self._save_and_load(model) self._test_evaluation(model, loaded) self.assertFalse(model.layers[-1].trainable) self.assertFalse(loaded.layers[-1].trainable)
35,236
153,052
239
modin/core/dataframe/pandas/dataframe/dataframe.py
57
16
def _reorder_labels(self, row_positions=None, col_positions=None): if row_positions is not None: ordered_rows = self._partition_mgr_cls.map_axis_partitions( 0, self._partitions, lambda df: df.iloc[row_positions] ) row_idx = self.index[row_positions] else: ordered_rows = self._partitions row_idx = self.index if col_positions is not None: ordered_cols = self._partition_mgr_cls.map_a
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: Rehan Durrani <rehan@ponder.io>
_reorder_labels
58bbcc37477866d19c8b092a0e1974a4f0baa586
modin
dataframe.py
13
18
https://github.com/modin-project/modin.git
3
123
0
36
187
Python
{ "docstring": "\n Reorder the column and or rows in this DataFrame.\n\n Parameters\n ----------\n row_positions : list of int, optional\n The ordered list of new row orders such that each position within the list\n indicates the new position.\n col_positions : list of int, optional\n The ordered list of new column orders such that each position within the\n list indicates the new position.\n\n Returns\n -------\n PandasDataframe\n A new PandasDataframe with reordered columns and/or rows.\n ", "language": "en", "n_whitespaces": 189, "n_words": 70, "vocab_size": 39 }
def _reorder_labels(self, row_positions=None, col_positions=None): if row_positions is not None: ordered_rows = self._partition_mgr_cls.map_axis_partitions( 0, self._partitions, lambda df: df.iloc[row_positions] ) row_idx = self.index[row_positions] else: ordered_rows = self._partitions row_idx = self.index if col_positions is not None: ordered_cols = self._partition_mgr_cls.map_axis_partitions( 1, ordered_rows, lambda df: df.iloc[:, col_positions] ) col_idx = self.columns[col_positions] else: ordered_cols = ordered_rows col_idx = self.columns return self.__constructor__(ordered_cols, row_idx, col_idx)
14,998
69,225
14
erpnext/assets/doctype/asset_capitalization/test_asset_capitalization.py
27
13
def get_actual_sle_dict(name): sles = frappe.db.sql( , name, as_dict=1, ) sle_dict = {} for d in sles: sle_dict[(d.item_code, d.warehouse)] = { "actual_qty": d.actual_qty,
feat: Asset Capitalization - manual selection of entry type - GLE cleanup with smaller functions - GLE considering periodical inventory - test cases
get_actual_sle_dict
58d430fe3ee62e93ad8d16a08bb42156a25b7d41
erpnext
test_asset_capitalization.py
11
22
https://github.com/frappe/erpnext.git
2
60
0
24
94
Python
{ "docstring": "\n\t\tselect\n\t\t\titem_code, warehouse,\n\t\t\tsum(actual_qty) as actual_qty,\n\t\t\tsum(stock_value_difference) as stock_value_difference\n\t\tfrom `tabStock Ledger Entry`\n\t\twhere voucher_type = 'Asset Capitalization' and voucher_no = %s\n\t\tgroup by item_code, warehouse\n\t\thaving actual_qty != 0\n\t", "language": "en", "n_whitespaces": 22, "n_words": 30, "vocab_size": 27 }
def get_actual_sle_dict(name): sles = frappe.db.sql( , name, as_dict=1, ) sle_dict = {} for d in sles: sle_dict[(d.item_code, d.warehouse)] = { "actual_qty": d.actual_qty, "stock_value_difference": d.stock_value_difference, } return sle_dict
113,486
314,885
125
homeassistant/config_entries.py
30
11
async def _async_process_on_unload(self) -> None:
Track tasks adding entities (#73828) * Track tasks adding entities * Update homeassistant/config_entries.py * fix cast tests Co-authored-by: J. Nick Koston <nick@koston.org>
_async_process_on_unload
00810235c92b492a966c6021021d49360ffb3cdd
core
config_entries.py
13
10
https://github.com/home-assistant/core.git
7
71
0
25
120
Python
{ "docstring": "Process the on_unload callbacks and wait for pending tasks.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def _async_process_on_unload(self) -> None: if self._on_unload is not None: while self._on_unload: self._on_unload.pop()() while self._pending_tasks: pending = [task for task in self._pending_tasks if not task.done()] self._pending_tasks.clear() if pending: await asyncio.gather(*pending)
36,013
154,490
22
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py
13
7
def apply_func(partition, func, *args, **kwargs):
FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715) Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Jonathan Shi <jhshi@ponder.io>
apply_func
d6d503ac7c3028d871c34d9e99e925ddb0746df6
modin
partition.py
9
3
https://github.com/modin-project/modin.git
1
32
0
12
51
Python
{ "docstring": "\n Execute a function on the partition in a worker process.\n\n Parameters\n ----------\n partition : pandas.DataFrame\n A pandas DataFrame the function needs to be executed on.\n func : callable\n The function to perform.\n *args : list\n Positional arguments to pass to ``func``.\n **kwargs : dict\n Keyword arguments to pass to ``func``.\n\n Returns\n -------\n pandas.DataFrame\n The resulting pandas DataFrame.\n str\n The node IP address of the worker process.\n\n Notes\n -----\n Directly passing a call queue entry (i.e. a list of [func, args, kwargs]) instead of\n destructuring it causes a performance penalty.\n ", "language": "en", "n_whitespaces": 180, "n_words": 89, "vocab_size": 60 }
def apply_func(partition, func, *args, **kwargs): result = func(partition, *args, **kwargs) return result, get_ip()
54,603
216,481
186
salt/client/mixins.py
53
22
def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True): if daemonize and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize()
Implement ``__getstate__`` and ``__setstate__`` instead of using ``classmethod`` Signed-off-by: Pedro Algarvio <palgarvio@vmware.com>
_proc_function_remote
c78f1ee4f49df35ab04e921a45de0878716d8bf5
salt
mixins.py
11
12
https://github.com/saltstack/salt.git
4
105
0
47
175
Python
{ "docstring": "\n Run this method in a multiprocess target to execute the function on the\n master and fire the return data on the event bus\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 19 }
def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True): if daemonize and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize() # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() # pack a few things into low low["__jid__"] = jid low["__user__"] = user low["__tag__"] = tag try: return self.cmd_sync(low) except salt.exceptions.EauthAuthenticationError as exc: log.error(exc)
36,536
156,073
23
dask/array/utils.py
14
9
def array_safe(a, like, **kwargs):
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
array_safe
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
utils.py
8
3
https://github.com/dask/dask.git
1
35
0
13
51
Python
{ "docstring": "\n If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`,\n otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching\n the call to the library that implements the like array. Note that\n when `a` is a `dask.Array` backed by `cupy.ndarray` but `like`\n isn't, this function will call `a.compute(scheduler=\"sync\")`\n before `np.array`, as downstream libraries are unlikely to know how\n to convert a `dask.Array` and CuPy doesn't implement `__array__` to\n prevent implicit copies to host.\n ", "language": "en", "n_whitespaces": 94, "n_words": 66, "vocab_size": 52 }
def array_safe(a, like, **kwargs): from dask.array.routines import array return _array_like_safe(np.array, array, a, like, **kwargs)
25,668
116,102
73
mindsdb/integrations/handlers/elasticsearch_handler/elasticsearch_handler.py
23
12
def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df.drop(['type', 'type'], axis=1) result.data_frame = df.rename(columns={'name': 'table_name'}) return result
implemented the get_tables() and get_columns() methods
get_tables
c8accc16e3c56d0e7d2a0b63c63a956849da57da
mindsdb
elasticsearch_handler.py
12
14
https://github.com/mindsdb/mindsdb.git
1
58
0
16
103
Python
{ "docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SHOW TABLES;\n ", "language": "en", "n_whitespaces": 66, "n_words": 14, "vocab_size": 14 }
def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df.drop(['type', 'type'], axis=1) result.data_frame = df.rename(columns={'name': 'table_name'}) return result
74,596
254,376
75
d2l/jax.py
33
19
def accuracy(self, params, X, Y, averaged=True): Y_hat = self.apply(params, X) Y_hat = d2l.reshap
JAX: Add section classification.md (#2293)
accuracy
f348aecdade3cdec4f93b72da548c7394ecb42ce
d2l-en
jax.py
12
6
https://github.com/d2l-ai/d2l-en.git
2
101
0
28
150
Python
{ "docstring": "Compute the number of correct predictions.\n \n Defined in :numref:`sec_classification`", "language": "en", "n_whitespaces": 19, "n_words": 9, "vocab_size": 9 }
def accuracy(self, params, X, Y, averaged=True): Y_hat = self.apply(params, X) Y_hat = d2l.reshape(Y_hat, (-1, Y_hat.shape[-1])) preds = d2l.astype(d2l.argmax(Y_hat, axis=1), Y.dtype) compare = d2l.astype(preds == d2l.reshape(Y, -1), d2l.float32) return d2l.reduce_mean(compare) if averaged else compare
40,848
173,483
380
cps/tasks/metadata_backup.py
92
24
def open_metadata(self, book, custom_columns): if config.config_use_google_drive: if not gdriveutils.is_gdrive_ready(): raise Exception('Google Drive is configured but not ready') web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path) if not web_content_link: raise Exception('Google Drive cover url not found') stream = None try: stream = urlopen(web_content_link) except Exception as ex: # Bubble exception to calling function self.log.debug('Error reading metadata
Backup metadata 3rd step
open_metadata
26be5ee2372b08c2f906661283a12e84d6c181f8
calibre-web
metadata_backup.py
15
37
https://github.com/janeczku/calibre-web.git
7
121
0
68
209
Python
{ "docstring": "namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE}\n test = etree.parse(book_metadata_filepath)\n root = test.getroot()\n for i in root.iter():\n self.log.info(i)\n title = root.find(\"dc:metadata\", namespaces)\n pass\n with open(book_metadata_filepath, \"rb\") as f:\n xml = f.read()\n\n root = objectify.fromstring(xml)\n # root.metadata['{http://purl.org/dc/elements/1.1/}title']\n # root.metadata[PURL + 'title']\n # getattr(root.metadata, PURL +'title')\n # test = objectify.parse()\n pass\n # backup not found has to be created\n #raise Exception('Book cover file not found')", "language": "en", "n_whitespaces": 245, "n_words": 62, "vocab_size": 48 }
def open_metadata(self, book, custom_columns): if config.config_use_google_drive: if not gdriveutils.is_gdrive_ready(): raise Exception('Google Drive is configured but not ready') web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path) if not web_content_link: raise Exception('Google Drive cover url not found') stream = None try: stream = urlopen(web_content_link) except Exception as ex: # Bubble exception to calling function self.log.debug('Error reading metadata.opf: ' + str(ex)) # ToDo Check whats going on raise ex finally: if stream is not None: stream.close() else: # ToDo: Handle book folder not found or not readable book_metadata_filepath = os.path.join(config.config_calibre_dir, book.path, 'metadata.opf') #if not os.path.isfile(book_metadata_filepath): self.create_new_metadata_backup(book, custom_columns, book_metadata_filepath) # else:
@pytest.mark.skipif(not can_import_module("tkinter"), reason="tkinter cannot be imported.")
77,352
262,765
38
tests/functional/test_libraries.py
39
8
def test_gevent_monkey(pyi_builder): pyi_builder.test_source() # The tkinter
tests: gevent tests: remove no-op excludes The `gevent` tests seem to be attempting to exclude several packages. As per comment in 416e1a0e83bf5a4924cc50d2befa2bb622b55107, this was introduced in an attempt to break the following Windows-specific import chain: setuptools.msvc -> numpy -> numpy.testing -> pytest -> pygments -> PIL -> PIL.ImageQt -> PySide2. However, nowadays we already break that chain in two places: our setuptools.msvc hook excludes numpy, and our numpy hook excludes pytest. More importantly, `excludes` is not a valid keyword argument for the `pyi_builder.test_source` (anymore?), and is quietly swallowed by the `**kwargs`. So those exclude lists achieve nothing, except confusing people who look at existing code to find a way to exclude packages in a test. (As a side note, the tests that do use `excludes` keyword argument are passing it to the modulegraph's functions, not the `pyi_builder` fixture ones.)
test_gevent_monkey
93ad16d5c970f70f843a5eda8b177f681743005b
pyinstaller
test_libraries.py
10
5
https://github.com/pyinstaller/pyinstaller.git
1
11
1
36
54
Python
{ "docstring": "\n from gevent.monkey import patch_all\n patch_all()\n ", "language": "en", "n_whitespaces": 27, "n_words": 5, "vocab_size": 5 }
def test_gevent_monkey(pyi_builder): pyi_builder.test_source() # The tkinter module may be available for import, but not actually importable due to missing shared libraries. # Therefore, we need to use `can_import_module`-based skip decorator instead of `@importorskip`. @pytest.mark.skipif(not can_import_module("tkinter"), reason="tkinter cannot be imported.")
12,446
61,221
32
.venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py
20
7
def split_auth_netloc_from_url(url): # type: (str) -> Tuple[str, str, Tuple[str,
upd; format
split_auth_netloc_from_url
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
misc.py
8
3
https://github.com/jindongwang/transferlearning.git
1
26
0
18
42
Python
{ "docstring": "\n Parse a url into separate netloc, auth, and url with no auth.\n\n Returns: (url_without_auth, netloc, (username, password))\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 15 }
def split_auth_netloc_from_url(url): # type: (str) -> Tuple[str, str, Tuple[str, str]] url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) return url_without_auth, netloc, auth
46,153
189,647
19
tests/test_text_mobject.py
10
7
def test_non_str_color(): text = Text("test_color_inheritance", color=Color("blue")) markup_text = MarkupText("test_color_inheritance", color=Color("blue"))
:class:`~.MathTex`, :class:`~.Tex`, :class:`~.Text` and :class:`~.MarkupText` inherit color from their parent mobjects. (#2467) * comment out color-related things from tex_mob * add change to svg_mobject * MarkupText handles colour internally * MarkupText handles colour internally * make coordinate_system.py colour agnostic * get_line_from_axis_to_point * add typings for SingleStringMathTex * add typings for MathTex * make internal methods internal * black + isort * fix typo * black + isort * fix typo * revert internalizing change * Revert "Merge branch 'mathtexx' of https://github.com/hydrobeam/manim into mathtexx" This reverts commit 6be3c3981440fd5cfee54e5d9f24b30e1ba991e9, reversing changes made to 2b30b446ae4004efb06adbb646f54e9ef269bc61. * remove accidental import * do it in a less bad way * WIP: Text2setting causing problems * allow tex_mobject.py to inherit colour * allow tex_mobject.py to inherit colour * add tests * remove undeedde imports + formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix warnings from pre-commit hooks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix some tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * remove other color_inheritance test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * accomodate the color->attribute PR * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix tests and doc build add a check for None when inheriting colour in `coordinate_systems.py`, and turn written tests into graphical tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Comment out `Text` color inheritance test. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Set font for text_color_inheritance test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Small change to retrigger docs build * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
test_non_str_color
2275ec5916de0ad3bedbc276da09fc3bfbae4d5e
manim
test_text_mobject.py
12
3
https://github.com/ManimCommunity/manim.git
1
31
0
8
60
Python
{ "docstring": "Test that the Text and MarkupText can accept non_str color values\n i.e. colour.Color(red).", "language": "en", "n_whitespaces": 15, "n_words": 13, "vocab_size": 13 }
def test_non_str_color(): text = Text("test_color_inheritance", color=Color("blue")) markup_text = MarkupText("test_color_inheritance", color=Color("blue"))
103,899
305,107
49
tests/components/zha/test_config_flow.py
20
16
async def test_strategy_no_network_settings(pick_radio, mock_app, hass): mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed()) result, port = await pick_radio(RadioType.ezsp) assert ( config_flow.FORMATION_REUSE_SETTINGS not in result["data_schema"].schema["next_step_id"].container )
ZHA backup/restore config flow (#77044)
test_strategy_no_network_settings
f78b39bdbfbe151e8bab72610b6fe03afc8c0747
core
test_config_flow.py
12
7
https://github.com/home-assistant/core.git
1
52
0
19
87
Python
{ "docstring": "Test formation strategy when no network settings are present.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def test_strategy_no_network_settings(pick_radio, mock_app, hass): mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed()) result, port = await pick_radio(RadioType.ezsp) assert ( config_flow.FORMATION_REUSE_SETTINGS not in result["data_schema"].schema["next_step_id"].container )
23,059
108,099
282
lib/matplotlib/mlab.py
121
15
def detrend(x, key=None, axis=None): if key is None or key in ['constant', 'mean', 'default']: return detrend(x, key=detrend_mean, axis=axis) elif key == 'linear': return detrend(x, key=detrend_linear, axis=axis) elif key == 'none': return detrend(x, key=detrend_none, axis=axis) elif callable(key):
Improve mlab documentation (and example)
detrend
17b3c44f67f779e7d103381878f08c548c2c8495
matplotlib
mlab.py
14
21
https://github.com/matplotlib/matplotlib.git
13
180
0
82
295
Python
{ "docstring": "\n Return *x* with its trend removed.\n\n Parameters\n ----------\n x : array or sequence\n Array or sequence containing the data.\n\n key : {'default', 'constant', 'mean', 'linear', 'none'} or function\n The detrending algorithm to use. 'default', 'mean', and 'constant' are\n the same as `detrend_mean`. 'linear' is the same as `detrend_linear`.\n 'none' is the same as `detrend_none`. The default is 'mean'. See the\n corresponding functions for more details regarding the algorithms. Can\n also be a function that carries out the detrend operation.\n\n axis : int\n The axis along which to do the detrending.\n\n See Also\n --------\n detrend_mean : Implementation of the 'mean' algorithm.\n detrend_linear : Implementation of the 'linear' algorithm.\n detrend_none : Implementation of the 'none' algorithm.\n ", "language": "en", "n_whitespaces": 200, "n_words": 114, "vocab_size": 75 }
def detrend(x, key=None, axis=None): if key is None or key in ['constant', 'mean', 'default']: return detrend(x, key=detrend_mean, axis=axis) elif key == 'linear': return detrend(x, key=detrend_linear, axis=axis) elif key == 'none': return detrend(x, key=detrend_none, axis=axis) elif callable(key): x = np.asarray(x) if axis is not None and axis + 1 > x.ndim: raise ValueError(f'axis(={axis}) out of bounds') if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1): return key(x) # try to use the 'axis' argument if the function supports it, # otherwise use apply_along_axis to do it try: return key(x, axis=axis) except TypeError: return np.apply_along_axis(key, axis=axis, arr=x) else: raise ValueError( f"Unknown value for key: {key!r}, must be one of: 'default', " f"'constant', 'mean', 'linear', or a function")
@register.filter(is_safe=True) @stringfilter
15,653
71,270
26
wagtail/admin/templatetags/wagtailadmin_tags.py
12
10
def has_unrendered_errors(bound_field): return bound_field.errors and not hasattr( bound_field.field.widget, "render_with_errors" )
Reformat with black
has_unrendered_errors
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
wagtailadmin_tags.py
11
4
https://github.com/wagtail/wagtail.git
2
22
1
12
57
Python
{ "docstring": "\n Return true if this field has errors that were not accounted for by render_with_errors, because\n the widget does not support the render_with_errors method\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 21 }
def has_unrendered_errors(bound_field): return bound_field.errors and not hasattr( bound_field.field.widget, "render_with_errors" ) @register.filter(is_safe=True) @stringfilter
17,448
82,589
189
cms/tests/test_admin.py
36
9
def test_raw_id_threshold_page_permission_inline_admin(self): with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) # Create users to check if threshold is honored self._get_guys() with self.settings(CMS_RAW_ID_USERS=False): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) with self.settings(CMS_RAW_ID_USERS=True): with sel
perf: Don't count users when CMS_RAW_ID_USERS=True (#7414) * perf: Don't count users when CMS_RAW_ID_USERS=True When using CMS_RAW_ID_USERS=True on a Postgres database with many users, counting the users is slow and will always yield the same result. Only count users when using an integer value as a threshold and reuse the same logic for both PagePermissionInlineAdmin and GlobalPagePermissionAdmin. * Ensure that only integer settings of CMS_RAW_ID_USERS are compared to the number of users * Add documentation for the CMS_RAW_ID_USER=True setting * fix isort for added tests * Fix: in python this is always True: isinstance(False, int) Co-authored-by: Pankrat <lhaehne@gmail.com>
test_raw_id_threshold_page_permission_inline_admin
7ca1b613d8573dff70e45dd54229b0032c3e8ca7
django-cms
test_admin.py
13
14
https://github.com/django-cms/django-cms.git
1
129
0
21
229
Python
{ "docstring": "\n Only count users when using an integer value as threshold for\n CMS_RAW_ID_USERS.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
def test_raw_id_threshold_page_permission_inline_admin(self): with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) # Create users to check if threshold is honored self._get_guys() with self.settings(CMS_RAW_ID_USERS=False): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) with self.settings(CMS_RAW_ID_USERS=True): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user']) with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user'])
56,240
221,145
65
python3.10.4/Lib/bdb.py
22
8
def get_breaks(self, filename, lineno): filename = self
add python 3.10.4 for windows
get_breaks
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bdb.py
11
5
https://github.com/XX-net/XX-Net.git
4
47
0
18
69
Python
{ "docstring": "Return all breakpoints for filename:lineno.\n\n If no breakpoints are set, return an empty list.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 13 }
def get_breaks(self, filename, lineno): filename = self.canonic(filename) return filename in self.breaks and \ lineno in self.breaks[filename] and \ Breakpoint.bplist[filename, lineno] or []
73,002
249,580
156
tests/storage/test_registration.py
26
14
def test_override(self) -> None: self.get_success( self.store.register_user( self.user_id, self.pwhash, approved=True, ) ) user = self.get_success(self.store.get_user_by_id(self.user_id)) self.assertIsNotNone(user) assert user is not None self.assertEqual(user["approved"], 1) approved = self.get_success(s
Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556)
test_override
be76cd8200b18f3c68b895f85ac7ef5b0ddc2466
synapse
test_registration.py
11
17
https://github.com/matrix-org/synapse.git
1
94
0
23
150
Python
{ "docstring": "Tests that if we require approval for new accounts, but we explicitly say the\n new user should be considered approved, they're marked as approved.\n ", "language": "en", "n_whitespaces": 38, "n_words": 24, "vocab_size": 22 }
def test_override(self) -> None: self.get_success( self.store.register_user( self.user_id, self.pwhash, approved=True, ) ) user = self.get_success(self.store.get_user_by_id(self.user_id)) self.assertIsNotNone(user) assert user is not None self.assertEqual(user["approved"], 1) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved)
37,370
158,197
64
d2l/mxnet.py
31
7
def tokenize(lines, token='word'): if token == 'word': return [line.spl
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> * 重复语句 (#1188) Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> Co-authored-by: 
zhou201505013 <39976863+zhou201505013@users.noreply.github.com> Co-authored-by: Xinwei Liu <xinzone@outlook.com> Co-authored-by: Anirudh Dagar <anirudhdagar6@gmail.com> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com> Co-authored-by: gyro永不抽风 <1247006353@qq.com> Co-authored-by: CanChengZheng <zcc550169544@163.com> Co-authored-by: linlin <jajupmochi@gmail.com> Co-authored-by: iuk <liukun0104@gmail.com> Co-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com> Co-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com> Co-authored-by: Chiyuan Fu <fuchiyuan2019@outlook.com> Co-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com> Co-authored-by: Haiker Sun <haizhou.uestc2011@gmail.com> Co-authored-by: Ming Liu <akira.liu@njnu.edu.cn> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: silenceZheng66 <13754430639@163.com> Co-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com> Co-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com> Co-authored-by: Krahets <krahets@163.com> Co-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com> Co-authored-by: Jameson <miraclecome@gmail.com> Co-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com> Co-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com> Co-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com> Co-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com> Co-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com> Co-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com> Co-authored-by: VigourJiang <jiangfuqiang154@163.com> Co-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com> Co-authored-by: LYF <27893441+liyufan@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> Co-authored-by: xiaotinghe <xiaotih@amazon.com> Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com> Co-authored-by: HinGwenWoong <peterhuang0323@qq.com> Co-authored-by: Shuai Zhang <cheungdaven@gmail.com>
tokenize
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
d2l-zh
mxnet.py
12
7
https://github.com/d2l-ai/d2l-zh.git
5
51
0
23
90
Python
{ "docstring": "Split text lines into word or character tokens.\n\n Defined in :numref:`sec_text_preprocessing`", "language": "en", "n_whitespaces": 13, "n_words": 11, "vocab_size": 11 }
def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('ERROR: unknown token type: ' + token)
@frappe.whitelist()
14,507
67,369
158
erpnext/selling/doctype/sales_order/sales_order.py
252
68
def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None): if not selected_items: return if isinstance(selected_items, str): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.apply_discount_on = "" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = "" default_price_list = frappe.get_value("Supplier", supplier, "default_price_list") if default_price_list: target.buying_price_list = default_price_list if any(item.delivered_by_supplier == 1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.contact_display target.customer_contact_mobile = source.contact_mobile target.customer_contact_email = source.contact_email else: target.customer = "" target.customer_name = "" target.run_method("set_missing_values") target.run_method("calculate_taxes_and_totals") def update_item(source, target, source_parent): target.schedule_date = source.delivery_date target.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor)) target.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty) target.project = source_parent.project suppliers = [item.get("supplier") for item in selected_items if item.get("supplier")] suppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order items_to_map = [item.get("item_code") for item in selected_items if item.get("item_code")] items_to_map = list(set(items_to_map)) if not suppliers: frappe.throw( _("Please set a Supplier against the Items to be considered in the Purchase Order.") ) purchase_orders = [] for supplier in suppliers: doc = get_mapped_doc( "Sales Order", source_name, { "Sales Order": { "doctype": "Purchase Order", "field_no_map": [ "address_display", "contact_display", "contact_mobile", "contact_email", "contact_person", "taxes_and_charges", "shipping_address", "terms", ], "validation": {"docstatus": ["=", 1]}, }, "Sales Order Item": { "doctype": "Purchase Order Item", "field_map": [ ["name", "sales_order_item"
style: format code with black
make_purchase_order_for_default_supplier
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
sales_order.py
19
66
https://github.com/frappe/erpnext.git
11
297
1
168
886
Python
{ "docstring": "Creates Purchase Order for each Supplier. Returns a list of doc objects.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None): if not selected_items: return if isinstance(selected_items, str): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.apply_discount_on = "" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = "" default_price_list = frappe.get_value("Supplier", supplier, "default_price_list") if default_price_list: target.buying_price_list = default_price_list if any(item.delivered_by_supplier == 1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.contact_display target.customer_contact_mobile = source.contact_mobile target.customer_contact_email = source.contact_email else: target.customer = "" target.customer_name = "" target.run_method("set_missing_values") target.run_method("calculate_taxes_and_totals") def update_item(source, target, source_parent): target.schedule_date = source.delivery_date target.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor)) target.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty) target.project = source_parent.project suppliers = [item.get("supplier") for item in selected_items if item.get("supplier")] suppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order items_to_map = [item.get("item_code") for item in selected_items if item.get("item_code")] items_to_map = list(set(items_to_map)) if not suppliers: frappe.throw( _("Please set a Supplier against the Items to be considered in the Purchase Order.") ) purchase_orders = [] for supplier in suppliers: doc = get_mapped_doc( "Sales Order", source_name, { "Sales Order": { "doctype": "Purchase Order", "field_no_map": [ "address_display", "contact_display", "contact_mobile", "contact_email", "contact_person", "taxes_and_charges", "shipping_address", "terms", ], "validation": {"docstatus": ["=", 1]}, }, "Sales Order Item": { "doctype": "Purchase Order Item", "field_map": [ ["name", "sales_order_item"], ["parent", "sales_order"], ["stock_uom", "stock_uom"], ["uom", "uom"], ["conversion_factor", "conversion_factor"], ["delivery_date", "schedule_date"], ], "field_no_map": [ "rate", "price_list_rate", "item_tax_template", "discount_percentage", "discount_amount", "pricing_rules", ], "postprocess": update_item, "condition": lambda doc: doc.ordered_qty < doc.stock_qty and doc.supplier == supplier and doc.item_code in items_to_map, }, }, target_doc, set_missing_values, ) doc.insert() frappe.db.commit() purchase_orders.append(doc) return purchase_orders @frappe.whitelist()
50,262
203,228
256
django/db/migrations/utils.py
70
11
def resolve_relation(model, app_label=None, model_name=None): if isinstance(model, str): if model == RECURSIVE_RELATIONSHIP_CONSTANT:
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
resolve_relation
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
utils.py
15
18
https://github.com/django/django.git
7
101
0
42
169
Python
{ "docstring": "\n Turn a model class or model reference string and return a model tuple.\n\n app_label and model_name are used to resolve the scope of recursive and\n unscoped model relationship.\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 22 }
def resolve_relation(model, app_label=None, model_name=None): if isinstance(model, str): if model == RECURSIVE_RELATIONSHIP_CONSTANT: if app_label is None or model_name is None: raise TypeError( 'app_label and model_name must be provided to resolve ' 'recursive relationships.' ) return app_label, model_name if '.' in model: app_label, model_name = model.split('.', 1) return app_label, model_name.lower() if app_label is None: raise TypeError( 'app_label must be provided to resolve unscoped model relationships.' ) return app_label, model.lower() return model._meta.app_label, model._meta.model_name
5,750
31,459
1,803
src/transformers/modeling_tf_utils.py
479
49
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): missing_layers = [] unexpected_layers = [] mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set( hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names") ) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = sharded_checkpoint_file[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} # And a set with only the names for weight_name in
TF Sharded (#17713) * initial commit * update modeeling tf utils * quality * clean and update args * update * remove potential bug * code quality * update * update max shard * update tests for sharding from pretrained * fix remaining test * make style * h5py if tf available * update and fix test * fix test * style * modified push to hub to support shard for TF * quick fix * update code * merge branch main and style * Apply suggestions from code review Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * update based on reviews * update doc * update and style * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update based on reviews * fix typo * style Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
load_tf_weights
7cced021fa8ddc59f0f77384300760d34545394e
transformers
modeling_tf_utils.py
27
54
https://github.com/huggingface/transformers.git
13
415
0
200
705
Python
{ "docstring": "\n Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and\n shapes.\n\n Args:\n model (`tf.keras.models.Model`):\n The model to load the weights into.\n resolved_archive_file (`str`):\n The location of the H5 file.\n ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):\n Whether or not to ignore weights with shapes that don't match between the checkpoint of the model.\n\n Returns:\n Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the\n mismatched layers.\n ", "language": "en", "n_whitespaces": 167, "n_words": 83, "vocab_size": 56 }
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): missing_layers = [] unexpected_layers = [] mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set( hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names") ) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = sharded_checkpoint_file[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} # And a set with only the names for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"): # TF names always start with the model name so we ignore it name = "/".join(weight_name.split("/")[1:]) if _prefix is not None: name = _prefix + "/" + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it if _prefix is not None: delimeter = len(_prefix.split("/")) symbolic_weight_name = "/".join( symbolic_weight.name.split("/")[:delimeter] + symbolic_weight.name.split("/")[delimeter + 1 :] ) else: symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append( (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) # Load all the weights 
K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers, mismatched_layers
51,287
205,929
104
django/forms/boundfield.py
29
14
def css_classes(self, extra_classes=None): if hasattr(extra_classes, "split"): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and h
Refs #33476 -- Reformatted code with Black.
css_classes
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
boundfield.py
11
9
https://github.com/django/django.git
7
91
0
23
153
Python
{ "docstring": "\n Return a string of space-separated CSS classes for this field.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def css_classes(self, extra_classes=None): if hasattr(extra_classes, "split"): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, "error_css_class"): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, "required_css_class"): extra_classes.add(self.form.required_css_class) return " ".join(extra_classes)
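A minimal usage sketch for the css_classes helper above; this assumes it runs inside a configured Django project, and the form class, field name, and CSS class names are illustrative rather than taken from the record:

from django import forms

class ContactForm(forms.Form):
    required_css_class = "required"   # read via self.form.required_css_class
    error_css_class = "error"         # read via self.form.error_css_class
    email = forms.EmailField()

form = ContactForm(data={})            # missing value, so the field has errors
bound_field = form["email"]
# Returns a space-separated string such as "highlight required error"
# (ordering is not guaranteed because the classes are collected in a set).
print(bound_field.css_classes("highlight"))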
20,039
100,575
65
lib/gpu_stats/nvidia.py
22
11
def _get_free_vram(self) -> List[float]: vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024) for handle in self._handles] self._log("debug", f"GPU VRAM free: {vram}") return vram
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
_get_free_vram
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
faceswap
nvidia.py
11
14
https://github.com/deepfakes/faceswap.git
2
46
0
21
79
Python
{ "docstring": " Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia\n GPU.\n\n Returns\n -------\n list\n List of `float`s containing the amount of VRAM available, in Megabytes, for each\n connected GPU as corresponding to the values in :attr:`_handles\n ", "language": "en", "n_whitespaces": 100, "n_words": 40, "vocab_size": 27 }
def _get_free_vram(self) -> List[float]: vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024) for handle in self._handles] self._log("debug", f"GPU VRAM free: {vram}") return vram
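For context, a stand-alone sketch of the pynvml calls this helper wraps; it assumes an NVIDIA driver and at least one GPU are available at runtime:

import pynvml

pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)             # first GPU
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
print(f"free VRAM: {info.free / (1024 * 1024):.0f} MB")  # same MB conversion as above
pynvml.nvmlShutdown()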
@frappe.whitelist()
14,404
66,996
50
erpnext/projects/doctype/task/task.py
69
25
def get_project(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries imp
style: format code with black
get_project
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
task.py
12
23
https://github.com/frappe/erpnext.git
3
119
1
52
210
Python
{ "docstring": " select name {search_columns} from `tabProject`\n\t\twhere %(key)s like %(txt)s\n\t\t\t%(mcond)s\n\t\t\t{search_condition}\n\t\torder by name\n\t\tlimit %(start)s, %(page_len)s", "language": "en", "n_whitespaces": 12, "n_words": 17, "vocab_size": 16 }
def get_project(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond meta = frappe.get_meta(doctype) searchfields = meta.get_search_fields() search_columns = ", " + ", ".join(searchfields) if searchfields else "" search_cond = " or " + " or ".join(field + " like %(txt)s" for field in searchfields) return frappe.db.sql( .format( search_columns=search_columns, search_condition=search_cond ), { "key": searchfield, "txt": "%" + txt + "%", "mcond": get_match_cond(doctype), "start": start, "page_len": page_len, }, ) @frappe.whitelist()
81,352
275,258
289
keras/optimizers/optimizer_experimental/nadam.py
59
18
def build(self, var_list): super().build(var_list) if getattr(self, "_built", False): return self._built = True self._momentums = [] self._velocities = [] self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype) # Keep a counter on how many times of _u_product has been
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
build
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
nadam.py
13
20
https://github.com/keras-team/keras.git
3
113
0
48
182
Python
{ "docstring": "Initialize optimizer variables.\n\n Nadam optimizer has 2 types of variables: momentums and velocities.\n\n Args:\n var_list: list of model variables to build Nadam variables on.\n ", "language": "en", "n_whitespaces": 54, "n_words": 24, "vocab_size": 20 }
def build(self, var_list): super().build(var_list) if getattr(self, "_built", False): return self._built = True self._momentums = [] self._velocities = [] self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype) # Keep a counter on how many times of _u_product has been computed to # avoid duplicated computations. self._u_product_counter = 1 for var in var_list: self._momentums.append( self.add_variable_from_reference( model_variable=var, variable_name="m" ) ) self._velocities.append( self.add_variable_from_reference( model_variable=var, variable_name="v" ) )
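The build method above is invoked internally by Keras; user code normally just hands the optimizer to compile. A minimal sketch, assuming TensorFlow/Keras is installed:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.Nadam(learning_rate=1e-3), loss="mse")
# With the newer optimizer API, build() is called on the model's variables at the first update step.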
12,762
61,938
372
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
49
18
def list_distinfo_files(self, absolute=False): record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p
upd; format
list_distinfo_files
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
database.py
19
17
https://github.com/jindongwang/transferlearning.git
7
118
0
34
200
Python
{ "docstring": "\n Iterates over the ``installed-files.txt`` entries and returns paths for\n each line if the path is pointing to a file located in the\n ``.egg-info`` directory or one of its subdirectories.\n\n :parameter absolute: If *absolute* is ``True``, each returned path is\n transformed into a local absolute path. Otherwise the\n raw value from ``installed-files.txt`` is returned.\n :type absolute: boolean\n :returns: iterator of paths\n ", "language": "en", "n_whitespaces": 160, "n_words": 60, "vocab_size": 47 }
def list_distinfo_files(self, absolute=False): record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line
2,610
13,348
217
jina/parsers/orchestrate/base.py
68
15
def mixin_scalable_deployment_parser(parser): gp = mixin_base_deployment_parser(parser, title='Scalable Deployment') gp.add_argument( '--polling', type=str, default=PollingType.ANY.name, help=, ) gp.add_argument( '--shards', type=int, default=1, help='The number of shards in the deployment running at the same time. For more details check ' 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies', ) gp.add_argument( '--replicas', type=int, default=1, help='The number of
refactor: remove unnecessary parser args (#5328) * refactor: refactor deployment mixin and remove polling and shards for gateway * chore: rename executor to pod and move native and array type to worker args * refactor: make exit-on-exceptions just a worker arg * style: fix overload and cli autocomplete * chore: apply suggestion * chore: move native parameter to deployment group * fix: fix pod init * style: fix overload and cli autocomplete * fix: fix shards and replicas in deployment * chore: disable gpu and volumes for gateway * style: fix overload and cli autocomplete * fix: volume and gpus are optional for container pods Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
mixin_scalable_deployment_parser
bd8003508da0b35713361484f5801ebc818bd0c3
jina
base.py
10
37
https://github.com/jina-ai/jina.git
1
97
0
52
162
Python
{ "docstring": "Mixing in arguments required by a scalable deployment into the given parser.\n The deployment is scalable and can have shards, replicas and polling\n :param parser: the parser instance to which we add arguments\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`).\n Can be defined for all endpoints of a Deployment or by endpoint.\n Define per Deployment:\n - ANY: only one (whoever is idle) Pod polls the message\n - ALL: all Pods poll the message (like a broadcast)\n Define per Endpoint:\n JSON dict, {endpoint: PollingType}\n {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}\n \n ", "language": "en", "n_whitespaces": 134, "n_words": 93, "vocab_size": 70 }
def mixin_scalable_deployment_parser(parser): gp = mixin_base_deployment_parser(parser, title='Scalable Deployment') gp.add_argument( '--polling', type=str, default=PollingType.ANY.name, help=, ) gp.add_argument( '--shards', type=int, default=1, help='The number of shards in the deployment running at the same time. For more details check ' 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies', ) gp.add_argument( '--replicas', type=int, default=1, help='The number of replicas in the deployment', ) gp.add_argument( '--native', action='store_true', default=False, help='If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.', )
15,840
72,114
135
wagtail/admin/tests/test_privacy.py
35
13
def test_explorer_list_private(self): response = self.client.get( reverse("wagtailadmin_explore", args=(self.private_page.id,)) )
Reformat with black
test_explorer_list_private
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_privacy.py
14
10
https://github.com/wagtail/wagtail.git
1
53
0
30
88
Python
{ "docstring": "\n This tests that there is a padlock displayed\n next to the private child page in the private pages explorer listing\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
def test_explorer_list_private(self): response = self.client.get( reverse("wagtailadmin_explore", args=(self.private_page.id,)) ) # Check the response self.assertEqual(response.status_code, 200) # Must have one privacy icon (next to the private child page) self.assertContains( response, '<span class="indicator privacy-indicator icon icon-no-view"', count=1, )
3,847
21,454
596
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
112
24
def next(self): self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0:
Vendor in pip 22.1.2
next
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
tarfile.py
17
37
https://github.com/pypa/pipenv.git
14
211
0
58
353
Python
{ "docstring": "Return the next member of the archive as a TarInfo object, when\n TarFile is opened for reading. Return None if there is no more\n available.\n ", "language": "en", "n_whitespaces": 52, "n_words": 25, "vocab_size": 22 }
def next(self): self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0: raise ReadError(str(e)) except SubsequentHeaderError as e: raise ReadError(str(e)) break if tarinfo is not None: self.members.append(tarinfo) else: self._loaded = True return tarinfo #-------------------------------------------------------------------------- # Little helper methods:
76,839
261,495
287
sklearn/linear_model/_logistic.py
67
19
def predict_proba(self, X): check_is_fitted(self) ovr = self.multi_class in ["ovr", "warn"] or ( self.multi_class == "auto" and ( self.classes_.size <= 2 or self.solver in ("liblinear", "newton-cholesky") ) ) if ovr: return super()._predict_proba_lr(X) else: decision =
ENH add newton-cholesky solver to LogisticRegression (#24767)
predict_proba
bb080aa690364d84d11232c73dc8db2f0dde3578
scikit-learn
_logistic.py
14
18
https://github.com/scikit-learn/scikit-learn.git
6
105
0
51
177
Python
{ "docstring": "\n Probability estimates.\n\n The returned estimates for all classes are ordered by the\n label of classes.\n\n For a multi_class problem, if multi_class is set to be \"multinomial\"\n the softmax function is used to find the predicted probability of\n each class.\n Else use a one-vs-rest approach, i.e calculate the probability\n of each class assuming it to be positive using the logistic function.\n and normalize these values across all the classes.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Vector to be scored, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n T : array-like of shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in ``self.classes_``.\n ", "language": "en", "n_whitespaces": 282, "n_words": 125, "vocab_size": 77 }
def predict_proba(self, X): check_is_fitted(self) ovr = self.multi_class in ["ovr", "warn"] or ( self.multi_class == "auto" and ( self.classes_.size <= 2 or self.solver in ("liblinear", "newton-cholesky") ) ) if ovr: return super()._predict_proba_lr(X) else: decision = self.decision_function(X) if decision.ndim == 1: # Workaround for multi_class="multinomial" and binary outcomes # which requires softmax prediction with only a 1D decision. decision_2d = np.c_[-decision, decision] else: decision_2d = decision return softmax(decision_2d, copy=False)
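A small end-to-end sketch of predict_proba, assuming scikit-learn 1.2+ (where the newton-cholesky solver referenced above exists); the toy data is illustrative:

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

clf = LogisticRegression(solver="newton-cholesky").fit(X, y)
proba = clf.predict_proba(X)    # shape (4, 2); each row sums to 1
print(proba)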
@keras_export("keras.activations.softplus") @tf.__internal__.dispatch.add_dispatch_support
80,020
269,306
10
keras/activations.py
6
8
def selu(x): return tf.nn.selu(x) @keras_export("keras.activations.softplus") @tf
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
selu
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
activations.py
8
2
https://github.com/keras-team/keras.git
1
15
1
6
50
Python
{ "docstring": "Scaled Exponential Linear Unit (SELU).\n\n The Scaled Exponential Linear Unit (SELU) activation function is defined as:\n\n - `if x > 0: return scale * x`\n - `if x < 0: return scale * alpha * (exp(x) - 1)`\n\n where `alpha` and `scale` are pre-defined constants\n (`alpha=1.67326324` and `scale=1.05070098`).\n\n Basically, the SELU activation function multiplies `scale` (> 1) with the\n output of the `tf.keras.activations.elu` function to ensure a slope larger\n than one for positive inputs.\n\n The values of `alpha` and `scale` are\n chosen so that the mean and variance of the inputs are preserved\n between two consecutive layers as long as the weights are initialized\n correctly (see `tf.keras.initializers.LecunNormal` initializer)\n and the number of input units is \"large enough\"\n (see reference paper for more information).\n\n Example Usage:\n\n >>> num_classes = 10 # 10-class problem\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n\n Args:\n x: A tensor or variable to compute the activation function for.\n\n Returns:\n The scaled exponential unit activation: `scale * elu(x, alpha)`.\n\n Notes:\n - To be used together with the\n `tf.keras.initializers.LecunNormal` initializer.\n - To be used together with the dropout variant\n `tf.keras.layers.AlphaDropout` (not regular dropout).\n\n References:\n - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)\n ", "language": "en", "n_whitespaces": 442, "n_words": 205, "vocab_size": 135 }
def selu(x): return tf.nn.selu(x) @keras_export("keras.activations.softplus") @tf.__internal__.dispatch.add_dispatch_support
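A quick sketch of calling the activation directly, assuming TensorFlow is installed:

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 2.0])
print(tf.keras.activations.selu(x).numpy())
# Negative inputs saturate toward -scale * alpha; positive inputs are scaled by roughly 1.0507.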
3,348
20,369
155
pipenv/patched/notpip/_vendor/pygments/formatters/latex.py
41
16
def _find_safe_escape_tokens(self, text): for i, t, v in self._filter_to( self.lang.get_tokens_unprocessed(text), lambda t: t in Token.Comment or t in Token.String ): if t is None: for i2, t2, v
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_find_safe_escape_tokens
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
latex.py
14
10
https://github.com/pypa/pipenv.git
5
79
0
29
121
Python
{ "docstring": " find escape tokens that are not in strings or comments ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
def _find_safe_escape_tokens(self, text): for i, t, v in self._filter_to( self.lang.get_tokens_unprocessed(text), lambda t: t in Token.Comment or t in Token.String ): if t is None: for i2, t2, v2 in self._find_escape_tokens(v): yield i + i2, t2, v2 else: yield i, None, v
117,264
320,669
59
tests/end2end/features/test_downloads_bdd.py
31
13
def set_up_fileselector(quteproc, py_proc, kind, files, output_type): cmd, args = py_proc(r) args += files.split(' ') if output_type == "a temporary file": args += ['--file={}'] fileselect_cmd = json.dumps([cmd, *args]) quteproc.set_setting('fileselect.handler', 'external') quteproc.set_setting(f'fileselect.{kind}.command', fileselect_cmd)
test(downloads) wip test for external fileselect
set_up_fileselector
36563450763868f12a2481ca636efccb2c7a43cc
qutebrowser
test_downloads_bdd.py
10
25
https://github.com/qutebrowser/qutebrowser.git
2
71
0
27
125
Python
{ "docstring": "Set up fileselect.xxx.command to select the file(s).\n import os\n import sys\n tmp_file = None\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--file='):\n tmp_file = arg[len('--file='):]\n sys.argv.pop(i)\n break\n selected_files = sys.argv[1:]\n if tmp_file is None:\n for selected_file in selected_files:\n print(os.path.abspath(selected_file))\n else:\n with open(tmp_file, 'w') as f:\n for selected_file in selected_files:\n f.write(os.path.abspath(selected_file) + '\\n')\n ", "language": "en", "n_whitespaces": 230, "n_words": 51, "vocab_size": 39 }
def set_up_fileselector(quteproc, py_proc, kind, files, output_type): cmd, args = py_proc(r) args += files.split(' ') if output_type == "a temporary file": args += ['--file={}'] fileselect_cmd = json.dumps([cmd, *args]) quteproc.set_setting('fileselect.handler', 'external') quteproc.set_setting(f'fileselect.{kind}.command', fileselect_cmd)
10,948
53,896
31
tests/test_task_runners.py
15
7
def task_runner(request): if not hasattr(r
Add service marks to task runner tests
task_runner
dc0f9feb764c72620a68ca139eb56e43f6e5f068
prefect
test_task_runners.py
10
4
https://github.com/PrefectHQ/prefect.git
2
33
0
15
60
Python
{ "docstring": "\n An indirect fixture that expects to receive a pytest fixture that yields a task\n runner.\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 12 }
def task_runner(request): if not hasattr(request.param, "_pytestfixturefunction"): raise TypeError("Received invalid `task_runner` parameter. Expected fixture.") yield request.getfixturevalue(request.param.__name__)
17,588
83,054
224
zerver/tests/test_push_notifications.py
47
18
def test_get_apns_context(self) -> None: import zerver.lib.push_notifications zerver.lib.push_notifications.get_apns_context.cache_clear() try: with self.settings(APNS_CERT_FILE="/foo.pem"), mock.patch("aioapns.APNs") as mock_apns: apns_context = get_apns_context() assert apns_context is not None try: self.assertEqual(mock_apns.return_value, apns_context.apns) finally: apns_context.loop.close() finally:
test_push_notifications: Close event loops. Fixes “ResourceWarning: unclosed event loop <_UnixSelectorEventLoop running=False closed=False debug=False>”. Signed-off-by: Anders Kaseorg <anders@zulip.com>
test_get_apns_context
9e70a47f93ad422cadc9d26c656cc8c02e08805e
zulip
test_push_notifications.py
15
17
https://github.com/zulip/zulip.git
3
92
0
40
161
Python
{ "docstring": "This test is pretty hacky, and needs to carefully reset the state\n it modifies in order to avoid leaking state that can lead to\n nondeterministic results for other tests.\n ", "language": "en", "n_whitespaces": 50, "n_words": 29, "vocab_size": 26 }
def test_get_apns_context(self) -> None: import zerver.lib.push_notifications zerver.lib.push_notifications.get_apns_context.cache_clear() try: with self.settings(APNS_CERT_FILE="/foo.pem"), mock.patch("aioapns.APNs") as mock_apns: apns_context = get_apns_context() assert apns_context is not None try: self.assertEqual(mock_apns.return_value, apns_context.apns) finally: apns_context.loop.close() finally: # Reset the cache for `get_apns_context` so that we don't # leak changes to the rest of the world. zerver.lib.push_notifications.get_apns_context.cache_clear()
20,791
101,376
727
scripts/convert.py
230
23
def _validate(self) -> None: if (self._args.writer == "ffmpeg" and not self._images.is_video and self._args.reference_video is None): raise FaceswapError("Output as video selected, but using frames as input. You must " "provide a reference video ('-ref', '--reference-video').") if (self._args.on_the_fly and self._args.mask_type not in ("none", "extended", "components")): logger.warning("You have selected an incompatible mask type ('%s') for On-The-Fly " "conversion. Switching to 'extended'", self._args.mask_type) self._args.mask_type = "extended" if (not self._args.on_the_fly and self._args.mask_type not in ("none", "predicted") and not self._alignments.mask_is_valid(self._args.mask_type)): msg = (f"You have selected the Mask Type `{self._args.mask_type}` but at least one " "face does not have this mask stored in the A
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
_validate
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
convert.py
14
49
https://github.com/deepfakes/faceswap.git
15
224
0
125
423
Python
{ "docstring": " Validate the Command Line Options.\n\n Ensure that certain cli selections are valid and won't result in an error. Checks:\n * If frames have been passed in with video output, ensure user supplies reference\n video.\n * If \"on-the-fly\" and a Neural Network mask is selected, warn and switch to 'extended'\n * If a mask-type is selected, ensure it exists in the alignments file.\n * If a predicted mask-type is selected, ensure model has been trained with a mask\n otherwise attempt to select first available masks, otherwise raise error.\n\n Raises\n ------\n FaceswapError\n If an invalid selection has been found.\n\n ", "language": "en", "n_whitespaces": 210, "n_words": 97, "vocab_size": 66 }
def _validate(self) -> None: if (self._args.writer == "ffmpeg" and not self._images.is_video and self._args.reference_video is None): raise FaceswapError("Output as video selected, but using frames as input. You must " "provide a reference video ('-ref', '--reference-video').") if (self._args.on_the_fly and self._args.mask_type not in ("none", "extended", "components")): logger.warning("You have selected an incompatible mask type ('%s') for On-The-Fly " "conversion. Switching to 'extended'", self._args.mask_type) self._args.mask_type = "extended" if (not self._args.on_the_fly and self._args.mask_type not in ("none", "predicted") and not self._alignments.mask_is_valid(self._args.mask_type)): msg = (f"You have selected the Mask Type `{self._args.mask_type}` but at least one " "face does not have this mask stored in the Alignments File.\nYou should " "generate the required masks with the Mask Tool or set the Mask Type option to " "an existing Mask Type.\nA summary of existing masks is as follows:\nTotal " f"faces: {self._alignments.faces_count}, " f"Masks: {self._alignments.mask_summary}") raise FaceswapError(msg) if self._args.mask_type == "predicted" and not self._predictor.has_predicted_mask: available_masks = [k for k, v in self._alignments.mask_summary.items() if k != "none" and v == self._alignments.faces_count] if not available_masks: msg = ("Predicted Mask selected, but the model was not trained with a mask and no " "masks are stored in the Alignments File.\nYou should generate the " "required masks with the Mask Tool or set the Mask Type to `none`.") raise FaceswapError(msg) mask_type = available_masks[0] logger.warning("Predicted Mask selected, but the model was not trained with a " "mask. Selecting first available mask: '%s'", mask_type) self._args.mask_type = mask_type
18,646
90,218
246
src/sentry/api/base.py
52
18
def get_authenticators(self) -> List[BaseAuthentication]:
ref(hybrid-cloud): Additional test annotations: auth_index (#42425) Extends the hybrid cloud auth service to be usable in many more places ( TY @corps) Annotate 30+ more api endpoint tests Co-authored-by: Mike Ihbe <mike.ihbe@sentry.io> Co-authored-by: Zachary Collins <zachary.collins@sentry.io> Co-authored-by: Zach Collins <recursive.cookie.jar@gmail.com>
get_authenticators
17644550024d6a2eb01356ee48ec0d3ef95c043d
sentry
base.py
16
21
https://github.com/getsentry/sentry.git
6
113
0
40
189
Python
{ "docstring": "\n Instantiates and returns the list of authenticators that this view can use.\n Aggregates together authenticators that can be supported using HybridCloud.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 18 }
def get_authenticators(self) -> List[BaseAuthentication]: # TODO: Increase test coverage and get this working for monolith mode. if SiloMode.get_current_mode() == SiloMode.MONOLITH: return super().get_authenticators() last_api_authenticator = ApiAuthentication([]) result: List[BaseAuthentication] = [] for authenticator_cls in self.authentication_classes: auth_type = ApiAuthenticatorType.from_authenticator(authenticator_cls) if auth_type: last_api_authenticator.types.append(auth_type) else: if last_api_authenticator.types: result.append(last_api_authenticator) last_api_authenticator = ApiAuthentication([]) result.append(authenticator_cls()) if last_api_authenticator.types: result.append(last_api_authenticator) return result
42,229
177,017
77
networkx/algorithms/tests/test_lowest_common_ancestors.py
21
7
def test_naive_lowest_common_ancestor2(self): G = nx.DiGraph() G.add_edge(0, 1) G.add_edge(2, 0) G.add_edge(2, 3) G.add_edge(4, 0) G.add_edge(5, 2) assert naive_lca(G, 1, 3) == 2
Naive lowest common ancestor implementation (#5736) * Add naive lca methods * Naive algorithm implementation for LCA * Modify naive lca functions * Correct parameters of nx.ancestors * Update lowest_common_ancestors.py * Parametrize tests * Apply suggestions from code review Co-authored-by: Dan Schult <dschult@colgate.edu> * Yield instead of append * Tests for naive lca * Correct test cases for naive lca algorithms * Apply suggestions from code review Co-authored-by: Mridul Seth <mail@mriduls.com> * Fix function name -when calling * Make requested changes * Inlining _get_a_lowest_common_ancestor Co-authored-by: dtuncturk <dilaramemis@sabanciuniv.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: Mridul Seth <mail@mriduls.com>
test_naive_lowest_common_ancestor2
b2f91c34a23058dd70b41784af0d87890216026a
networkx
test_lowest_common_ancestors.py
8
8
https://github.com/networkx/networkx.git
1
64
0
18
100
Python
{ "docstring": "Test that the one-pair function works for issue #4942.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_naive_lowest_common_ancestor2(self): G = nx.DiGraph() G.add_edge(0, 1) G.add_edge(2, 0) G.add_edge(2, 3) G.add_edge(4, 0) G.add_edge(5, 2) assert naive_lca(G, 1, 3) == 2
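The same graph as in the test, exercised through NetworkX's public lowest-common-ancestor API (a sketch; naive_lca itself is a test-local alias):

import networkx as nx

G = nx.DiGraph([(0, 1), (2, 0), (2, 3), (4, 0), (5, 2)])
print(nx.lowest_common_ancestor(G, 1, 3))   # 2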
51,111
205,388
105
django/db/migrations/utils.py
29
13
def get_references(state, model_tuple, field_tuple=()): for state_model_tuple, model_state in state.models.items(): for name, field in model_state.fields.items(): reference = field_references( state_model_tuple, field, model_tuple, *field_tuple ) if reference: yie
Refs #33476 -- Reformatted code with Black.
get_references
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
utils.py
13
15
https://github.com/django/django.git
4
63
0
22
96
Python
{ "docstring": "\n Generator of (model_state, name, field, reference) referencing\n provided context.\n\n If field_tuple is provided only references to this particular field of\n model_tuple will be generated.\n ", "language": "en", "n_whitespaces": 40, "n_words": 24, "vocab_size": 22 }
def get_references(state, model_tuple, field_tuple=()): for state_model_tuple, model_state in state.models.items(): for name, field in model_state.fields.items(): reference = field_references( state_model_tuple, field, model_tuple, *field_tuple ) if reference: yield model_state, name, field, reference
93,241
294,202
429
tests/components/alexa/test_smart_home.py
181
13
async def test_media_player_eq_bands_not_supported(hass): device = ( "media_player.test_bands", "on", { "friendly_name": "Test media player", "supported_features": SUPPORT_SELECT_SOUND_MODE, "sound_mode": "tv", "sound_mode_list": ["movie", "music", "night", "sport", "tv", "rocknroll"], }, ) await discovery_test(device, hass) context = Context() # Test for SetBands Error request = get_new_request( "Alexa.EqualizerController", "SetBands", "media_player#test_bands" ) request["directive"]["payload"] = {"bands": [{"name": "BASS", "value": -2}]} msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert "event" in msg msg = msg["event"] assert msg["header"]["name"] == "ErrorResponse" assert msg["header"]["namespace"] == "Alexa" assert msg["payload"]["type"] == "INVALID_DIRECTIVE" # Test for AdjustBands Error request = get_new_request( "Alexa.EqualizerController", "AdjustBands", "media_player#test_bands" ) request["directive"]["payload"] = { "bands": [{"name": "BASS", "levelDelta": 3, "levelDirection": "UP"}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert "event" in msg msg = msg["event"] assert msg["header"]["name"] == "ErrorResponse" assert msg["header"]["namespace"] == "Alexa" assert msg["payload"]["type"] == "INVALID_DIRECTIVE" # Test for ResetBands Error request = get_new_request( "Alexa.EqualizerController", "ResetBands", "media_player#test_bands" ) request["directive"]["payload"] = { "bands": [{"name": "BASS", "levelDelta": 3, "levelDirection": "UP"}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert "event" in msg msg = msg["event"] assert msg["header"]["name"] == "ErrorResponse" assert msg["header"]["namespace"] == "Alexa" assert msg["payload"]["type"] == "INVALID_DIRECTIVE"
Exclude hidden entities from alexa (#68555)
test_media_player_eq_bands_not_supported
dc8e87a6f70439f9830d93d03c53d6ff098a4861
core
test_smart_home.py
12
53
https://github.com/home-assistant/core.git
1
339
0
72
643
Python
{ "docstring": "Test EqualizerController bands directive not supported.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
async def test_media_player_eq_bands_not_supported(hass): device = ( "media_player.test_bands", "on", { "friendly_name": "Test media player", "supported_features": SUPPORT_SELECT_SOUND_MODE, "sound_mode": "tv", "sound_mode_list": ["movie", "music", "night", "sport", "tv", "rocknroll"], }, ) await discovery_test(device, hass) context = Context() # Test for SetBands Error request = get_new_request( "Alexa.EqualizerController", "SetBands", "media_player#test_bands" ) request["directive"]["payload"] = {"bands": [{"name": "BASS", "value": -2}]} msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert "event" in msg msg = msg["event"] assert msg["header"]["name"] == "ErrorResponse" assert msg["header"]["namespace"] == "Alexa" assert msg["payload"]["type"] == "INVALID_DIRECTIVE" # Test for AdjustBands Error request = get_new_request( "Alexa.EqualizerController", "AdjustBands", "media_player#test_bands" ) request["directive"]["payload"] = { "bands": [{"name": "BASS", "levelDelta": 3, "levelDirection": "UP"}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert "event" in msg msg = msg["event"] assert msg["header"]["name"] == "ErrorResponse" assert msg["header"]["namespace"] == "Alexa" assert msg["payload"]["type"] == "INVALID_DIRECTIVE" # Test for ResetBands Error request = get_new_request( "Alexa.EqualizerController", "ResetBands", "media_player#test_bands" ) request["directive"]["payload"] = { "bands": [{"name": "BASS", "levelDelta": 3, "levelDirection": "UP"}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert "event" in msg msg = msg["event"] assert msg["header"]["name"] == "ErrorResponse" assert msg["header"]["namespace"] == "Alexa" assert msg["payload"]["type"] == "INVALID_DIRECTIVE"
@derived_from(np.linalg)
36,455
155,724
219
dask/array/linalg.py
118
45
def lstsq(a, b): q, r = qr(a) x = solve_triangular(r, q.T.conj().dot(b)) residuals = b - a.dot(x) residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1) token = tokenize(a, b) # r must be a triangular with single block # rank rname = "lstsq-rank-" + token rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))} graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r]) # rank must be an integer rank = Array(graph, rname, shape=(), chunks=(), dtype=int) # singular sname = "lstsq-singular-" + token rt = r.T.conj() sdsk = { (sname, 0): ( _reverse,
Update `pre-commit` version (#8691)
lstsq
510bbc380531cbf56a409f1ae68e6fd84a9599e6
dask
linalg.py
14
22
https://github.com/dask/dask.git
1
280
1
85
425
Python
{ "docstring": "\n Return the least-squares solution to a linear matrix equation using\n QR decomposition.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : (M, N) array_like\n \"Coefficient\" matrix.\n b : {(M,), (M, K)} array_like\n Ordinate or \"dependent variable\" values. If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n\n Returns\n -------\n x : {(N,), (N, K)} Array\n Least-squares solution. If `b` is two-dimensional,\n the solutions are in the `K` columns of `x`.\n residuals : {(1,), (K,)} Array\n Sums of residuals; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : Array\n Rank of matrix `a`.\n s : (min(M, N),) Array\n Singular values of `a`.\n ", "language": "en", "n_whitespaces": 345, "n_words": 198, "vocab_size": 122 }
def lstsq(a, b): q, r = qr(a) x = solve_triangular(r, q.T.conj().dot(b)) residuals = b - a.dot(x) residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1) token = tokenize(a, b) # r must be a triangular with single block # rank rname = "lstsq-rank-" + token rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))} graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r]) # rank must be an integer rank = Array(graph, rname, shape=(), chunks=(), dtype=int) # singular sname = "lstsq-singular-" + token rt = r.T.conj() sdsk = { (sname, 0): ( _reverse, (np.sqrt, (np.linalg.eigvalsh, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))), ) } graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r]) meta = meta_from_array(residuals, 1) s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta) return x, residuals, rank, s @derived_from(np.linalg)
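A minimal sketch of calling the dask version on a tall-and-skinny array (a single chunk along the columns, as the underlying QR requires); the random data is illustrative:

import numpy as np
import dask.array as da

rng = np.random.default_rng(0)
A = da.from_array(rng.standard_normal((40, 3)), chunks=(10, 3))
b = da.from_array(rng.standard_normal(40), chunks=10)

x, residuals, rank, s = da.linalg.lstsq(A, b)
print(x.compute(), rank.compute())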
78,652
266,908
36
test/lib/ansible_test/_internal/docker_util.py
23
9
def docker_environment(): # type: () -> t.Dict[str, str] env = common_environment() env.update(dict((key, os.environ[key]
Support podman-remote in ansible-test (#75753)
docker_environment
7cb581ed2cb1d4591d094df37a40c9155ea446da
ansible
docker_util.py
14
4
https://github.com/ansible/ansible.git
4
50
0
22
86
Python
{ "docstring": "Return a dictionary of docker related environment variables found in the current environment.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def docker_environment(): # type: () -> t.Dict[str, str] env = common_environment() env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_') or key.startswith('CONTAINER_'))) return env
23,463
109,177
64
lib/matplotlib/tests/test_colorbar.py
31
20
def test_remove_from_figure(use_gridspec): fig, ax = plt.subplots() sc = ax.scatter([1, 2], [3, 4])
warning when scatter plot color settings discarded (#23516) * Warning when scatter plot color settings discarded * Update lib/matplotlib/axes/_axes.py Co-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> * Wrapped 23516-MS.rst lines at 80 characters * Fixed tests to look for proper warning message * Update doc/api/next_api_changes/behavior/23516-MS.rst Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com> Co-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com>
test_remove_from_figure
5d3124dbc826a019bb55b4229312a033912331ff
matplotlib
test_colorbar.py
11
11
https://github.com/matplotlib/matplotlib.git
1
107
0
25
175
Python
{ "docstring": "\n Test `remove` with the specified ``use_gridspec`` setting\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def test_remove_from_figure(use_gridspec): fig, ax = plt.subplots() sc = ax.scatter([1, 2], [3, 4]) sc.set_array(np.array([5, 6])) pre_position = ax.get_position() cb = fig.colorbar(sc, use_gridspec=use_gridspec) fig.subplots_adjust() cb.remove() fig.subplots_adjust() post_position = ax.get_position() assert (pre_position.get_points() == post_position.get_points()).all()
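The behaviour exercised by this test can be reproduced with a short standalone script (a sketch, not part of the test suite):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
sc = ax.scatter([1, 2], [3, 4], c=[5, 6])   # give the scatter a data array so a colorbar can attach
cb = fig.colorbar(sc)
cb.remove()                                 # detaches the colorbar axes from the figure
fig.canvas.draw_idle()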
48,962
198,499
68
sympy/multipledispatch/utils.py
24
7
def groupby(func, seq): d = {} for item in seq: key = func(item) if key not in d: d[key] = []
Code cleanup
groupby
9d58006fc0a23afcba38f641c9472917c436428a
sympy
utils.py
11
8
https://github.com/sympy/sympy.git
3
47
0
19
76
Python
{ "docstring": " Group a collection by a key function\n\n >>> from sympy.multipledispatch.utils import groupby\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n\n >>> iseven = lambda x: x % 2 == 0\n >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP\n {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n\n See Also:\n ``countby``\n ", "language": "en", "n_whitespaces": 109, "n_words": 72, "vocab_size": 56 }
def groupby(func, seq): d = {} for item in seq: key = func(item) if key not in d: d[key] = [] d[key].append(item) return d
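A direct usage sketch, mirroring the doctest in the docstring above:

from sympy.multipledispatch.utils import groupby

names = ["Alice", "Bob", "Charlie", "Dan"]
print(groupby(len, names))    # {5: ['Alice'], 3: ['Bob', 'Dan'], 7: ['Charlie']}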
33,324
144,863
82
python/ray/data/dataset.py
21
14
def input_files(self) -> List[str]: metadata = self._plan.execute().get_metadata() files = set() for m in metadata: for f in m.input_files: file
Lay the groundwork for lazy dataset optimization (no behavior changes) (#22233) This PR refactors Dataset execution to enable lazy mode in the future, which can reduce memory usage in large-scale ingest pipelines. There should be no behavior changes in this PR. Many of the optimizations are also punted for future work.
input_files
35a157948efa7ba1adf1d1507c2af1d6d84a7db7
ray
dataset.py
11
15
https://github.com/ray-project/ray.git
3
52
0
18
86
Python
{ "docstring": "Return the list of input files for the dataset.\n\n Time complexity: O(num input files)\n\n Returns:\n The list of input files used to create the dataset, or an empty\n list if the input files is not known.\n ", "language": "en", "n_whitespaces": 79, "n_words": 36, "vocab_size": 25 }
def input_files(self) -> List[str]: metadata = self._plan.execute().get_metadata() files = set() for m in metadata: for f in m.input_files: files.add(f) return list(files)
29,926
133,019
33
python/ray/util/collective/collective_group/nccl_util.py
17
5
def get_nccl_reduce_op(reduce_op): if reduce_op not in NCCL_REDUCE_OP_MAP:
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
get_nccl_reduce_op
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
nccl_util.py
12
4
https://github.com/ray-project/ray.git
2
27
0
16
47
Python
{ "docstring": "Map the reduce op to NCCL reduce op type.\n\n Args:\n reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).\n Returns:\n (nccl.ncclRedOp_t): the mapped NCCL reduce op.\n ", "language": "en", "n_whitespaces": 45, "n_words": 22, "vocab_size": 17 }
def get_nccl_reduce_op(reduce_op): if reduce_op not in NCCL_REDUCE_OP_MAP: raise RuntimeError("NCCL does not support reduce op: '{}'.".format(reduce_op)) return NCCL_REDUCE_OP_MAP[reduce_op]
20,644
101,224
32
lib/align/detected_face.py
11
4
def aligned(self) -> AlignedFace: assert self._aligned is not None return self._aligned
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
aligned
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
detected_face.py
7
4
https://github.com/deepfakes/faceswap.git
1
19
0
10
32
Python
{ "docstring": " The aligned face connected to this detected face. ", "language": "en", "n_whitespaces": 9, "n_words": 8, "vocab_size": 8 }
def aligned(self) -> AlignedFace: assert self._aligned is not None return self._aligned
8,592
45,465
159
airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py
47
14
def upgrade(): conn = op.get_bind() if conn.dialect.name == "sqlite": # in sqlite TEXT and STRING column types are the same
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
upgrade
69f6f9e01b6df76c3c8fa266d460324163957887
airflow
64a7d6477aae_fix_description_field_in_connection_to_.py
14
14
https://github.com/apache/airflow.git
3
95
0
41
162
Python
{ "docstring": "Apply Fix description field in ``connection`` to be ``text``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def upgrade(): conn = op.get_bind() if conn.dialect.name == "sqlite": # in sqlite TEXT and STRING column types are the same return if conn.dialect.name == "mysql": op.alter_column( 'connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text(length=5000), existing_nullable=True, ) else: # postgres does not allow size modifier for text type op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
12,085
60,322
121
code/deep/BJMMD/caffe/python/caffe/test/test_net.py
43
16
def test_memory(self): params = sum(map(list, six.itervalues(self.net.params)), []) blobs = self.net.blobs.values() del self.net # now sum every
Balanced joint maximum mean discrepancy for deep transfer learning
test_memory
cc4d0564756ca067516f71718a3d135996525909
transferlearning
test_net.py
14
9
https://github.com/jindongwang/transferlearning.git
3
91
0
35
148
Python
{ "docstring": "Check that holding onto blob data beyond the life of a Net is OK", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
def test_memory(self): params = sum(map(list, six.itervalues(self.net.params)), []) blobs = self.net.blobs.values() del self.net # now sum everything (forcing all memory to be read) total = 0 for p in params: total += p.data.sum() + p.diff.sum() for bl in blobs: total += bl.data.sum() + bl.diff.sum()
11,587
56,927
23
tests/test_flows.py
9
4
async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path): canary_file = tmp_path / "canary"
Ensure flows are called in an interruptible thread (PrefectHQ/orion#2174) * Ensure flows are called in an interruptible thread * Set higher runtime limit in `test_timeout_stops_execution_in_sync_subflows`
test_timeout_stops_execution_in_sync_subflows
336eca7839fccbcbdb77179f352f926da8b1fa15
prefect
test_flows.py
8
14
https://github.com/PrefectHQ/prefect.git
1
72
0
9
26
Python
{ "docstring": "\n Sync flow runs can be cancelled after a timeout once a task is called\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path): canary_file = tmp_path / "canary"
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
13,970
65,649
59
erpnext/controllers/queries.py
77
21
def get_income_account(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond # income account can be any Credit account, # but can also be a Asset account with account_type='Income Account' in special circumstances. # Hence the first condition is an "OR" if not filter
style: format code with black
get_income_account
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
queries.py
12
19
https://github.com/frappe/erpnext.git
3
94
1
66
177
Python
{ "docstring": "select tabAccount.name from `tabAccount`\n\t\t\twhere (tabAccount.report_type = \"Profit and Loss\"\n\t\t\t\t\tor tabAccount.account_type in (\"Income Account\", \"Temporary\"))\n\t\t\t\tand tabAccount.is_group=0\n\t\t\t\tand tabAccount.`{key}` LIKE %(txt)s\n\t\t\t\t{condition} {match_condition}\n\t\t\torder by idx desc, name", "language": "en", "n_whitespaces": 22, "n_words": 29, "vocab_size": 27 }
def get_income_account(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond # income account can be any Credit account, # but can also be a Asset account with account_type='Income Account' in special circumstances. # Hence the first condition is an "OR" if not filters: filters = {} condition = "" if filters.get("company"): condition += "and tabAccount.company = %(company)s" return frappe.db.sql( .format( condition=condition, match_condition=get_match_cond(doctype), key=searchfield ), {"txt": "%" + txt + "%", "company": filters.get("company", "")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
52,796
209,810
168
scapy/arch/windows/__init__.py
56
14
def win_find_exe(filename, installsubdir=None, env="ProgramFiles"): # type: (str, Optional[Any],
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <pierre@droids-corp.org>
win_find_exe
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
scapy
__init__.py
21
13
https://github.com/secdev/scapy.git
6
93
0
44
156
Python
{ "docstring": "Find executable in current dir, system path or in the\n given ProgramFiles subdir, and retuen its absolute path.\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 17 }
def win_find_exe(filename, installsubdir=None, env="ProgramFiles"): # type: (str, Optional[Any], str) -> str fns = [filename] if filename.endswith(".exe") else [filename + ".exe", filename] # noqa: E501 for fn in fns: try: if installsubdir is None: path = _where(fn) else: path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)]) # noqa: E501 except IOError: path = None else: break return path or ""
16,245
74,294
85
wagtail/core/tests/test_page_model.py
14
9
def test_custom_page_queryset(self): self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet)
Reformat with black
test_custom_page_queryset
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_page_model.py
14
9
https://github.com/wagtail/wagtail.git
1
82
0
10
135
Python
{ "docstring": "\n Managers that are constructed from a custom PageQuerySet\n (via PageManager.from_queryset(CustomPageQuerySet)) should return\n querysets of that type\n ", "language": "en", "n_whitespaces": 45, "n_words": 16, "vocab_size": 15 }
def test_custom_page_queryset(self): self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet) self.assertIs(type(CustomManagerPage.objects.about_spam()), CustomPageQuerySet) self.assertIs( type(CustomManagerPage.objects.all().about_spam()), CustomPageQuerySet ) self.assertIs( type(CustomManagerPage.objects.about_spam().all()), CustomPageQuerySet )
3,197
20,048
31
pipenv/patched/notpip/_vendor/distro.py
10
3
def distro_release_info(self): # type: () -> Dict[s
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
distro_release_info
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
distro.py
6
2
https://github.com/pypa/pipenv.git
1
10
0
10
20
Python
{ "docstring": "\n Return a dictionary containing key-value pairs for the information\n items from the distro release file data source of the OS\n distribution.\n\n For details, see :func:`distro.distro_release_info`.\n ", "language": "en", "n_whitespaces": 61, "n_words": 25, "vocab_size": 23 }
def distro_release_info(self): # type: () -> Dict[str, str] return self._distro_release_info
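This vendored class mirrors the standalone distro package, whose module-level helper can be sketched as follows (the output shown is hypothetical and Linux-specific):

import distro   # the PyPI package this module is vendored from

print(distro.distro_release_info())   # e.g. {'id': 'fedora', 'name': 'Fedora', ...}, or {} if unavailable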
3,297
20,247
20
pipenv/patched/notpip/_vendor/platformdirs/windows.py
6
7
def user_documents_dir(self) -> str: return os.path.normpath(get
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
user_documents_dir
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
windows.py
10
5
https://github.com/pypa/pipenv.git
1
20
0
6
38
Python
{ "docstring": "\n :return: documents directory tied to the user e.g. ``%USERPROFILE%\\\\Documents``\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
def user_documents_dir(self) -> str: return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
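The same location is normally obtained through the package's top-level helper; a sketch assuming the platformdirs package is installed:

import platformdirs

print(platformdirs.user_documents_dir())   # e.g. C:\Users\<user>\Documents on Windows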
32,637
142,266
384
python/ray/data/_internal/push_based_shuffle.py
69
12
def round_robin_reduce_idx_iterator(self): idx = 0 round_idx = 0 while idx < self.output_num_blocks: for merge_idx in range(self.num_merge_tasks_per_round): if merge_idx < self._partitions_with_extra_task: reduce_idx = merge_idx * (self.merge_partition_size + 1) partition_size = self.merge_partition_size + 1 else: reduce_idx = self
[dataset] Pipeline task submission during reduce stage in push-based shuffle (#25795) Reduce stage in push-based shuffle fails to complete at 100k output partitions or more. This is likely because of driver or raylet load from having too many tasks in flight at once. We can fix this from ray core too, but for now, this PR adds pipelining for the reduce stage, to limit the total number of reduce tasks in flight at the same time. This is currently set to 2 * available parallelism in the cluster. We have to pick which reduce tasks to submit carefully since these are pinned to specific nodes. The PR does this by assigning tasks round-robin according to the corresponding merge task (which get spread throughout the cluster). In addition, this PR refactors the map, merge, and reduce stages to use a common pipelined iterator pattern, since they all have a similar pattern of submitting a round of tasks at a time, then waiting for a previous round to finish before submitting more. Related issue number Closes #25412.
round_robin_reduce_idx_iterator
93aae48b80db80f7e9f922eaabedead0d15ee01c
ray
push_based_shuffle.py
17
21
https://github.com/ray-project/ray.git
5
103
0
33
168
Python
{ "docstring": "\n When there are multiple nodes, merge tasks are spread throughout the\n cluster to improve load-balancing. Each merge task produces outputs for\n a contiguous partition of reduce tasks. This method creates an iterator\n that returns reduce task indices round-robin across the merge tasks.\n This can be used to submit reduce tasks in a way that spreads the load\n evenly across the cluster.\n ", "language": "en", "n_whitespaces": 111, "n_words": 61, "vocab_size": 45 }
def round_robin_reduce_idx_iterator(self): idx = 0 round_idx = 0 while idx < self.output_num_blocks: for merge_idx in range(self.num_merge_tasks_per_round): if merge_idx < self._partitions_with_extra_task: reduce_idx = merge_idx * (self.merge_partition_size + 1) partition_size = self.merge_partition_size + 1 else: reduce_idx = self._partitions_with_extra_task * ( self.merge_partition_size + 1 ) merge_idx -= self._partitions_with_extra_task reduce_idx += merge_idx * self.merge_partition_size partition_size = self.merge_partition_size if round_idx >= partition_size: continue reduce_idx += round_idx yield reduce_idx idx += 1 round_idx += 1
35,261
153,106
64
modin/pandas/groupby.py
21
6
def _check_index_name(self, result): if self._by is not None: # pandas does not n
FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373) Signed-off-by: Dmitry Chigarev <dmitry.chigarev@intel.com>
_check_index_name
1e65a4afd191cf61ba05b80545d23f9b88962f41
modin
groupby.py
10
4
https://github.com/modin-project/modin.git
2
26
0
20
44
Python
{ "docstring": "\n Check the result of groupby aggregation on the need of resetting index name.\n\n Parameters\n ----------\n result : DataFrame\n Group by aggregation result.\n\n Returns\n -------\n DataFrame\n ", "language": "en", "n_whitespaces": 93, "n_words": 25, "vocab_size": 20 }
def _check_index_name(self, result): if self._by is not None: # pandas does not name the index for this case result._query_compiler.set_index_name(None) return result
53,127
211,652
210
ppdet/modeling/rbox_utils.py
136
22
def check_points_in_rotated_boxes(points, boxes): # [B, N, 5] -> [B, N, 4, 2] corners = box2corners(boxes) # [1, L, 2] -> [1, 1, L, 2] points = points.unsqueeze(0) # [B, N, 4, 2] -> [B, N, 1, 2] a, b, c, d = corners.split(4, axis=2) ab = b - a ad = d - a # [B, N, L, 2] ap = points - a # [B, N, L] norm_ab = paddle.sum(ab * ab, axis=-1) # [B, N, L] norm_ad = paddle.sum(ad * ad, axis=-1) # [B, N, L] dot product ap_dot_ab = paddle.sum(
add ppyoloe_r (#7105) * add ppyoloe_r * modify code of ops.py * add ppyoloe_r docs and modify rotate docs * modify docs and refine configs * fix some problems * refine docs, add nms_rotated ext_op and fix some problems * add image and inference_benchmark.py * modify docs * fix some problems * modify code according to review Co-authored-by: wangxinxin08 <>
check_points_in_rotated_boxes
c6c10032924aaf4eb1646a4fd593c17a7e2ecb3b
PaddleDetection
rbox_utils.py
11
14
https://github.com/PaddlePaddle/PaddleDetection.git
1
142
0
58
229
Python
{ "docstring": "Check whether point is in rotated boxes\n\n Args:\n points (tensor): (1, L, 2) anchor points\n boxes (tensor): [B, N, 5] gt_bboxes\n eps (float): default 1e-9\n \n Returns:\n is_in_box (tensor): (B, N, L)\n\n ", "language": "en", "n_whitespaces": 72, "n_words": 31, "vocab_size": 26 }
def check_points_in_rotated_boxes(points, boxes):
    # [B, N, 5] -> [B, N, 4, 2]
    corners = box2corners(boxes)
    # [1, L, 2] -> [1, 1, L, 2]
    points = points.unsqueeze(0)
    # [B, N, 4, 2] -> [B, N, 1, 2]
    a, b, c, d = corners.split(4, axis=2)
    ab = b - a
    ad = d - a
    # [B, N, L, 2]
    ap = points - a
    # [B, N, L]
    norm_ab = paddle.sum(ab * ab, axis=-1)
    # [B, N, L]
    norm_ad = paddle.sum(ad * ad, axis=-1)
    # [B, N, L] dot product
    ap_dot_ab = paddle.sum(ap * ab, axis=-1)
    # [B, N, L] dot product
    ap_dot_ad = paddle.sum(ap * ad, axis=-1)
    # [B, N, L] <A, B> = |A|*|B|*cos(theta)
    is_in_box = (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (ap_dot_ad >= 0) & (
        ap_dot_ad <= norm_ad)
    return is_in_box
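The test above is the classic projection check: a point P lies inside rectangle ABCD exactly when 0 <= AP.AB <= |AB|^2 and 0 <= AP.AD <= |AD|^2. Below is a small NumPy sketch of the same test for a single box and a single point; the corner and point values are made up, and this is an illustration, not the Paddle implementation.

import numpy as np

a, b, d = np.array([0.0, 0.0]), np.array([2.0, 1.0]), np.array([-0.5, 1.0])  # AB and AD are perpendicular edges
p = np.array([0.7, 0.9])

ab, ad, ap = b - a, d - a, p - a
inside = (0 <= ap @ ab <= ab @ ab) and (0 <= ap @ ad <= ad @ ad)
print(inside)  # True: the point falls inside this rotated rectangle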
78,474
266,557
208
lib/ansible/modules/git.py
102
30
def write_ssh_wrapper(module): try: # make sure we
Bypass fragile git ssh wrapper (#73404) git module now uses env vars exclusively - updated docs to clarify usage - now env vars append instead of overwrite to allow existing custom setups to keep working fixes #38104, #64673, #64674 - added note for hostkeychecking more securely fixes #69846 - keep script cause old versions still choke on env - env var cannot hold more than 'command' for older versions - all ssh_opts in one place
write_ssh_wrapper
b493c590bcee9b64e8ae02c17d4fde2331e0598b
ansible
git.py
15
18
https://github.com/ansible/ansible.git
3
154
0
83
265
Python
{ "docstring": "\n This writes an shell wrapper for ssh options to be used with git\n this is only relevant for older versions of gitthat cannot\n handle the options themselves. Returns path to the script\n #!/bin/sh\n%s $GIT_SSH_OPTS\n", "language": "en", "n_whitespaces": 58, "n_words": 35, "vocab_size": 31 }
def write_ssh_wrapper(module):
    try:
        # make sure we have full permission to the module_dir, which
        # may not be the case if we're sudo'ing to a non-root user
        if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):
            fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')
        else:
            raise OSError
    except (IOError, OSError):
        fd, wrapper_path = tempfile.mkstemp()

    # use existing git_ssh/ssh_command, fallback to 'ssh'
    template = b("""#!/bin/sh
%s $GIT_SSH_OPTS
""" % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh')))

    # write it
    with os.fdopen(fd, 'w+b') as fh:
        fh.write(template)

    # set execute
    st = os.stat(wrapper_path)
    os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)

    module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template))

    # ensure we cleanup after ourselves
    module.add_cleanup_file(path=wrapper_path)

    return wrapper_path
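A hedged sketch of how such a wrapper is typically consumed, not Ansible code: older git releases only honor GIT_SSH, so the extra ssh options travel in GIT_SSH_OPTS and the wrapper script expands them. The wrapper path and option values below are made up for illustration.

import os
import subprocess

env = dict(os.environ)
env["GIT_SSH"] = "/tmp/tmpabc123"  # hypothetical path returned by write_ssh_wrapper()
env["GIT_SSH_OPTS"] = "-o StrictHostKeyChecking=accept-new -i /home/user/.ssh/id_ed25519"
subprocess.run(["git", "ls-remote", "ssh://git@example.com/repo.git"], env=env, check=True)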
70,092
243,723
441
src/PIL/Image.py
157
21
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): if not isinstance(source, (list, tuple)): msg = "Source must be a tuple" raise ValueError(msg) if not isinstance(dest, (list, tuple)): msg = "Destination must be a tuple" raise ValueError(msg) if not len(source) in (2, 4): msg = "Source must be a 2 or 4-tuple" raise ValueError(msg) if not len(dest) == 2: msg = "Destination must be a 2-tuple" raise ValueError(msg) if min(source) < 0: msg = "Source must be non-negative" raise ValueError(msg) if len(source) == 2: source = source + im.size # over image,
Improve exception traceback readability
alpha_composite
2ae55ccbdad9c842929fb238ea1eb81d1f999024
Pillow
Image.py
11
41
https://github.com/python-pillow/Pillow.git
9
226
0
80
362
Python
{ "docstring": "'In-place' analog of Image.alpha_composite. Composites an image\n onto this image.\n\n :param im: image to composite over this one\n :param dest: Optional 2 tuple (left, top) specifying the upper\n left corner in this (destination) image.\n :param source: Optional 2 (left, top) tuple for the upper left\n corner in the overlay source image, or 4 tuple (left, top, right,\n bottom) for the bounds of the source rectangle\n\n Performance Note: Not currently implemented in-place in the core layer.\n ", "language": "en", "n_whitespaces": 144, "n_words": 75, "vocab_size": 49 }
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
    if not isinstance(source, (list, tuple)):
        msg = "Source must be a tuple"
        raise ValueError(msg)
    if not isinstance(dest, (list, tuple)):
        msg = "Destination must be a tuple"
        raise ValueError(msg)
    if not len(source) in (2, 4):
        msg = "Source must be a 2 or 4-tuple"
        raise ValueError(msg)
    if not len(dest) == 2:
        msg = "Destination must be a 2-tuple"
        raise ValueError(msg)
    if min(source) < 0:
        msg = "Source must be non-negative"
        raise ValueError(msg)

    if len(source) == 2:
        source = source + im.size

    # over image, crop if it's not the whole thing.
    if source == (0, 0) + im.size:
        overlay = im
    else:
        overlay = im.crop(source)

    # target for the paste
    box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)

    # destination image. don't copy if we're using the whole image.
    if box == (0, 0) + self.size:
        background = self
    else:
        background = self.crop(box)

    result = alpha_composite(background, overlay)
    self.paste(result, box)
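A short usage sketch of the in-place method above; sizes, colors, and coordinates are arbitrary.

from PIL import Image

base = Image.new("RGBA", (64, 64), (0, 0, 255, 255))      # opaque blue canvas
overlay = Image.new("RGBA", (16, 16), (255, 0, 0, 128))    # translucent red patch
base.alpha_composite(overlay, dest=(10, 10))                        # whole overlay at (10, 10)
base.alpha_composite(overlay, dest=(40, 40), source=(0, 0, 8, 8))   # only its top-left 8x8 region

The crop/paste bookkeeping in the method limits the composite to the affected box rather than the full image, which is why only the destination rectangle is cropped, blended, and pasted back.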
16,685
77,647
112
wagtail/contrib/forms/tests/test_models.py
28
14
def test_form_field_clean_name_override(self): field = ExtendedFormField.objects.create( page=self.form_page, sort_order=1,
form builder - allow clean_name generation to be overridden - adds a new class method to AbstractFormField `get_field_clean_name` - resolves #6903
test_form_field_clean_name_override
fd5218220e4ccc7697ee18f57356810560e5e718
wagtail
test_models.py
10
9
https://github.com/wagtail/wagtail.git
1
47
0
27
78
Python
{ "docstring": "\n Creating a new field should use the overridden method\n See ExtendedFormField get_field_clean_name method\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
def test_form_field_clean_name_override(self):
    field = ExtendedFormField.objects.create(
        page=self.form_page,
        sort_order=1,
        label="quanti ge·là·to?",
        field_type="number",  # only number fields will add the ID as a prefix to the clean_name
        required=True,
    )
    self.assertEqual(field.clean_name, "number_field--quanti_gelato")
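A hedged sketch of the kind of clean-name generation the overridden hook could perform for this expectation to hold. The test app's actual ExtendedFormField and the exact get_field_clean_name signature are not shown in this record, so the helper below is purely illustrative and not Wagtail's implementation.

from django.utils.text import slugify

def custom_clean_name(label, field_type):
    # Hypothetical helper: slugify the label and prefix number fields.
    base = slugify(label).replace("-", "_")   # "quanti ge·là·to?" -> "quanti_gelato"
    if field_type == "number":
        return "number_field--" + base
    return base

print(custom_clean_name("quanti ge·là·to?", "number"))  # number_field--quanti_gelato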
@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)
26,695
119,830
63
jax/_src/numpy/polynomial.py
52
20
def polyder(p, m=1): _check_arraylike("polyder", p) m = core.concrete_or_error(operator.index, m, "'m' argument of jnp.polyder") p, = _promote_dtypes_inexact(p) if m < 0: raise ValueError("Order of derivative must be positive") if m == 0: return p coeff = (arange(le
lax_numpy: move poly functions into numpy.polynomial
polyder
603bb3c5ca288674579211e64fa47c6b2b0fb7a6
jax
polynomial.py
16
10
https://github.com/google/jax.git
3
104
1
40
190
Python
{ "docstring": "\\\nSetting trim_leading_zeros=True makes the output match that of numpy.\nBut prevents the function from being able to be used in compiled code.\n", "language": "en", "n_whitespaces": 20, "n_words": 23, "vocab_size": 22 }
def polyder(p, m=1):
  _check_arraylike("polyder", p)
  m = core.concrete_or_error(operator.index, m, "'m' argument of jnp.polyder")
  p, = _promote_dtypes_inexact(p)
  if m < 0:
    raise ValueError("Order of derivative must be positive")
  if m == 0:
    return p
  coeff = (arange(len(p), m, -1)[np.newaxis, :] - 1 - arange(m)[:, np.newaxis]).prod(0)
  return p[:-m] * coeff


_LEADING_ZEROS_DOC = """\
Setting trim_leading_zeros=True makes the output match that of numpy.
But prevents the function from being able to be used in compiled code.
"""

@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)
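A quick usage sketch of the function above, using NumPy's highest-degree-first coefficient convention.

import jax.numpy as jnp

p = jnp.array([3.0, 2.0, 1.0, 5.0])   # 3x^3 + 2x^2 + x + 5
print(jnp.polyder(p))                  # [9. 4. 1.]  -> 9x^2 + 4x + 1
print(jnp.polyder(p, m=2))             # [18.  4.]   -> 18x + 4

The coeff line builds exactly these falling-factorial multipliers: for m=2 and len(p)=4 it evaluates to [6, 2], so p[:-2] * coeff gives [18, 4].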