Dataset columns (for string columns, Min/Max are value lengths; for int64 columns, value ranges):

Column          Type            Min    Max
ast_errors      string length   0      3.2k
d_id            int64           44     121k
id              int64           70     338k
n_whitespaces   int64           3      14k
path            string length   8      134
n_words         int64           4      4.82k
n_identifiers   int64           1      131
random_cut      string length   16     15.8k
commit_message  string length   2      15.3k
fun_name        string length   1      84
commit_id       string length   40     40
repo            string length   3      28
file_name       string length   5      79
ast_levels      int64           6      31
nloc            int64           1      548
url             string length   31     59
complexity      int64           1      66
token_counts    int64           6      2.13k
n_ast_errors    int64           0      28
vocab_size      int64           4      1.11k
n_ast_nodes     int64           15     19.2k
language        string          1 distinct value
documentation   dict
code            string length   101    62.2k
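The records below follow this schema. As a quick orientation, here is a minimal sketch of loading and inspecting such a dataset with the Hugging Face `datasets` library. It assumes the data is published on the Hugging Face Hub; the dataset identifier used below is a placeholder, not the real name, and only the column names are taken from the schema above. The comment about `code` versus `documentation` is an observation from the sample rows, not a guarantee about every record.

```python
# Minimal sketch (not the dataset's official loading code): read a dataset with
# the columns listed above and inspect one record. The identifier is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-commit-functions", split="train")  # hypothetical name

record = ds[0]

# Provenance fields: repository, file, function, and the commit that touched it.
print(record["repo"], record["path"], record["fun_name"])
print(record["url"], record["commit_id"])
print(record["commit_message"][:120])

# `documentation` is a dict holding the extracted docstring plus simple counts.
doc = record["documentation"]
print(doc["docstring"], doc["n_words"], doc["vocab_size"])

# In the sample rows, `code` holds the full function with its docstring stripped,
# while `random_cut` is a truncated preview of the same function.
print(record["code"][:200])
print(record["random_cut"][:80])
```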
@serializable @register
53,110
211,506
881
ppdet/modeling/losses/probiou_loss.py
181
46
def probiou_loss(pred, target, eps=1e-3, mode='l1'): gbboxes1 = gbb_form(pred) gbboxes2 = gbb_form(target) x1, y1, a1_, b1_, c1_ = gbboxes1[:,
add fcosr model (#6765) * add fcosr * fix some problem * add docs for fcosr * modify code * modify focsr reader * finish tensorrt deployment with dynamic shape * modify according to review comment Co-authored-by: wangxinxin08 <>
probiou_loss
92078713cced4f0d9450a6fc80a449fa75fd8c10
PaddleDetection
probiou_loss.py
17
32
https://github.com/PaddlePaddle/PaddleDetection.git
3
383
1
95
553
Python
{ "docstring": "\n pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0\n target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0\n eps -> threshold to avoid infinite values\n mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metrics according our paper\n\n ", "language": "en", "n_whitespaces": 104, "n_words": 58, "vocab_size": 36 }
def probiou_loss(pred, target, eps=1e-3, mode='l1'): gbboxes1 = gbb_form(pred) gbboxes2 = gbb_form(target) x1, y1, a1_, b1_, c1_ = gbboxes1[:, 0], gbboxes1[:, 1], gbboxes1[:, 2], gbboxes1[:, 3], gbboxes1[:, 4] x2, y2, a2_, b2_, c2_ = gbboxes2[:, 0], gbboxes2[:, 1], gbboxes2[:, 2], gbboxes2[:, 3], gbboxes2[:, 4] a1, b1, c1 = rotated_form(a1_, b1_, c1_) a2, b2, c2 = rotated_form(a2_, b2_, c2_) t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \ 0.5 * ((c1+c2)*(x2-x1)*(y1-y2)) t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2) t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2) t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps)) B_d = (t1 / t2) + t3 # B_d = t1 + t2 + t3 B_d = paddle.clip(B_d, min=eps, max=100.0) l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps) l_i = paddle.pow(l1, 2.0) l2 = -paddle.log(1.0 - l_i + eps) if mode == 'l1': probiou = l1 if mode == 'l2': probiou = l2 return probiou @serializable @register
42,834
178,818
20
nuitka/Options.py
11
3
def mayDisableConsoleWindow(): # TODO: What about
Standalone: Added support for requiring modes * For wx on macOS, console must be disabled, avoid the trap. * For the PySide2, on macOS the --onefile must be used when the application bundle is built or else signing has issues. * Recommend to use new option --disable-console for PySide2, PySide6 and wx on non-macOS
mayDisableConsoleWindow
613c31d98f20bdd9a4e5884c99826a06a3328438
Nuitka
Options.py
8
2
https://github.com/Nuitka/Nuitka.git
2
13
0
11
27
Python
{ "docstring": ":returns: bool derived from platform support of disabling the console,", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def mayDisableConsoleWindow(): # TODO: What about MSYS2? return isWin32Windows() or isMacOS()
70,277
244,197
34
mmdet/utils/compat_config.py
16
7
def compat_cfg(cfg): cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cf
[Feature] Support set dataloader args in config and and add function to handle config compatibility (#7668) * add cfg_compatibility and support loader args * resolve comments * add unitest * resolve comments * delete all warning
compat_cfg
dc14675f79681b88ce2c5a3ca3c69901b415ffe4
mmdetection
compat_config.py
8
6
https://github.com/open-mmlab/mmdetection.git
1
34
0
9
59
Python
{ "docstring": "This function would modify some filed to keep the compatibility of\n config.\n\n For example, it will move some args which will be deprecated to the correct\n fields.\n ", "language": "en", "n_whitespaces": 39, "n_words": 27, "vocab_size": 23 }
def compat_cfg(cfg): cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cfg
55,293
218,412
93
python3.10.4/Lib/inspect.py
44
13
def getsourcelines(object): object = unwrap(object) lines, lnum = findsource(object) if istraceback(object): object = object.tb_frame # for module or frame that corresponds to module, return all source lines if (ismodule(object) or
add python 3.10.4 for windows
getsourcelines
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
inspect.py
13
10
https://github.com/XX-net/XX-Net.git
5
73
0
35
123
Python
{ "docstring": "Return a list of source lines and starting line number for an object.\n\n The argument may be a module, class, method, function, traceback, frame,\n or code object. The source code is returned as a list of the lines\n corresponding to the object and the line number indicates where in the\n original source file the first line of code was found. An OSError is\n raised if the source code cannot be retrieved.", "language": "en", "n_whitespaces": 87, "n_words": 71, "vocab_size": 46 }
def getsourcelines(object): object = unwrap(object) lines, lnum = findsource(object) if istraceback(object): object = object.tb_frame # for module or frame that corresponds to module, return all source lines if (ismodule(object) or (isframe(object) and object.f_code.co_name == "<module>")): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1
51,131
205,453
155
django/db/models/deletion.py
39
16
def get_del_batches(self, objs, fields):
Refs #33476 -- Reformatted code with Black.
get_del_batches
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
deletion.py
13
12
https://github.com/django/django.git
4
82
0
34
123
Python
{ "docstring": "\n Return the objs in suitably sized batches for the used connection.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
def get_del_batches(self, objs, fields): field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs]
1,048
6,670
47
ludwig/utils/checkpoint_utils.py
12
11
def save(self, global_step): save_path = osp.join(s
Add file lock on training checkpoints to prevent race condition (#1938)
save
44356d2d07370b7044640a068ace95842d5ce98c
ludwig
checkpoint_utils.py
11
5
https://github.com/ludwig-ai/ludwig.git
1
42
0
10
75
Python
{ "docstring": "Create a new checkpoint.\n\n Args:\n global_step (int): The iteration number which will be used\n to name the checkpoint.\n ", "language": "en", "n_whitespaces": 52, "n_words": 18, "vocab_size": 17 }
def save(self, global_step): save_path = osp.join(self.directory, f"{global_step:09d}.ckpt") self.checkpoint.save(save_path) self.latest_checkpoint = save_path self.queue.put(True)
47,301
195,586
156
versioneer.py
52
16
def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = # END
add auto tag
versions_from_file
f0194812568c83585ff09488fe7f67df300938cc
rembg
versioneer.py
12
14
https://github.com/danielgatis/rembg.git
4
94
0
34
159
Python
{ "docstring": "Try to determine the version from _version.py if present.\\n(.*)\\r\\n(.*)", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1))
13,724
64,798
3
erpnext/accounts/doctype/bank_transaction/bank_transaction.py
9
7
def get_total_allocated_amount(payment_entry): return frappe.db.sql( , (payment_entry.p
style: format code with black
get_total_allocated_amount
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
bank_transaction.py
9
19
https://github.com/frappe/erpnext.git
1
29
0
9
44
Python
{ "docstring": "\n\t\tSELECT\n\t\t\tSUM(btp.allocated_amount) as allocated_amount,\n\t\t\tbt.name\n\t\tFROM\n\t\t\t`tabBank Transaction Payments` as btp\n\t\tLEFT JOIN\n\t\t\t`tabBank Transaction` bt ON bt.name=btp.parent\n\t\tWHERE\n\t\t\tbtp.payment_document = %s\n\t\tAND\n\t\t\tbtp.payment_entry = %s\n\t\tAND\n\t\t\tbt.docstatus = 1", "language": "en", "n_whitespaces": 17, "n_words": 30, "vocab_size": 24 }
def get_total_allocated_amount(payment_entry): return frappe.db.sql( , (payment_entry.payment_document, payment_entry.payment_entry), as_dict=True, )
@not_implemented_for("multigraph") @not_implemented_for("directed")
41,855
176,369
82
networkx/algorithms/matching.py
53
21
def min_weight_matching(G, maxcardinality=False, weight="weight"): if len(G.edges) == 0: return max_weight_matching(G, maxcardinality, weight) G_edges = G.edges(data=weight, default=1) min_weight = min(w for _, _, w in G_edges) InvG = nx.Graph() edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges) InvG.add_weighted_edges_from(e
Update matching functions for error validation and speed (#4897) * First steps to update matching functions for #4644 Expand tests Change API to raise NetworkXError when matching involves nodes not in G Update is_*_matching to 100+ times faster. * improve matching_dict_to_set and docs for min_weight_matching * fix sphinx error
min_weight_matching
28b3014d68d2b4e40d3e02219770296a827bd55c
networkx
matching.py
12
9
https://github.com/networkx/networkx.git
4
114
1
40
191
Python
{ "docstring": "Computing a minimum-weight maximal matching of G.\n\n Use reciprocal edge weights with the maximum-weight algorithm.\n\n A matching is a subset of edges in which no node occurs more than once.\n The weight of a matching is the sum of the weights of its edges.\n A maximal matching cannot add more edges and still be a matching.\n The cardinality of a matching is the number of matched edges.\n\n This method replaces the weights with their reciprocal and\n then runs :func:`max_weight_matching`.\n Read the documentation of max_weight_matching for more information.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n maxcardinality: bool, optional (default=False)\n If maxcardinality is True, compute the maximum-cardinality matching\n with minimum weight among all maximum-cardinality matchings.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n If key not found, uses 1 as weight.\n\n Returns\n -------\n matching : set\n A minimal weight matching of the graph.\n ", "language": "en", "n_whitespaces": 233, "n_words": 146, "vocab_size": 92 }
def min_weight_matching(G, maxcardinality=False, weight="weight"): if len(G.edges) == 0: return max_weight_matching(G, maxcardinality, weight) G_edges = G.edges(data=weight, default=1) min_weight = min(w for _, _, w in G_edges) InvG = nx.Graph() edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges) InvG.add_weighted_edges_from(edges, weight=weight) return max_weight_matching(InvG, maxcardinality, weight) @not_implemented_for("multigraph") @not_implemented_for("directed")
3,270
20,218
20
pipenv/patched/notpip/_vendor/platformdirs/macos.py
6
4
def site_config_dir(self) -> str: return self._append_app_name_and_version("/Libr
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
site_config_dir
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
macos.py
8
3
https://github.com/pypa/pipenv.git
1
15
0
6
29
Python
{ "docstring": ":return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def site_config_dir(self) -> str: return self._append_app_name_and_version("/Library/Preferences")
36,095
154,585
310
modin/experimental/core/execution/native/implementations/hdk_on_native/expr.py
99
18
def _cmp_op(self, other, op_name): lhs_dtype_class = self._get_dtype_cmp_class(self._dtype) rhs_dtype_class = self._get_dtype_cmp_class(other._dtype) res_dtype = get_dtype(bool) # In HDK comparison with NULL always results in NULL, # but in pandas it is True for 'ne' comparison and False # for others. # Also pandas allows 'eq' and 'ne' comparison for value
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
_cmp_op
e5b1888cd932909e49194d58035da34b210b91c4
modin
expr.py
15
16
https://github.com/modin-project/modin.git
4
106
0
70
192
Python
{ "docstring": "\n Build a comparison expression.\n\n Parameters\n ----------\n other : BaseExpr\n A value to compare with.\n op_name : str\n The comparison operation name.\n\n Returns\n -------\n BaseExpr\n The resulting comparison expression.\n ", "language": "en", "n_whitespaces": 125, "n_words": 28, "vocab_size": 22 }
def _cmp_op(self, other, op_name): lhs_dtype_class = self._get_dtype_cmp_class(self._dtype) rhs_dtype_class = self._get_dtype_cmp_class(other._dtype) res_dtype = get_dtype(bool) # In HDK comparison with NULL always results in NULL, # but in pandas it is True for 'ne' comparison and False # for others. # Also pandas allows 'eq' and 'ne' comparison for values # of incompatible types which doesn't work in HDK. if lhs_dtype_class != rhs_dtype_class: if op_name == "eq" or op_name == "ne": return LiteralExpr(op_name == "ne") else: raise TypeError( f"Invalid comparison between {self._dtype} and {other._dtype}" ) else: cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype) return build_if_then_else( self.is_null(), LiteralExpr(op_name == "ne"), cmp, res_dtype )
23,965
110,191
249
lib/matplotlib/widgets.py
47
27
def set_active(self, index): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') if colors.same_color( self._
Use scatter for check boxes instead of Rectangle With the current implementation, the boxes get stretched into rectangles if the aspect ratio is not maintained. To overcome this, the boxes are now created using scatter instead to maintain their shapes.
set_active
723cd86d7d7bdc14a4d3fc0e08c3a01e72d310b6
matplotlib
widgets.py
17
18
https://github.com/matplotlib/matplotlib.git
8
174
0
37
295
Python
{ "docstring": "\n Toggle (activate or deactivate) a check button by index.\n\n Callbacks will be triggered if :attr:`eventson` is True.\n\n Parameters\n ----------\n index : int\n Index of the check button to toggle.\n\n Raises\n ------\n ValueError\n If *index* is invalid.\n ", "language": "en", "n_whitespaces": 122, "n_words": 36, "vocab_size": 33 }
def set_active(self, index): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') if colors.same_color( self._crosses.get_facecolor()[index], colors.to_rgba("none") ): self._crosses.get_facecolor()[index] = colors.to_rgba("k") else: self._crosses.get_facecolor()[index] = colors.to_rgba("none") if hasattr(self, "_rectangles"): for i, p in enumerate(self._rectangles): p.set_facecolor("k" if colors.same_color( p.get_facecolor(), colors.to_rgba("none")) else "none") if self.drawon: self.ax.figure.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text())
103,215
304,408
75
homeassistant/components/ebox/sensor.py
17
8
async def async_update(self) -> None: await s
Improve entity type hints [e] (#77041)
async_update
3a3f41f3df932368791d3ee3f5fbae5fb3b38bfe
core
sensor.py
13
7
https://github.com/home-assistant/core.git
2
50
0
17
82
Python
{ "docstring": "Get the latest data from EBox and update the state.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
async def async_update(self) -> None: await self.ebox_data.async_update() if self.entity_description.key in self.ebox_data.data: self._attr_native_value = round( self.ebox_data.data[self.entity_description.key], 2 )
70,312
244,297
474
tools/analysis_tools/analyze_results.py
102
49
def panoptic_evaluate(self, dataset, results, topk=20): # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder,
[Feature] Support panoptic segmentation result analysis (#7922) * support analyze panoptic segmentation result * fix lint * update docstring * update docstring * set print_log=False by default * update * fix bug 8035
panoptic_evaluate
f3a451abab8fc89810b317ca0a88ee9fd12cb0c2
mmdetection
analyze_results.py
13
34
https://github.com/open-mmlab/mmdetection.git
3
248
0
80
399
Python
{ "docstring": "Evaluation for panoptic segmentation.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n results (list): Panoptic segmentation results from test\n results pkl file.\n topk (int): Number of the highest topk and\n lowest topk after evaluation index sorting. Default: 20.\n\n Returns:\n tuple: A tuple contains good samples and bad samples.\n good_pqs (dict[int, float]): A dict contains good\n samples's indices in dataset and model's\n performance on them.\n bad_pqs (dict[int, float]): A dict contains bad\n samples's indices in dataset and model's\n performance on them.\n ", "language": "en", "n_whitespaces": 279, "n_words": 78, "vocab_size": 52 }
def panoptic_evaluate(self, dataset, results, topk=20): # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder, pred_folder, dataset.categories, dataset.file_client, print_log=False) pq_results, classwise_results = pq_stat.pq_average( dataset.categories, isthing=None) pqs[i] = pq_results['pq'] prog_bar.update() if tmp_dir is not None: tmp_dir.cleanup() # descending select topk image pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) good_pqs = pqs[-topk:] bad_pqs = pqs[:topk] return good_pqs, bad_pqs
76,702
261,234
298
sklearn/feature_selection/_mutual_info.py
113
37
def _compute_mi_cd(c, d, n_neighbors): n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with un
CLN Remove unnecessary operation in mutual_info (#24569)
_compute_mi_cd
c22be1defcf3e59ebd79ed3e479ada8ea558f601
scikit-learn
_mutual_info.py
14
34
https://github.com/scikit-learn/scikit-learn.git
3
270
0
69
422
Python
{ "docstring": "Compute mutual information between continuous and discrete variables.\n\n Parameters\n ----------\n c : ndarray, shape (n_samples,)\n Samples of a continuous random variable.\n\n d : ndarray, shape (n_samples,)\n Samples of a discrete random variable.\n\n n_neighbors : int\n Number of nearest neighbors to search for each point, see [1]_.\n\n Returns\n -------\n mi : float\n Estimated mutual information. If it turned out to be negative it is\n replace by 0.\n\n Notes\n -----\n True mutual information can't be negative. If its estimate by a numerical\n method is negative, it means (providing the method is adequate) that the\n mutual information is close to 0 and replacing it by 0 is a reasonable\n strategy.\n\n References\n ----------\n .. [1] B. C. Ross \"Mutual Information between Discrete and Continuous\n Data Sets\". PLoS ONE 9(2), 2014.\n ", "language": "en", "n_whitespaces": 221, "n_words": 126, "vocab_size": 85 }
def _compute_mi_cd(c, d, n_neighbors): n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] kd = KDTree(c) m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) m_all = np.array(m_all) mi = ( digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all)) ) return max(0, mi)
50,251
203,216
47
django/core/management/base.py
15
5
def handle_app_config(self, app_config, **options): raise Not
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
handle_app_config
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
base.py
8
4
https://github.com/django/django.git
1
16
0
15
29
Python
{ "docstring": "\n Perform the command's actions for app_config, an AppConfig instance\n corresponding to an application label given on the command line.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
def handle_app_config(self, app_config, **options): raise NotImplementedError( "Subclasses of AppCommand must provide a handle_app_config() method." )
@pytest.mark.parametrize( "max_features", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], )
75,720
259,339
70
sklearn/feature_selection/tests/test_from_model.py
38
23
def test_inferred_max_features_integer(max_features): clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.paramet
ENH Allow `SelectFromModel`'s `max_features` to accept callables (#22356) * Initial implementation * Improved error handling and stability * Added unit tests * Updated test to use `max_features_` instead of `max_features` * Added documentation for new private attribute `max_features_` * Improved error handling for callables * Updated whats_new * Removed incorrect term reference to `max_features` * Removed float case and improved testing * Updated test names to more clearly reflect intention * Added a sample callable in `max_features` description * Improved documentation and streamlined error handling * Updated example to include demonstrate using a callable for max_features * Separated out callable demo into separate example * Removed demo from `max_features` docs (now in example) * Updated changelog * Apply suggestions from code review Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> * Trimmed unneeded comments * Updated tests to reflect new error handling * Removed new line at end of docstring * Updated docstring * Fixed example syntax error * Fixed example syntax * Apply suggestions from code review Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> * Reverted irrelevant changes * Update sklearn/feature_selection/_from_model.py Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> * Fixed error message * Improved test coverage * Minor doc improvement -- added a list for `max_features` type * Update sklearn/feature_selection/_from_model.py Co-authored-by: Adrin Jalali <adrin.jalali@gmail.com> * Improved input validation and added test for array-like * Updated doc to use no longer use lambda function * Fixed docstring list * Added missing whitespace for list format in docstring Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Adrin Jalali <adrin.jalali@gmail.com>
test_inferred_max_features_integer
db24a30bd3b90a9d55e82e450631de96305744f7
scikit-learn
test_from_model.py
12
8
https://github.com/scikit-learn/scikit-learn.git
1
64
1
29
162
Python
{ "docstring": "Check max_features_ and output shape for integer max_features.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_inferred_max_features_integer(max_features): clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize( "max_features", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], )
42,228
177,016
40
networkx/algorithms/tests/test_lowest_common_ancestors.py
12
12
def test_naive_all_pairs_lowest_common_ancestor3(self): all_pairs = product(self.DG.nodes(), self.DG.nodes()) ans = naive_all_pairs_lca(self.DG, pairs=all_pairs) self.assert_lca_dicts_same(dict(ans), self.gold)
Naive lowest common ancestor implementation (#5736) * Add naive lca methods * Naive algorithm implementation for LCA * Modify naive lca functions * Correct parameters of nx.ancestors * Update lowest_common_ancestors.py * Parametrize tests * Apply suggestions from code review Co-authored-by: Dan Schult <dschult@colgate.edu> * Yield instead of append * Tests for naive lca * Correct test cases for naive lca algorithms * Apply suggestions from code review Co-authored-by: Mridul Seth <mail@mriduls.com> * Fix function name -when calling * Make requested changes * Inlining _get_a_lowest_common_ancestor Co-authored-by: dtuncturk <dilaramemis@sabanciuniv.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: Mridul Seth <mail@mriduls.com>
test_naive_all_pairs_lowest_common_ancestor3
b2f91c34a23058dd70b41784af0d87890216026a
networkx
test_lowest_common_ancestors.py
11
4
https://github.com/networkx/networkx.git
1
51
0
11
83
Python
{ "docstring": "Produces the correct results when all pairs given as a generator.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_naive_all_pairs_lowest_common_ancestor3(self): all_pairs = product(self.DG.nodes(), self.DG.nodes()) ans = naive_all_pairs_lca(self.DG, pairs=all_pairs) self.assert_lca_dicts_same(dict(ans), self.gold)
51,063
205,281
79
django/db/migrations/autodetector.py
22
8
def _resolve_dependency(dependency): if dependency[0] != "__setting__": return dependen
Refs #33476 -- Reformatted code with Black.
_resolve_dependency
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
autodetector.py
11
7
https://github.com/django/django.git
2
54
0
21
89
Python
{ "docstring": "\n Return the resolved dependency and a boolean denoting whether or not\n it was swappable.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
def _resolve_dependency(dependency): if dependency[0] != "__setting__": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(".") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
@not_implemented_for("undirected") @not_implemented_for("multigraph")
42,203
176,975
54
networkx/algorithms/lowest_common_ancestors.py
23
11
def lowest_common_ancestor(G, node1, node2, default=None): ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) if ans: assert len(ans) == 1 return ans[0][1] else: return default @not_implemented_for("undirected") @not_implemented_for("multigraph")
Add examples to lowest common ancestors algorithms (#5531) * Add examples to lowest common ancestors documentation * Fix output style of examples * Fix output style of example * Update pre-commit * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Indentation fix & pprint dictionary * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Move "import pprint" to the example Co-authored-by: dtuncturk <dilaramemis@sabanciuniv.edu> Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
lowest_common_ancestor
abaa68779ccb4cce8d1a5ecade622ab96d01edeb
networkx
lowest_common_ancestors.py
13
7
https://github.com/networkx/networkx.git
2
55
1
22
105
Python
{ "docstring": "Compute the lowest common ancestor of the given pair of nodes.\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n node1, node2 : nodes in the graph.\n\n default : object\n Returned if no common ancestor between `node1` and `node2`\n\n Returns\n -------\n The lowest common ancestor of node1 and node2,\n or default if they have no common ancestors.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)])\n >>> nx.lowest_common_ancestor(G, 3, 5)\n 2\n\n We can also set `default` argument as below. The value of default is returned\n if there are no common ancestors of given two nodes.\n\n >>> G = nx.DiGraph([(4, 5), (12, 13)])\n >>> nx.lowest_common_ancestor(G, 12, 5, default=\"No common ancestors!\")\n 'No common ancestors!'\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n Takes n log(n) time in the size of the graph.\n See `all_pairs_lowest_common_ancestor` when you have\n more than one pair of nodes of interest.\n\n See Also\n --------\n tree_all_pairs_lowest_common_ancestor\n all_pairs_lowest_common_ancestor\n ", "language": "en", "n_whitespaces": 252, "n_words": 155, "vocab_size": 107 }
def lowest_common_ancestor(G, node1, node2, default=None): ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) if ans: assert len(ans) == 1 return ans[0][1] else: return default @not_implemented_for("undirected") @not_implemented_for("multigraph")
14,058
65,933
16
erpnext/education/report/program_wise_fee_collection/program_wise_fee_collection.py
33
16
def get_data(filters=None): data = [] conditions = get_filter_conditions(filters) fee_details = frappe.db.sql( % (conditions), as_dict=1, ) for entry in fee_details: data.append( { "program": entry.program, "fees_collected": entry.paid_amount, "outstanding_amount": entry.outstanding_amount, "grand_total": entry.grand_total, } ) return data
style: format code with black
get_data
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
program_wise_fee_collection.py
12
37
https://github.com/frappe/erpnext.git
2
74
0
29
121
Python
{ "docstring": "\n\t\t\tSELECT\n\t\t\t\tFeesCollected.program,\n\t\t\t\tFeesCollected.paid_amount,\n\t\t\t\tFeesCollected.outstanding_amount,\n\t\t\t\tFeesCollected.grand_total\n\t\t\tFROM (\n\t\t\t\tSELECT\n\t\t\t\t\tsum(grand_total) - sum(outstanding_amount) AS paid_amount, program,\n\t\t\t\t\tsum(outstanding_amount) AS outstanding_amount,\n\t\t\t\t\tsum(grand_total) AS grand_total\n\t\t\t\tFROM `tabFees`\n\t\t\t\tWHERE\n\t\t\t\t\tdocstatus = 1 and\n\t\t\t\t\tprogram IS NOT NULL\n\t\t\t\t\t%s\n\t\t\t\tGROUP BY program\n\t\t\t) AS FeesCollected\n\t\t\tORDER BY FeesCollected.paid_amount DESC\n\t\t", "language": "en", "n_whitespaces": 24, "n_words": 42, "vocab_size": 33 }
def get_data(filters=None): data = [] conditions = get_filter_conditions(filters) fee_details = frappe.db.sql( % (conditions), as_dict=1, ) for entry in fee_details: data.append( { "program": entry.program, "fees_collected": entry.paid_amount, "outstanding_amount": entry.outstanding_amount, "grand_total": entry.grand_total, } ) return data
4,271
22,227
84
pipenv/vendor/requirementslib/models/dependencies.py
46
15
def get_dependencies_from_json(ireq): if ireq.editable or not is_pinned_requirement(ireq): return # It is technically possible to parse extras out of the JSON API's
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
get_dependencies_from_json
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
dependencies.py
12
18
https://github.com/pypa/pipenv.git
6
101
0
40
96
Python
{ "docstring": "Retrieves dependencies for the given install requirement from the json\n api.\n\n :param ireq: A single InstallRequirement\n :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement`\n :return: A set of dependency lines for generating new InstallRequirements.\n :rtype: set(str) or None\n ", "language": "en", "n_whitespaces": 51, "n_words": 33, "vocab_size": 29 }
def get_dependencies_from_json(ireq): if ireq.editable or not is_pinned_requirement(ireq): return # It is technically possible to parse extras out of the JSON API's # requirement format, but it is such a chore let's just use the simple API. if ireq.extras: return session = requests.session() atexit.register(session.close) version = str(ireq.req.specifier).lstrip("=")
40,716
171,745
58
pandas/core/frame.py
20
12
def assign(self, **kwargs) -> DataFrame: r data = self.copy(deep=None) for k, v in kwargs.items():
ENH/TST: expand copy-on-write to assign() method (#50010)
assign
36dcf519c67a8098572447f7d5a896740fc9c464
pandas
frame.py
10
66
https://github.com/pandas-dev/pandas.git
2
48
0
18
75
Python
{ "docstring": "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n ", "language": "en", "n_whitespaces": 761, "n_words": 268, "vocab_size": 146 }
def assign(self, **kwargs) -> DataFrame: r data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data
70,305
244,278
368
mmdet/models/dense_heads/solo_head.py
40
14
def resize_feats(self, feats): out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear',
[Feature] Support SOLOv2 (#7441) * solov2 init * solov2 r18 lightweight * add model docstrings and reformat the code * add docstrings to model method * add solov2 big model config and correct some errors in the docstring * fix linting issues * refactor code and configs * rename variables according to the convention * add and enhance solov2 logic * add doc strings * update solov2 config files * fix norm_cfg in mask head * minor fix * update configs Co-authored-by: BIGWangYuDong <yudongwang@tju.edu.cn>
resize_feats
d18cdb140ef3cb9ed5fdef6f1a815f5836f1b1ab
mmdetection
solo_head.py
19
20
https://github.com/open-mmlab/mmdetection.git
4
127
0
29
198
Python
{ "docstring": "Downsample the first feat and upsample last feat in feats.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def resize_feats(self, feats): out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear', align_corners=False)) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear', align_corners=False)) else: out.append(feats[i]) return out
14,672
67,940
50
erpnext/stock/report/stock_projected_qty/stock_projected_qty.py
71
17
def get_bin_list(filters): conditions = [] if filters.item_code: cond
style: format code with black
get_bin_list
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
stock_projected_qty.py
16
24
https://github.com/frappe/erpnext.git
5
107
0
52
181
Python
{ "docstring": "select item_code, warehouse, actual_qty, planned_qty, indented_qty,\n\t\tordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty\n\t\tfrom tabBin bin {conditions} order by item_code, warehouse\n\t\t", "language": "en", "n_whitespaces": 16, "n_words": 19, "vocab_size": 18 }
def get_bin_list(filters): conditions = [] if filters.item_code: conditions.append("item_code = '%s' " % filters.item_code) if filters.warehouse: warehouse_details = frappe.db.get_value( "Warehouse", filters.warehouse, ["lft", "rgt"], as_dict=1 ) if warehouse_details: conditions.append( " exists (select name from `tabWarehouse` wh \ where wh.lft >= %s and wh.rgt <= %s and bin.warehouse = wh.name)" % (warehouse_details.lft, warehouse_details.rgt) ) bin_list = frappe.db.sql( .format( conditions=" where " + " and ".join(conditions) if conditions else "" ), as_dict=1, ) return bin_list
80,828
271,613
373
keras/engine/training.py
113
9
def run_eagerly(self): if ( self.dynamic and self._run_eagerly is False ): # pylint:disable=g-bool-id-comparison # TODO(fchollet): consider using py_func to enable this. raise ValueError( "Your model contains layers that can only be " "successfully run in eager execution (layers " "constructed with `dynamic=True`). " "You cannot set `run_eagerly=False`." ) if self._cluster_coordinator and self._run_eagerly: raise ValueError( "When using `Model` with `ParameterServerStrategy`, " "`run_eagerly` is not
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
run_eagerly
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training.py
12
20
https://github.com/keras-team/keras.git
8
68
0
80
127
Python
{ "docstring": "Settable attribute indicating whether the model should run eagerly.\n\n Running eagerly means that your model will be run step by step,\n like Python code. Your model might run slower, but it should become easier\n for you to debug it by stepping into individual layer calls.\n\n By default, we will attempt to compile your model to a static graph to\n deliver the best execution performance.\n\n Returns:\n Boolean, whether the model should run eagerly.\n ", "language": "en", "n_whitespaces": 130, "n_words": 72, "vocab_size": 52 }
def run_eagerly(self): if ( self.dynamic and self._run_eagerly is False ): # pylint:disable=g-bool-id-comparison # TODO(fchollet): consider using py_func to enable this. raise ValueError( "Your model contains layers that can only be " "successfully run in eager execution (layers " "constructed with `dynamic=True`). " "You cannot set `run_eagerly=False`." ) if self._cluster_coordinator and self._run_eagerly: raise ValueError( "When using `Model` with `ParameterServerStrategy`, " "`run_eagerly` is not supported." ) # Run eagerly logic, by priority: # (1) Dynamic models must be run eagerly. # (2) Explicitly setting run_eagerly causes a Model to be run eagerly. # (3) Not explicitly setting run_eagerly defaults to TF's global setting. return ( self.dynamic or self._run_eagerly or (tf.config.functions_run_eagerly() and self._run_eagerly is None) )
16,882
79,164
24
wagtail/models/__init__.py
10
5
def get_preview_context(self, request, *args, **kwargs): return {"object": self, "request": request}
Add docs for PreviewableMixin
get_preview_context
e864b9c4d12ad0edd38283c17c2935e950e73520
wagtail
__init__.py
8
2
https://github.com/wagtail/wagtail.git
1
24
0
10
41
Python
{ "docstring": "\n Returns a context dictionary for use in templates for previewing this object.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
def get_preview_context(self, request, *args, **kwargs): return {"object": self, "request": request}
99,145
300,279
228
tests/components/mobile_app/test_sensor.py
61
21
async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1]["webhook_id"] webhook_url = f"/api/webhook/{webhook_id}" reg_resp = await webhook_client.post( w
Allow mobile app to disable entities by default (#71562)
test_default_disabling_entity
539ce7ff0e9d9bc59cd8f028f245c09f802c89cb
core
test_sensor.py
15
25
https://github.com/home-assistant/core.git
1
129
0
45
230
Python
{ "docstring": "Test that sensors can be disabled by default upon registration.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1]["webhook_id"] webhook_url = f"/api/webhook/{webhook_id}" reg_resp = await webhook_client.post( webhook_url, json={ "type": "register_sensor", "data": { "name": "Battery State", "type": "sensor", "unique_id": "battery_state", "default_disabled": True, }, }, ) assert reg_resp.status == HTTPStatus.CREATED json = await reg_resp.json() assert json == {"success": True} await hass.async_block_till_done() entity = hass.states.get("sensor.test_1_battery_state") assert entity is None assert ( er.async_get(hass).async_get("sensor.test_1_battery_state").disabled_by == er.RegistryEntryDisabler.INTEGRATION )
36,490
155,918
1,545
dask/dataframe/io/parquet/arrow.py
379
51
def _create_dd_meta(cls, dataset_info): # Collect necessary information from dataset_info schema = dataset_info["schema"] index = dataset_info["index"] categories = dataset_info["categories"] partition_obj = dataset_info["partitions"] partitions = dataset_info["partition_names"] physical_column_names = dataset_info.get("physical_schema", schema).names columns = None # Set index and column names using # pandas metadata (when available) pandas_metadata = _get_pandas_metadata(schema) if pandas_metadata: ( index_names, column_names, storage_name_mapping, column_index_names, ) = _parse_pandas_metadata(pandas_metadata) if categories is None: categories = [] for col in pandas_metadata["columns"]: if (col["pandas_type"] == "categorical") and ( col["name"] not in categories ): categories.append(col["name"]) else: # No pandas metadata implies no index, unless selected by the user index_names = [] column_names = physical_column_names storage_name_mapping = {k: k for k in column_names} column_index_names = [None] if index is None and index_names: # Pandas metadata has provided the index name for us index = index_names # Ensure that there is no overlap between partition columns # and explicit column storage if partitions: _partitions = [p for p in partitions if p not in physical_column_names] if not _partitions: partitions = [] dataset_info["partitions"] = None dataset_info["partition_keys"] = {} datas
Fix "physical" column bug in pyarrow-based read_parquet (#8775) Starting with pyarrow-5.0, the `pyarrow.dataset` API can now be used to write parquet datasets. Using `pyarrow.dataset.write_dataset` to write partitioned data results in different "pandas metadata" than we get from a Dask-written dataset, because Dask will not include the partitioned column names in this metadata (since they are not "physical" columns), but pyarrow will. This exposed a bug in Dask, where we were conflating "pandas metadata" column names with "physical" column names. This PR adds a small fix to ensure that Dask will only bail on reading partitioned columns if/when the partitioned columns are really "physical" columns.
_create_dd_meta
73acebb3a2066792dea39c78245a6e1a01b2b173
dask
arrow.py
19
81
https://github.com/dask/dask.git
27
504
0
196
823
Python
{ "docstring": "Use parquet schema and hive-partition information\n (stored in dataset_info) to construct DataFrame metadata.\n\n This method is used by both arrow engines.\n ", "language": "en", "n_whitespaces": 42, "n_words": 21, "vocab_size": 21 }
def _create_dd_meta(cls, dataset_info): # Collect necessary information from dataset_info schema = dataset_info["schema"] index = dataset_info["index"] categories = dataset_info["categories"] partition_obj = dataset_info["partitions"] partitions = dataset_info["partition_names"] physical_column_names = dataset_info.get("physical_schema", schema).names columns = None # Set index and column names using # pandas metadata (when available) pandas_metadata = _get_pandas_metadata(schema) if pandas_metadata: ( index_names, column_names, storage_name_mapping, column_index_names, ) = _parse_pandas_metadata(pandas_metadata) if categories is None: categories = [] for col in pandas_metadata["columns"]: if (col["pandas_type"] == "categorical") and ( col["name"] not in categories ): categories.append(col["name"]) else: # No pandas metadata implies no index, unless selected by the user index_names = [] column_names = physical_column_names storage_name_mapping = {k: k for k in column_names} column_index_names = [None] if index is None and index_names: # Pandas metadata has provided the index name for us index = index_names # Ensure that there is no overlap between partition columns # and explicit column storage if partitions: _partitions = [p for p in partitions if p not in physical_column_names] if not _partitions: partitions = [] dataset_info["partitions"] = None dataset_info["partition_keys"] = {} dataset_info["partition_names"] = partitions elif len(_partitions) != len(partitions): raise ValueError( "No partition-columns should be written in the \n" "file unless they are ALL written in the file.\n" "physical columns: {} | partitions: {}".format( physical_column_names, partitions ) ) column_names, index_names = _normalize_index_columns( columns, column_names + partitions, index, index_names ) all_columns = index_names + column_names # Check that categories are included in columns if categories and not set(categories).intersection(all_columns): raise ValueError( "categories not in available columns.\n" "categories: {} | columns: {}".format(categories, list(all_columns)) ) dtypes = _get_pyarrow_dtypes(schema, categories) dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()} index_cols = index or () meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names) if categories: # Make sure all categories are set to "unknown". # Cannot include index names in the `cols` argument. meta = clear_known_categories( meta, cols=[c for c in categories if c not in meta.index.names] ) if partition_obj: for partition in partition_obj: if isinstance(index, list) and partition.name == index[0]: # Index from directory structure meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=index[0] ) elif partition.name == meta.index.name: # Index created from a categorical column meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=meta.index.name ) elif partition.name in meta.columns: meta[partition.name] = pd.Series( pd.Categorical(categories=partition.keys, values=[]), index=meta.index, ) # Update `dataset_info` and return `meta` dataset_info["index"] = index dataset_info["index_cols"] = index_cols dataset_info["categories"] = categories return meta
9,392
48,185
243
airflow/providers/google/cloud/transfers/postgres_to_gcs.py
54
31
def convert_type(self, value, schema_type, stringify_dict=True): if isinstance(value, datetime.datetime): iso_format_value = value.isoformat() if value.tzinfo is None: return iso_format_value return pendulum.parse(iso_format_value).float_timestamp if isinstance(val
Fix `PostgresToGCSOperator` does not allow nested JSON (#23063) * Avoid double json.dumps for json data export in PostgresToGCSOperator. * Fix CI
convert_type
766726f2e3a282fcd2662f5dc6e9926dc38a6540
airflow
postgres_to_gcs.py
12
19
https://github.com/apache/airflow.git
8
149
0
35
231
Python
{ "docstring": "\n Takes a value from Postgres, and converts it to a value that's safe for\n JSON/Google Cloud Storage/BigQuery.\n Timezone aware Datetime are converted to UTC seconds.\n Unaware Datetime, Date and Time are converted to ISO formatted strings.\n Decimals are converted to floats.\n\n :param value: Postgres column value.\n :param schema_type: BigQuery data type.\n :param stringify_dict: Specify whether to convert dict to string.\n ", "language": "en", "n_whitespaces": 124, "n_words": 60, "vocab_size": 46 }
def convert_type(self, value, schema_type, stringify_dict=True): if isinstance(value, datetime.datetime): iso_format_value = value.isoformat() if value.tzinfo is None: return iso_format_value return pendulum.parse(iso_format_value).float_timestamp if isinstance(value, datetime.date): return value.isoformat() if isinstance(value, datetime.time): formatted_time = time.strptime(str(value), "%H:%M:%S") time_delta = datetime.timedelta( hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec ) return str(time_delta) if stringify_dict and isinstance(value, dict): return json.dumps(value) if isinstance(value, Decimal): return float(value) return value
81,820
276,990
49
keras/utils/metrics_utils.py
27
15
def _filter_top_k(x, k): _, top_k_idx = tf.math.top_k(x, k, sorted=False) top_k_mask = tf.reduce_sum( tf.one_
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_filter_top_k
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
metrics_utils.py
13
6
https://github.com/keras-team/keras.git
1
72
0
24
110
Python
{ "docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF.\n\n Used for computing top-k prediction values in dense labels (which has the same\n shape as predictions) for recall and precision top-k metrics.\n\n Args:\n x: tensor with any dimensions.\n k: the number of values to keep.\n\n Returns:\n tensor with same shape and dtype as x.\n ", "language": "en", "n_whitespaces": 89, "n_words": 59, "vocab_size": 41 }
def _filter_top_k(x, k): _, top_k_idx = tf.math.top_k(x, k, sorted=False) top_k_mask = tf.reduce_sum( tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2 ) return x * top_k_mask + NEG_INF * (1 - top_k_mask)
43,477
181,690
192
tests/tpot_tests.py
102
12
def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
test_pick_two_individuals_eligible_for_crossover_bad
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
tpot_tests.py
9
19
https://github.com/EpistasisLab/tpot.git
4
104
0
48
171
Python
{ "docstring": "Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can not do crossover with a population of only 1. pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1]) assert pick1 is None and pick2 is None # You can not do crossover with a population of 0. pick1, pick2 = pick_two_individuals_eligible_for_crossover([]) assert pick1 is None and pick2 is None
52,808
209,826
77
scapy/arch/windows/__init__.py
26
9
def get_ips(v6=False): # type: (bool) -> Dict[NetworkInterface, List[
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <pierre@droids-corp.org>
get_ips
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
scapy
__init__.py
13
8
https://github.com/secdev/scapy.git
3
53
0
22
86
Python
{ "docstring": "Returns all available IPs matching to interfaces, using the windows system.\n Should only be used as a WinPcapy fallback.\n\n :param v6: IPv6 addresses\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 23 }
def get_ips(v6=False): # type: (bool) -> Dict[NetworkInterface, List[str]] res = {} for iface in six.itervalues(conf.ifaces): if v6: res[iface] = iface.ips[6] else: res[iface] = iface.ips[4] return res
6,898
38,013
31
src/transformers/models/opt/modeling_opt.py
16
9
def _set_gradient_checkpointing(self, module, value=False):
Add OPT (#17088) * First version - OPT model * Final changes - putting use cache to False * few changes - remove commented block * few changes - remove unecessary files * fix style issues * few changes - remove a test file - added the logits test * Update src/transformers/models/auto/tokenization_auto.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * add gen tests * few changes - rm mask filling example on docstring * few changes - remove useless args * some changes - more tests should pass now - needs to clean more - documentation still needs to be done * fix code quality * major changes - change attention architecture to BART-like - modify some tests - style fix * rm useless classes - remove opt for: - QA - cond generation - seq classif * Removed autodoc calls to non-existant classes TOkenizers are not implemented * Update src/transformers/__init__.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update src/transformers/__init__.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Update src/transformers/models/auto/modeling_tf_auto.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Replaced OPTTokeniser with GPT2 tokenizer * added GPT2Tokenizer.from_pretrained("patrickvonplaten/opt_gpt2_tokenizer") * Removed OPTTokenizer * make style * Make style replaces ``` ...).unsqueeze(``` by ``` >>>).unsqueeze(``` * make repo consistency * Removed PretrainedOPTModel * fix opt.mdx removed other heads * fix init, removed 3 heads * removed heads * finished cleaning head * removed seauence classif and question answering * removed unused imports * removed useless dummy object for QA, SC and CG * removed tests for removed useless dummy object for QA, SC and CG * Removed head_mask using encoder layers which don't exist * fixed test * fix line * added OPT to toctree * Updated model path with pushed weigths * fix model path * fixed code quality * fixed embeddings and generation tests * update paths * clean comments * removed OPTClassificationHead for sentence classification * renamed hidden layer * renamed num layers to standard num_hidden_layers * num_attention_heads fix * changes for 125m * add first version for 125m * add first version - flax * add new version * causal LM output * replace output type with BaseModelOutputWithPastAndCrossAttentions * revert working config from 150m to 350m * clean * removed decoder input ids * fixed embed dim * more embed_dim issues * make style + removed enc_dec test * update falx model * removed troublesome copy * added is_encoder_decoder=False to config * added set_input emb fuinction to model class * requires torch on embed test * use head mask instead of decoder head mask input param solves a test * 8 test remaining, update * Updated create_and_check_decoder_model_past_large_inputs * Make style * update op tokenizer with condition * make style * See if I can push * some clean up * remove linear head hack * save intermediate * save correct attention * add copied from from bart * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * fix part of the reviewss Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * same changes in naming / conversion * correct mask * more fixes * delete FlaxOPT and TfOPT * clean traces of Flax and Tf * fix mask * fixed positionnal embedding length when past key value is provoded * get 125m, 6.7b to work * Added do_layer_norm * solved mismatch in load dictionnary * clean 
up preapre opt input dict * fixed past key value as bool * fix previus * fixed return dict False tuple issue * All tests are passing * Make style * Ignore OPTDecoder non tested * make fix-copies * make repo consistency * small fix * removed uselss @torch.no_grad decorator * make styl;e * fix previous opt test * style * make style * added opt documentation * update OPT_PRETRAINED_MODEL_ARCHIVE_LIST * up * more fixes * model & config work * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * added comment on padding hack (+2) * cleaup * review update * docstring for missing arg * Update docs/source/en/model_doc/opt.mdx Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update docs/source/en/model_doc/opt.mdx Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update docs/source/en/model_doc/opt.mdx Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/models/opt/__init__.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * update pretrained map * update path and tests * make style * styling * make consistency * add gpt2 tok new * more tok fixes * Update src/transformers/models/auto/tokenization_auto.py * Update docs/source/en/model_doc/opt.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update docs/source/en/model_doc/opt.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update docs/source/en/model_doc/opt.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update tests/models/opt/test_modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/opt/modeling_opt.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update based on reviews * Apply suggestions from code review Co-authored-by: Lysandre Debut <lysandre@huggingface.co> * make style * make tokenizer auto tests pass * apply Lysandre suggestion * finish tests * add some good tokenizer tests * improve docs slighly Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> Co-authored-by: ArthurZucker <arthur.zucker@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
_set_gradient_checkpointing
b971c769e80fe85fb7dd35c7cf65f3ac97ea6421
transformers
modeling_opt.py
9
3
https://github.com/huggingface/transformers.git
2
26
0
13
53
Python
{ "docstring": "\n Generation example:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForCausalLM\n\n >>> model = OPTForCausalLM.from_pretrained(\"ArthurZ/opt-350m\")\n >>> tokenizer = GPT2Tokenizer.from_pretrained(\"patrickvonplaten/opt_gpt2_tokenizer\")\n\n >>> TEXTS_TO_GENERATE = \"Hey, are you consciours? Can you talk to me?\" \"Hi there, my name is Barack\"\n >>> inputs = tokenizer([TEXTS_TO_GENERATE], max_length=1024, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs[\"input_ids\"], num_beams=2, min_length=0, max_length=20)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n 'I'm not conscious.<\\s>'\n ```\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_inputs`] and modify\n to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the\n default strategy.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 979, "n_words": 470, "vocab_size": 244 }
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (OPTDecoder)): module.gradient_checkpointing = value OPT_GENERATION_EXAMPLE = r OPT_INPUTS_DOCSTRING = r
120,959
337,105
78
examples/text_to_image/train_text_to_image.py
29
7
def to(self, device=None, dtype=None) -> None: r # .to() on the ten
[train_text2image] Fix EMA and make it compatible with deepspeed. (#813) * fix ema * style * add comment about copy * style * quality
to
008b608f1551dbcf521284ed0e7a6722cd02ef07
diffusers
train_text_to_image.py
11
10
https://github.com/huggingface/diffusers.git
3
56
0
28
85
Python
{ "docstring": "Move internal buffers of the ExponentialMovingAverage to `device`.\n\n Args:\n device: like `device` argument to `torch.Tensor.to`\n ", "language": "en", "n_whitespaces": 40, "n_words": 15, "vocab_size": 14 }
def to(self, device=None, dtype=None) -> None: r # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ]
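A minimal runnable sketch of the pattern in the `to()` method above, assuming PyTorch is installed: only floating-point shadow parameters are re-cast to the new dtype, while any non-float buffer would keep its dtype and only move device. `TinyEMA` is illustrative, not the diffusers `EMAModel` API.

```python
import torch

class TinyEMA:
    """Minimal stand-in for the EMA helper above (not the diffusers class)."""

    def __init__(self, params):
        # Keep detached copies ("shadow" parameters) of the model weights.
        self.shadow_params = [p.clone().detach() for p in params]

    def to(self, device=None, dtype=None):
        # Only floating-point tensors are allowed to change dtype;
        # non-float tensors keep theirs and only move device.
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

model = torch.nn.Linear(4, 2)
ema = TinyEMA(model.parameters())
ema.to(device="cpu", dtype=torch.float16)
print([p.dtype for p in ema.shadow_params])  # [torch.float16, torch.float16]
```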
85,240
285,200
28
openbb_terminal/econometrics/econometrics_model.py
16
11
def get_granger_causality(dependent_series, independent_series, lags): granger_set = pd.concat([dependent_series, independent_series], axis=1) granger = grangercausalitytests(granger_set, [lags], verbose=False) return granger
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
get_granger_causality
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
OpenBBTerminal
econometrics_model.py
9
4
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
42
0
14
63
Python
{ "docstring": "Calculate granger tests\n\n Parameters\n ----------\n dependent_series: Series\n The series you want to test Granger Causality for.\n independent_series: Series\n The series that you want to test whether it Granger-causes time_series_y\n lags : int\n The amount of lags for the Granger test. By default, this is set to 3.\n ", "language": "en", "n_whitespaces": 86, "n_words": 47, "vocab_size": 36 }
def get_granger_causality(dependent_series, independent_series, lags): granger_set = pd.concat([dependent_series, independent_series], axis=1) granger = grangercausalitytests(granger_set, [lags], verbose=False) return granger
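A hedged usage sketch of the same `grangercausalitytests` call pattern on made-up data, where the candidate cause leads the dependent series by one step. As in the helper above, the dependent series goes in the first column and the test is run at a single lag; statsmodels tests whether the second column Granger-causes the first.

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import grangercausalitytests

# Hypothetical series: y lags x by one step, so x should Granger-cause y.
rng = np.random.default_rng(0)
x = pd.Series(rng.normal(size=200), name="independent")
y = pd.Series(np.r_[0.0, x.values[:-1]] + rng.normal(scale=0.1, size=200), name="dependent")

# Same call pattern as the helper above: dependent series first, then the
# candidate cause, tested at one lag only.
granger_set = pd.concat([y, x], axis=1)
result = grangercausalitytests(granger_set, [3], verbose=False)
print(result[3][0]["ssr_ftest"])  # (F statistic, p-value, df_denom, df_num)
```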
51,592
206,624
91
django/utils/decorators.py
47
4
def _multi_decorate(decorators, method): if hasattr(decorators, "__iter__")
Refs #33476 -- Reformatted code with Black.
_multi_decorate
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
decorators.py
11
10
https://github.com/django/django.git
3
52
0
35
58
Python
{ "docstring": "\n Decorate `method` with one or more function decorators. `decorators` can be\n a single decorator or an iterable of decorators.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
def _multi_decorate(decorators, method): if hasattr(decorators, "__iter__"): # Apply a list/tuple of decorators if 'decorators' is one. Decorator # functions are applied so that the call order is the same as the # order in which they appear in the iterable. decorators = decorators[::-1] else: decorators = [decorators]
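The body shown above is cut off after normalising `decorators`; the reversal matters because it reproduces ordinary decorator stacking order. A toy sketch of that composition order, with made-up decorators (not Django's full implementation, which additionally binds `self` before applying the decorators):

```python
from functools import wraps

def shout(func):
    @wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return inner

def exclaim(func):
    @wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs) + "!"
    return inner

def apply_decorators(decorators, func):
    # Reversing first, then applying in a loop, reproduces normal stacking:
    # the last decorator in the list ends up closest to the function.
    for dec in decorators[::-1]:
        func = dec(func)
    return func

greet = apply_decorators([shout, exclaim], lambda name: f"hello {name}")
print(greet("ada"))  # HELLO ADA!
```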
4,536
23,192
716
ppocr/data/imaug/fce_targets.py
191
49
def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in
add fcenet
generate_level_targets
9f62b610dea6161627200ed85d92e19b1923279a
PaddleOCR
fce_targets.py
15
39
https://github.com/PaddlePaddle/PaddleOCR.git
10
384
0
96
586
Python
{ "docstring": "Generate ground truth target on each level.\n\n Args:\n img_size (list[int]): Shape of input image.\n text_polys (list[list[ndarray]]): A list of ground truth polygons.\n ignore_polys (list[list[ndarray]]): A list of ignored polygons.\n Returns:\n level_maps (list(ndarray)): A list of ground target on each level.\n ", "language": "en", "n_whitespaces": 105, "n_words": 40, "vocab_size": 24 }
def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in range(len(lv_size_divs))] level_maps = [] for poly in text_polys: # assert len(poly) == 1 # text_instance = [[poly[i], poly[i + 1]] # for i in range(0, len(poly), 2)] polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_text_polys[ind].append(poly / lv_size_divs[ind]) for ignore_poly in ignore_polys: # assert len(ignore_poly) == 1 # text_instance = [[ignore_poly[i], ignore_poly[i + 1]] # for i in range(0, len(ignore_poly), 2)] polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind]) for ind, size_divisor in enumerate(lv_size_divs): current_level_maps = [] level_img_size = (h // size_divisor, w // size_divisor) text_region = self.generate_text_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(text_region) center_region = self.generate_center_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(center_region) effective_mask = self.generate_effective_mask( level_img_size, lv_ignore_polys[ind])[None] current_level_maps.append(effective_mask) fourier_real_map, fourier_image_maps = self.generate_fourier_maps( level_img_size, lv_text_polys[ind]) current_level_maps.append(fourier_real_map) current_level_maps.append(fourier_image_maps) level_maps.append(np.concatenate(current_level_maps)) return level_maps
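A simplified sketch of the size-based level routing used above: each box goes to every pyramid level whose proportion range contains `max(h, w) / image_height`. The ranges and box sizes here are made up, and the real generator additionally scales polygon coordinates by the level's size divisor and builds text/center/effective/Fourier maps per level.

```python
def assign_levels(box_sizes, img_h, proportion_ranges):
    # Route each box to every pyramid level whose proportion range contains
    # max(h, w) / image_height, mirroring the filtering in the code above.
    assignments = []
    for h, w in box_sizes:
        proportion = max(h, w) / (img_h + 1e-8)
        assignments.append(
            [i for i, (lo, hi) in enumerate(proportion_ranges) if lo < proportion < hi]
        )
    return assignments

ranges = [(0.0, 0.4), (0.3, 0.7), (0.6, 1.0)]
print(assign_levels([(50, 80), (300, 120), (600, 500)], 640, ranges))
# [[0], [1], [2]] -- overlapping ranges may put one box on several levels
```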
14,684
67,961
12
erpnext/stock/stock_balance.py
18
8
def get_reserved_qty(item_code, warehouse): reserved_qty = frappe.db.sql( , (item_code, warehouse, item_code, warehouse), ) return flt(
style: format code with black
get_reserved_qty
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
stock_balance.py
10
46
https://github.com/frappe/erpnext.git
2
43
0
17
62
Python
{ "docstring": "\n\t\tselect\n\t\t\tsum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty))\n\t\tfrom\n\t\t\t(\n\t\t\t\t(select\n\t\t\t\t\tqty as dnpi_qty,\n\t\t\t\t\t(\n\t\t\t\t\t\tselect qty from `tabSales Order Item`\n\t\t\t\t\t\twhere name = dnpi.parent_detail_docname\n\t\t\t\t\t\tand (delivered_by_supplier is null or delivered_by_supplier = 0)\n\t\t\t\t\t) as so_item_qty,\n\t\t\t\t\t(\n\t\t\t\t\t\tselect delivered_qty from `tabSales Order Item`\n\t\t\t\t\t\twhere name = dnpi.parent_detail_docname\n\t\t\t\t\t\tand delivered_by_supplier = 0\n\t\t\t\t\t) as so_item_delivered_qty,\n\t\t\t\t\tparent, name\n\t\t\t\tfrom\n\t\t\t\t(\n\t\t\t\t\tselect qty, parent_detail_docname, parent, name\n\t\t\t\t\tfrom `tabPacked Item` dnpi_in\n\t\t\t\t\twhere item_code = %s and warehouse = %s\n\t\t\t\t\tand parenttype=\"Sales Order\"\n\t\t\t\t\tand item_code != parent_item\n\t\t\t\t\tand exists (select * from `tabSales Order` so\n\t\t\t\t\twhere name = dnpi_in.parent and docstatus = 1 and status != 'Closed')\n\t\t\t\t) dnpi)\n\t\t\tunion\n\t\t\t\t(select stock_qty as dnpi_qty, qty as so_item_qty,\n\t\t\t\t\tdelivered_qty as so_item_delivered_qty, parent, name\n\t\t\t\tfrom `tabSales Order Item` so_item\n\t\t\t\twhere item_code = %s and warehouse = %s\n\t\t\t\tand (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0)\n\t\t\t\tand exists(select * from `tabSales Order` so\n\t\t\t\t\twhere so.name = so_item.parent and so.docstatus = 1\n\t\t\t\t\tand so.status != 'Closed'))\n\t\t\t) tab\n\t\twhere\n\t\t\tso_item_qty >= so_item_delivered_qty\n\t", "language": "en", "n_whitespaces": 124, "n_words": 163, "vocab_size": 69 }
def get_reserved_qty(item_code, warehouse): reserved_qty = frappe.db.sql( , (item_code, warehouse, item_code, warehouse), ) return flt(reserved_qty[0][0]) if reserved_qty else 0
23,229
108,518
21
lib/matplotlib/pyplot.py
15
2
def cool(): set_cmap('cool
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com>
cool
032316bc6c7798fca6c82de24167c975f237687f
matplotlib
pyplot.py
8
2
https://github.com/matplotlib/matplotlib.git
1
9
0
15
22
Python
{ "docstring": "\n Set the colormap to 'cool'.\n\n This changes the default colormap as well as the colormap of the current\n image if there is one. See ``help(colormaps)`` for more information.\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 22 }
def cool(): set_cmap('cool') # Autogenerated by boilerplate.py. Do not edit as changes will be lost.
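A short usage sketch, assuming matplotlib and numpy are installed; the data and output filename are arbitrary.

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs without a display
import matplotlib.pyplot as plt

data = np.random.rand(16, 16)
plt.imshow(data)   # drawn with the default colormap
plt.cool()         # switch the default colormap and the current image to 'cool'
plt.savefig("cool_demo.png")
```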
52,627
209,158
32
scapy/packet.py
11
3
def add_parent(self, parent):
Add parent field to Packet (#3607) Co-authored-by: Sergey Matsievskiy <matsievskiy@fastwel.ru>
add_parent
6d7184e8bec5102dfa66bcc10432a30a7e0dcf3a
scapy
packet.py
7
2
https://github.com/secdev/scapy.git
1
13
0
11
24
Python
{ "docstring": "Set packet parent.\n When packet is an element in PacketListField, parent field would\n point to the list owner packet.", "language": "en", "n_whitespaces": 32, "n_words": 19, "vocab_size": 18 }
def add_parent(self, parent): # type: (Packet) -> None self.parent = parent
52,377
208,534
35
IPython/testing/tools.py
9
7
def make_tempfile(name): open(name, 'w', encoding='utf-8
Fix EncodingWarning on Python 3.10
make_tempfile
23276ac4770f380ce1d5808950dd412a35594af1
ipython
tools.py
11
6
https://github.com/ipython/ipython.git
2
31
0
9
59
Python
{ "docstring": " Create an empty, named, temporary file for the duration of the context.\n ", "language": "en", "n_whitespaces": 16, "n_words": 12, "vocab_size": 11 }
def make_tempfile(name): open(name, 'w', encoding='utf-8').close() try: yield finally: os.unlink(name)
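The generator above is wired into a context manager elsewhere in IPython's test tooling; a self-contained sketch of the same behaviour using `contextlib.contextmanager` (the filename is arbitrary):

```python
import os
from contextlib import contextmanager

@contextmanager
def named_tempfile(name):
    # Create an empty named file, hand control to the caller, and always clean up.
    open(name, "w", encoding="utf-8").close()
    try:
        yield name
    finally:
        os.unlink(name)

with named_tempfile("scratch.txt") as path:
    print(os.path.exists(path))        # True while inside the block
print(os.path.exists("scratch.txt"))   # False afterwards
```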
3,761
21,319
342
pipenv/patched/notpip/_vendor/cachecontrol/controller.py
120
21
def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status =
Vendor in pip 22.1.2
update_cached_response
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
controller.py
13
16
https://github.com/pypa/pipenv.git
4
103
0
79
172
Python
{ "docstring": "On a 304 we will get a new set of headers that we want to\n update our cached value with, assuming we have one.\n\n This should only ever be called when we've sent an ETag and\n gotten a 304 as the response.\n ", "language": "en", "n_whitespaces": 70, "n_words": 42, "vocab_size": 37 }
def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self._cache_set(cache_url, request, cached_response) return cached_response
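A toy sketch of the 304 header merge on plain dictionaries instead of cached response objects, to show why `content-length` from the 304 is excluded while other headers are refreshed:

```python
def merge_304_headers(cached_headers, fresh_headers, excluded=("content-length",)):
    # Toy version of the header merge above, on plain dicts instead of
    # response objects: refresh everything except the excluded headers.
    merged = dict(cached_headers)
    merged.update(
        {k: v for k, v in fresh_headers.items() if k.lower() not in excluded}
    )
    return merged

cached = {"Content-Type": "text/html", "ETag": '"abc"', "Content-Length": "120"}
fresh = {"Date": "Mon, 01 Jan 2024 00:00:00 GMT", "Content-Length": "0"}
print(merge_304_headers(cached, fresh))
# Content-Length from the 304 is ignored; Date is refreshed, ETag kept.
```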
1,541
9,099
32
parsing/dml_csr/loss/lovasz_softmax.py
21
9
def binary_xloss(logits, labels, ignore=None): l
Create lovasz_softmax.py
binary_xloss
db307ffb12d6ba1f8eaeeafd29ee6d4a3fd6fa97
insightface
lovasz_softmax.py
12
4
https://github.com/deepinsight/insightface.git
1
43
0
17
69
Python
{ "docstring": "\n Binary Cross entropy loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n ignore: void class id\n ", "language": "en", "n_whitespaces": 55, "n_words": 33, "vocab_size": 30 }
def binary_xloss(logits, labels, ignore=None): logits, labels = flatten_binary_scores(logits, labels, ignore) loss = StableBCELoss()(logits, Variable(labels.float())) return loss # --------------------------- MULTICLASS LOSSES ---------------------------
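As a hedged stand-in for the custom `StableBCELoss` above, PyTorch's built-in numerically stable BCE-on-logits gives the same kind of result after flattening and masking out void pixels; the tensor shapes and void label value here are made up.

```python
import torch
import torch.nn.functional as F

# [B, H, W] logits and binary labels, with a void label to ignore.
logits = torch.randn(2, 4, 4)
labels = torch.randint(0, 2, (2, 4, 4))
ignore = 255

valid = labels != ignore  # boolean mask of pixels that count
loss = F.binary_cross_entropy_with_logits(logits[valid], labels[valid].float())
print(loss.item())
```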
84,464
283,198
137
build/pyinstaller/user_agent/base.py
38
7
def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): config = generate_navigator( os=os, navigator=navigator, platform=platform, device_type=device_type ) return { "appCodeName": config["app_code_name"], "appName": config["app_name"], "appVersion": config["
Create a packaged app bundle with Pyinstaller (#1525) * Add dashboard widget assets * Add ipywidgets and ipyflex to project * Add currencies dashboard notebook * Update docs and docstrings * Add pyinstaller to project deps * Add pyinstaller artifacts to gitignore * Fix linter errors in terminal.py * Update cspell hook and action with a pyinstaller specific word * Add pyinstaller specfile and artifacts * Add splashscreen image * Add app icon * adding splash screen support to terminal.spec and terminal.py * Restore the conda env build files * Sync deps * Add border to the splashscreen image * Clean up terminal launcher * Add support for default feature flags in packages apps * Fix types and linting * Add splashscreen management to app bootup * Check prediction feature flag when entering crypto/pred * Update pyinstaller spec file * fix .spec file to work for splash and icon - removed the ".." * Allows to export when using installer (#1568) * fix export for packaged apps * fix filename * Git : replace commit_hash when it is set in config_terminal * Add update of the git commit hash in gtff default during build * Add packaged app name and feature flag to logs * Add platform specific icon assignment * Add macOS build assets * Add tensorflow to hidden imports * Move LOGGING_COMMIT_HASH to gtff * Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again. * Linting * Workflow : ignore ./build/pyinstaller from codespell * Workflow : exclude ./build/pyinstaller from flake8 * Poetry + Workflow : add types-six * Pyinstaller : remove property_cached, user_agent and vaderSentiment * Revert "Pyinstaller : remove property_cached, user_agent and vaderSentiment" This reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703. * Clean up local paths in specfile * Validate deps have correct Jinja version (they do) * Fix logging commit hash to be set correctly for the logger to see it Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
generate_navigator_js
ab4de1dd70fba866930150e440a03e461a6ca6a8
OpenBBTerminal
base.py
9
17
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
120
0
38
207
Python
{ "docstring": "\n Generates web navigator's config with keys corresponding\n to keys of `windows.navigator` JavaScript object.\n\n :param os: limit list of oses for generation\n :type os: string or list/tuple or None\n :param navigator: limit list of browser engines for generation\n :type navigator: string or list/tuple or None\n :param device_type: limit possible oses by device type\n :type device_type: list/tuple or None, possible values:\n \"desktop\", \"smartphone\", \"tablet\", \"all\"\n :return: User-Agent config\n :rtype: dict with keys (TODO)\n :raises InvalidOption: if could not generate user-agent for\n any combination of allowed oses and navigators\n :raise InvalidOption: if any of passed options is invalid\n ", "language": "en", "n_whitespaces": 149, "n_words": 95, "vocab_size": 60 }
def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): config = generate_navigator( os=os, navigator=navigator, platform=platform, device_type=device_type ) return { "appCodeName": config["app_code_name"], "appName": config["app_name"], "appVersion": config["app_version"], "platform": config["platform"], "userAgent": config["user_agent"], "oscpu": config["oscpu"], "product": config["product"], "productSub": config["product_sub"], "vendor": config["vendor"], "vendorSub": config["vendor_sub"], "buildID": config["build_id"], }
9,893
49,719
212
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/bert_tokenizer.py
79
13
def printable_text(text): # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string.
add disco_diffusion_cnclip_vitb16 module
printable_text
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
PaddleHub
bert_tokenizer.py
17
17
https://github.com/PaddlePaddle/PaddleHub.git
7
103
0
50
179
Python
{ "docstring": "Returns text encoded in a way suitable for print or `tf.logging`.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def printable_text(text): # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?")
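On Python 3 only the first branch above matters; a minimal version of it, with the same decode behaviour for bytes:

```python
def printable_text_py3(text):
    # Python 3 only: accept str unchanged and decode bytes, mirroring the
    # six.PY3 branch in the code above.
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % type(text))

print(printable_text_py3("héllo"))
print(printable_text_py3("héllo".encode("utf-8")))
```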
34,797
150,599
95
freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py
31
10
def is_tradesignal(self, action): # trade signal return not ((action == Actions.Neutral.value and self._position == Positions.Neutral)
callback function and TDQN model added
is_tradesignal
01232e9a1f8e28e3611e38af3816edb026600767
freqtrade
RLPrediction_env_v2.py
14
4
https://github.com/freqtrade/freqtrade.git
6
65
0
20
102
Python
{ "docstring": "\n not trade signal is :\n Action: Neutral, position: Neutral -> Nothing \n Action: Long, position: Long -> Hold Long\n Action: Short, position: Short -> Hold Short\n ", "language": "en", "n_whitespaces": 62, "n_words": 25, "vocab_size": 16 }
def is_tradesignal(self, action): # trade signal return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long))
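A self-contained sketch of the hold-detection logic with hypothetical enum values (the actual environment defines its own `Actions`/`Positions` enums):

```python
from enum import Enum

class Actions(Enum):
    Neutral = 0
    Long = 1
    Short = 2

class Positions(Enum):
    Neutral = 0
    Long = 1
    Short = 2

def is_tradesignal(action, position):
    # True unless the action would leave the position unchanged
    # (Neutral while flat, Long while long, Short while short).
    return not (
        (action == Actions.Neutral.value and position == Positions.Neutral)
        or (action == Actions.Long.value and position == Positions.Long)
        or (action == Actions.Short.value and position == Positions.Short)
    )

print(is_tradesignal(Actions.Long.value, Positions.Neutral))  # True: opens a long
print(is_tradesignal(Actions.Long.value, Positions.Long))     # False: just holds
```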
54,986
217,880
101
python3.10.4/Lib/http/server.py
13
9
def log_message(self, format, *args): sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(),
add python 3.10.4 for windows
log_message
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
server.py
11
5
https://github.com/XX-net/XX-Net.git
1
37
0
12
62
Python
{ "docstring": "Log an arbitrary message.\n\n This is used by all other logging functions. Override\n it if you have specific logging wishes.\n\n The first argument, FORMAT, is a format string for the\n message to be logged. If the format string contains\n any % escapes requiring parameters, they should be\n specified as subsequent arguments (it's just like\n printf!).\n\n The client ip and current date/time are prefixed to\n every message.\n\n ", "language": "en", "n_whitespaces": 138, "n_words": 66, "vocab_size": 57 }
def log_message(self, format, *args): sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args))
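Subclasses commonly override `log_message` to redirect per-request logs; a small sketch that routes them through the `logging` module instead of stderr (starting an `HTTPServer` with the handler is omitted):

```python
import logging
from http.server import BaseHTTPRequestHandler

logging.basicConfig(level=logging.INFO)

class LoggingHandler(BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        # Same client/date prefix as the base class, but sent to the logging module.
        logging.info(
            "%s - - [%s] %s",
            self.address_string(), self.log_date_time_string(), format % args,
        )

# HTTPServer(("", 8000), LoggingHandler).serve_forever() would use it.
```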
120,698
335,005
34
src/diffusers/utils/logging.py
16
10
def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice
changes comments and env vars in `utils/logging` removes mentions of 🤗Transformers with 🤗Diffusers equivalent.
warning_advice
c3cc8eb23c8095217388d350409b454ea396c12b
diffusers
logging.py
9
5
https://github.com/huggingface/diffusers.git
2
36
0
15
72
Python
{ "docstring": "\n This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this\n warning will not be printed\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 18 }
def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice
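The same opt-out pattern in isolation, with a hypothetical `DEMO_NO_ADVISORY_WARNINGS` variable standing in for the real one:

```python
import logging
import os

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

def warning_advice(logger, *args, **kwargs):
    # Skip advisory warnings entirely when the opt-out variable is set.
    if os.getenv("DEMO_NO_ADVISORY_WARNINGS", False):
        return
    logger.warning(*args, **kwargs)

warning_advice(logger, "some weights were randomly initialized")  # printed
os.environ["DEMO_NO_ADVISORY_WARNINGS"] = "1"
warning_advice(logger, "this advisory is suppressed")             # silent
```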
47,895
196,395
41
sympy/matrices/repmatrix.py
16
8
def zip_row_op(self, i, k, f): for j in range(self.cols): self[i, j] = f
Moved imports to higher level
zip_row_op
59d22b6bb7287613d598611027f640d068ca5748
sympy
repmatrix.py
11
3
https://github.com/sympy/sympy.git
2
45
0
16
64
Python
{ "docstring": "In-place operation on row ``i`` using two-arg functor whose args are\n interpreted as ``(self[i, j], self[k, j])``.\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M\n Matrix([\n [1, 0, 0],\n [2, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n row\n row_op\n col_op\n\n ", "language": "en", "n_whitespaces": 166, "n_words": 54, "vocab_size": 46 }
def zip_row_op(self, i, k, f): for j in range(self.cols): self[i, j] = f(self[i, j], self[k, j])
49,356
199,700
18
sympy/polys/orthopolys.py
13
7
def legendre_poly(n, x=None, polys=False): r return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys)
Run orthopolys and appellseqs through a common interface Including unifying the two Chebyshev generators into one function. There are also two kinds of Hermite polynomials, and they too share the same recurrence, but the second type He_n(x) (aka the probabilist, reduced or small polynomials) will not be added here.
legendre_poly
d1d46df73ebaad94089847558d00a8b7269f554d
sympy
orthopolys.py
8
13
https://github.com/sympy/sympy.git
1
33
0
13
47
Python
{ "docstring": "Generates the Legendre polynomial `P_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 63, "n_words": 31, "vocab_size": 26 }
def legendre_poly(n, x=None, polys=False): r return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys)
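A short usage example of the generated polynomial, both as an expression and as a `Poly`:

```python
from sympy import legendre_poly, symbols

x = symbols("x")
print(legendre_poly(3, x))              # 5*x**3/2 - 3*x/2
print(legendre_poly(3, x, polys=True))  # Poly(5/2*x**3 - 3/2*x, x, domain='QQ')
```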
118,084
322,189
495
paddlenlp/taskflow/knowledge_mining.py
91
19
def _concat_short_text_reuslts(self, input_texts, results): long_text_lens = [len(text) for text in input_texts] concat_results = [] single_results = {} count = 0 for text in input_texts: text_len = len(text) while True: if len(single_results) == 0 or len(single_results[ "text"]) < text_len: if len(single_results) == 0: single_results = copy.deepcopy(results[count]) else: single_results["text"] += results[count]["text"] single_results["items"].extend(results[count]["items"]) count += 1 elif len(single_results["text"]) == text_len: concat_results.append(single_results) single_results = {} break else: raise Exception( "The length of input text and raw text is not equal.")
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com> Co-authored-by: tianxin <tianxin04@baidu.com>
_concat_short_text_reuslts
621357338437ee420eabbbf5ab19065bc85e73a5
PaddleNLP
knowledge_mining.py
18
28
https://github.com/PaddlePaddle/PaddleNLP.git
9
172
0
59
289
Python
{ "docstring": "\n Concat the model output of short texts to the total result of long text.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 12 }
def _concat_short_text_reuslts(self, input_texts, results): long_text_lens = [len(text) for text in input_texts] concat_results = [] single_results = {} count = 0 for text in input_texts: text_len = len(text) while True: if len(single_results) == 0 or len(single_results[ "text"]) < text_len: if len(single_results) == 0: single_results = copy.deepcopy(results[count]) else: single_results["text"] += results[count]["text"] single_results["items"].extend(results[count]["items"]) count += 1 elif len(single_results["text"]) == text_len: concat_results.append(single_results) single_results = {} break else: raise Exception( "The length of input text and raw text is not equal.") for result in concat_results: pred_words = result['items'] pred_words = self._reset_offset(pred_words) result['items'] = pred_words return concat_results
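A plain-Python sketch of the chunk-stitching idea, with hypothetical chunk payloads; the real method additionally re-computes item offsets on the merged results afterwards.

```python
import copy

def concat_chunks(original_texts, chunk_results):
    # chunk_results holds {"text": ..., "items": [...]} pieces produced per short
    # chunk, in order; stitch them back until each rebuilt text matches the
    # length of the corresponding original long text.
    merged, count = [], 0
    for text in original_texts:
        current = {"text": "", "items": []}
        while len(current["text"]) < len(text):
            piece = copy.deepcopy(chunk_results[count])
            current["text"] += piece["text"]
            current["items"].extend(piece["items"])
            count += 1
        if len(current["text"]) != len(text):
            raise ValueError("The length of input text and raw text is not equal.")
        merged.append(current)
    return merged

chunks = [
    {"text": "Paddle", "items": [{"item": "Paddle"}]},
    {"text": "NLP", "items": [{"item": "NLP"}]},
    {"text": "Taskflow", "items": [{"item": "Taskflow"}]},
]
print(concat_chunks(["PaddleNLP", "Taskflow"], chunks))
```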
53,046
211,216
541
ppdet/modeling/heads/s2anet_head.py
191
43
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): origin_shape = paddle.floor(im_shape / scale_factor + 0.5) origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [b
refactor s2anet (#6604) * refactor s2anet to support batch_size > 1 * fix problem of inference * support batch_size > 1 for training * fix empty results * fix dota eval * fix configs of s2anet_head * modify s2anet_spine_1x to 73 mAP
get_pred
b4727677751081b257c6fa23c3c124ab9e5a32a1
PaddleDetection
s2anet_head.py
13
36
https://github.com/PaddlePaddle/PaddleDetection.git
2
466
0
103
682
Python
{ "docstring": "\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n Args:\n bboxes(Tensor): bboxes [N, 10]\n bbox_num(Tensor): bbox_num\n im_shape(Tensor): [1 2]\n scale_factor(Tensor): [1 2]\n Returns:\n bbox_pred(Tensor): The output is the prediction with shape [N, 8]\n including labels, scores and bboxes. The size of\n bboxes are corresponding to the original image.\n ", "language": "en", "n_whitespaces": 205, "n_words": 54, "vocab_size": 42 }
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): origin_shape = paddle.floor(im_shape / scale_factor + 0.5) origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([ scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y ]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) # bboxes: [N, 10], label, score, bbox pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image pred_bbox = pred_bbox.reshape([-1, 8]) scaled_bbox = pred_bbox / scale_factor_list origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result
13,733
64,834
46
erpnext/accounts/doctype/chart_of_accounts_importer/chart_of_accounts_importer.py
65
13
def unset_existing_data(company): linked = frappe.db.sql( , as_dict=True, ) # remove accounts data from company update_values = {d.fieldname: "" for d in linked} frappe.db.set_value("Company", company, update_values, update_values) # remove accounts data from various doctypes for doctype in [ "Account", "Party Account", "Mode of Payment Account", "Tax Withholding Account", "Sales Taxes and Charges Template", "Purchase Taxes and Charges Template", ]: frappe.db.sql(
style: format code with black
unset_existing_data
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
chart_of_accounts_importer.py
13
19
https://github.com/frappe/erpnext.git
3
82
0
48
140
Python
{ "docstring": "select fieldname from tabDocField\n\t\twhere fieldtype=\"Link\" and options=\"Account\" and parent=\"Company\"delete from `tab{0}` where `company`=\"%s\"", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 11 }
def unset_existing_data(company): linked = frappe.db.sql( , as_dict=True, ) # remove accounts data from company update_values = {d.fieldname: "" for d in linked} frappe.db.set_value("Company", company, update_values, update_values) # remove accounts data from various doctypes for doctype in [ "Account", "Party Account", "Mode of Payment Account", "Tax Withholding Account", "Sales Taxes and Charges Template", "Purchase Taxes and Charges Template", ]: frappe.db.sql( .format(doctype) % (company) # nosec )
70,390
244,465
55
mmdet/models/dense_heads/base_dense_head.py
16
9
def simple_test(self, feats, batch_img_metas, rescale=False): outs = self.forward(feats) results_list = self.get_results( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return results_list
Modify RetinaNet model interface
simple_test
924c381a78eb70cede198e042ef34e038e05c15a
mmdetection
base_dense_head.py
9
5
https://github.com/open-mmlab/mmdetection.git
1
41
0
14
63
Python
{ "docstring": "Test function without test-time augmentation.\n\n Args:\n feats (tuple[torch.Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n batch_img_metas (list[dict]): List of image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[obj:`InstanceData`]: Detection results of each image\n after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "language": "en", "n_whitespaces": 280, "n_words": 91, "vocab_size": 71 }
def simple_test(self, feats, batch_img_metas, rescale=False): outs = self.forward(feats) results_list = self.get_results( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return results_list
5,678
31,086
167
src/transformers/commands/pt_to_tf.py
59
13
def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input): pt_outputs = pt_model(**pt_input, output_hidden_states=True) tf_outputs = tf_model(**tf_input, output_hidden_states=True) # 1. All output attributes must be the same pt_out_attrs = set(pt_outputs.keys())
CLI: add stricter automatic checks to `pt-to-tf` (#17588) * Stricter pt-to-tf checks; Update docker image for related tests * check all attributes in the output Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
compare_pt_tf_models
78c695eb624bc863ea165b6fb0a8850bfd9fcefa
transformers
pt_to_tf.py
12
12
https://github.com/huggingface/transformers.git
2
76
0
48
119
Python
{ "docstring": "\n Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed\n difference and its source.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input): pt_outputs = pt_model(**pt_input, output_hidden_states=True) tf_outputs = tf_model(**tf_input, output_hidden_states=True) # 1. All output attributes must be the same pt_out_attrs = set(pt_outputs.keys()) tf_out_attrs = set(tf_outputs.keys()) if pt_out_attrs != tf_out_attrs: raise ValueError( f"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:" f" {tf_out_attrs})" ) # 2. For each output attribute, ALL values must be the same
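The shown code stops after the attribute-set check; a toy sketch of the second step on dictionaries of numpy arrays, assuming the outputs are plain arrays rather than nested hidden-state tuples:

```python
import numpy as np

def compare_outputs(pt_outputs, tf_outputs):
    # Toy comparison: attribute names must match, then report the largest
    # elementwise difference and which output it came from.
    if set(pt_outputs) != set(tf_outputs):
        raise ValueError("The model outputs have different attributes, aborting.")
    max_diff, max_attr = 0.0, None
    for name in pt_outputs:
        diff = float(np.max(np.abs(pt_outputs[name] - tf_outputs[name])))
        if diff > max_diff:
            max_diff, max_attr = diff, name
    return max_diff, max_attr

pt = {"logits": np.array([0.1, 0.2]), "hidden": np.array([1.0, 2.0])}
tf = {"logits": np.array([0.1, 0.2001]), "hidden": np.array([1.0, 2.0])}
print(compare_outputs(pt, tf))  # (~1e-4, 'logits')
```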
@pytest.fixture( params=_get_all_parser_float_precision_combinations()["params"], ids=_get_all_parser_float_precision_combinations()["ids"], )
39,627
165,035
174
pandas/tests/io/parser/conftest.py
64
20
def _get_all_parser_float_precision_combinations(): params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, "values"): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = pytest.mark.single_cpu if parser.engine == "pyarrow" els
CI: Add single_cpu build (#45995)
_get_all_parser_float_precision_combinations
08104e8c0a80579dfe3e984b32b35ddc94aafa01
pandas
conftest.py
15
12
https://github.com/pandas-dev/pandas.git
5
105
1
51
223
Python
{ "docstring": "\n Return all allowable parser and float precision\n combinations and corresponding ids.\n ", "language": "en", "n_whitespaces": 21, "n_words": 11, "vocab_size": 10 }
def _get_all_parser_float_precision_combinations(): params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, "values"): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else () param = pytest.param((parser(), precision), marks=mark) params.append(param) ids.append(f"{parser_id}-{precision}") return {"params": params, "ids": ids} @pytest.fixture( params=_get_all_parser_float_precision_combinations()["params"], ids=_get_all_parser_float_precision_combinations()["ids"], )
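A hedged sketch of the same parametrised-fixture pattern with a made-up engine/precision matrix, runnable under pytest:

```python
import pytest

def _engine_precision_combinations():
    # Hypothetical stand-in for the parser/float-precision matrix built above.
    params, ids = [], []
    for engine in ("c", "python"):
        for precision in ("high", "round_trip"):
            params.append((engine, precision))
            ids.append(f"{engine}-{precision}")
    return {"params": params, "ids": ids}

@pytest.fixture(
    params=_engine_precision_combinations()["params"],
    ids=_engine_precision_combinations()["ids"],
)
def engine_and_precision(request):
    return request.param

def test_combination(engine_and_precision):
    engine, precision = engine_and_precision
    assert engine in ("c", "python") and precision in ("high", "round_trip")
```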
49,846
201,071
86
tests/app_loading/tests.py
17
12
def test_egg3(self): egg_name = "%s/omelet.egg" % self.egg_dir with extend_sys_path(egg_name):
Refs #33476 -- Reformatted code with Black.
test_egg3
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
14
7
https://github.com/django/django.git
1
54
0
15
101
Python
{ "docstring": "Models module can be loaded from an app located under an egg's top-level package", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def test_egg3(self): egg_name = "%s/omelet.egg" % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=["omelet.app_with_models"]): models_module = apps.get_app_config("app_with_models").models_module self.assertIsNotNone(models_module) del apps.all_models["app_with_models"]
@pytest.mark.parametrize("setting", ["Enabled", "Disabled"])
54,437
216,155
29
tests/pytests/functional/modules/win_lgpo/test_audit_settings_module.py
18
11
def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing): lgpo.set_computer_policy(setting_name, setting) result = lgpo.get_policy(setting_name, "machine") assert result == setting @pytest.mark.parametrize("setting", ["Enabled",
Add and update tests
test_auditing_case_names
0e69e2317dfa06f114c6dd63bc70c68fc81d19b1
salt
test_audit_settings_module.py
9
4
https://github.com/saltstack/salt.git
1
34
1
17
82
Python
{ "docstring": "\n Helper function to set an audit setting and assert that it was successful\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing): lgpo.set_computer_policy(setting_name, setting) result = lgpo.get_policy(setting_name, "machine") assert result == setting @pytest.mark.parametrize("setting", ["Enabled", "Disabled"])
16,397
75,348
138
wagtail/images/tests/tests.py
31
15
def test_get(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve", args=(signature, self.image.id, "fill-800x600") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.asse
Reformat with black
test_get
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
tests.py
14
10
https://github.com/wagtail/wagtail.git
1
74
0
24
127
Python
{ "docstring": "\n Test a valid GET request to the view\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def test_get(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve", args=(signature, self.image.id, "fill-800x600") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.assertEqual(response["Content-Type"], "image/png")
341
2,705
149
packages/syft/src/syft/lib/python/slice.py
34
15
def _object2proto(self) -> Slice_PB: slice_pb
change[syft.lib.python] syft import absolute -> relative
_object2proto
e5bfcd8016813b1d253a72da5c5071b0e0965644
PySyft
slice.py
11
19
https://github.com/OpenMined/PySyft.git
4
81
0
23
133
Python
{ "docstring": "\n Serialize the Slice object instance returning a protobuf.\n\n Returns:\n Slice_PB: returns a protobuf object class representing this Slice object.\n ", "language": "en", "n_whitespaces": 53, "n_words": 19, "vocab_size": 16 }
def _object2proto(self) -> Slice_PB: slice_pb = Slice_PB() if self.start: slice_pb.start = self.start slice_pb.has_start = True if self.stop: slice_pb.stop = self.stop slice_pb.has_stop = True if self.step: slice_pb.step = self.step slice_pb.has_step = True slice_pb.id.CopyFrom(serialize(obj=self._id)) return slice_pb
21,008
101,599
158
plugins/extract/recognition/vgg_face2_keras.py
36
13
def __call__(self) -> List[Tuple[int, int]]: logger.info("Sorting face distances. Depending on your dataset this may take some time...") if self._threshold: self._threshold = self._result_linkage[:, 2].max() * self._threshold result_order = self._seriation(self._result_linkage, self._num_pre
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
__call__
98d01760e469fd2108eed8d0b0a1ba6297c3177c
faceswap
vgg_face2_keras.py
13
19
https://github.com/deepfakes/faceswap.git
2
73
0
32
115
Python
{ "docstring": " Process the linkages.\n\n Transforms a distance matrix into a sorted distance matrix according to the order implied\n by the hierarchical tree (dendrogram).\n\n Returns\n -------\n list:\n List of indices with the order implied by the hierarchical tree or list of tuples of\n (`index`, `bin`) if a binning threshold was provided\n ", "language": "en", "n_whitespaces": 114, "n_words": 49, "vocab_size": 34 }
def __call__(self) -> List[Tuple[int, int]]: logger.info("Sorting face distances. Depending on your dataset this may take some time...") if self._threshold: self._threshold = self._result_linkage[:, 2].max() * self._threshold result_order = self._seriation(self._result_linkage, self._num_predictions, self._num_predictions + self._num_predictions - 2) return result_order
53,084
211,402
1,144
ppdet/modeling/post_process.py
292
58
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) retur
add flag skipping postprocess to support edgeboard hardware (#6719) * add flag skipping postprocess to support edgeboard hardware * add flag skipping postprocess to support edgeboard hardware * add flag skipping postprocess to support edgeboard hardware * add comment for the flag export_eb
get_pred
b41194eaed10a01409451e4d3ea7f8b4812cdd23
PaddleDetection
post_process.py
17
62
https://github.com/PaddlePaddle/PaddleDetection.git
7
651
0
171
961
Python
{ "docstring": "\n Rescale, clip and filter the bbox from the output of NMS to \n get final prediction. \n\n Notes:\n Currently only support bs = 1.\n\n Args:\n bboxes (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n pred_result (Tensor): The final prediction results with shape [N, 6]\n including labels, scores and bboxes.\n ", "language": "en", "n_whitespaces": 242, "n_words": 90, "vocab_size": 54 }
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return bboxes, pred_result, bbox_num
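A minimal NumPy sketch of the rescale-and-clip step documented in the record above: predicted boxes are divided by the per-image scale factor and then clipped to the original image height/width. The box coordinates, scale and image size below are made-up illustration values, not PaddleDetection outputs.

import numpy as np

pred_bbox = np.array([[50.0, 40.0, 400.0, 380.0]])   # [x1, y1, x2, y2] in resized-image coords
scale = np.array([2.0, 2.0, 2.0, 2.0])               # scale_x, scale_y repeated per corner
origin_h, origin_w = 180.0, 160.0                    # original image size

scaled = pred_bbox / scale                           # back to original-image coordinates
x1 = np.clip(scaled[:, 0], 0.0, origin_w)
y1 = np.clip(scaled[:, 1], 0.0, origin_h)
x2 = np.clip(scaled[:, 2], 0.0, origin_w)
y2 = np.clip(scaled[:, 3], 0.0, origin_h)
print(np.stack([x1, y1, x2, y2], axis=-1))           # [[ 25.  20. 160. 180.]]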
99,894
301,046
18
homeassistant/components/nexia/entity.py
4
7
def _signal_zone_update(self): async_dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPD
Update nexia to use asyncio (#72108)
_signal_zone_update
d8a580a90f8bf3206b31619493f4e653fceb3f4b
core
entity.py
11
2
https://github.com/home-assistant/core.git
1
15
0
4
41
Python
{ "docstring": "Signal a zone update.\n\n Whenever the underlying library does an action against\n a zone, the data for the zone is updated.\n\n Update a single zone.\n ", "language": "en", "n_whitespaces": 53, "n_words": 25, "vocab_size": 20 }
def _signal_zone_update(self): async_dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}")
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
75,518
259,003
287
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
133
39
def test_asymmetric_error(quantile): n_samples = 10_000 rng = np.random.RandomState(42) # take care that X @ coef + intercept > 0 X = np.concatenate( ( np.abs(rng.randn(n_samples)[:, None]), -rng.randint(2, size=(n_samples, 1)), ), a
FEA add quantile HGBT (#21800)
test_asymmetric_error
5ad3421a5b5759ecfaaab93406592d988f5d487f
scikit-learn
test_gradient_boosting.py
14
27
https://github.com/scikit-learn/scikit-learn.git
1
209
1
96
361
Python
{ "docstring": "Test quantile regression for asymmetric distributed targets.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def test_asymmetric_error(quantile): n_samples = 10_000 rng = np.random.RandomState(42) # take care that X @ coef + intercept > 0 X = np.concatenate( ( np.abs(rng.randn(n_samples)[:, None]), -rng.randint(2, size=(n_samples, 1)), ), axis=1, ) intercept = 1.23 coef = np.array([0.5, -2]) # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), # the quantile at level q is: # quantile(q) = - log(1 - q) / lambda # scale = 1/lambda = -quantile(q) / log(1-q) y = rng.exponential( scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples ) model = HistGradientBoostingRegressor( loss="quantile", quantile=quantile, max_iter=25, random_state=0, max_leaf_nodes=10, ).fit(X, y) assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2) pinball_loss = PinballLoss(quantile=quantile) loss_true_quantile = pinball_loss(y, X @ coef + intercept) loss_pred_quantile = pinball_loss(y, model.predict(X)) # we are overfitting assert loss_pred_quantile <= loss_true_quantile @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
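The comments in the test above rest on the closed-form quantile of the exponential distribution: with rate lambda, F(x) = 1 - exp(-lambda * x), so solving F(x) = q gives quantile(q) = -log(1 - q) / lambda, i.e. scale = 1 / lambda = -quantile(q) / log(1 - q). A quick empirical check of that identity, independent of scikit-learn:

import numpy as np

rng = np.random.RandomState(0)
q, scale = 0.8, 3.0                                  # scale = 1 / lambda
samples = rng.exponential(scale=scale, size=200_000)
theoretical = -np.log(1.0 - q) * scale               # quantile(q) = -log(1 - q) / lambda
empirical = np.quantile(samples, q)
print(theoretical, empirical)                        # both close to 4.83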
88,424
289,281
1,402
homeassistant/components/gtfs/sensor.py
259
46
def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning(
Move attribution to standalone attribute [e-g] (#80513)
update
c717fd19de01fc822d146cc5e353959dfa86d5f7
core
sensor.py
17
72
https://github.com/home-assistant/core.git
18
420
0
141
727
Python
{ "docstring": "Get the latest data from GTFS and update the states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( "Destination stop ID %s not found", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = self._departure["departure_time"].replace( tzinfo=dt_util.UTC ) # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure["trip_id"] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug("Fetching trip details for %s", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure["route_id"] if not self._route or self._route.route_id != route_id: _LOGGER.debug("Fetching route details for %s", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug("Fetching agency details for %s", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( "Agency ID '%s' was not found in agency table, " "you may want to update the routes database table " "to fix this missing reference", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._agency: self._attr_attribution = self._agency.agency_name else: self._attr_attribution = None if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} " f"{self.origin} to {self.destination} next departure" ) if not self._departure: name = f"{DEFAULT_NAME}" self._name = self._custom_name or name
95,240
296,245
102
tests/components/homekit_controller/test_binary_sensor.py
41
16
async def test_carbon_monoxide_sensor_read_state(hass, utcnow): helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service) await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0}, ) state = await helper.poll_and_get_state() assert state.state == "off" await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1}, ) state = await helper.poll_and_get_state() assert state.state == "on" assert state.attributes["d
Fix HomeKit Controller device class for CO Sensors (#69949)
test_carbon_monoxide_sensor_read_state
ad5d7a845b73b6ef09b111597d6c542be4781b07
core
test_binary_sensor.py
11
15
https://github.com/home-assistant/core.git
1
92
0
24
152
Python
{ "docstring": "Test that we can read the state of a HomeKit contact accessory.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
async def test_carbon_monoxide_sensor_read_state(hass, utcnow): helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service) await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0}, ) state = await helper.poll_and_get_state() assert state.state == "off" await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1}, ) state = await helper.poll_and_get_state() assert state.state == "on" assert state.attributes["device_class"] == BinarySensorDeviceClass.CO
@dataclass
1,224
7,512
308
ludwig/utils/triton_utils.py
52
33
def save_config(self) -> TritonArtifact: device = self.device if self.inference_stage != PREDICTOR: device = "cpu" self.config = TritonConfig( self.full_model_name, self.input_features, self.output_features, self.max_batch_size, self.max_queue_delay_microseconds, device, self.model_instance_count, self.inference_stage, ) config_path = os.path.join(self.base_path, "config.pbtxt") with open(config_path, "w") as f: formatted_config = remove_empty_lines(self.config.get_model_conf
Triton ensemble export (#2251)
save_config
ed8d9cf20843744f18593b22fb6a30eaf5f325eb
ludwig
triton_utils.py
13
31
https://github.com/ludwig-ai/ludwig.git
2
144
1
44
231
Python
{ "docstring": "Save the Triton config.\n\n Return the appropriate artifact.\n ", "language": "en", "n_whitespaces": 22, "n_words": 8, "vocab_size": 7 }
def save_config(self) -> TritonArtifact: device = self.device if self.inference_stage != PREDICTOR: device = "cpu" self.config = TritonConfig( self.full_model_name, self.input_features, self.output_features, self.max_batch_size, self.max_queue_delay_microseconds, device, self.model_instance_count, self.inference_stage, ) config_path = os.path.join(self.base_path, "config.pbtxt") with open(config_path, "w") as f: formatted_config = remove_empty_lines(self.config.get_model_config()) f.write(formatted_config) config_artifact = TritonArtifact( model_name=self.full_model_name, model_version=self.model_version, platform="pytorch_libtorch", path=config_path, content_type="text/x-protobuf", content_length=os.path.getsize(config_path), ) return config_artifact @dataclass
72,740
249,236
258
tests/rest/admin/test_device.py
50
17
def test_unknown_device(self) -> None: url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user ) channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_F
Use literals in place of `HTTPStatus` constants in tests (#13479) Replace - `HTTPStatus.NOT_FOUND` - `HTTPStatus.FORBIDDEN` - `HTTPStatus.UNAUTHORIZED` - `HTTPStatus.CONFLICT` - `HTTPStatus.CREATED` Signed-off-by: Dirk Klimpel <dirk@klimpel.org>
test_unknown_device
1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b
synapse
test_device.py
10
26
https://github.com/matrix-org/synapse.git
1
136
0
31
212
Python
{ "docstring": "\n Tests that a lookup for a device that does not exist returns either 404 or 200.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 14 }
def test_unknown_device(self) -> None: url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user ) channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body)
6,716
37,029
317
examples/research_projects/codeparrot/scripts/human_eval.py
96
46
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch["task_id"].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens
Jia multi gpu eval (#16428) * add simple multi gpu complet * add human_eval_multi_gpu * use copy strategy to distribute across gpu, to avoid padding * add doc string * update code style * use task id to arrange output * truncate input to avoid zero pad * Stop the copy mechanism * update style * restore copies to scale better in distributed mode * update style * replace human eval * Apply suggestions from code review 1. Tokenize all input at the same time 2. use attention_mask to get the input length 3. other small fixes Co-authored-by: Leandro von Werra <lvwerra@users.noreply.github.com> * correct typo and update docstring * update code style * remove num sample division constraint * remove max len calculation * use accelerator.gather once to speed up * use accelerate set_seed; update accelerate version * correct gather bug Co-authored-by: Leandro von Werra <lvwerra@users.noreply.github.com>
complete_code
4868a830db5f19f56712f540979d637368221d50
transformers
human_eval.py
17
23
https://github.com/huggingface/transformers.git
6
246
0
66
387
Python
{ "docstring": "Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute\n the processing to multiple GPUs.\n dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from\n the evalution dataset to the modelm as the following:\n [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1]\n where nc is the number of copies of the prompt, and nt is the number of tasks.\n nc is such that num_sample = nc * batch_size\n\n Parameters\n ----------\n accelerator: Accelerator\n\n model: transformers.PreTrainedModel\n Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = \"lvwerra/codeparrot\"\n\n tokenizer: transformers.AutoTokenizer\n The tokenizer used to train model\n\n dataloader: DataLoader\n The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs.\n\n n_tasks: int\n The number of tasks in the dataset. It is used to determine the length of the output.\n Should be aligned with the number of tasks in the TokenizeDataset.\n\n batch_size: int\n num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies\n\n gen_kwargs: dict\n Keyword arguments for the generation function of the model.\n\n Returns\n -------\n code_gens: list of list of str, of length n_tasks\n List of generated codes for each task.\n Each element is a list of generated codes for each task, with length num_samples\n ", "language": "en", "n_whitespaces": 327, "n_words": 207, "vocab_size": 115 }
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch["task_id"].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens
13,993
65,710
20
erpnext/crm/doctype/contract/contract.py
27
6
def get_status(start_date, end_date): if not end_date: return "Active" start_date = getdate(start_date) end_date = getdate(end_date) now_date = getdate(nowdate()) return "Active" if start_date <= now_date <= end_date els
style: format code with black
get_status
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
contract.py
10
7
https://github.com/frappe/erpnext.git
3
44
0
18
78
Python
{ "docstring": "\n\tGet a Contract's status based on the start, current and end dates\n\n\tArgs:\n\t start_date (str): The start date of the contract\n\t end_date (str): The end date of the contract\n\n\tReturns:\n\t str: 'Active' if within range, otherwise 'Inactive'\n\t", "language": "en", "n_whitespaces": 55, "n_words": 37, "vocab_size": 29 }
def get_status(start_date, end_date): if not end_date: return "Active" start_date = getdate(start_date) end_date = getdate(end_date) now_date = getdate(nowdate()) return "Active" if start_date <= now_date <= end_date else "Inactive"
2,946
19,358
40
PathPlanning/CubicSpline/cubic_spline_planner.py
12
7
def calc_position(self, s): x = self.sx.calc_position(s) y = self.sy.calc_positi
enhance cubic spline path doc (#698) * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc
calc_position
def289b723e9216830c2a7b2577cb31b55710167
PythonRobotics
cubic_spline_planner.py
9
4
https://github.com/AtsushiSakai/PythonRobotics.git
1
32
0
10
53
Python
{ "docstring": "\n calc position\n\n Parameters\n ----------\n s : float\n distance from the start point. if `s` is outside the data point's\n range, return None.\n\n Returns\n -------\n x : float\n x position for given s.\n y : float\n y position for given s.\n ", "language": "en", "n_whitespaces": 148, "n_words": 40, "vocab_size": 28 }
def calc_position(self, s): x = self.sx.calc_position(s) y = self.sy.calc_position(s) return x, y
56,482
221,712
156
python3.10.4/Lib/contextlib.py
55
10
def push(self, exit): # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods. _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume it's a callable. sel
add python 3.10.4 for windows
push
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
contextlib.py
10
9
https://github.com/XX-net/XX-Net.git
2
42
0
46
75
Python
{ "docstring": "Registers a callback with the standard __exit__ method signature.\n\n Can suppress exceptions the same way __exit__ method can.\n Also accepts any object with an __exit__ method (registering a call\n to the method instead of the object itself).\n ", "language": "en", "n_whitespaces": 65, "n_words": 37, "vocab_size": 26 }
def push(self, exit): # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods. _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume it's a callable. self._push_exit_callback(exit) else: self._push_cm_exit(exit, exit_method) return exit # Allow use as a decorator.
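A small standard-library usage sketch of the push() behaviour documented above, via the public ExitStack API: a plain callback with the __exit__ signature is registered and can suppress exceptions by returning True.

from contextlib import ExitStack

def swallow_value_errors(exc_type, exc, tb):
    # Same signature as __exit__; returning True suppresses the exception.
    return exc_type is ValueError

with ExitStack() as stack:
    stack.push(swallow_value_errors)
    raise ValueError("suppressed by the pushed callback")
print("execution continues past the with block")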
104,431
305,647
57
homeassistant/components/mpd/media_player.py
14
6
async def async_media_play(self) -> None: if se
Improve entity type hints [m] (#77816)
async_media_play
6355e682fa4aeb526570597d919ad1fb76755b9a
core
media_player.py
12
6
https://github.com/home-assistant/core.git
2
37
0
13
69
Python
{ "docstring": "Service to send the MPD the command for play/pause.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
async def async_media_play(self) -> None: if self._status["state"] == "pause": await self._client.pause(0) else: await self._client.play()
14,439
67,193
70
erpnext/regional/report/datev/datev.py
109
38
def download_datev_csv(filters): if isinstance(filters, str): filters = json.loads(filters) validate(filters) company = filters.get("company") fiscal_year = get_fiscal_year(date=filters.get("from_date"), company=company) filters["fiscal_year_start"] = fiscal_year[1] # set chart of accounts used coa = frappe.get_value("Company", company, "chart_of_accounts") filters["skr"] = "04" if "SKR04" in coa else ("03" if "SKR03" in coa else "") datev_settings = frappe.get_doc("DATEV Settings", company) filters["account_number_length"] = datev_settings.account_number_length filters["temporary_against_account_number"] = datev_settings.temporary_against_account_number transactions = get_transactions(filters) account_names = get_account_names(filters) customers = get_customers(filters) suppliers = get_suppliers(filters) zip_name = "{} DATEV.zip".format(frappe.utils.datetime.date.today()) zip_and_download( zip_name, [ { "file_name": "EXTF_Buchungsstapel.csv", "csv_data": get_datev_csv(transactions, filters, csv_class=Transactions), }, { "file_name": "EXTF_Kontenbeschriftungen.csv", "csv_data": get_datev_csv(account_names, filters, csv_class=Accoun
style: format code with black
download_datev_csv
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
datev.py
13
38
https://github.com/frappe/erpnext.git
4
248
0
74
421
Python
{ "docstring": "\n\tProvide accounting entries for download in DATEV format.\n\n\tValidate the filters, get the data, produce the CSV file and provide it for\n\tdownload. Can be called like this:\n\n\tGET /api/method/erpnext.regional.report.datev.datev.download_datev_csv\n\n\tArguments / Params:\n\tfilters -- dict of filters to be passed to the sql query\n\t", "language": "en", "n_whitespaces": 39, "n_words": 45, "vocab_size": 38 }
def download_datev_csv(filters): if isinstance(filters, str): filters = json.loads(filters) validate(filters) company = filters.get("company") fiscal_year = get_fiscal_year(date=filters.get("from_date"), company=company) filters["fiscal_year_start"] = fiscal_year[1] # set chart of accounts used coa = frappe.get_value("Company", company, "chart_of_accounts") filters["skr"] = "04" if "SKR04" in coa else ("03" if "SKR03" in coa else "") datev_settings = frappe.get_doc("DATEV Settings", company) filters["account_number_length"] = datev_settings.account_number_length filters["temporary_against_account_number"] = datev_settings.temporary_against_account_number transactions = get_transactions(filters) account_names = get_account_names(filters) customers = get_customers(filters) suppliers = get_suppliers(filters) zip_name = "{} DATEV.zip".format(frappe.utils.datetime.date.today()) zip_and_download( zip_name, [ { "file_name": "EXTF_Buchungsstapel.csv", "csv_data": get_datev_csv(transactions, filters, csv_class=Transactions), }, { "file_name": "EXTF_Kontenbeschriftungen.csv", "csv_data": get_datev_csv(account_names, filters, csv_class=AccountNames), }, { "file_name": "EXTF_Kunden.csv", "csv_data": get_datev_csv(customers, filters, csv_class=DebtorsCreditors), }, { "file_name": "EXTF_Lieferanten.csv", "csv_data": get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors), }, ], )
7,606
42,544
55
nltk/parse/util.py
28
10
def taggedsent_to_conll(sentence):
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <jan.lennartz@ing.com> Co-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com> Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com>
taggedsent_to_conll
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
nltk
util.py
12
5
https://github.com/nltk/nltk.git
2
64
0
22
109
Python
{ "docstring": "\n A module to convert a single POS tagged sentence into CONLL format.\n\n >>> from nltk import word_tokenize, pos_tag\n >>> text = \"This is a foobar sentence.\"\n >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE\n ... \tprint(line, end=\"\")\n 1\tThis\t_\tDT\tDT\t_\t0\ta\t_\t_\n 2\tis\t_\tVBZ\tVBZ\t_\t0\ta\t_\t_\n 3\ta\t_\tDT\tDT\t_\t0\ta\t_\t_\n 4\tfoobar\t_\tJJ\tJJ\t_\t0\ta\t_\t_\n 5\tsentence\t_\tNN\tNN\t_\t0\ta\t_\t_\n 6\t.\t\t_\t.\t.\t_\t0\ta\t_\t_\n\n :param sentence: A single input sentence to parse\n :type sentence: list(tuple(str, str))\n :rtype: iter(str)\n :return: a generator yielding a single sentence in CONLL format.\n ", "language": "en", "n_whitespaces": 140, "n_words": 121, "vocab_size": 60 }
def taggedsent_to_conll(sentence): for (i, (word, tag)) in enumerate(sentence, start=1): input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"] input_str = "\t".join(input_str) + "\n" yield input_str
51,596
206,637
51
django/utils/encoding.py
19
8
def get_system_encoding(): try: encoding = locale.getdefaultlocale()[1] or "ascii" codecs.lookup(en
Refs #33476 -- Reformatted code with Black.
get_system_encoding
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
encoding.py
12
7
https://github.com/django/django.git
3
33
0
14
71
Python
{ "docstring": "\n The encoding of the default system locale. Fallback to 'ascii' if the\n #encoding is unsupported by Python or could not be determined. See tickets\n #10335 and #5846.\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 26 }
def get_system_encoding(): try: encoding = locale.getdefaultlocale()[1] or "ascii" codecs.lookup(encoding) except Exception: encoding = "ascii" return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding()
16,728
77,977
47
wagtail/contrib/modeladmin/options.py
11
9
def get_menu_item(self): if self.modeladmin_instances: submenu = Menu(items=self.g
Deprecate wagtail.contrib.modeladmin.menus.SubMenu in favour of wagtail.admin.menu.Menu The Menu class was not originally designed to accept menu items at constructor time (instead requiring them to be passed via hooks); ModelAdmin's SubMenu class patched this functionality in, and the documentation for extending admin views piggybacked on this. Add this functionality to the base Menu class so that we don't have this unnecessary dependency on ModelAdmin.
get_menu_item
b8a9a2d319b06fc2318d68d05b5a6cdf85b5b33d
wagtail
options.py
13
4
https://github.com/wagtail/wagtail.git
2
36
0
11
60
Python
{ "docstring": "\n Utilised by Wagtail's 'register_menu_item' hook to create a menu\n for this group with a submenu linking to listing pages for any\n associated ModelAdmin instances\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 21 }
def get_menu_item(self): if self.modeladmin_instances: submenu = Menu(items=self.get_submenu_items()) return GroupMenuItem(self, self.get_menu_order(), submenu)
24,258
110,702
73
lib/matplotlib/backend_bases.py
27
17
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath): path, transform = self._get_text_path_transform(
Soft deprecate the textpath module (import from text instead) The textpath module was created in 2009, but the status has been a bit vague with many examples and exisiting code found on the internet importing from text instead. In this PR everything is changed to point at text, although textpath is still available for backwards compatibility.
_draw_text_as_path
9b8a598d00a4fcf9579415586053583ef80a1add
matplotlib
backend_bases.py
8
6
https://github.com/matplotlib/matplotlib.git
1
69
0
20
94
Python
{ "docstring": "\n Draw the text by converting them to paths using `.TextToPath`.\n\n Parameters\n ----------\n x : float\n The x location of the text in display coords.\n y : float\n The y location of the text baseline in display coords.\n s : str\n The text to be converted.\n prop : `~matplotlib.font_manager.FontProperties`\n The font property.\n angle : float\n Angle in degrees to render the text at.\n ismath : bool or \"TeX\"\n If True, use mathtext parser. If \"TeX\", use tex for rendering.\n ", "language": "en", "n_whitespaces": 215, "n_words": 78, "vocab_size": 49 }
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath): path, transform = self._get_text_path_transform( x, y, s, prop, angle, ismath) color = gc.get_rgb() gc.set_linewidth(0.0) self.draw_path(gc, path, transform, rgbFace=color)
6,912
38,114
341
examples/research_projects/lxmert/modeling_frcnn.py
128
27
def __call__(self, match_quality_matrix): assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full(
Black preview (#17217) * Black preview * Fixup too! * Fix check copies * Use the same version as the CI * Bump black
__call__
afe5d42d8d1d80af911ed980c2936bfe887078f6
transformers
modeling_frcnn.py
13
17
https://github.com/huggingface/transformers.git
4
190
0
100
294
Python
{ "docstring": "\n Args:\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted\n elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`).\n Returns:\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M)\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored\n ", "language": "en", "n_whitespaces": 139, "n_words": 69, "vocab_size": 56 }
def __call__(self, match_quality_matrix): assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels
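A plain-NumPy sketch of the threshold bucketing loop above: each prediction's best IoU falls into a [low, high) band and receives that band's label (here 0 = negative, -1 = ignore, 1 = positive). The thresholds and IoU values are illustrative, not taken from the LXMERT code.

import numpy as np

thresholds = [0.0, 0.3, 0.7, 1.0 + 1e-5]             # band edges
labels = [0, -1, 1]                                  # label per band
matched_vals = np.array([0.10, 0.45, 0.80, 0.69])    # best IoU per prediction

match_labels = np.empty(matched_vals.shape, dtype=np.int8)
for lab, low, high in zip(labels, thresholds[:-1], thresholds[1:]):
    in_band = (matched_vals >= low) & (matched_vals < high)
    match_labels[in_band] = lab
print(match_labels)                                  # [ 0 -1  1 -1]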
else:
55,318
218,453
25
python3.10.4/Lib/inspect.py
9
6
def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: #
add python 3.10.4 for windows
ismemberdescriptor
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
inspect.py
8
2
https://github.com/XX-net/XX-Net.git
1
15
1
9
30
Python
{ "docstring": "Return true if the object is a member descriptor.\n\n Member descriptors are specialized descriptors defined in extension\n modules.", "language": "en", "n_whitespaces": 31, "n_words": 18, "vocab_size": 17 }
def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: # Other implementations
35,336
153,268
142
modin/core/dataframe/base/exchange/dataframe_protocol/utils.py
62
22
def pandas_dtype_to_arrow_c(dtype) -> str: if isinstance(dtype, pandas.CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype("O"): return ArrowCTypes.STRING format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if is_datetime64_dtype(dtype): # Selecting the first char of resolution string: # dtype.str -> '<M8[ns]' resolution
FEAT-#4245: Define base interface for dataframe exchange protocol (#4246) Signed-off-by: Igoshev, Yaroslav <yaroslav.igoshev@intel.com> Co-authored-by: Dmitry Chigarev <dmitry.chigarev@intel.com>
pandas_dtype_to_arrow_c
fc539c3d70a40c9d7aabc5c50dd7280aa5e4637e
modin
utils.py
13
27
https://github.com/modin-project/modin.git
5
107
0
48
177
Python
{ "docstring": "\n Represent pandas `dtype` as a format string in Apache Arrow C notation.\n\n Parameters\n ----------\n dtype : np.dtype\n Datatype of pandas DataFrame to represent.\n\n Returns\n -------\n str\n Format string in Apache Arrow C notation of the given `dtype`.\n ", "language": "en", "n_whitespaces": 76, "n_words": 37, "vocab_size": 30 }
def pandas_dtype_to_arrow_c(dtype) -> str: if isinstance(dtype, pandas.CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype("O"): return ArrowCTypes.STRING format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if is_datetime64_dtype(dtype): # Selecting the first char of resolution string: # dtype.str -> '<M8[ns]' resolution = re.findall(r"\[(.*)\]", dtype.str)[0][:1] return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="") raise NotImplementedError( f"Convertion of {dtype} to Arrow C format string is not implemented." )
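For orientation, a few of the Arrow C data interface format strings that a mapping like the one above produces; these come from the Arrow C data interface specification rather than from Modin's ArrowCTypes constants, so treat the exact strings as an assumption to verify against that spec.

import numpy as np

examples = {
    np.dtype("int64"): "l",                 # 64-bit signed integer
    np.dtype("float64"): "g",               # 64-bit float
    np.dtype("O"): "u",                     # object dtype handled as UTF-8 string above
    np.dtype("datetime64[ns]"): "tsn:",     # timestamp, ns resolution, no timezone
}
for dtype, fmt in examples.items():
    print(f"{dtype!s:>15} -> {fmt}")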
40,759
172,105
32
pandas/core/dtypes/inference.py
16
4
def is_file_like(obj) -> bool: if not (hasattr(obj, "read") or hasattr(obj, "write")): return False
Fix some dosctring RT02 error (#50197)
is_file_like
bce995817caf00ab5e82cb4cf1b540f1530cf4ea
pandas
inference.py
11
32
https://github.com/pandas-dev/pandas.git
3
38
0
15
67
Python
{ "docstring": "\n Check if the object is a file-like object.\n\n For objects to be considered file-like, they must\n be an iterator AND have either a `read` and/or `write`\n method as an attribute.\n\n Note: file-like objects must be iterable, but\n iterable objects need not be file-like.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` has file-like properties.\n\n Examples\n --------\n >>> import io\n >>> buffer = io.StringIO(\"data\")\n >>> is_file_like(buffer)\n True\n >>> is_file_like([1, 2, 3])\n False\n ", "language": "en", "n_whitespaces": 147, "n_words": 76, "vocab_size": 61 }
def is_file_like(obj) -> bool: if not (hasattr(obj, "read") or hasattr(obj, "write")): return False return bool(hasattr(obj, "__iter__"))
645
4,252
23
octavia-cli/octavia_cli/apply/resources.py
9
9
def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]: return self._create_or_update(self._update_fn, self.update_payload)
🐙 octavia-cli: `apply` connections (#10881)
update
56bf982cb96f831fe04f5e44a92ee4a669b9e16a
airbyte
resources.py
8
7
https://github.com/airbytehq/airbyte.git
1
28
0
9
43
Python
{ "docstring": "Public function to update the resource on the remote Airbyte instance.\n\n Returns:\n Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource.\n ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 17 }
def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]: return self._create_or_update(self._update_fn, self.update_payload)
71,802
247,638
347
tests/handlers/test_oidc.py
90
17
def test_callback_session(self) -> None: request = Mock(spec=["args", "getCookie", "cookies"]) # Missing cookie request.args = {} request.getCookie.return_value = None self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("missing_session", "No session cookie found") # Missing session parameter request.args = {} request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request", "State parameter is missing") # Invalid cookie request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_session") # Mismatching session session = self._generate_oidc_session_token( state="state", nonce="nonce", client_redirect_url="http://client/redirect", ) request.args = {} request.args[b"state"] = [b"mismatching state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mismatching_session") # Valid session request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assert
Add type hints to some tests/handlers files. (#12224)
test_callback_session
5dd949bee6158a8b651db9f2ae417a62c8184bfd
synapse
test_oidc.py
11
31
https://github.com/matrix-org/synapse.git
1
241
0
42
419
Python
{ "docstring": "The callback verifies the session presence and validity", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_callback_session(self) -> None: request = Mock(spec=["args", "getCookie", "cookies"]) # Missing cookie request.args = {} request.getCookie.return_value = None self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("missing_session", "No session cookie found") # Missing session parameter request.args = {} request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request", "State parameter is missing") # Invalid cookie request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_session") # Mismatching session session = self._generate_oidc_session_token( state="state", nonce="nonce", client_redirect_url="http://client/redirect", ) request.args = {} request.args[b"state"] = [b"mismatching state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mismatching_session") # Valid session request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request")
11,867
59,100
140
src/prefect/filesystems.py
35
18
def _create_repo_url(self) -> str: url_components = urllib.parse.urlparse(self.repository_url) if url_components.scheme == "https" and self.credentials is not None: repo_url = url_components.netloc + url_components.path updated_components = url_components._replace( netloc=f"{self.credentials.get_se
Add private repos
_create_repo_url
bbee097653559003fa0db61ab00f1ff8567eea9a
prefect
filesystems.py
16
16
https://github.com/PrefectHQ/prefect.git
3
73
0
29
142
Python
{ "docstring": "Format the URL provided to the `git clone` command.\n\n For private repos: https://<oauth-key>@github.com/<username>/<repo>.git\n All other repos should be the same as `self.repository`.\n ", "language": "en", "n_whitespaces": 43, "n_words": 22, "vocab_size": 20 }
def _create_repo_url(self) -> str: url_components = urllib.parse.urlparse(self.repository_url) if url_components.scheme == "https" and self.credentials is not None: repo_url = url_components.netloc + url_components.path updated_components = url_components._replace( netloc=f"{self.credentials.get_secret_value()}@{url_components.netloc}" ) full_url = urllib.parse.urlunparse(updated_components) else: full_url = self.repository_url return full_url
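A standard-library sketch of the netloc rewrite documented above, showing how a credential token ends up inside the clone URL; the token below is a placeholder, never hard-code a real one.

import urllib.parse

repository_url = "https://github.com/someuser/some-private-repo.git"
token = "<oauth-token>"                              # placeholder credential

parts = urllib.parse.urlparse(repository_url)
with_auth = parts._replace(netloc=f"{token}@{parts.netloc}")
print(urllib.parse.urlunparse(with_auth))
# https://<oauth-token>@github.com/someuser/some-private-repo.git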
51,882
207,149
20
tests/admin_filters/tests.py
6
6
def test_lookup_with_dynamic_value(self):
Refs #33476 -- Reformatted code with Black.
test_lookup_with_dynamic_value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
8
14
https://github.com/django/django.git
1
86
0
6
25
Python
{ "docstring": "\n Ensure SimpleListFilter can access self.value() inside the lookup.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def test_lookup_with_dynamic_value(self): modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)
6,164
33,821
952
tests/test_tokenization_common.py
144
32
def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" elif is_flax_available(): returned_tensor = "jax" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens =
Fix custom tokenizers test (#19052) * Fix CI for custom tokenizers * Add nightly tests * Run CI, run! * Fix paths * Typos * Fix test
test_batch_encode_dynamic_overflowing
f7ce4f1ff789c11f129597a1171b5d549d102e09
transformers
test_tokenization_common.py
17
46
https://github.com/huggingface/transformers.git
10
314
0
71
521
Python
{ "docstring": "\n When calling batch_encode with multiple sequence it can returns different number of\n overflowing encoding for each sequence:\n [\n Sequence 1: [Encoding 1, Encoding 2],\n Sequence 2: [Encoding 1],\n Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]\n ]\n This needs to be padded so that it can represented as a tensor\n ", "language": "en", "n_whitespaces": 121, "n_words": 51, "vocab_size": 42 }
def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" elif is_flax_available(): returned_tensor = "jax" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( "HuggingFace is solving NLP one commit at a time", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time", "Very tiny input"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6)
@keras_export("keras.optimizers.get")
81,339
275,215
300
keras/optimizers/__init__.py
106
34
def deserialize(config, custom_objects=None): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) # pylint: disable=g-import-not-at-top all_classes = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, "ftrl": ftrl.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in all_classes: config["class_name"] = config["class_name"].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name="optimizer", ) @keras_export("keras
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
deserialize
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
__init__.py
12
29
https://github.com/keras-team/keras.git
2
156
1
92
275
Python
{ "docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras Optimizer instance.\n ", "language": "en", "n_whitespaces": 71, "n_words": 32, "vocab_size": 30 }
def deserialize(config, custom_objects=None): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) # pylint: disable=g-import-not-at-top all_classes = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, "ftrl": ftrl.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in all_classes: config["class_name"] = config["class_name"].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name="optimizer", ) @keras_export("keras.optimizers.get")
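A hedged round-trip sketch of the serialize/deserialize pair this function belongs to, assuming a TensorFlow/Keras installation matching the record above; the case-insensitive lookup means "Adam" and "adam" resolve to the same class.

from tensorflow import keras  # assumes a matching TF/Keras install

opt = keras.optimizers.Adam(learning_rate=1e-3)
config = keras.optimizers.serialize(opt)             # dict with 'class_name' and 'config' entries
clone = keras.optimizers.deserialize(config)
print(type(clone).__name__)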
47,689
196,189
40
sympy/combinatorics/permutations.py
12
7
def commutes_with(self, other): a = s
Updated import locations
commutes_with
498015021131af4dbb07eb110e5badaba8250c7b
sympy
permutations.py
7
4
https://github.com/sympy/sympy.git
1
25
0
11
41
Python
{ "docstring": "\n Checks if the elements are commuting.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> a = Permutation([1, 4, 3, 0, 2, 5])\n >>> b = Permutation([0, 1, 2, 3, 4, 5])\n >>> a.commutes_with(b)\n True\n >>> b = Permutation([2, 3, 5, 4, 1, 0])\n >>> a.commutes_with(b)\n False\n ", "language": "en", "n_whitespaces": 131, "n_words": 46, "vocab_size": 30 }
def commutes_with(self, other): a = self.array_form b = other.array_form return _af_commutes_with(a, b)
3,396
20,492
28
pipenv/patched/notpip/_vendor/pygments/styles/__init__.py
12
5
def get_all_styles(): yield from STYLE_MAP for name, _ in find_plugin_styles(): yield
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
get_all_styles
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
__init__.py
8
4
https://github.com/pypa/pipenv.git
2
19
0
11
35
Python
{ "docstring": "Return a generator for all styles by name,\n both builtin and plugin.", "language": "en", "n_whitespaces": 14, "n_words": 12, "vocab_size": 12 }
def get_all_styles(): yield from STYLE_MAP for name, _ in find_plugin_styles(): yield name
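A short usage sketch for the generator above, using Pygments' public helpers; the set of available style names varies with installed plugins.

from pygments.styles import get_all_styles, get_style_by_name

names = sorted(get_all_styles())                     # builtin styles plus any plugin styles
print(len(names), names[:3])
style = get_style_by_name(names[0])
print(style.background_color)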
7,231
39,439
37
recommenders/utils/python_utils.py
18
12
def lift(cooccurrence): diag_rows, diag_cols = _get_row_and_column_matrix(co
Add new item similarity metrics for SAR (#1754) * Add mutual information similarity in SAR * Add lexicographers mutual information similarity for SAR * Add cosine similarity for SAR * Add inclusion index for SAR * Typos * Change SARSingleNode to SAR * Convert item similarity matrix to np.array * Update * Update SAR tests * Remove unused imports * Add explanations for new similarity metrics
lift
1d7341e93d1f03387699fb3c6ae0b6c0e464296f
recommenders
python_utils.py
11
5
https://github.com/microsoft/recommenders.git
1
48
0
17
85
Python
{ "docstring": "Helper method to calculate the Lift of a matrix of\n co-occurrences. In comparison with basic co-occurrence and Jaccard\n similarity, lift favours discoverability and serendipity, as\n opposed to co-occurrence that favours the most popular items, and\n Jaccard that is a compromise between the two.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of Lifts between any two items.\n\n ", "language": "en", "n_whitespaces": 98, "n_words": 63, "vocab_size": 44 }
def lift(cooccurrence): diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) with np.errstate(invalid="ignore", divide="ignore"): result = cooccurrence / (diag_rows * diag_cols) return np.array(result)
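A tiny worked example of the lift computation described in the docstring above: every co-occurrence count is divided by the product of the two items' own occurrence counts taken from the diagonal. The matrix values are made up.

import numpy as np

cooccurrence = np.array([[4.0, 2.0],
                         [2.0, 8.0]])
diag = cooccurrence.diagonal()
diag_rows = diag[:, np.newaxis]                      # per-row item counts
diag_cols = diag[np.newaxis, :]                      # per-column item counts
lift_matrix = cooccurrence / (diag_rows * diag_cols)
print(lift_matrix)
# [[0.25   0.0625]
#  [0.0625 0.125 ]]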
22,841
107,629
37
lib/matplotlib/artist.py
12
4
def update(self, props): return self._update_props( props, "{cls.__name__!r} object has no property {prop_name!r}")
Clarify error message for bad keyword arguments. `plot([], [], foo=42)` previously emitted ``` 'Line2D' object has no property 'foo' ``` which refers to the Matplotlib-specific concept of "properties". It now instead emits ``` Line2D.set() got an unexpected keyword argument 'foo' ``` which is modeled after the standard error message for unknown keyword arguments. (To maximize backcompat, the implementation goes through a new _internal_update, which does *not* error when the same prop is passed under different aliases. This could be changed later, but is not the goal of this PR.)
update
d69be2554cf6d1ac711bf433b1d6f176e3290d4f
matplotlib
artist.py
8
3
https://github.com/matplotlib/matplotlib.git
1
17
0
12
30
Python
{ "docstring": "\n Update this artist's properties from the dict *props*.\n\n Parameters\n ----------\n props : dict\n ", "language": "en", "n_whitespaces": 49, "n_words": 13, "vocab_size": 12 }
def update(self, props): return self._update_props( props, "{cls.__name__!r} object has no property {prop_name!r}")
30,007
133,396
75
python/ray/util/sgd/torch/worker_group.py
22
11
def new_workers_size(self): remote_resources = ray.available_resources() max_remote_workers = self._max_workers new_remote_workers = min(remote_resources.get("CPU"
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
new_workers_size
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
worker_group.py
13
7
https://github.com/ray-project/ray.git
2
55
0
16
92
Python
{ "docstring": "Returns number of workers to create based on available resources.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def new_workers_size(self): remote_resources = ray.available_resources() max_remote_workers = self._max_workers new_remote_workers = min(remote_resources.get("CPU", 0), max_remote_workers) if self._use_gpu: new_remote_workers = min(remote_resources.get("GPU", 0), new_remote_workers) return new_remote_workers
52,779
209,787
146
scapy/arch/windows/__init__.py
50
10
def setmonitor(self, enable=True): # type: (bool) -> bool # We must reset the monitor cache if enable: res = self.setmode('monitor') else: res = self.setmode('managed') if not res: log_runtime.error("Npcap WlanHelper returned with an error code !") self.cache_mode = None t
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <pierre@droids-corp.org>
setmonitor
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
scapy
__init__.py
12
10
https://github.com/secdev/scapy.git
4
66
0
40
117
Python
{ "docstring": "Alias for setmode('monitor') or setmode('managed')\n Only available with Npcap", "language": "en", "n_whitespaces": 15, "n_words": 9, "vocab_size": 9 }
def setmonitor(self, enable=True):
    # type: (bool) -> bool
    # We must reset the monitor cache
    if enable:
        res = self.setmode('monitor')
    else:
        res = self.setmode('managed')
    if not res:
        log_runtime.error("Npcap WlanHelper returned with an error code !")
    self.cache_mode = None
    tmp = self.cache_mode = self.ismonitor()
    return tmp if enable else (not tmp)
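A hedged usage sketch, assuming a Windows machine with Npcap and an interface object that exposes this method via conf.iface:

from scapy.all import conf

iface = conf.iface              # assumed: a Windows NetworkInterface object
if iface.setmonitor(True):      # wraps setmode('monitor') as in the code above
    print("monitor mode enabled")
iface.setmonitor(False)         # back to managed mode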
57,477
225,607
393
albumentations/augmentations/geometric/rotate.py
195
26
def _rotated_rect_with_max_area(h, w, angle):
    angle = math.radians(angle)
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)

    # since the solutions for angle, -angl
add `crop_border` option to Rotate (#1214)
_rotated_rect_with_max_area
a4d33e180c4407990afa1fc03aa079718d738ebd
albumentations
rotate.py
14
17
https://github.com/albumentations-team/albumentations.git
5
233
0
102
347
Python
{ "docstring": "\n Given a rectangle of size wxh that has been rotated by 'angle' (in\n degrees), computes the width and height of the largest possible\n axis-aligned rectangle (maximal area) within the rotated rectangle.\n\n Code from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders\n ", "language": "en", "n_whitespaces": 70, "n_words": 34, "vocab_size": 29 }
def _rotated_rect_with_max_area(h, w, angle):
    angle = math.radians(angle)
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)

    # since the solutions for angle, -angle and 180-angle are all the same,
    # it is sufficient to look at the first quadrant and the absolute values of sin,cos:
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # half constrained case: two crop corners touch the longer side,
        # the other two corners are on the mid-line parallel to the longer line
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: crop touches all 4 sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a

    return dict(
        x_min=max(0, int(w / 2 - wr / 2)),
        x_max=min(w, int(w / 2 + wr / 2)),
        y_min=max(0, int(h / 2 - hr / 2)),
        y_max=min(h, int(h / 2 + hr / 2)),
    )
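A worked check of the half-constrained branch with illustrative numbers: a 100x100 image rotated by 45 degrees admits an axis-aligned crop of side 100/sqrt(2) ~= 70.7 px, centred in the image.

import math

# Illustrative values, not from the source: square image, 45 degree rotation.
h = w = 100
sin_a = cos_a = math.sin(math.radians(45))       # ~0.7071, so the first branch applies
x = 0.5 * min(h, w)                              # 50.0
wr = hr = x / sin_a                              # ~70.7
print(int(w / 2 - wr / 2), int(w / 2 + wr / 2))  # ~ (14, 85): the crop bounds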
3,362
20,426
84
pipenv/patched/notpip/_vendor/pygments/lexer.py
29
10
def using(_other, **kwargs):
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
using
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
lexer.py
13
13
https://github.com/pypa/pipenv.git
4
69
0
22
107
Python
{ "docstring": "\n Callback that processes the match with a different lexer.\n\n The keyword arguments are forwarded to the lexer, except `state` which\n is handled separately.\n\n `state` specifies the state that the new lexer will start in, and can\n be an enumerable such as ('root', 'inline', 'string') or a simple\n string which is assumed to be on top of the root state.\n\n Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.\n ", "language": "en", "n_whitespaces": 95, "n_words": 70, "vocab_size": 55 }
def using(_other, **kwargs):
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
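A hedged usage sketch of the callback in a lexer's token table; the lexer and tag names are illustrative, not taken from this file. The matched middle group is handed off to another lexer via using():

from pygments.lexer import RegexLexer, bygroups, using
from pygments.lexers.javascript import JavascriptLexer
from pygments.token import Name, Text

class TinyTemplateLexer(RegexLexer):
    # Minimal illustration: delegate inline <script> content to the JS lexer.
    name = 'TinyTemplate'
    tokens = {
        'root': [
            (r'(<script>)(.*?)(</script>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            (r'[^<]+', Text),
            (r'<', Text),
        ],
    }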
116,975
319,613
144
src/documents/tests/test_api.py
23
19
def test_unset_document_storage_path(self):
    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        self.sp1.id,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        None,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)
    self.async_task
Feature: Dynamic document storage pathes (#916) * Added devcontainer * Add feature storage pathes * Exclude tests and add versioning * Check escaping * Check escaping * Check quoting * Echo * Escape * Escape : * Double escape \ * Escaping * Remove if * Escape colon * Missing \ * Esacpe : * Escape all * test * Remove sed * Fix exclude * Remove SED command * Add LD_LIBRARY_PATH * Adjusted to v1.7 * Updated test-cases * Remove devcontainer * Removed internal build-file * Run pre-commit * Corrected flak8 error * Adjusted to v1.7 * Updated test-cases * Corrected flak8 error * Adjusted to new plural translations * Small adjustments due to code-review backend * Adjusted line-break * Removed PAPERLESS prefix from settings variables * Corrected style change due to search+replace * First documentation draft * Revert changes to Pipfile * Add sphinx-autobuild with keep-outdated * Revert merge error that results in wrong storage path is evaluated * Adjust styles of generated files ... * Adds additional testing to cover dynamic storage path functionality * Remove unnecessary condition * Add hint to edit storage path dialog * Correct spelling of pathes to paths * Minor documentation tweaks * Minor typo * improving wrapping of filter editor buttons with new storage path button * Update .gitignore * Fix select border radius in non input-groups * Better storage path edit hint * Add note to edit storage path dialog re document_renamer * Add note to bulk edit storage path re document_renamer * Rename FILTER_STORAGE_DIRECTORY to PATH * Fix broken filter rule parsing * Show default storage if unspecified * Remove note re storage path on bulk edit * Add basic validation of filename variables Co-authored-by: Markus Kling <markus@markus-kling.net> Co-authored-by: Trenton Holmes <holmes.trenton@gmail.com> Co-authored-by: Michael Shamoon <4887959+shamoon@users.noreply.github.com> Co-authored-by: Quinn Casey <quinn@quinncasey.com>
test_unset_document_storage_path
69ef26dab04d51e7e102dcb33cd98ddc6ad975fd
paperless-ngx
test_api.py
12
15
https://github.com/paperless-ngx/paperless-ngx.git
1
136
0
17
215
Python
{ "docstring": "\n GIVEN:\n - 4 documents without defined storage path\n - 1 document with a defined storage\n WHEN:\n - Bulk edit called to remove storage path from 1 document\n THEN:\n - Single document storage path removed\n ", "language": "en", "n_whitespaces": 107, "n_words": 34, "vocab_size": 22 }
def test_unset_document_storage_path(self):
    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        self.sp1.id,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        None,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    self.async_task.assert_called()
    args, kwargs = self.async_task.call_args
    self.assertCountEqual(kwargs["document_ids"], [self.doc1.id])
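A hedged sketch of the helper the test exercises; the module path and the identifier values are assumptions based on the repository layout, not taken from this file:

from documents import bulk_edit  # assumed import path

document_ids = [1, 2]            # illustrative primary keys
storage_path_id = 1

bulk_edit.set_storage_path(document_ids, storage_path_id)  # assign a storage path
bulk_edit.set_storage_path(document_ids, None)             # unset it again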
26,314
118,602
25
lib/tests/streamlit/cache_spinner_test.py
4
6
def test_with_spinner(self):
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
test_with_spinner
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
cache_spinner_test.py
10
3
https://github.com/streamlit/streamlit.git
1
21
0
4
39
Python
{ "docstring": "If the show_spinner flag is set, there should be one element in the\n report queue.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 14 }
def test_with_spinner(self):
    function_with_spinner()
    self.assertFalse(self.forward_msg_queue.is_empty())
72,586
249,079
337
tests/rest/admin/test_device.py
82
26
def test_update_device_too_long_display_name(self) -> None:
Use literals in place of `HTTPStatus` constants in tests (#13469)
test_update_device_too_long_display_name
c97042f7eef3748e17c90e48a4122389a89c4735
synapse
test_device.py
14
29
https://github.com/matrix-org/synapse.git
1
159
0
61
257
Python
{ "docstring": "\n Update a device with a display name that is invalid (too long).\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
def test_update_device_too_long_display_name(self) -> None:
    # Set iniital display name.
    update = {"display_name": "new display"}
    self.get_success(
        self.handler.update_device(
            self.other_user, self.other_user_device_id, update
        )
    )

    # Request to update a device display name with a new value that is longer than allowed.
    update = {
        "display_name": "a" * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
    }

    channel = self.make_request(
        "PUT",
        self.url,
        access_token=self.admin_user_tok,
        content=update,
    )

    self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])

    # Ensure the display name was not updated.
    channel = self.make_request(
        "GET",
        self.url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual("new display", channel.json_body["display_name"])
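A hedged sketch of the request the test exercises, written with the requests library; the endpoint path, host and token are assumptions rather than values from this file:

import requests

# Assumed admin endpoint; a display name longer than the allowed maximum
# is expected to yield HTTP 400 with errcode M_TOO_LARGE.
resp = requests.put(
    "http://localhost:8008/_synapse/admin/v2/users/@alice:example.com/devices/DEVICEID",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"display_name": "a" * 200},  # assumed to exceed MAX_DEVICE_DISPLAY_NAME_LEN
)
print(resp.status_code, resp.json().get("errcode"))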