Dataset schema. Each data record below lists the following 24 fields in this order, separated by `|`:

| Column | Type | Observed range |
|---|---|---|
| ast_errors | string | lengths 0–3.2k |
| d_id | int64 | 44–121k |
| id | int64 | 70–338k |
| n_whitespaces | int64 | 3–14k |
| path | string | lengths 8–134 |
| n_words | int64 | 4–4.82k |
| n_identifiers | int64 | 1–131 |
| random_cut | string | lengths 16–15.8k |
| commit_message | string | lengths 2–15.3k |
| fun_name | string | lengths 1–84 |
| commit_id | string | lengths 40–40 |
| repo | string | lengths 3–28 |
| file_name | string | lengths 5–79 |
| ast_levels | int64 | 6–31 |
| nloc | int64 | 1–548 |
| url | string | lengths 31–59 |
| complexity | int64 | 1–66 |
| token_counts | int64 | 6–2.13k |
| n_ast_errors | int64 | 0–28 |
| vocab_size | int64 | 4–1.11k |
| n_ast_nodes | int64 | 15–19.2k |
| language | string | 1 class (Python) |
| documentation | dict | keys: docstring, language, n_whitespaces, n_words, vocab_size |
| code | string | lengths 101–62.2k |
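The column names above double as dictionary keys in each record. As a minimal sketch, assuming this preview comes from a dataset published on the Hugging Face Hub (the dataset identifier below is a placeholder, not a name taken from this page), a record could be loaded and inspected like this:

```python
# Sketch only: assumes the table above previews a Hugging Face dataset.
# "user/python-commit-functions" is a placeholder identifier, not the real name.
from datasets import load_dataset

ds = load_dataset("user/python-commit-functions", split="train")

record = ds[0]
# Basic provenance fields for the sample.
print(record["repo"], record["fun_name"], record["url"])

# `documentation` is a dict holding the docstring and its token statistics.
print(record["documentation"]["docstring"])

# `random_cut` is a truncated prefix of `code`; numeric columns such as
# `complexity` and `nloc` are per-function metrics, useful for filtering.
short_simple = ds.filter(lambda r: r["complexity"] <= 2 and r["nloc"] <= 10)
print(len(short_simple))
```

The numeric columns (complexity, nloc, token_counts, n_ast_nodes, and so on) describe the parsed function in each sample, so they are the natural handles for filtering or bucketing the records shown below.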
79,322 | 268,048 | 87 | test/lib/ansible_test/_internal/test.py | 30 | 6 | def format_command(self) -> str:
command = 'ansible-test %s' % self.command
if self.test | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annotation type comments to native type hints.
* ansible-test - Use more native type hints.
Conversion of single-line function annotation type comments with default values to native type hints.
* ansible-test - Use more native type hints.
Manual conversion of type annotation comments for functions which have pylint directives. | format_command | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | test.py | 10 | 8 | https://github.com/ansible/ansible.git | 3 | 41 | 0 | 20 | 74 | Python | {
"docstring": "Return a string representing the CLI command associated with the test failure.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def format_command(self) -> str:
command = 'ansible-test %s' % self.command
if self.test:
command += ' --test %s' % self.test
if self.python_version:
command += ' --python %s' % self.python_version
return command
|
|
47,131 | 194,950 | 343 | projects/seeker/scripts/generate_lm_data.py | 102 | 31 | def act(self):
obs = self.observation
reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False}
if obs is None or obs['text'] == DO_NOT_RETRIEVE:
return Message(reply)
# construct the search query
labels = obs.get('labels', obs.get('eval_labels', None))
search_query = self.construct_search_query(labels)
if (
self.opt['min_num_search_words'] > 0
and len(search_query[0].split()) <= self.opt['min_num_search_words']
):
return Message(reply)
# retrieve
self.search_engine.set_search_queries(search_query)
retrieved, _ = self.search_engine.retrieve_and_score(self.dummy)
all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched
# Find the right doc
best_f1, best_doc, best_doc_idx = self.get_best_doc(a | SeeKeR (#4447)
* seeker
* todo
* readme updates; add test
* small config changes
* various updates
* readme fix
* model card
* add arxiv link
* surround spacy with try catch
* more protected
* more protection of imports
* lint | act | 7e453008fde751aff0cfd752662e19fe2adc7410 | ParlAI | generate_lm_data.py | 13 | 25 | https://github.com/facebookresearch/ParlAI.git | 7 | 219 | 0 | 74 | 379 | Python | {
"docstring": "\n Search for overlap with the observation label.\n\n Return the best fitting document. A document is valid if the f1 is above the\n threshold AND the f1 is less than 1.0 AND the target label is not in the\n document.\n ",
"language": "en",
"n_whitespaces": 75,
"n_words": 39,
"vocab_size": 27
} | def act(self):
obs = self.observation
reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False}
if obs is None or obs['text'] == DO_NOT_RETRIEVE:
return Message(reply)
# construct the search query
labels = obs.get('labels', obs.get('eval_labels', None))
search_query = self.construct_search_query(labels)
if (
self.opt['min_num_search_words'] > 0
and len(search_query[0].split()) <= self.opt['min_num_search_words']
):
return Message(reply)
# retrieve
self.search_engine.set_search_queries(search_query)
retrieved, _ = self.search_engine.retrieve_and_score(self.dummy)
all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched
# Find the right doc
best_f1, best_doc, best_doc_idx = self.get_best_doc(all_docs, labels)
if best_doc:
assert best_doc_idx is not None
reply['knowledge'] = f'{TOKEN_KNOWLEDGE}{best_doc}{TOKEN_END_KNOWLEDGE}'
reply['f1_overlap'] = best_f1
reply['text'] = labels[0]
reply['retrieved_docs'] = all_docs
reply['gold_doc'] = all_docs[best_doc_idx]
reply['search_query'] = search_query[0]
return Message(reply)
|
|
84,618 | 284,002 | 31 | openbb_terminal/forex/quantitative_analysis/qa_controller.py | 10 | 9 | def print_help(self):
he | Adds QA and Pred to forex (#1652)
* added qa and pred to forex
* updated test help
* Add forex/qa notebooks api wrapper
* Add forex/qa tests
* Add all menu commands to the integration test script
Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> | print_help | 5bf4618b398492f0ab2d09b3827467c7089831ec | OpenBBTerminal | qa_controller.py | 9 | 33 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 22 | 0 | 10 | 54 | Python | {
"docstring": "Print help[cmds]\n pick pick target column for analysis[/cmds]\n\n[param]Pair: [/param]{self.ticker}\n[param]Target Column: [/param]{self.target}\n[cmds]\n[info]Statistics:[/info]\n summary brief summary statistics of loaded pair.\n normality normality statistics and tests\n unitroot unit root test for stationarity (ADF, KPSS)\n[info]Plots:[/info]\n line line plot of selected target\n hist histogram with density plot\n cdf cumulative distribution function\n bw box and whisker plot\n acf (partial) auto-correlation function differentials of prices\n qqplot residuals against standard normal curve\n[info]Rolling Metrics:[/info]\n rolling rolling mean and std deviation of prices\n spread rolling variance and std deviation of prices\n quantile rolling median and quantile of prices\n skew rolling skewness of distribution of prices\n kurtosis rolling kurtosis of distribution of prices\n[info]Risk:[/info]\n var display value at risk\n es display expected shortfall\n[info]Other:[/info]\n raw print raw data\n decompose decomposition in cyclic-trend, season, and residuals of prices\n cusum detects abrupt changes using cumulative sum algorithm of prices[/cmds]\n ",
"language": "en",
"n_whitespaces": 315,
"n_words": 142,
"vocab_size": 95
} | def print_help(self):
help_text = f
console.print(text=help_text, menu="Forex - Quantitative Analysis")
|
|
51,259 | 205,879 | 112 | django/db/models/sql/query.py | 30 | 11 | def chain(self, klass=None):
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_a | Refs #33476 -- Reformatted code with Black. | chain | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | query.py | 10 | 10 | https://github.com/django/django.git | 5 | 64 | 0 | 22 | 108 | Python | {
"docstring": "\n Return a copy of the current Query that's ready for another operation.\n The klass argument changes the type of the Query, e.g. UpdateQuery.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 20
} | def chain(self, klass=None):
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
|
|
1,280 | 7,846 | 209 | tests/integration_tests/test_gbm.py | 81 | 42 | def run_test_gbm_non_number_inputs(tmpdir, backend_config):
input_features = [binary_feature(), category_feature(encoder={"reduce_output": "sum"})]
output_feature = binary_feature()
output_features = [output_feature]
csv_filename = os.path.join(tmpdir, "training.csv")
dataset_filename = generate_data(input_features, output_features, csv_filename, num_examples=100)
| Bugfix: non-number inputs to GBM (#2418) | run_test_gbm_non_number_inputs | 24f6583aa3b384aa6179c3579be600760897f1d8 | ludwig | test_gbm.py | 13 | 28 | https://github.com/ludwig-ai/ludwig.git | 2 | 222 | 0 | 65 | 354 | Python | {
"docstring": "Test that the GBM model can train and predict with non-number inputs.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def run_test_gbm_non_number_inputs(tmpdir, backend_config):
input_features = [binary_feature(), category_feature(encoder={"reduce_output": "sum"})]
output_feature = binary_feature()
output_features = [output_feature]
csv_filename = os.path.join(tmpdir, "training.csv")
dataset_filename = generate_data(input_features, output_features, csv_filename, num_examples=100)
config = {
MODEL_TYPE: "gbm",
"input_features": input_features,
"output_features": output_features,
TRAINER: {"num_boost_round": 2},
}
model = LudwigModel(config, backend=backend_config)
_, _, output_directory = model.train(
dataset=dataset_filename,
output_directory=tmpdir,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
skip_save_log=True,
)
model.load(os.path.join(tmpdir, "api_experiment_run", "model"))
preds, _ = model.predict(dataset=dataset_filename, output_directory=output_directory)
prob_col = preds[output_feature["name"] + "_probabilities"]
if backend_config["type"] == "ray":
prob_col = prob_col.compute()
assert len(prob_col.iloc[0]) == 2
assert prob_col.apply(sum).mean() == pytest.approx(1.0)
|
|
52,762 | 209,644 | 34 | scapy/contrib/pnio_rpc.py | 13 | 7 | def i2len(self, pkt, val):
fld_len = self.f | [MS-RPCE] and [MS-SMB] major update (#3683)
* Various fixes regarding DCE/RPC build
* DCE/RPC sessions
* Cleanup unused code
* Add missing GSS_WRAP algo names
* Add find_dcerpc_interface
* Split SMB client and server
* Missing StrFixedLenFieldUtf16
* Remove unfinished smbserver feature
* Friendlier getter for SMB2
* DceRpcNak
* Improve NDR parsing (a lot)
* Minor SMB2 improvements
* BIG NDR refactor + Dissect pointer deferal
* Build with pointer deferral
* Small build bugs
* SMB2 logoff, fix rawToken in SMB standalone
* Add security providers from MS-RPCE to DCERPC
* Cleanup ptr_pack of NDRPacketListField
* Clearer exception in find_dcerpc_interface
* Add minor_version attribute
* Fix computation of auth_pad in sec_trailer
* Fix a WTF bug
* Compute length for NDR arrays
* Pass enum to EnumField
* Match union attributes from response with request
* Improve SMB server
* Small bug in pointer deferal dissection
* Add user-friendly utils
* Add a few NDR tests
* More user-friendly improvements
* Bug: parent not copied in clone_with
* Build: propagate NDR64 and bug fix
* Default close response parameters
* Fix Python 2.7
* Fix SMB2_Create_Context offset
* Fix SMB2 create context
* SMB2: support chain, improvements
* Fix ioctl error
* SMB: check computeNTProofStr
* Fix UTCField default
* Improve FileId capabilities
* SMB2: contexts
* Typos
* Minor NDRUnion fixes
* Py2 fixes | i2len | ca10c5cf00425d0178998ec0b006cbb65ddbfb54 | scapy | pnio_rpc.py | 9 | 3 | https://github.com/secdev/scapy.git | 1 | 33 | 0 | 12 | 51 | Python | {
"docstring": "get the length of the field, including the padding length",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 7
} | def i2len(self, pkt, val):
fld_len = self.fld.i2len(pkt, val)
return fld_len + self.padlen(fld_len, pkt)
|
|
23,525 | 109,326 | 121 | lib/matplotlib/_mathtext.py | 30 | 16 | def get_kerning(self, next):
advance = self._metrics.advance - self.width
kern = 0.
if isinstance | Replace MathtextBackend mechanism.
The MathtextBackend ("MB") mechanism was previously used to let actual
backends customize how they received mathtext results -- either as lists
of glyphs and rectangles (for vector backends: MathtextBackendPath),
or a bitmap (for raster backends: MathtextBackendAgg); in both cases,
metrics are also provided. MBs also controlled font hinting. Note that
the MB mechanism was not publically user-extendable (this would require
touching the private MathTextParser._backend_mapping dict), so third
parties could not meaningfully provide their own backends.
MBs were attached to _mathtext.Fonts objects, which were central to
the "shipping" stage of the parse (ship(), which converts the nested
parse tree created by pyparsing into flat calls to render_glyph and
render_rect_filled). This led to a slightly curious API, where
the old MathtextBackendAgg.get_results() (for example) calls
`_mathtext.ship(0, 0, box)` and this somehow magically mutates self --
this is because self is indirectly attached to sub-elements of box.
This PR changes the implementation to instead detach output logic
from Fonts (which become restricted to providing glyph metrics and
related info), and makes ship() instead return a simple Output object
(lists of glyphs and rects) which is itself able either to convert to
a VectorParse or a RasterParse -- namedtuples that are backcompatible
with the tuples previously returned by MathTextParser.parse(). (While
technically these are "new" classes in the API, they are simply there to
(slightly) better document the return value of MathtextBackend.parse().)
In summary, this patch
- removes the non-extensible MB system,
- detaches output logic from Fonts objects, thus avoiding "action at
distance" where `ship(0, 0, box)` would mutate the calling MB,
- (weakly) documents the return value of MathtextBackend.parse().
Unrelatedly, also deprecate the unused MathTextWarning. | get_kerning | 349f8678f1cf225d6070a236cf41a5e1f044cb18 | matplotlib | _mathtext.py | 11 | 9 | https://github.com/matplotlib/matplotlib.git | 2 | 79 | 0 | 25 | 114 | Python | {
"docstring": "\n Return the amount of kerning between this and the given character.\n\n This method is called when characters are strung together into `Hlist`\n to create `Kern` nodes.\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 26,
"vocab_size": 25
} | def get_kerning(self, next):
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.fontset.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
|
|
80,845 | 271,691 | 34 | keras/engine/training_generator_v1.py | 15 | 10 | def _get_num_samples_or_steps(data, steps_per_epoch):
flat_inputs = tf.nest.flatten(data)
if hasattr(flat_inputs[0], "shape"):
return int(flat_inputs[0].shape[0]), False
return steps_per_epoch, True
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _get_num_samples_or_steps | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training_generator_v1.py | 13 | 5 | https://github.com/keras-team/keras.git | 2 | 48 | 0 | 14 | 77 | Python | {
"docstring": "Returns number of samples or steps, and whether to use steps count mode.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def _get_num_samples_or_steps(data, steps_per_epoch):
flat_inputs = tf.nest.flatten(data)
if hasattr(flat_inputs[0], "shape"):
return int(flat_inputs[0].shape[0]), False
return steps_per_epoch, True
|
|
4,980 | 26,394 | 73 | saleor/graphql/product/tests/test_attributes.py | 34 | 13 | def test_retrieve_product_attributes_input_type(staff_api_client, product, channel_USD):
query =
variables = {"channel": channel_USD.slug}
found_products = get_graphql_content(
staff_api_client.post_graphql(query, variables)
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gq | Better permissions (#9363)
* Better permissions
* Add OWNER permission
* WIP Add enums to represent function-based permissions
* Rename OWNER to IS_OWNER
* Add flag to skip autogenerated permission message
* Rename InternalPermissions to PermissionFunctions
* Add permission descriptions for meta mutations
* Better permissions validation
* Reuse user checking functions
* Rename permission functions enums
* Update schema
* Rename permission functions enums | test_retrieve_product_attributes_input_type | ab45ebda5a14df6806046fd552e2c6d08f025503 | saleor | test_attributes.py | 13 | 24 | https://github.com/saleor/saleor.git | 2 | 87 | 0 | 26 | 156 | Python | {
"docstring": "\n query ($channel: String){\n products(first: 10, channel: $channel) {\n edges {\n node {\n attributes {\n values {\n inputType\n }\n }\n }\n }\n }\n }\n \n mutation ProductTypeReorderAttributes(\n $productTypeId: ID!\n $moves: [ReorderInput!]!\n $type: ProductAttributeType!\n ) {\n productTypeReorderAttributes(\n productTypeId: $productTypeId\n moves: $moves\n type: $type\n ) {\n productType {\n id\n variantAttributes {\n id\n slug\n }\n productAttributes {\n id\n }\n }\n\n errors {\n field\n message\n code\n attributes\n }\n }\n }\n",
"language": "en",
"n_whitespaces": 433,
"n_words": 64,
"vocab_size": 39
} | def test_retrieve_product_attributes_input_type(staff_api_client, product, channel_USD):
query =
variables = {"channel": channel_USD.slug}
found_products = get_graphql_content(
staff_api_client.post_graphql(query, variables)
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gql_attr["values"]) == 1
assert gql_attr["values"][0]["inputType"] == "DROPDOWN"
ATTRIBUTES_RESORT_QUERY =
|
|
51,087 | 205,311 | 606 | django/db/migrations/migration.py | 124 | 21 | def apply(self, project_state, schema_editor, collect_sql=False):
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFO | Refs #33476 -- Reformatted code with Black. | apply | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | migration.py | 15 | 27 | https://github.com/django/django.git | 9 | 160 | 0 | 87 | 266 | Python | {
"docstring": "\n Take a project_state representing all migrations prior to this one\n and a schema_editor for a live database and apply the migration\n in a forwards order.\n\n Return the resulting project state for efficient reuse by following\n Migrations.\n ",
"language": "en",
"n_whitespaces": 79,
"n_words": 36,
"vocab_size": 30
} | def apply(self, project_state, schema_editor, collect_sql=False):
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
atomic_operation = operation.atomic or (
self.atomic and operation.atomic is not False
)
if not schema_editor.atomic_migration and atomic_operation:
# Force a transaction on a non-transactional-DDL backend or an
# atomic operation inside a non-atomic migration.
with atomic(schema_editor.connection.alias):
operation.database_forwards(
self.app_label, schema_editor, old_state, project_state
)
else:
# Normal behaviour
operation.database_forwards(
self.app_label, schema_editor, old_state, project_state
)
return project_state
|
|
30,960 | 136,637 | 440 | python/ray/autoscaler/_private/kuberay/node_provider.py | 122 | 25 | def safe_to_scale(self) -> bool:
# Get the list of nodes.
node_set = set(self.node_data_dict.keys())
worker_groups = self._raycluster["spec"].get("workerGroupSpecs", [])
# Accumulates the indices of worker groups with non-empty workersToDelete
non_empty_worker_group_indices = []
for group_index, worker_group in enumerate(worker_groups):
workersToDelete = worker_group.get("scaleStrategy", {}).get(
"workersToDelete", []
)
if workersToDelete:
non_empty_worker_group_indices.append(group_index)
for worker in workersToDelete:
if worker in node_set:
# The ope | KubeRay node provider refactor (#30281)
Implements KubeRay node provider as a "BatchingNodeProvider".
Builds on #29933.
Summary of design
An autoscaler update now works like this:
list pod data from k8s
check if it's safe to proceed with update. Abort the update if not.
do some internal calculation to determine desired scale
submit a single patch to the RayCluster CR if a scale change is required
Everything is single-threaded and there are O(1) K8s API calls per autoscaler update.
Signed-off-by: Dmitri Gekhtman <dmitri.m.gekhtman@gmail.com> | safe_to_scale | c976799dfd96806ec9972a287835f7a034ec3d2c | ray | node_provider.py | 15 | 40 | https://github.com/ray-project/ray.git | 7 | 147 | 0 | 79 | 262 | Python | {
"docstring": "Returns False iff non_terminated_nodes contains any pods in the RayCluster's\n workersToDelete lists.\n\n Explanation:\n If there are any workersToDelete which are non-terminated,\n we should wait for the operator to do its job and delete those\n pods. Therefore, we back off the autoscaler update.\n\n If, on the other hand, all of the workersToDelete have already been cleaned up,\n then we patch away the workersToDelete lists and return True.\n In the future, we may consider having the operator clean up workersToDelete\n on it own:\n https://github.com/ray-project/kuberay/issues/733\n\n Note (Dmitri):\n It is stylistically bad that this function has a side effect.\n ",
"language": "en",
"n_whitespaces": 186,
"n_words": 95,
"vocab_size": 76
} | def safe_to_scale(self) -> bool:
# Get the list of nodes.
node_set = set(self.node_data_dict.keys())
worker_groups = self._raycluster["spec"].get("workerGroupSpecs", [])
# Accumulates the indices of worker groups with non-empty workersToDelete
non_empty_worker_group_indices = []
for group_index, worker_group in enumerate(worker_groups):
workersToDelete = worker_group.get("scaleStrategy", {}).get(
"workersToDelete", []
)
if workersToDelete:
non_empty_worker_group_indices.append(group_index)
for worker in workersToDelete:
if worker in node_set:
# The operator hasn't removed this worker yet. Abort
# the autoscaler update.
logger.warning(f"Waiting for operator to remove worker {worker}.")
return False
# All required workersToDelete have been removed.
# Clean up the workersToDelete field.
patch_payload = []
for group_index in non_empty_worker_group_indices:
patch = worker_delete_patch(group_index, workers_to_delete=[])
patch_payload.append(patch)
if patch_payload:
logger.info("Cleaning up workers to delete.")
logger.info(f"Submitting patch {patch_payload}.")
self._submit_raycluster_patch(patch_payload)
# It's safe to proceed with the autoscaler update.
return True
|
|
50,543 | 203,818 | 240 | django/contrib/gis/db/backends/oracle/operations.py | 58 | 14 | def get_distance(self, f, value, lookup_type):
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(
value, Distance.unit_attname(f.units_name(self.connection))
)
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == " | Refs #33476 -- Reformatted code with Black. | get_distance | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | operations.py | 18 | 16 | https://github.com/django/django.git | 5 | 89 | 0 | 42 | 148 | Python | {
"docstring": "\n Return the distance parameters given the value and the lookup type.\n On Oracle, geometry columns with a geodetic coordinate system behave\n implicitly like a geography column, and thus meters will be used as\n the distance parameter on them.\n ",
"language": "en",
"n_whitespaces": 74,
"n_words": 38,
"vocab_size": 32
} | def get_distance(self, f, value, lookup_type):
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(
value, Distance.unit_attname(f.units_name(self.connection))
)
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == "dwithin":
dist_param = "distance=%s" % dist_param
return [dist_param]
|
|
16,326 | 74,848 | 38 | wagtail/documents/tests/test_models.py | 10 | 10 | def test_standard_get_document_model(self):
del settings.WAGTAILDOCS_DOCUMENT_MODEL
from wagtail.documents.models import Document
self.assertIs(get_document_model(), Document)
| Reformat with black | test_standard_get_document_model | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_models.py | 9 | 4 | https://github.com/wagtail/wagtail.git | 1 | 28 | 0 | 10 | 46 | Python | {
"docstring": "Test get_document_model with no WAGTAILDOCS_DOCUMENT_MODEL",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | def test_standard_get_document_model(self):
del settings.WAGTAILDOCS_DOCUMENT_MODEL
from wagtail.documents.models import Document
self.assertIs(get_document_model(), Document)
|
|
76,908 | 261,639 | 92 | sklearn/utils/__init__.py | 51 | 8 | def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
column_indexer = (
slice(None, None, None) if column_indexer is None else column_indexer
)
if | MAINT test globally setting output via context manager (#24932)
Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr> | _safe_assign | af16e5934ae269d05fd7df983b97def7c0ef0bd2 | scikit-learn | __init__.py | 10 | 9 | https://github.com/scikit-learn/scikit-learn.git | 4 | 80 | 0 | 33 | 120 | Python | {
"docstring": "Safe assignment to a numpy array, sparse matrix, or pandas dataframe.\n\n Parameters\n ----------\n X : {ndarray, sparse-matrix, dataframe}\n Array to be modified. It is expected to be 2-dimensional.\n\n values : ndarray\n The values to be assigned to `X`.\n\n row_indexer : array-like, dtype={int, bool}, default=None\n A 1-dimensional array to select the rows of interest. If `None`, all\n rows are selected.\n\n column_indexer : array-like, dtype={int, bool}, default=None\n A 1-dimensional array to select the columns of interest. If `None`, all\n columns are selected.\n ",
"language": "en",
"n_whitespaces": 143,
"n_words": 80,
"vocab_size": 50
} | def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
column_indexer = (
slice(None, None, None) if column_indexer is None else column_indexer
)
if hasattr(X, "iloc"): # pandas dataframe
X.iloc[row_indexer, column_indexer] = values
else: # numpy array or sparse matrix
X[row_indexer, column_indexer] = values
|
|
55,690 | 219,662 | 31 | python3.10.4/Lib/_pydecimal.py | 10 | 6 | def copy_sign(self, a, b):
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
| add python 3.10.4 for windows | copy_sign | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _pydecimal.py | 9 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 27 | 0 | 10 | 43 | Python | {
"docstring": "Copies the second operand's sign to the first one.\n\n In detail, it returns a copy of the first operand with the sign\n equal to the sign of the second operand.\n\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(1, -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(Decimal(1), -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(1, Decimal(-2))\n Decimal('-1')\n ",
"language": "en",
"n_whitespaces": 179,
"n_words": 60,
"vocab_size": 32
} | def copy_sign(self, a, b):
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
|
|
15,591 | 70,979 | 319 | wagtail/contrib/forms/views.py | 82 | 21 | def get_validated_ordering(self):
orderable_fields = self.orderable_fields or ()
| Fix warnings from flake8-comprehensions. | get_validated_ordering | de3fcba9e95818e9634ab7de6bfcb1f4221f2775 | wagtail | views.py | 16 | 20 | https://github.com/wagtail/wagtail.git | 11 | 122 | 0 | 58 | 205 | Python | {
"docstring": " Return a dict of field names with ordering labels if ordering is valid ",
"language": "en",
"n_whitespaces": 14,
"n_words": 13,
"vocab_size": 12
} | def get_validated_ordering(self):
orderable_fields = self.orderable_fields or ()
ordering = {}
if self.is_export:
# Revert to CSV order_by submit_time ascending for backwards compatibility
default_ordering = self.ordering_csv or ()
else:
default_ordering = self.ordering or ()
if isinstance(default_ordering, str):
default_ordering = (default_ordering,)
ordering_strs = self.request.GET.getlist('order_by') or list(default_ordering)
for order in ordering_strs:
try:
_, prefix, field_name = order.rpartition('-')
if field_name in orderable_fields:
ordering[field_name] = (
prefix, 'descending' if prefix == '-' else 'ascending'
)
except (IndexError, ValueError):
continue # invalid ordering specified, skip it
return ordering
|
|
@pytest.mark.parametrize("p", (3, 5, 7, 11, 13)) | 42,333 | 177,309 | 83 | networkx/generators/tests/test_expanders.py | 48 | 9 | def test_chordal_cycle_graph(p):
G = nx.chordal_cycle_graph(p)
assert len(G) == p
# TODO The second largest eigenvalue should be smaller than a constant,
# independent of the number of nodes in the graph:
#
# eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray()))
# assert_less( | Minor updates to expanders generator tests (#6027)
* Split MGG test into two based on dependencies.
* Parametrize tests on prime numbers.
* Use fns from nx namespace, rm explicit imports.
* Parametrize exception test and check message. | test_chordal_cycle_graph | 06dc63c62822a56d3a8ed36c65630298d8954cff | networkx | test_expanders.py | 8 | 3 | https://github.com/networkx/networkx.git | 1 | 21 | 1 | 39 | 74 | Python | {
"docstring": "Test for the :func:`networkx.chordal_cycle_graph` function.",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | def test_chordal_cycle_graph(p):
G = nx.chordal_cycle_graph(p)
assert len(G) == p
# TODO The second largest eigenvalue should be smaller than a constant,
# independent of the number of nodes in the graph:
#
# eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray()))
# assert_less(eigs[-2], ...)
#
@pytest.mark.parametrize("p", (3, 5, 7, 11, 13)) # Primes |
79,938 | 269,171 | 674 | keras/utils/dataset_utils.py | 278 | 12 | def convert_dataset_split_sizes(left_size,right_size,total_size):
left_size_type = type(left_size)
right_size_type = type(right_size)
if left_size is not None and left_size_type not in [int,float]:
raise ValueError(f'Invalid `left_size` type Got {left_size_type}'
'It should be one of float,int or None')
if right_size is not None and right_size_type not in [int,float]:
raise ValueError(f'Invalid `right_size` type Got {right_size_type}'
'It should be one of float,int or None')
if (left_size_type == int
and (left_size <= 0 or left | fixes dataset slicing errors | convert_dataset_split_sizes | a127de7007fe49413bd9167e179f5df12b6c100e | keras | dataset_utils.py | 13 | 51 | https://github.com/keras-team/keras.git | 25 | 278 | 0 | 115 | 496 | Python | {
"docstring": "Helper function to convert left_size/right_size relative to dataset's size\n ",
"language": "en",
"n_whitespaces": 11,
"n_words": 9,
"vocab_size": 8
} | def convert_dataset_split_sizes(left_size,right_size,total_size):
left_size_type = type(left_size)
right_size_type = type(right_size)
if left_size is not None and left_size_type not in [int,float]:
raise ValueError(f'Invalid `left_size` type Got {left_size_type}'
'It should be one of float,int or None')
if right_size is not None and right_size_type not in [int,float]:
raise ValueError(f'Invalid `right_size` type Got {right_size_type}'
'It should be one of float,int or None')
if (left_size_type == int
and (left_size <= 0 or left_size>= total_size)
or left_size_type == float
and (left_size <= 0 or left_size>= 1) ):
raise ValueError('`left_size` should be either a positive integer'
f'and smaller than {total_size} or a float '
'within the range `[0, 1]`')
if (right_size_type == int
and (right_size <= 0 or right_size>= total_size)
or right_size_type == float
and (right_size <= 0 or right_size>= 1)):
raise ValueError('`right_size` should be either a positive integer '
f'and smaller than {total_size} or'
'a float within the range `[0, 1]`')
if right_size_type == left_size_type == float and right_size + left_size > 1:
raise ValueError('sum of `left_size` and `right_size`'
' should be within `[0,1]`'
f'Got {right_size + left_size} ,'
'reduce the `left_size` or `right_size`')
if left_size_type == float:
left_size = math.ceil(left_size*total_size)
else:
left_size = float(left_size)
if right_size_type == float:
right_size = math.ceil(right_size*total_size)
else:
right_size = float(right_size)
if left_size is None:
left_size = total_size - right_size
elif right_size is None:
right_size = total_size - left_size
if left_size + right_size > total_size:
raise ValueError('The sum of `left_size` and `right_size`'
f' should be smaller than the samples {total_size} '
' reduce `left_size` or `right_size` ' )
if left_size == 0:
raise ValueError(f'with dataset of length={total_size}'
'`left_size`={left_size} and `right_size`={right_size} '
'resulting left dataset split will be empty, '
'adjust any of the aforementioned parameters')
left_size,right_size = int(left_size) ,int(right_size)
return left_size,right_size
|
|
17,116 | 80,945 | 18 | awx/main/managers.py | 4 | 10 | def active_count(self):
return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count()
| Fixes case sensitive host count | active_count | f52ef6e9677b01c111b012a8725da43a2580d8f1 | awx | managers.py | 15 | 2 | https://github.com/ansible/awx.git | 1 | 37 | 0 | 4 | 68 | Python | {
"docstring": "Return count of active, unique hosts for licensing.\n Construction of query involves:\n - remove any ordering specified in model's Meta\n - Exclude hosts sourced from another Tower\n - Restrict the query to only return the name column\n - Only consider results that are unique\n - Return the count of this query\n ",
"language": "en",
"n_whitespaces": 105,
"n_words": 51,
"vocab_size": 37
} | def active_count(self):
return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count()
|
|
73,183 | 249,886 | 80 | tests/handlers/test_sso.py | 27 | 9 | async def test_set_avatar_incorrect_mime_type(self) -> None:
handler = self.hs.get_sso_handler()
# any random user works since image check is supposed to fail
us | Add support for handling avatar with SSO login (#13917)
This commit adds support for handling a provided avatar picture URL
when logging in via SSO.
Signed-off-by: Ashish Kumar <ashfame@users.noreply.github.com>
Fixes #9357. | test_set_avatar_incorrect_mime_type | 09de2aecb05cb46e0513396e2675b24c8beedb68 | synapse | test_sso.py | 12 | 7 | https://github.com/matrix-org/synapse.git | 1 | 38 | 0 | 26 | 70 | Python | {
"docstring": "Tests that saving an avatar fails when its mime type is not allowed",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | async def test_set_avatar_incorrect_mime_type(self) -> None:
handler = self.hs.get_sso_handler()
# any random user works since image check is supposed to fail
user_id = "@sso-user:test"
self.assertFalse(
self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
)
|
|
81,585 | 276,201 | 118 | keras/saving/saved_model/utils.py | 35 | 10 | def layer_uses_training_bool(layer):
if layer._expects_training_arg: # pylint: disable=protected-access
return True
visited = {layer}
to_visit = list_all_layers(la | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | layer_uses_training_bool | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | utils.py | 11 | 14 | https://github.com/keras-team/keras.git | 5 | 69 | 0 | 27 | 117 | Python | {
"docstring": "Returns whether this layer or any of its children uses the training arg.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def layer_uses_training_bool(layer):
if layer._expects_training_arg: # pylint: disable=protected-access
return True
visited = {layer}
to_visit = list_all_layers(layer)
while to_visit:
layer = to_visit.pop()
if layer in visited:
continue
if getattr(layer, "_expects_training_arg", True):
return True
visited.add(layer)
to_visit.extend(list_all_layers(layer))
return False
|
|
# This is basically test_edgeql_for_in_computable_01 but with
# a WITH binding in front of the whole shape
await self.assert_query_result(
r'''
WITH
U := (
SELECT User {
select_deck := (
FOR letter IN {'I', 'B'}
UNION (
SELECT User.deck {User | 41,674 | 176,084 | 54 | tests/test_edgeql_for.py | 23 | 8 | async def test_edgeql_for_in_computable_09(self):
# This | Add a `bag` type that tells assert_query_result to ignore order (#3314)
assert_query_result currently supports using sets to ignore order,
but that doesn't work for objects, which can't be hashed or sorted.
There is a system for specifying a sort key for internal data, but it
is way clunkier than just saying we don't care about the order.
I converted some places that were using sort= to use this. | test_edgeql_for_in_computable_09 | 26be7d28bdb4eb96c888e373e08f46e6b85711e3 | edgedb | test_edgeql_for.py | 6 | 30 | https://github.com/edgedb/edgedb.git | 1 | 48 | 2 | 22 | 34 | Python | {
"docstring": "\n WITH\n U := (\n SELECT User {\n select_deck := (\n FOR letter IN {'I', 'B'}\n UNION (\n SELECT User.deck {\n name,\n # just define an ad-hoc link prop",
"language": "en",
"n_whitespaces": 287,
"n_words": 28,
"vocab_size": 23
} | async def test_edgeql_for_in_computable_09(self):
# This is basically test_edgeql_for_in_computable_01 but with
# a WITH binding in front of the whole shape
await self.assert_query_result(
r |
13,815 | 65,173 | 33 | erpnext/accounts/report/budget_variance_report/budget_variance_report.py | 52 | 26 | def get_actual_details(name, filters):
budget_against = frappe.scrub(filters.get("budget_against"))
cond = ""
if filters.get("budget_against") == "Cost Center":
cc_lft, cc_rgt = frappe.db.get_value("Cost Center", name, ["lft", "rgt"])
cond = .format(
lft=cc_lft, rgt=cc_rgt
)
ac_details = frappe.db.sql(
.format(
tab=filters.budget_against, budget_against=budget_against, cond=cond
),
(filters.from_fiscal_year, filters.to_fiscal_year, name),
as_dict=1,
)
cc_actual_details = {}
for d in ac_details:
cc_actual_details.setdefault(d.account, []).append(d)
return cc_ac | style: format code with black | get_actual_details | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | budget_variance_report.py | 12 | 53 | https://github.com/frappe/erpnext.git | 3 | 138 | 0 | 43 | 223 | Python | {
"docstring": "\n\t\t\t\tand lft >= \"{lft}\"\n\t\t\t\tand rgt <= \"{rgt}\"\n\t\t\t\n\t\t\tselect\n\t\t\t\tgl.account,\n\t\t\t\tgl.debit,\n\t\t\t\tgl.credit,\n\t\t\t\tgl.fiscal_year,\n\t\t\t\tMONTHNAME(gl.posting_date) as month_name,\n\t\t\t\tb.{budget_against} as budget_against\n\t\t\tfrom\n\t\t\t\t`tabGL Entry` gl,\n\t\t\t\t`tabBudget Account` ba,\n\t\t\t\t`tabBudget` b\n\t\t\twhere\n\t\t\t\tb.name = ba.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand ba.account=gl.account\n\t\t\t\tand b.{budget_against} = gl.{budget_against}\n\t\t\t\tand gl.fiscal_year between %s and %s\n\t\t\t\tand b.{budget_against} = %s\n\t\t\t\tand exists(\n\t\t\t\t\tselect\n\t\t\t\t\t\tname\n\t\t\t\t\tfrom\n\t\t\t\t\t\t`tab{tab}`\n\t\t\t\t\twhere\n\t\t\t\t\t\tname = gl.{budget_against}\n\t\t\t\t\t\t{cond}\n\t\t\t\t)\n\t\t\t\tgroup by\n\t\t\t\t\tgl.name\n\t\t\t\torder by gl.fiscal_year\n\t\t",
"language": "en",
"n_whitespaces": 38,
"n_words": 70,
"vocab_size": 46
} | def get_actual_details(name, filters):
budget_against = frappe.scrub(filters.get("budget_against"))
cond = ""
if filters.get("budget_against") == "Cost Center":
cc_lft, cc_rgt = frappe.db.get_value("Cost Center", name, ["lft", "rgt"])
cond = .format(
lft=cc_lft, rgt=cc_rgt
)
ac_details = frappe.db.sql(
.format(
tab=filters.budget_against, budget_against=budget_against, cond=cond
),
(filters.from_fiscal_year, filters.to_fiscal_year, name),
as_dict=1,
)
cc_actual_details = {}
for d in ac_details:
cc_actual_details.setdefault(d.account, []).append(d)
return cc_actual_details
|
|
4,477 | 22,864 | 68 | VoiceAssistant/Project_Basic_struct/textRead.py | 28 | 8 | def print_index(toc):
dash = "-"*(100 - 7)
spa | VoiceAssistant
This is Voice Assistant coded using Python which can do the following: -
1. Speak Text entered by User.
2. Search anything on Google.
3. Search anything on Wikipedia.
4. Read an MS Word(docx) document.
5. Read a book(PDF).
6. Can be used as a Dictator. | print_index | 39c49e07066b2a53e176d555af6a7bf8aabb8a9c | Python | textRead.py | 14 | 8 | https://github.com/geekcomputers/Python.git | 2 | 55 | 0 | 24 | 131 | Python | {
"docstring": "Prints out the index in proper format with title name and page number\r\n\r\n Args:\r\n toc (nested list): toc[1] - Topic name\r\n toc[2] - Page number\r\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 25,
"vocab_size": 22
} | def print_index(toc):
dash = "-"*(100 - 7)
space = " "*47
print(f"{space}INDEX")
print(f"\n\nName : {dash} PageNo.\n\n\n")
for topic in toc:
eq_dash = "-"*(100 - len(topic[1]))
print(f"{topic[1]} {eq_dash} {topic[2]}")
|
|
22,463 | 106,836 | 304 | py/visdom/__init__.py | 63 | 18 | def pie(self, X, win=None, env=None, opts=None):
X = np.squeeze(X)
assert X.ndim == 1, "X should be one-dimensional"
assert np.all(np.greater_equal(X, 0)), "X cannot contain negative values"
opts = {} if opts is None else opts
_title2str(opts)
_assert_opts(opts)
data = [
| apply black py to all python files | pie | 5b8b7f267cfaf76a2a39a727ef31a62b3909a093 | visdom | __init__.py | 12 | 23 | https://github.com/fossasia/visdom.git | 2 | 128 | 0 | 55 | 213 | Python | {
"docstring": "\n This function draws a pie chart based on the `N` tensor `X`.\n\n The following `opts` are supported:\n\n - `opts.legend`: `list` containing legend names\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 23,
"vocab_size": 23
} | def pie(self, X, win=None, env=None, opts=None):
X = np.squeeze(X)
assert X.ndim == 1, "X should be one-dimensional"
assert np.all(np.greater_equal(X, 0)), "X cannot contain negative values"
opts = {} if opts is None else opts
_title2str(opts)
_assert_opts(opts)
data = [
{
"values": X.tolist(),
"labels": opts.get("legend"),
"type": "pie",
}
]
return self._send(
{
"data": data,
"win": win,
"eid": env,
"layout": _opts2layout(opts),
"opts": opts,
}
)
|
|
9,046 | 46,963 | 30 | airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py | 9 | 10 | def dry_run(self) -> None:
pod = self.build_pod_request_obj() | Cleanup dup code now that k8s provider requires 2.3.0+ (#22845) | dry_run | 04082ac091e92587b22c8323170ebe38bc68a19a | airflow | kubernetes_pod.py | 13 | 8 | https://github.com/apache/airflow.git | 1 | 35 | 0 | 9 | 62 | Python | {
"docstring": "\n Prints out the pod definition that would be created by this operator.\n Does not include labels specific to the task instance (since there isn't\n one in a dry_run) and excludes all empty elements.\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 33,
"vocab_size": 32
} | def dry_run(self) -> None:
pod = self.build_pod_request_obj()
print(yaml.dump(prune_dict(pod.to_dict(), mode='strict')))
|
|
48,111 | 196,693 | 20 | sympy/stats/crv_types.py | 17 | 7 | def ExponentialPower(name, mu, alpha, beta):
r
return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))
#-------------------------------------------------------------------------------
# F distribution ---------------------------- | Documentation cleanup 5 | ExponentialPower | 9ad8ab9fe58051cf11626ba6654852fcfec60147 | sympy | crv_types.py | 8 | 63 | https://github.com/sympy/sympy.git | 1 | 28 | 0 | 16 | 40 | Python | {
"docstring": "\n Create a Continuous Random Variable with Exponential Power distribution.\n This distribution is known also as Generalized Normal\n distribution version 1.\n\n Explanation\n ===========\n\n The density of the Exponential Power distribution is given by\n\n .. math::\n f(x) := \\frac{\\beta}{2\\alpha\\Gamma(\\frac{1}{\\beta})}\n e^{{-(\\frac{|x - \\mu|}{\\alpha})^{\\beta}}}\n\n with :math:`x \\in [ - \\infty, \\infty ]`.\n\n Parameters\n ==========\n\n mu : Real number\n A location.\n alpha : Real number,`\\alpha > 0`\n A scale.\n beta : Real number, `\\beta > 0`\n A shape.\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import ExponentialPower, density, cdf\n >>> from sympy import Symbol, pprint\n >>> z = Symbol(\"z\")\n >>> mu = Symbol(\"mu\")\n >>> alpha = Symbol(\"alpha\", positive=True)\n >>> beta = Symbol(\"beta\", positive=True)\n >>> X = ExponentialPower(\"x\", mu, alpha, beta)\n >>> pprint(density(X)(z), use_unicode=False)\n beta\n /|mu - z|\\\n -|--------|\n \\ alpha /\n beta*e\n ---------------------\n / 1 \\\n 2*alpha*Gamma|----|\n \\beta/\n >>> cdf(X)(z)\n 1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta))\n\n References\n ==========\n\n .. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html\n .. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1\n\n ",
"language": "en",
"n_whitespaces": 387,
"n_words": 152,
"vocab_size": 109
} | def ExponentialPower(name, mu, alpha, beta):
r
return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))
#-------------------------------------------------------------------------------
# F distribution ---------------------------------------------------------------
|
|
1,559 | 9,135 | 211 | parsing/dml_csr/utils/miou.py | 99 | 8 | def get_palette(num_cls):
n = num_cls
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
| Create miou.py | get_palette | 995b44897fe6158bb70ad03a3e79f517f65f9034 | insightface | miou.py | 16 | 16 | https://github.com/deepinsight/insightface.git | 3 | 161 | 0 | 41 | 239 | Python | {
"docstring": " Returns the color map for visualizing the segmentation mask.\n Args:\n num_cls: Number of classes\n Returns:\n The color map\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 18,
"vocab_size": 15
} | def get_palette(num_cls):
n = num_cls
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
|
|
12,067 | 60,287 | 348 | code/deep/BJMMD/caffe/python/caffe/pycaffe.py | 144 | 37 | def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: | Balanced joint maximum mean discrepancy for deep transfer learning | _Net_forward_backward_all | cc4d0564756ca067516f71718a3d135996525909 | transferlearning | pycaffe.py | 14 | 23 | https://github.com/jindongwang/transferlearning.git | 15 | 326 | 0 | 90 | 500 | Python | {
"docstring": "\n Run net forward + backward in batches.\n\n Parameters\n ----------\n blobs: list of blobs to extract as in forward()\n diffs: list of diffs to extract as in backward()\n kwargs: Keys are input (for forward) and output (for backward) blob names\n and values are ndarrays. Refer to forward() and backward().\n Prefilled variants are called for lack of input or output blobs.\n\n Returns\n -------\n all_blobs: {blob name: blob ndarray} dict.\n all_diffs: {blob name: diff ndarray} dict.\n ",
"language": "en",
"n_whitespaces": 129,
"n_words": 73,
"vocab_size": 51
} | def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in six.iteritems(batch_blobs):
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in six.iteritems(batch_diffs):
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
|
|
72,737 | 249,233 | 94 | tests/rest/admin/test_device.py | 19 | 13 | def test_user_does_not_exist(self) -> None:
url = "/_synapse/admin/v2/users/@unknown_person:test/devices"
channe | Use literals in place of `HTTPStatus` constants in tests (#13479)
Replace
- `HTTPStatus.NOT_FOUND`
- `HTTPStatus.FORBIDDEN`
- `HTTPStatus.UNAUTHORIZED`
- `HTTPStatus.CONFLICT`
- `HTTPStatus.CREATED`
Signed-off-by: Dirk Klimpel <dirk@klimpel.org> | test_user_does_not_exist | 1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b | synapse | test_device.py | 10 | 12 | https://github.com/matrix-org/synapse.git | 1 | 59 | 0 | 18 | 96 | Python | {
"docstring": "\n Tests that a lookup for a user that does not exist returns a 404\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 11
} | def test_user_does_not_exist(self) -> None:
url = "/_synapse/admin/v2/users/@unknown_person:test/devices"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
|
|
71,133 | 246,289 | 364 | synapse/replication/tcp/protocol.py | 66 | 20 | def send_ping(self) -> None:
now = self.clock.time_msec()
if self.time_we_closed:
if now - self.time_we_closed > PING_TIMEOUT_MS:
logger.info(
"[%s] Failed to close connection gracefully, aborting", self.id()
)
assert self.transport is not None
self | Add missing type hints to synapse.replication. (#11938) | send_ping | d0e78af35e519ff76bd23e786007f3e7130d90f7 | synapse | protocol.py | 16 | 25 | https://github.com/matrix-org/synapse.git | 6 | 120 | 0 | 52 | 199 | Python | {
"docstring": "Periodically sends a ping and checks if we should close the connection\n due to the other side timing out.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 19,
"vocab_size": 18
} | def send_ping(self) -> None:
now = self.clock.time_msec()
if self.time_we_closed:
if now - self.time_we_closed > PING_TIMEOUT_MS:
logger.info(
"[%s] Failed to close connection gracefully, aborting", self.id()
)
assert self.transport is not None
self.transport.abortConnection()
else:
if now - self.last_sent_command >= PING_TIME:
self.send_command(PingCommand(str(now)))
if (
self.received_ping
and now - self.last_received_command > PING_TIMEOUT_MS
):
logger.info(
"[%s] Connection hasn't received command in %r ms. Closing.",
self.id(),
now - self.last_received_command,
)
self.send_error("ping timeout")
|
|
31,619 | 139,165 | 86 | python/ray/workflow/workflow_context.py | 27 | 13 | def workflow_logging_context(job_id) -> None:
node = ray.worker._global_node
original_out_file, original_err_file = node.get_log_file_handles(
get_worker_log_file_name("WORKER")
)
out_file, err_file = node.get_log_file_handles(
get_worker_log_file_name("WORKER", job_id)
)
try:
| [Workflow]Make workflow logs publish to the correct driver. (#24089)
All workflow tasks are executed as remote functions that submitted from WorkflowManagmentActor. WorkflowManagmentActor is a detached long-running actor whose owner is the first driver in the cluster that runs the very first workflow execution. Therefore, for new drivers that run workflows, the loggings won't be properly published back to the driver because loggings are saved and published based on job_id and the job_id is always the first driver's job_id as the ownership goes like: first_driver -> WorkflowManagmentActor -> workflow executions using remote functions.
To solve this, during workflow execution, we pass the actual driver's job_id along with execution, and re-configure the logging files on each worker that runs the remote functions. Notice that we need to do this in multiple places as a workflow task is executed with more than one remote functions that are running in different workers. | workflow_logging_context | e8fc66af348f2afd2b578fe1c6776cc88ea82499 | ray | workflow_context.py | 11 | 27 | https://github.com/ray-project/ray.git | 2 | 60 | 0 | 23 | 104 | Python | {
"docstring": "Initialize the workflow logging context.\n\n Workflow executions are running as remote functions from\n WorkflowManagementActor. Without logging redirection, workflow\n inner execution logs will be pushed to the driver that initially\n created WorkflowManagementActor rather than the driver that\n actually submits the current workflow execution.\n We use this conext manager to re-configure the log files to send\n the logs to the correct driver, and to restore the log files once\n the execution is done.\n\n Args:\n job_id: The ID of the job that submits the workflow execution.\n ",
"language": "en",
"n_whitespaces": 120,
"n_words": 83,
"vocab_size": 56
} | def workflow_logging_context(job_id) -> None:
node = ray.worker._global_node
original_out_file, original_err_file = node.get_log_file_handles(
get_worker_log_file_name("WORKER")
)
out_file, err_file = node.get_log_file_handles(
get_worker_log_file_name("WORKER", job_id)
)
try:
configure_log_file(out_file, err_file)
yield
finally:
configure_log_file(original_out_file, original_err_file)
|
|
5,455 | 30,270 | 87 | spotdl/console/entry_point.py | 25 | 14 | def console_entry_point():
if "--profile" in sys.argv:
with cProfile.Profile() as profile:
entry_point()
stats = pstats.Stats(profile)
stats.sort_stats(pstats.SortKey.TIME)
| added option to profile code
fized pylint warnings | console_entry_point | cf9030f843079d3f69cd1414050f8b594c84cee1 | spotify-downloader | entry_point.py | 12 | 9 | https://github.com/spotDL/spotify-downloader.git | 2 | 53 | 0 | 24 | 101 | Python | {
"docstring": "\n Wrapper around `entry_point` so we can profile the code\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def console_entry_point():
if "--profile" in sys.argv:
with cProfile.Profile() as profile:
entry_point()
stats = pstats.Stats(profile)
stats.sort_stats(pstats.SortKey.TIME)
# Use snakeviz to visualize the profile
stats.dump_stats("spotdl.profile")
else:
entry_point()
|
|
34,012 | 147,576 | 31 | rllib/agents/trainer_config.py | 10 | 3 | def callbacks(self, callbacks_class) -> "TrainerConfig":
self.callbacks_class = callbacks_c | [RLlib] POC: Config objects instead of dicts (PPO only). (#23491) | callbacks | 2eaa54bd763ae0e63158ae0d939633c804394b78 | ray | trainer_config.py | 7 | 14 | https://github.com/ray-project/ray.git | 1 | 17 | 0 | 10 | 31 | Python | {
"docstring": "Sets the callbacks configuration.\n\n Args:\n callbacks_class: Callbacks class, whose methods will be run during\n various phases of training and environment sample collection.\n See the `DefaultCallbacks` class and\n `examples/custom_metrics_and_callbacks.py` for more usage information.\n\n Returns:\n This updated TrainerConfig object.\n ",
"language": "en",
"n_whitespaces": 125,
"n_words": 37,
"vocab_size": 35
} | def callbacks(self, callbacks_class) -> "TrainerConfig":
self.callbacks_class = callbacks_class
return self
|
|
18,249 | 87,194 | 184 | tests/sentry/api/endpoints/test_project_details.py | 30 | 16 | def test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases(self):
self.project.update_option("sentry:dynamic_sampling", self.dynamic_sampling_data)
with Feature(
{
self.universal_ds_flag: True,
self.old_ds_flag: True,
self.new_ds_flag: True,
}
):
response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
| feat(ds): Support new DS behaviour in project_details endpoint (#40387)
Supports new adaptive dynamic sampling behaviour alongside
the deprecated dynamic sampling behaviour and achieves that
through feature flag differentiation
This PR achieves that through the following:
- Introducing a new `DynamicSamplingBiasSerializer` which is composed of
id representing the bias name and a boolean flag indicating whether that
particular flag is active or not
- Modifies current existing behavior for both old sampling flag and new
sampling flag. Essentially the new setup entails that to be on the old
dynamic sampling, the following flags need to be enabled
"organizations:server-side-sampling" and
"organizations:server-side-sampling-ui", and to be on the new dynamic
sampling configurations, you need the following flags to be enabled
"organizations:dynamic-sampling-basic" and
"organizations:server-side-sampling"
P.S. 1: These flags will be replaced
"organizations:server-side-sampling-ui" ->
"organizations:dynamic-sampling-deprecated"
"organizations:server-side-sampling-basic" ->
"organizations:dynamic-sampling"
Hence, these feature flags need to be updated once this PR lands
https://github.com/getsentry/sentry/pull/40388
P.S. 2: If a project is on the new plan and the old plan, the new plan
takes precedence
- Introduces default biases that are enabled by default and can be
overwritten. The motivation to do this is to be able to add new biases
that are enabled by default, and both the GET and PUT request honor this
list
- `GET` and `POST` endpoint does a dictionary update of user's stored
biases on the default biases that are hardcoded, and returns them to the
UI/ relay. This means that the introduced project option
"sentry:dynamic_sampling_biases" might not have all the toggles
enabled/disabled through the UI but only the ones that a customer chose
to modify
Followup:
- This new feature flag behaviour needs to be reflected in ProjectConfig
computations | test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases | 5462ee11ad11ebb9a50323befcd286816d7898c8 | sentry | test_project_details.py | 12 | 14 | https://github.com/getsentry/sentry.git | 1 | 83 | 0 | 27 | 135 | Python | {
"docstring": "\n Tests the case when an organization was in EA/LA and has setup previously Dynamic Sampling rules,\n and now they have migrated to an AM2 plan, but haven't manipulated the bias toggles yet so they get the\n default biases. This also ensures that they no longer receive the deprecated dynamic sampling rules.\n ",
"language": "en",
"n_whitespaces": 80,
"n_words": 51,
"vocab_size": 44
} | def test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases(self):
self.project.update_option("sentry:dynamic_sampling", self.dynamic_sampling_data)
with Feature(
{
self.universal_ds_flag: True,
self.old_ds_flag: True,
self.new_ds_flag: True,
}
):
response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
)
assert response.data["dynamicSampling"] is None
assert response.data["dynamicSamplingBiases"] == DEFAULT_BIASES
|
|
12,450 | 61,225 | 77 | .venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py | 38 | 4 | def strtobool(val):
# type: (str) -> int
| upd; format | strtobool | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | misc.py | 12 | 8 | https://github.com/jindongwang/transferlearning.git | 3 | 59 | 0 | 34 | 117 | Python | {
"docstring": "Convert a string representation of truth to true (1) or false (0).\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 39,
"vocab_size": 35
} | def strtobool(val):
# type: (str) -> int
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {val!r}")
|
|
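A short usage sketch for the helper above; the input strings are illustrative.

assert strtobool("Yes") == 1    # truthy strings, case-insensitive
assert strtobool("off") == 0    # falsy strings
try:
    strtobool("maybe")          # anything else raises
except ValueError as exc:
    print(exc)                  # invalid truth value 'maybe'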
23,210 | 108,482 | 59 | lib/matplotlib/artist.py | 20 | 7 | def convert_xunits(self, x):
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
| Update artist.py (#23150) | convert_xunits | 3df958c760dbde3a6c576fefa7827a136385b5c3 | matplotlib | artist.py | 9 | 5 | https://github.com/matplotlib/matplotlib.git | 3 | 40 | 0 | 17 | 65 | Python | {
"docstring": "\n Convert *x* using the unit type of the xaxis.\n\n If the artist is not contained in an Axes or if the xaxis does not\n have units, *x* itself is returned.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 30,
"vocab_size": 24
} | def convert_xunits(self, x):
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
return x
return ax.xaxis.convert_units(x)
|
|
53,206 | 212,222 | 204 | bokeh/models/widgets/sliders.py | 81 | 26 | def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None:
| Add DatetimeRangeSlider (#12034)
* Add DatetimeRangeSlider
* Add tests
* Add docs | value_as_datetime | c9751009161f092b2e403d8cccccf5252c0dce1a | bokeh | sliders.py | 11 | 16 | https://github.com/bokeh/bokeh.git | 4 | 87 | 0 | 49 | 267 | Python | {
"docstring": " Convenience property to retrieve the value tuple as a tuple of\n datetime objects.\n \n Initial or selected range.\n \n Initial or selected value, throttled to report only on mouseup.\n \n The minimum allowable value.\n \n The maximum allowable value.\n \n The step between consecutive values, in units of milliseconds.\n Default is one hour.\n ",
"language": "en",
"n_whitespaces": 101,
"n_words": 48,
"vocab_size": 38
} | def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None:
if self.value is None:
return None
v1, v2 = self.value
if isinstance(v1, numbers.Number):
d1 = datetime.utcfromtimestamp(v1 / 1000)
else:
d1 = v1
if isinstance(v2, numbers.Number):
d2 = datetime.utcfromtimestamp(v2 / 1000)
else:
d2 = v2
return d1, d2
value = NonNullable(Tuple(Datetime, Datetime), help=)
value_throttled = Readonly(NonNullable(Tuple(Datetime, Datetime)), help=)
start = NonNullable(Datetime, help=)
end = NonNullable(Datetime, help=)
step = Int(default=3_600_000, help=)
format = Override(default="%d %b %Y %H:%M:%S")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
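The property above treats numeric slider values as millisecond Unix timestamps; the same conversion can be reproduced on its own (the timestamp below is an arbitrary example):

from datetime import datetime

ms_value = 1_577_836_800_000                         # 2020-01-01 00:00:00 UTC, in ms
print(datetime.utcfromtimestamp(ms_value / 1000))    # 2020-01-01 00:00:00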
18,579 | 89,862 | 887 | tests/sentry/receivers/test_onboarding.py | 88 | 23 | def test_first_event_with_minified_stack_trace_received(self, record_analytics):
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=type(project))
url = "http://localhost:3000"
data = load_data("javascript")
data["tags"] = [("url", url)]
data["exception"] = {
"values": [
{
**data["exception"]["values"][0],
"raw_stacktrace": {
"frames": [
{
"function": "o",
"filename": "/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"abs_path": "https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"lineno": 2,
"colno": 37098,
| ref(onboarding): Add function to record first event per project with min stack trace -(#42208) | test_first_event_with_minified_stack_trace_received | ce841204ef3b20d0f6ac812ebb06aebbc63547ac | sentry | test_onboarding.py | 18 | 45 | https://github.com/getsentry/sentry.git | 1 | 198 | 0 | 70 | 339 | Python | {
"docstring": "\n Test that an analytics event is recorded when\n a first event with minified stack trace is received\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 15
} | def test_first_event_with_minified_stack_trace_received(self, record_analytics):
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=type(project))
url = "http://localhost:3000"
data = load_data("javascript")
data["tags"] = [("url", url)]
data["exception"] = {
"values": [
{
**data["exception"]["values"][0],
"raw_stacktrace": {
"frames": [
{
"function": "o",
"filename": "/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"abs_path": "https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"lineno": 2,
"colno": 37098,
"pre_context": [
"/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}"
],
"context_line": "{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}",
"post_context": [
"//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"
],
"in_app": False,
},
],
},
}
]
}
self.store_event(
project_id=project.id,
data=data,
)
record_analytics.assert_called_with(
"first_event_with_minified_stack_trace_for_project.sent",
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=data["platform"],
url=url,
)
|
|
36,854 | 157,103 | 25 | dask/array/backends.py | 11 | 8 | def arange(start, /, stop=None, step=1, *, dtype=None, meta=None, **kwargs):
raise NotImplementedError
| Backend library dispatching for IO in Dask-Array and Dask-DataFrame (#9475) | arange | c4d35f5515191409913827fd4faa3b69a3d7399a | dask | backends.py | 6 | 2 | https://github.com/dask/dask.git | 1 | 31 | 0 | 11 | 46 | Python | {
"docstring": "Create an ascending or descending array\n\n Returns evenly spaced values within the half-open interval\n ``[start, stop)`` as a one-dimensional array.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 20,
"vocab_size": 20
} | def arange(start, /, stop=None, step=1, *, dtype=None, meta=None, **kwargs):
raise NotImplementedError
|
|
32,341 | 141,365 | 35 | python/ray/tune/checkpoint_manager.py | 14 | 10 | def best_checkpoints(self):
checkpoints = sorted(self._top_persisted_checkpoints, key=lambda c: c.priority)
return [wrappe | [tune/train] Consolidate checkpoint manager 3: Ray Tune (#24430)
**Update**: This PR is now part 3 of a three PR group to consolidate the checkpoints.
1. Part 1 adds the common checkpoint management class #24771
2. Part 2 adds the integration for Ray Train #24772
3. This PR builds on #24772 and includes all changes. It moves the Ray Tune integration to use the new common checkpoint manager class.
Old PR description:
This PR consolidates the Ray Train and Tune checkpoint managers. These concepts previously did something very similar but in different modules. To simplify maintenance in the future, we've consolidated the common core.
- This PR keeps full compatibility with the previous interfaces and implementations. This means that for now, Train and Tune will have separate CheckpointManagers that both extend the common core
- This PR prepares Tune to move to a CheckpointStrategy object
- In follow-up PRs, we can further unify interfacing with the common core, possibly removing any train- or tune-specific adjustments (e.g. moving to setup on init rather on runtime for Ray Train)
Co-authored-by: Antoni Baum <antoni.baum@protonmail.com> | best_checkpoints | 8affbc7be6fdce169264b8db5b0276dbcc719f6d | ray | checkpoint_manager.py | 11 | 3 | https://github.com/ray-project/ray.git | 2 | 33 | 0 | 14 | 53 | Python | {
"docstring": "Returns best PERSISTENT checkpoints, sorted by score.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def best_checkpoints(self):
checkpoints = sorted(self._top_persisted_checkpoints, key=lambda c: c.priority)
return [wrapped.tracked_checkpoint for wrapped in checkpoints]
|
|
24,568 | 112,077 | 94 | nni/runtime/config.py | 44 | 15 | def get_config_directory() -> Path:
if os.getenv('NNI_CONFIG_DIR') is not None:
config_dir = Path(os.getenv('NNI_CONFIG_DIR')) # type: ignore
elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir():
config_dir = Path(sys.prefix, 'nni')
elif sys.platform == 'win32':
config_dir = Path(os.environ['APPDATA'], 'nni')
else:
config_dir = Path.home() / '.config/nni'
c | Typehint and copyright header (#4669) | get_config_directory | 5136a86d11a3602b283bad15098335fc6f005ae0 | nni | config.py | 13 | 15 | https://github.com/microsoft/nni.git | 5 | 106 | 0 | 34 | 186 | Python | {
"docstring": "\n Get NNI config directory.\n Create it if not exist.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 9,
"vocab_size": 9
} | def get_config_directory() -> Path:
if os.getenv('NNI_CONFIG_DIR') is not None:
config_dir = Path(os.getenv('NNI_CONFIG_DIR')) # type: ignore
elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir():
config_dir = Path(sys.prefix, 'nni')
elif sys.platform == 'win32':
config_dir = Path(os.environ['APPDATA'], 'nni')
else:
config_dir = Path.home() / '.config/nni'
config_dir.mkdir(parents=True, exist_ok=True)
return config_dir
|
|
71,748 | 247,570 | 124 | tests/storage/test_background_update.py | 30 | 8 | def test_background_update_min_batch_set_in_config(self):
# a very long-running individual update
duration_ms = 50
self.get_success(
self.store.db_pool. | Add config settings for background update parameters (#11980) | test_background_update_min_batch_set_in_config | ef3619e61d84493d98470eb2a69131d15eb1166b | synapse | test_background_update.py | 13 | 19 | https://github.com/matrix-org/synapse.git | 1 | 103 | 0 | 24 | 71 | Python | {
"docstring": "\n Test that the minimum batch size set in the config is used\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def test_background_update_min_batch_set_in_config(self):
# a very long-running individual update
duration_ms = 50
self.get_success(
self.store.db_pool.simple_insert(
"background_updates",
values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
)
)
# Run the update with the long-running update item |
|
45,962 | 188,999 | 114 | psutil/_pswindows.py | 79 | 18 | def swap_memory():
mem = cext.virtual_mem()
total_phys = mem[0]
free_phys = mem[1]
total_system = mem[2]
free_system = mem[3]
# Despite the name PageFile refers to total system | Fix typos | swap_memory | 471b19d2aa799cd73bded23379e864dd35bec2b6 | psutil | _pswindows.py | 9 | 11 | https://github.com/giampaolo/psutil.git | 1 | 85 | 0 | 53 | 142 | Python | {
"docstring": "Swap system memory as a (total, used, free, sin, sout) tuple.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def swap_memory():
mem = cext.virtual_mem()
total_phys = mem[0]
free_phys = mem[1]
total_system = mem[2]
free_system = mem[3]
# Despite the name PageFile refers to total system memory here
# thus physical memory values need to be subtracted to get swap values
total = total_system - total_phys
free = min(total, free_system - free_phys)
used = total - free
percent = usage_percent(used, total, round_=1)
return _common.sswap(total, used, free, percent, 0, 0)
# =====================================================================
# --- disk
# =====================================================================
disk_io_counters = cext.disk_io_counters
|
|
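Since the function above backs the public psutil API on Windows, it is normally exercised through psutil.swap_memory(); a minimal sketch:

import psutil

swap = psutil.swap_memory()
# total/used/free are in bytes; percent is used/total rounded to one decimal.
print(swap.total, swap.used, swap.free, swap.percent)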
13,832 | 65,243 | 9 | erpnext/accounts/report/general_ledger/general_ledger.py | 17 | 9 | def get_supplier_invoice_details():
inv_details = {}
for d in frappe.db.sql(
,
as_dict=1,
):
inv_details[d.name] = d.bill_no
return inv_details
| style: format code with black | get_supplier_invoice_details | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | general_ledger.py | 10 | 9 | https://github.com/frappe/erpnext.git | 2 | 37 | 0 | 15 | 59 | Python | {
"docstring": " select name, bill_no from `tabPurchase Invoice`\n\t\twhere docstatus = 1 and bill_no is not null and bill_no != '' ",
"language": "en",
"n_whitespaces": 19,
"n_words": 19,
"vocab_size": 16
} | def get_supplier_invoice_details():
inv_details = {}
for d in frappe.db.sql(
,
as_dict=1,
):
inv_details[d.name] = d.bill_no
return inv_details
|
|
52,665 | 209,387 | 58 | scapy/contrib/dce_rpc.py | 23 | 3 | def dce_rpc_endianess(pkt):
if pkt.endianness == 0: # big endian
return ">"
elif pkt.endianness == 1: # little endian
return "<"
| Add SPDX License identifiers (#3655)
* Add SPDX License identifiers
* Relicense `ldp.py` with author consent
See https://github.com/secdev/scapy/issues/3478
* Apply guedou suggestions
* Relicense someim under GPL2
* DCE/RPC licensing | dce_rpc_endianess | 9420c2229bf5330c2cc580f114f63f920a68db10 | scapy | dce_rpc.py | 9 | 7 | https://github.com/secdev/scapy.git | 3 | 28 | 0 | 17 | 56 | Python | {
"docstring": "Determine the right endianness sign for a given DCE/RPC packet",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def dce_rpc_endianess(pkt):
if pkt.endianness == 0: # big endian
return ">"
elif pkt.endianness == 1: # little endian
return "<"
else:
return "!"
|
|
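The returned character is a struct-style byte-order prefix; the snippet below shows the effect of each prefix with the standard-library struct module (the packed value is arbitrary):

import struct

value = 0x0102
print(struct.pack(">H", value))   # b'\x01\x02'  big endian
print(struct.pack("<H", value))   # b'\x02\x01'  little endian
print(struct.pack("!H", value))   # b'\x01\x02'  network order, same as '>'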
78,298 | 266,117 | 79 | netbox/utilities/utils.py | 30 | 16 | def deserialize_object(model, fields, pk=None):
content_type = ContentType.objects.get_fo | Closes #10851: New staging mechanism (#10890)
* WIP
* Convert checkout() context manager to a class
* Misc cleanup
* Drop unique constraint from Change model
* Extend staging tests
* Misc cleanup
* Incorporate M2M changes
* Don't cancel wipe out creation records when an object is deleted
* Rename Change to StagedChange
* Add documentation for change staging | deserialize_object | a5308ea28e851a4ddb65a4e7ca2297b641e5891f | netbox | utils.py | 12 | 11 | https://github.com/netbox-community/netbox.git | 2 | 83 | 0 | 25 | 144 | Python | {
"docstring": "\n Instantiate an object from the given model and field data. Functions as\n the complement to serialize_object().\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 15
} | def deserialize_object(model, fields, pk=None):
content_type = ContentType.objects.get_for_model(model)
if 'custom_fields' in fields:
fields['custom_field_data'] = fields.pop('custom_fields')
data = {
'model': '.'.join(content_type.natural_key()),
'pk': pk,
'fields': fields,
}
instance = list(serializers.deserialize('python', [data]))[0]
return instance
|
|
108,848 | 310,161 | 45 | tests/test_setup.py | 23 | 12 | async def test_component_not_installed_if_requirement_fails(hass):
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["package==0.0.1"]))
with patch("homeassistant.util.package.install_package", return_value=False):
assert not await setup.async_ | Make setup tests async (#64456)
Co-authored-by: Franck Nijhof <git@frenck.dev> | test_component_not_installed_if_requirement_fails | 7d85c00b91cd989dfead3246a65eb297d27e935b | core | test_setup.py | 12 | 6 | https://github.com/home-assistant/core.git | 1 | 61 | 0 | 21 | 108 | Python | {
"docstring": "Component setup should fail if requirement can't install.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def test_component_not_installed_if_requirement_fails(hass):
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["package==0.0.1"]))
with patch("homeassistant.util.package.install_package", return_value=False):
assert not await setup.async_setup_component(hass, "comp", {})
assert "comp" not in hass.config.components
|
|
40,078 | 167,694 | 174 | pandas/core/config_init.py | 105 | 52 | def use_numba_cb(key) -> None:
from pandas.core.util import numba_
numba_.set_use_numba(cf.get_option(key))
with cf.config_prefix("compute"):
cf.register_option(
"use_bottleneck",
True,
use_bottleneck_doc,
validator=is_bool,
cb=use_bottleneck_cb,
)
cf.register_option(
"use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
)
cf.register_option(
"use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
)
#
# options from the "display" namespace
pc_precision_doc =
pc_colspace_doc =
pc_max_rows_doc =
pc_min_rows_doc =
pc_max_cols_doc =
pc_max_categories_doc =
pc_max_info_cols_doc =
pc_nb_repr_h_doc =
pc_pprint_nest_depth =
pc_multi_sparse_doc =
float_format_doc =
max_colwidth_doc =
colheader_justify_doc =
pc_expand_repr_doc =
pc_show_dimensions_doc =
pc_east_asian_width_doc =
pc_ambiguous_as_wide_doc =
pc_latex_repr_doc =
pc_table_schema_doc =
pc_html_border_doc =
pc_html_use_mathjax_doc =
pc_max_dir_items =
pc_width_doc =
pc_chop_threshold_doc =
pc_max_se | TYP: return values in core/*.py (#47587)
* TYP: return values in core/*.py
* fix test
* to_html
* to_html part 2
* DataFrame.query
* more overloads
* fix query?
* increase stacklevel by one
* fix rename_axis
* and an overload for DataFrame.eval
* address comments
* fix typevar | use_numba_cb | 9612375ca28ade056f15d4338f1bfde5d045c9fc | pandas | config_init.py | 9 | 3 | https://github.com/pandas-dev/pandas.git | 1 | 26 | 0 | 64 | 372 | Python | {
"docstring": "\n: int\n Floating point output precision in terms of number of places after the\n decimal, for regular formatting as well as scientific notation. Similar\n to ``precision`` in :meth:`numpy.set_printoptions`.\n\n: int\n Default space for DataFrame columns.\n\n: int\n If max_rows is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the height of the terminal and print a truncated object which fits\n the screen height. The IPython notebook, IPython qtconsole, or\n IDLE do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n\n: int\n The numbers of rows to show in a truncated view (when `max_rows` is\n exceeded). Ignored when `max_rows` is set to None or 0. When set to\n None, follows the value of `max_rows`.\n\n: int\n If max_cols is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the width of the terminal and print a truncated object which fits\n the screen width. The IPython notebook, IPython qtconsole, or IDLE\n do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n\n: int\n This sets the maximum number of categories pandas should output when\n printing out a `Categorical` or a Series of dtype \"category\".\n\n: int\n max_info_columns is used in DataFrame.info method to decide if\n per column information will be printed.\n\n: boolean\n When True, IPython notebook will use html representation for\n pandas objects (if it is available).\n\n: int\n Controls the number of nested levels to process when pretty-printing\n\n: boolean\n \"sparsify\" MultiIndex display (don't display repeated\n elements in outer levels within groups)\n\n: callable\n The callable should accept a floating point number and return\n a string with the desired format of the number. This is used\n in some places like SeriesFormatter.\n See formats.format.EngFormatter for an example.\n\n: int or None\n The maximum width in characters of a column in the repr of\n a pandas data structure. When the column overflows, a \"...\"\n placeholder is embedded in the output. A 'None' value means unlimited.\n\n: 'left'/'right'\n Controls the justification of column headers. used by DataFrameFormatter.\n\n: boolean\n Whether to print out the full DataFrame repr for wide DataFrames across\n multiple lines, `max_columns` is still respected, but the output will\n wrap-around across multiple \"pages\" if its width exceeds `display.width`.\n\n: boolean or 'truncate'\n Whether to print out dimensions at the end of DataFrame repr.\n If 'truncate' is specified, only print out the dimensions if the\n frame is truncated (e.g. 
not display all rows and/or columns)\n\n: boolean\n Whether to use the Unicode East Asian Width to calculate the display text\n width.\n Enabling this may affect to the performance (default: False)\n\n: boolean\n Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)\n (default: False)\n\n: boolean\n Whether to produce a latex DataFrame representation for jupyter\n environments that support it.\n (default: False)\n\n: boolean\n Whether to publish a Table Schema representation for frontends\n that support it.\n (default: False)\n\n: int\n A ``border=value`` attribute is inserted in the ``<table>`` tag\n for the DataFrame HTML repr.\n\\\n: boolean\n When True, Jupyter notebook will process table contents using MathJax,\n rendering mathematical expressions enclosed by the dollar symbol.\n (default: True)\n\\\n: int\n The number of items that will be added to `dir(...)`. 'None' value means\n unlimited. Because dir is cached, changing this option will not immediately\n affect already existing dataframes until a column is deleted or added.\n\n This is for instance used to suggest columns from a dataframe to tab\n completion.\n\n: int\n Width of the display in characters. In case python/IPython is running in\n a terminal this can be set to None and pandas will correctly auto-detect\n the width.\n Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a\n terminal and hence it is not possible to correctly detect the width.\n\n: float or None\n if set to a float value, all float values smaller then the given threshold\n will be displayed as exactly 0 by repr and friends.\n\n: int or None\n When pretty-printing a long sequence, no more then `max_seq_items`\n will be printed. If items are omitted, they will be denoted by the\n addition of \"...\" to the resulting string.\n\n If set to None, the number of items to be printed is unlimited.\n\n: int or None\n df.info() will usually show null-counts for each column.\n For large frames this can be quite slow. max_info_rows and max_info_cols\n limit this null check only to frames with smaller dimensions than\n specified.\n\n: 'truncate'/'info'\n For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can\n show a truncated table (the default from 0.13), or switch to the view from\n df.info() (the behaviour in earlier versions of pandas).\n\n: bool, string or None\n This specifies if the memory usage of a DataFrame should be displayed when\n df.info() is called. Valid values True,False,'deep'\n\n: bool\n This specifies if the to_latex method of a Dataframe uses escapes special\n characters.\n Valid values: False,True\n\n:bool\n This specifies if the to_latex method of a Dataframe uses the longtable\n format.\n Valid values: False,True\n\n: bool\n This specifies if the to_latex method of a Dataframe uses multicolumns\n to pretty-print MultiIndex columns.\n Valid values: False,True\n\n: string\n This specifies the format for multicolumn headers.\n Can be surrounded with '|'.\n Valid values: 'l', 'c', 'r', 'p{<width>}'\n\n: bool\n This specifies if the to_latex method of a Dataframe uses multirows\n to pretty-print MultiIndex rows.\n Valid values: False,True\n",
"language": "en",
"n_whitespaces": 1237,
"n_words": 960,
"vocab_size": 361
} | def use_numba_cb(key) -> None:
from pandas.core.util import numba_
numba_.set_use_numba(cf.get_option(key))
with cf.config_prefix("compute"):
cf.register_option(
"use_bottleneck",
True,
use_bottleneck_doc,
validator=is_bool,
cb=use_bottleneck_cb,
)
cf.register_option(
"use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
)
cf.register_option(
"use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
)
#
# options from the "display" namespace
pc_precision_doc =
pc_colspace_doc =
pc_max_rows_doc =
pc_min_rows_doc =
pc_max_cols_doc =
pc_max_categories_doc =
pc_max_info_cols_doc =
pc_nb_repr_h_doc =
pc_pprint_nest_depth =
pc_multi_sparse_doc =
float_format_doc =
max_colwidth_doc =
colheader_justify_doc =
pc_expand_repr_doc =
pc_show_dimensions_doc =
pc_east_asian_width_doc =
pc_ambiguous_as_wide_doc =
pc_latex_repr_doc =
pc_table_schema_doc =
pc_html_border_doc =
pc_html_use_mathjax_doc =
pc_max_dir_items =
pc_width_doc =
pc_chop_threshold_doc =
pc_max_seq_items =
pc_max_info_rows_doc =
pc_large_repr_doc =
pc_memory_usage_doc =
pc_latex_escape =
pc_latex_longtable =
pc_latex_multicolumn =
pc_latex_multicolumn_format =
pc_latex_multirow =
|
|
72,393 | 248,638 | 295 | tests/rest/media/v1/test_html_preview.py | 55 | 8 | def test_twitter_tag(self) -> None:
html = b
tree = decode_body(html, "http://example.com/test.html")
og = parse_html_to_open_graph(tree)
self.assertEqual(
og,
{
"og:title": None,
"og:description": "Description",
"og:site_name": "@matrixdotorg",
},
)
# But they shouldn't override Ope | Improve URL previews for sites with only Twitter card information. (#13056)
Pull out `twitter:` meta tags when generating a preview and
use it to augment any `og:` meta tags.
Prefers Open Graph information over Twitter card information. | test_twitter_tag | 0fcc0ae37c959116c910f349a8025bd6921fdfc8 | synapse | test_html_preview.py | 10 | 38 | https://github.com/matrix-org/synapse.git | 1 | 88 | 0 | 34 | 159 | Python | {
"docstring": "Twitter card tags should be used if nothing else is available.\n <html>\n <meta name=\"twitter:card\" content=\"summary\">\n <meta name=\"twitter:description\" content=\"Description\">\n <meta name=\"twitter:site\" content=\"@matrixdotorg\">\n </html>\n \n <html>\n <meta name=\"twitter:card\" content=\"summary\">\n <meta name=\"twitter:description\" content=\"Description\">\n <meta property=\"og:description\" content=\"Real Description\">\n <meta name=\"twitter:site\" content=\"@matrixdotorg\">\n <meta property=\"og:site_name\" content=\"matrix.org\">\n </html>\n ",
"language": "en",
"n_whitespaces": 139,
"n_words": 40,
"vocab_size": 25
} | def test_twitter_tag(self) -> None:
html = b
tree = decode_body(html, "http://example.com/test.html")
og = parse_html_to_open_graph(tree)
self.assertEqual(
og,
{
"og:title": None,
"og:description": "Description",
"og:site_name": "@matrixdotorg",
},
)
# But they shouldn't override Open Graph values.
html = b
tree = decode_body(html, "http://example.com/test.html")
og = parse_html_to_open_graph(tree)
self.assertEqual(
og,
{
"og:title": None,
"og:description": "Real Description",
"og:site_name": "matrix.org",
},
)
|
|
14,812 | 68,528 | 108 | erpnext/accounts/doctype/tax_rule/tax_rule.py | 159 | 39 | def get_tax_template(posting_date, args):
args = frappe._dict(args)
conditions = []
if posting_date:
conditions.append(
f
)
else:
conditions.appen | refactor: tax rule validity query (#30934) | get_tax_template | 05dd1d6d15c6c8c66165e9f267078c3cf9aec10e | erpnext | tax_rule.py | 18 | 51 | https://github.com/frappe/erpnext.git | 15 | 312 | 0 | 103 | 559 | Python | {
"docstring": "Get matching tax rule(from_date is null or from_date <= '{posting_date}')\n\t\t\tand (to_date is null or to_date >= '{posting_date}')select * from `tabTax Rule`\n\t\twhere {0}",
"language": "en",
"n_whitespaces": 21,
"n_words": 24,
"vocab_size": 21
} | def get_tax_template(posting_date, args):
args = frappe._dict(args)
conditions = []
if posting_date:
conditions.append(
f
)
else:
conditions.append("(from_date is null) and (to_date is null)")
conditions.append(
"ifnull(tax_category, '') = {0}".format(frappe.db.escape(cstr(args.get("tax_category"))))
)
if "tax_category" in args.keys():
del args["tax_category"]
for key, value in args.items():
if key == "use_for_shopping_cart":
conditions.append("use_for_shopping_cart = {0}".format(1 if value else 0))
elif key == "customer_group":
if not value:
value = get_root_of("Customer Group")
customer_group_condition = get_customer_group_condition(value)
conditions.append("ifnull({0}, '') in ('', {1})".format(key, customer_group_condition))
else:
conditions.append("ifnull({0}, '') in ('', {1})".format(key, frappe.db.escape(cstr(value))))
tax_rule = frappe.db.sql(
.format(
" and ".join(conditions)
),
as_dict=True,
)
if not tax_rule:
return None
for rule in tax_rule:
rule.no_of_keys_matched = 0
for key in args:
if rule.get(key):
rule.no_of_keys_matched += 1
def cmp(a, b):
# reference: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
return int(a > b) - int(a < b)
rule = sorted(
tax_rule,
key=functools.cmp_to_key(
lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority)
),
)[0]
tax_template = rule.sales_tax_template or rule.purchase_tax_template
doctype = "{0} Taxes and Charges Template".format(rule.tax_type)
if frappe.db.get_value(doctype, tax_template, "disabled") == 1:
return None
return tax_template
|
|
42,010 | 176,628 | 97 | networkx/generators/classic.py | 40 | 16 | def wheel_graph(n, create_using=None):
_, nodes = n
G = empty_graph(nodes, create_using)
if G.is_directed():
raise | Adjust the usage of nodes_or_number decorator (#5599)
* recorrect typo in decorators.py
* Update tests to show troubles in current code
* fix troubles with usage of nodes_or_number
* fix typo
* remove nodes_or_number where that makes sense
* Reinclude nodes_or_numbers and add some tests for nonstandard usage
* fix typowq
* hopefully final tweaks (no behavior changes
* Update test_classic.py
Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com> | wheel_graph | de1d00f20e0bc14f1cc911b3486e50225a8fa168 | networkx | classic.py | 14 | 11 | https://github.com/networkx/networkx.git | 5 | 86 | 0 | 32 | 139 | Python | {
"docstring": "Return the wheel graph\n\n The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.\n\n Parameters\n ----------\n n : int or iterable\n If an integer, node labels are 0 to n with center 0.\n If an iterable of nodes, the center is the first.\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Node labels are the integers 0 to n - 1.\n ",
"language": "en",
"n_whitespaces": 117,
"n_words": 76,
"vocab_size": 51
} | def wheel_graph(n, create_using=None):
_, nodes = n
G = empty_graph(nodes, create_using)
if G.is_directed():
raise NetworkXError("Directed Graph not supported")
if len(nodes) > 1:
hub, *rim = nodes
G.add_edges_from((hub, node) for node in rim)
if len(rim) > 1:
G.add_edges_from(pairwise(rim, cyclic=True))
return G
|
|
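A small usage sketch for the generator above, via the public networkx entry point:

import networkx as nx

G = nx.wheel_graph(5)        # hub node 0 plus a 4-node rim cycle
print(sorted(G.edges()))
# [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 4), (2, 3), (3, 4)]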
45,974 | 189,036 | 199 | scripts/internal/print_announce.py | 70 | 18 | def get_changes():
with open(HISTORY) as f:
lines = f.readlines()
block = []
# eliminate the part preceding the first block
for i, line in enumerate(lines):
line = lines.pop(0)
if line.startswith('===='):
break
lines.pop(0)
for i, line in enumerate(lines):
line = lines.pop(0)
line = line.rstrip()
if re.match(r"^- \d+_", line):
line = re.sub(r"^- (\d+)_", r"- #\1", line)
if line.startswith('===='):
break
block.append(line)
| fix print_announce.py | get_changes | c14744db097b1955f2b668dc753b2d2439db0bdf | psutil | print_announce.py | 13 | 21 | https://github.com/giampaolo/psutil.git | 7 | 151 | 0 | 44 | 260 | Python | {
"docstring": "Get the most recent changes for this release by parsing\n HISTORY.rst file.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 12,
"vocab_size": 12
} | def get_changes():
with open(HISTORY) as f:
lines = f.readlines()
block = []
# eliminate the part preceding the first block
for i, line in enumerate(lines):
line = lines.pop(0)
if line.startswith('===='):
break
lines.pop(0)
for i, line in enumerate(lines):
line = lines.pop(0)
line = line.rstrip()
if re.match(r"^- \d+_", line):
line = re.sub(r"^- (\d+)_", r"- #\1", line)
if line.startswith('===='):
break
block.append(line)
# eliminate bottom empty lines
block.pop(-1)
while not block[-1]:
block.pop(-1)
return "\n".join(block)
|
|
20,182 | 100,727 | 102 | lib/gui/popup_session.py | 28 | 13 | def _check_valid_data(self) -> bool:
logger.debug("Validating data. %s",
{key: len(val) for key, val in self._display_data.stats.items()})
if any(len(val) == 0 # pylint:disable=len-as-condition
for val in self._display_data.stats.values()):
| Bugfixes:
- Stats graph - Handle NaNs in data
- logger - de-elevate matplotlib font messages | _check_valid_data | afec52309326304f4323029039e49bfcf928ef43 | faceswap | popup_session.py | 13 | 15 | https://github.com/deepfakes/faceswap.git | 4 | 64 | 0 | 24 | 105 | Python | {
"docstring": " Check that the selections holds valid data to display\n NB: len-as-condition is used as data could be a list or a numpy array\n\n Returns\n -------\n bool\n ``True` if there is data to be displayed, otherwise ``False``\n ",
"language": "en",
"n_whitespaces": 87,
"n_words": 36,
"vocab_size": 30
} | def _check_valid_data(self) -> bool:
logger.debug("Validating data. %s",
{key: len(val) for key, val in self._display_data.stats.items()})
if any(len(val) == 0 # pylint:disable=len-as-condition
for val in self._display_data.stats.values()):
return False
return True
|
|
90,441 | 291,332 | 538 | homeassistant/components/ibeacon/coordinator.py | 144 | 22 | def _async_check_unavailable_groups_with_random_macs(self) -> None:
now = MONOTONIC_TIME()
gone_unavailable = [
group_id
for group_id in self._group_ids_random_macs
if group_id not in self._unavailable_group_ids
and (service_info := self._last_seen_by_group_id.get(group_id))
and (
# We will not be callbacks for iBeacons with random macs
# that rotate infrequently since their advertisement data is
# does not change as the bluetooth.async_register_callback API
# suppresses callbacks for duplicate advertisements to avoid
# exposing integrations to the firehose of bluetooth advertisements.
#
# To solve this we need to ask for the latest service info for
# the address we last saw to get the latest timestamp.
#
| Fix iBeacons with infrequent random mac address changes unexpectedly going unavailable (#82668)
fixes https://github.com/home-assistant/core/issues/79781 | _async_check_unavailable_groups_with_random_macs | 09c3df7eb258295211a8216c2039843b09aa244b | core | coordinator.py | 17 | 20 | https://github.com/home-assistant/core.git | 7 | 100 | 0 | 92 | 166 | Python | {
"docstring": "Check for random mac groups that have not been seen in a while and mark them as unavailable.",
"language": "en",
"n_whitespaces": 17,
"n_words": 18,
"vocab_size": 18
} | def _async_check_unavailable_groups_with_random_macs(self) -> None:
now = MONOTONIC_TIME()
gone_unavailable = [
group_id
for group_id in self._group_ids_random_macs
if group_id not in self._unavailable_group_ids
and (service_info := self._last_seen_by_group_id.get(group_id))
and (
# We will not be callbacks for iBeacons with random macs
# that rotate infrequently since their advertisement data is
# does not change as the bluetooth.async_register_callback API
# suppresses callbacks for duplicate advertisements to avoid
# exposing integrations to the firehose of bluetooth advertisements.
#
# To solve this we need to ask for the latest service info for
# the address we last saw to get the latest timestamp.
#
# If there is no last service info for the address we know that
# the device is no longer advertising.
not (
latest_service_info := bluetooth.async_last_service_info(
self.hass, service_info.address, connectable=False
)
)
or now - latest_service_info.time > UNAVAILABLE_TIMEOUT
)
]
for group_id in gone_unavailable:
self._unavailable_group_ids.add(group_id)
async_dispatcher_send(self.hass, signal_unavailable(group_id))
|
|
69,645 | 241,673 | 275 | pytorch_lightning/trainer/connectors/checkpoint_connector.py | 76 | 9 | def restore_optimizers_and_schedulers(self) -> None:
if not self._loaded_checkpoint:
return
if self.trainer.strategy.lightning_restore_optimizer:
# validation
if "optimizer_states" not in self._loaded_checkpoint:
| Fix restoring lr scheduler states with deepspeed strategy (#11322)
Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
Co-authored-by: thomas chaton <thomas@grid.ai> | restore_optimizers_and_schedulers | 9c8f52ccd1a1859502f705e0567f2d83d57ff93a | lightning | checkpoint_connector.py | 13 | 17 | https://github.com/Lightning-AI/lightning.git | 5 | 62 | 0 | 42 | 117 | Python | {
"docstring": "Restores the optimizers and learning rate scheduler states from the pre-loaded checkpoint.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def restore_optimizers_and_schedulers(self) -> None:
if not self._loaded_checkpoint:
return
if self.trainer.strategy.lightning_restore_optimizer:
# validation
if "optimizer_states" not in self._loaded_checkpoint:
raise KeyError(
"Trying to restore optimizer state but checkpoint contains only the model."
" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."
)
self.restore_optimizers()
if "lr_schedulers" not in self._loaded_checkpoint:
raise KeyError(
"Trying to restore learning rate scheduler state but checkpoint contains only the model."
" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."
)
self.restore_lr_schedulers()
|
|
4,210 | 22,138 | 57 | pipenv/patched/pip/_vendor/requests/utils.py | 32 | 11 | def urldefragauth(url):
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = pat | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | urldefragauth | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | utils.py | 10 | 6 | https://github.com/pypa/pipenv.git | 2 | 64 | 0 | 23 | 99 | Python | {
"docstring": "\n Given a url remove the fragment and the authentication part.\n\n :rtype: str\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 12,
"vocab_size": 11
} | def urldefragauth(url):
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit("@", 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ""))
|
|
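An illustrative call to the helper above; the URL is made up.

url = "https://user:secret@example.com/search?q=python#results"
print(urldefragauth(url))    # https://example.com/search?q=python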
73,004 | 249,582 | 68 | tests/storage/test_registration.py | 19 | 12 | def test_approval_not_required(self) -> None:
self.get_success(self.store.register_user(self.user_id, self.pwhash))
user = self.get_success(self.store.get_user_by_id(self.user_id))
assert user is not None
self.assertTrue(user["approved"])
approved = self.get_success(self.store.is_user_a | Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556) | test_approval_not_required | be76cd8200b18f3c68b895f85ac7ef5b0ddc2466 | synapse | test_registration.py | 11 | 10 | https://github.com/matrix-org/synapse.git | 1 | 81 | 0 | 17 | 132 | Python | {
"docstring": "Tests that if we don't require approval for new accounts, newly created\n accounts are automatically marked as approved.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 18,
"vocab_size": 18
} | def test_approval_not_required(self) -> None:
self.get_success(self.store.register_user(self.user_id, self.pwhash))
user = self.get_success(self.store.get_user_by_id(self.user_id))
assert user is not None
self.assertTrue(user["approved"])
approved = self.get_success(self.store.is_user_approved(self.user_id))
self.assertTrue(approved)
|
|
57,068 | 223,791 | 107 | python3.10.4/Lib/email/message.py | 28 | 12 | def get_all(self, name, failobj=None):
valu | add python 3.10.4 for windows | get_all | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | message.py | 14 | 9 | https://github.com/XX-net/XX-Net.git | 4 | 64 | 0 | 24 | 103 | Python | {
"docstring": "Return a list of all the values for the named field.\n\n These will be sorted in the order they appeared in the original\n message, and may contain duplicates. Any fields deleted and\n re-inserted are always appended to the header list.\n\n If no such fields exist, failobj is returned (defaults to None).\n ",
"language": "en",
"n_whitespaces": 87,
"n_words": 51,
"vocab_size": 43
} | def get_all(self, name, failobj=None):
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(self.policy.header_fetch_parse(k, v))
if not values:
return failobj
return values
|
|
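A short usage sketch against the compat32 Message class that defines the method above; the header names and values are illustrative.

from email.message import Message

msg = Message()
msg["X-Tag"] = "alpha"
msg["X-Tag"] = "beta"                        # repeated headers accumulate
print(msg.get_all("X-Tag"))                  # ['alpha', 'beta']
print(msg.get_all("X-Missing", failobj=[]))  # [] -- the fallback when absent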
15,828 | 72,102 | 105 | wagtail/admin/tests/test_privacy.py | 31 | 14 | def test_explorer_private_child(self):
response = self.client.get(
reverse("wagtailadmin_explore", args=(self.private_child_page.id,))
)
# Check the response
self.assertEqual(response.status_code, 200)
# Check the privacy indicator is public
self.a | Reformat with black | test_explorer_private_child | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_privacy.py | 14 | 8 | https://github.com/wagtail/wagtail.git | 1 | 64 | 0 | 25 | 110 | Python | {
"docstring": "\n This tests that the privacy indicator on the private child pages explore view is set to \"PRIVATE\"\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 17,
"vocab_size": 16
} | def test_explorer_private_child(self):
response = self.client.get(
reverse("wagtailadmin_explore", args=(self.private_child_page.id,))
)
# Check the response
self.assertEqual(response.status_code, 200)
# Check the privacy indicator is public
self.assertTemplateUsed(response, "wagtailadmin/pages/_privacy_switch.html")
self.assertContains(response, '<div class="privacy-indicator private">')
self.assertNotContains(response, '<div class="privacy-indicator public">')
|
|
120,535 | 334,167 | 40 | utils/check_dummies.py | 18 | 10 | def find_backend(line):
if _re_test_backend.search(line) is None:
return No | upload some cleaning tools | find_backend | 95f4256fc905b6e29e5ea0f245dcf88f72a9ddd1 | diffusers | check_dummies.py | 10 | 6 | https://github.com/huggingface/diffusers.git | 3 | 47 | 0 | 17 | 79 | Python | {
"docstring": "Find one (or multiple) backend in a code line of the init.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def find_backend(line):
if _re_test_backend.search(line) is None:
return None
backends = [b[0] for b in _re_backend.findall(line)]
backends.sort()
return "_and_".join(backends)
|
|
42,246 | 177,039 | 70 | networkx/classes/graphviews.py | 36 | 18 | def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter):
newG = nx.freeze(G.__class__())
newG._NODE_OK | Attempt to reverse slowdown from hasattr needed for cached_property (#5836)
* Automate reset of cache for _adj,_pred,_succ
* Make G._adj a data descriptor that resets G.adj when needed.
* update places in the code where both G._succ and G._adj are changed
This is no longer needed since G._succ and G._adj are synced during __set__
* testing hasattr(G, `_adj`) no longer ensures an instance.
* Make mypy happy
* Switch to hardcode attribute names in the data descriptors
* Improve doc_strings for the data descriptors | subgraph_view | 2fb00bb8b9ed1e2917e5bc1aac04c558bd23c6d8 | networkx | graphviews.py | 10 | 19 | https://github.com/networkx/networkx.git | 3 | 132 | 0 | 29 | 114 | Python | {
"docstring": "View of `G` applying a filter on nodes and edges.\n\n `subgraph_view` provides a read-only view of the input graph that excludes\n nodes and edges based on the outcome of two filter functions `filter_node`\n and `filter_edge`.\n\n The `filter_node` function takes one argument --- the node --- and returns\n `True` if the node should be included in the subgraph, and `False` if it\n should not be included.\n\n The `filter_edge` function takes two (or three arguments if `G` is a\n multi-graph) --- the nodes describing an edge, plus the edge-key if\n parallel edges are possible --- and returns `True` if the edge should be\n included in the subgraph, and `False` if it should not be included.\n\n Both node and edge filter functions are called on graph elements as they\n are queried, meaning there is no up-front cost to creating the view.\n\n Parameters\n ----------\n G : networkx.Graph\n A directed/undirected graph/multigraph\n\n filter_node : callable, optional\n A function taking a node as input, which returns `True` if the node\n should appear in the view.\n\n filter_edge : callable, optional\n A function taking as input the two nodes describing an edge (plus the\n edge-key if `G` is a multi-graph), which returns `True` if the edge\n should appear in the view.\n\n Returns\n -------\n graph : networkx.Graph\n A read-only graph view of the input graph.\n\n Examples\n --------\n >>> G = nx.path_graph(6)\n\n Filter functions operate on the node, and return `True` if the node should\n appear in the view:\n\n >>> def filter_node(n1):\n ... return n1 != 5\n ...\n >>> view = nx.subgraph_view(G, filter_node=filter_node)\n >>> view.nodes()\n NodeView((0, 1, 2, 3, 4))\n\n We can use a closure pattern to filter graph elements based on additional\n data --- for example, filtering on edge data attached to the graph:\n\n >>> G[3][4][\"cross_me\"] = False\n >>> def filter_edge(n1, n2):\n ... return G[n1][n2].get(\"cross_me\", True)\n ...\n >>> view = nx.subgraph_view(G, filter_edge=filter_edge)\n >>> view.edges()\n EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)])\n\n >>> view = nx.subgraph_view(G, filter_node=filter_node, filter_edge=filter_edge,)\n >>> view.nodes()\n NodeView((0, 1, 2, 3, 4))\n >>> view.edges()\n EdgeView([(0, 1), (1, 2), (2, 3)])\n ",
"language": "en",
"n_whitespaces": 528,
"n_words": 333,
"vocab_size": 150
} | def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter):
newG = nx.freeze(G.__class__())
newG._NODE_OK = filter_node
newG._EDGE_OK = filter_edge
# create view by assigning attributes from G
newG._graph = G
newG.graph = G.graph
newG._node = FilterAtlas(G._node, filter_node)
if G.is_multigraph():
Adj = FilterMultiAdjacency
|
|
7,451 | 41,875 | 153 | seaborn/utils.py | 47 | 9 | def _deprecate_ci(errorbar, ci):
if ci != "deprecated":
if ci is None:
errorbar = None
elif ci == "sd":
errorbar = "sd"
else:
errorbar = ("ci", ci)
| Housekeeping on relational plot parameters (#2855)
* Do some housekeeping on lineplot ci deprecation
* Remove some unused parameters from scatterplot
* Remove incorrect statement from relplot docstring
* Update lineplot ci= deprecation test | _deprecate_ci | 26bf4b3b645edc405ca52b533b8d68273aeba7d1 | seaborn | utils.py | 14 | 14 | https://github.com/mwaskom/seaborn.git | 4 | 59 | 0 | 37 | 117 | Python | {
"docstring": "\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 45,
"vocab_size": 42
} | def _deprecate_ci(errorbar, ci):
if ci != "deprecated":
if ci is None:
errorbar = None
elif ci == "sd":
errorbar = "sd"
else:
errorbar = ("ci", ci)
msg = (
"\n\nThe `ci` parameter is deprecated. "
f"Use `errorbar={repr(errorbar)}` for the same effect.\n"
)
warnings.warn(msg, FutureWarning, stacklevel=3)
return errorbar
|
|
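An illustrative mapping for the helper above: a numeric ci becomes ("ci", value), "sd" stays "sd", and None disables the error bars, each accompanied by a FutureWarning (silenced here):

import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore", FutureWarning)
    print(_deprecate_ci(None, 95))     # ('ci', 95)
    print(_deprecate_ci(None, "sd"))   # sd
    print(_deprecate_ci(None, None))   # None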
38,770 | 160,870 | 42 | numpy/ma/core.py | 10 | 7 | def __sub__(self, other):
if self._delegate_binop(other):
| ENH: Adding __array_ufunc__ capability to MaskedArrays.
This enables any ufunc numpy operations that are called on a
MaskedArray to use the masked version of that function automatically
without needing to resort to np.ma.func() calls. | __sub__ | 6d77c591c59b5678f14ae5af2127eebb7d2415bc | numpy | core.py | 7 | 4 | https://github.com/numpy/numpy.git | 2 | 27 | 0 | 9 | 44 | Python | {
"docstring": "\n Subtract other from self, and return a new masked array.\n\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def __sub__(self, other):
if self._delegate_binop(other):
return NotImplemented
return np.subtract(self, other)
|
|
@pytest.fixture | 87,135 | 287,952 | 146 | tests/components/plugwise/conftest.py | 51 | 21 | def mock_smile_adam_2() -> Generator[None, MagicMock, None]:
chosen_env = "m_adam_heating"
with patch(
"homeassistant.components.plugwise.gateway.Smile", autospec=True
) as smile_mock:
smile = smile_mock.return_value
smile.gateway_id = "da224107914542988a88561b4 | Bump plugwise to v0.21.3, add related new features (#76610)
Co-authored-by: Franck Nijhof <frenck@frenck.nl> | mock_smile_adam_2 | 2667f0b792b1f936aeb5958cc40d5dee26350bf6 | core | conftest.py | 11 | 17 | https://github.com/home-assistant/core.git | 1 | 95 | 1 | 39 | 180 | Python | {
"docstring": "Create a 2nd Mock Adam environment for testing exceptions.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def mock_smile_adam_2() -> Generator[None, MagicMock, None]:
chosen_env = "m_adam_heating"
with patch(
"homeassistant.components.plugwise.gateway.Smile", autospec=True
) as smile_mock:
smile = smile_mock.return_value
smile.gateway_id = "da224107914542988a88561b4452b0f6"
smile.heater_id = "056ee145a816487eaa69243c3280f8bf"
smile.smile_version = "3.6.4"
smile.smile_type = "thermostat"
smile.smile_hostname = "smile98765"
smile.smile_name = "Adam"
smile.connect.return_value = True
smile.notifications = _read_json(chosen_env, "notifications")
smile.async_update.return_value = _read_json(chosen_env, "all_data")
yield smile
@pytest.fixture |
22,518 | 106,941 | 1,017 | lib/mpl_toolkits/mplot3d/axes3d.py | 393 | 52 | def plot_wireframe(self, X, Y, Z, **kwargs):
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 1)
cstride = kwargs.pop('cstride', 1)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
if has_count:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
if not has_stride:
| Remove *args deprecations | plot_wireframe | 6ef6b37fc2113c041f7d2643d70b553ec335d597 | matplotlib | axes3d.py | 19 | 54 | https://github.com/matplotlib/matplotlib.git | 30 | 539 | 0 | 193 | 846 | Python | {
"docstring": "\n Plot a 3D wireframe.\n\n .. note::\n\n The *rcount* and *ccount* kwargs, which both default to 50,\n determine the maximum number of samples used in each direction. If\n the input data is larger, it will be downsampled (by slicing) to\n these numbers of points.\n\n Parameters\n ----------\n X, Y, Z : 2D arrays\n Data values.\n\n rcount, ccount : int\n Maximum number of samples used in each direction. If the input\n data is larger, it will be downsampled (by slicing) to these\n numbers of points. Setting a count to zero causes the data to be\n not sampled in the corresponding direction, producing a 3D line\n plot rather than a wireframe plot. Defaults to 50.\n\n rstride, cstride : int\n Downsampling stride in each direction. These arguments are\n mutually exclusive with *rcount* and *ccount*. If only one of\n *rstride* or *cstride* is set, the other defaults to 1. Setting a\n stride to zero causes the data to be not sampled in the\n corresponding direction, producing a 3D line plot rather than a\n wireframe plot.\n\n 'classic' mode uses a default of ``rstride = cstride = 1`` instead\n of the new default of ``rcount = ccount = 50``.\n\n **kwargs\n Other arguments are forwarded to `.Line3DCollection`.\n ",
"language": "en",
"n_whitespaces": 474,
"n_words": 198,
"vocab_size": 105
} | def plot_wireframe(self, X, Y, Z, **kwargs):
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 1)
cstride = kwargs.pop('cstride', 1)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
if has_count:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
if not has_stride:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
if rstride:
rii = list(range(0, rows, rstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1):
rii += [rows-1]
else:
rii = []
if cstride:
cii = list(range(0, cols, cstride))
# Add the last index only if needed
if cols > 0 and cii[-1] != (cols - 1):
cii += [cols-1]
else:
cii = []
if rstride == 0 and cstride == 0:
raise ValueError("Either rstride or cstride must be non zero")
# If the inputs were empty, then just
# reset everything.
if Z.size == 0:
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = ([list(zip(xl, yl, zl))
for xl, yl, zl in zip(xlines, ylines, zlines)]
+ [list(zip(xl, yl, zl))
for xl, yl, zl in zip(txlines, tylines, tzlines)])
linec = art3d.Line3DCollection(lines, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
|
|
50,774 | 204,534 | 372 | django/core/handlers/base.py | 97 | 14 | def check_response(self, response, callback, name=None):
if not (response is None or asyncio.iscoroutine(response)):
return
if not name:
if isinstance(callback, types.FunctionType): # FBV
| Refs #33476 -- Reformatted code with Black. | check_response | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 15 | 22 | https://github.com/django/django.git | 7 | 105 | 0 | 63 | 181 | Python | {
"docstring": "\n Raise an error if the view returned None or an uncalled coroutine.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def check_response(self, response, callback, name=None):
if not (response is None or asyncio.iscoroutine(response)):
return
if not name:
if isinstance(callback, types.FunctionType): # FBV
name = "The view %s.%s" % (callback.__module__, callback.__name__)
else: # CBV
name = "The view %s.%s.__call__" % (
callback.__module__,
callback.__class__.__name__,
)
if response is None:
raise ValueError(
"%s didn't return an HttpResponse object. It returned None "
"instead." % name
)
elif asyncio.iscoroutine(response):
raise ValueError(
"%s didn't return an HttpResponse object. It returned an "
"unawaited coroutine instead. You may need to add an 'await' "
"into your view." % name
)
# Other utility methods.
|
|
36,894 | 157,247 | 630 | dask/dataframe/io/io.py | 234 | 39 | def _meta_from_array(x, columns=None, index=None, meta=None):
if x.ndim > 2:
raise ValueError(
"from_array does not input more than 2D array, got"
" array with shape %r" % (x.shape,)
)
if index is not None:
if not isinstance(index, Index):
raise ValueError("'index' must be an instance of dask.dataframe.Index")
index = index._meta
if meta is None:
meta = meta_lib_from_array(x).DataFrame()
if getattr(x.dtype, "names", None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError(f"dtype {x.dtype} doesn't have fields {extra}")
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else "f8" for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return meta._constructor_sliced(
[], name=columns, dtype=x.dtype, index=index
)
elif len(columns) == 1:
return meta._constructor(
np.array([], dtype=x.dtype), columns=columns, index=index
| Support `cupy.ndarray` to `cudf.DataFrame` dispatching in `dask.dataframe` (#9579) | _meta_from_array | 0d8e12be4c2261b3457978c16aba7e893b1cf4a1 | dask | io.py | 18 | 47 | https://github.com/dask/dask.git | 21 | 397 | 0 | 136 | 656 | Python | {
"docstring": "Create empty DataFrame or Series which has correct dtype",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _meta_from_array(x, columns=None, index=None, meta=None):
if x.ndim > 2:
raise ValueError(
"from_array does not input more than 2D array, got"
" array with shape %r" % (x.shape,)
)
if index is not None:
if not isinstance(index, Index):
raise ValueError("'index' must be an instance of dask.dataframe.Index")
index = index._meta
if meta is None:
meta = meta_lib_from_array(x).DataFrame()
if getattr(x.dtype, "names", None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError(f"dtype {x.dtype} doesn't have fields {extra}")
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else "f8" for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return meta._constructor_sliced(
[], name=columns, dtype=x.dtype, index=index
)
elif len(columns) == 1:
return meta._constructor(
np.array([], dtype=x.dtype), columns=columns, index=index
)
raise ValueError(
"For a 1d array, columns must be a scalar or single element list"
)
else:
if np.isnan(x.shape[1]):
raise ValueError("Shape along axis 1 must be known")
if columns is None:
columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
elif len(columns) != x.shape[1]:
raise ValueError(
"Number of column names must match width of the array. "
f"Got {len(columns)} names for {x.shape[1]} columns"
)
dtypes = [x.dtype] * len(columns)
data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
return meta._constructor(data, columns=columns, index=index)
|
|
16,070 | 73,615 | 82 | wagtail/contrib/typed_table_block/blocks.py | 16 | 9 | def rows(self):
for row in self.row_data:
yield [
column["block"].bind(value)
for column, | Reformat with black | rows | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | blocks.py | 14 | 6 | https://github.com/wagtail/wagtail.git | 3 | 41 | 0 | 14 | 68 | Python | {
"docstring": "\n Iterate over the rows of the table, with each row returned as a list of BoundBlocks\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 16,
"vocab_size": 14
} | def rows(self):
for row in self.row_data:
yield [
column["block"].bind(value)
for column, value in zip(self.columns, row["values"])
]
|
|
48,104 | 196,686 | 18 | sympy/stats/crv_types.py | 15 | 6 | def Uniform(name, left, right):
r
return rv(name, UniformDistribution, (left, right))
#-------------------------------------------------------- | Documentation cleanup 5 | Uniform | 9ad8ab9fe58051cf11626ba6654852fcfec60147 | sympy | crv_types.py | 8 | 60 | https://github.com/sympy/sympy.git | 1 | 24 | 0 | 15 | 36 | Python | {
"docstring": "\n Create a continuous random variable with a uniform distribution.\n\n Explanation\n ===========\n\n The density of the uniform distribution is given by\n\n .. math::\n f(x) := \\begin{cases}\n \\frac{1}{b - a} & \\text{for } x \\in [a,b] \\\\\n 0 & \\text{otherwise}\n \\end{cases}\n\n with :math:`x \\in [a,b]`.\n\n Parameters\n ==========\n\n a : Real number, :math:`-\\infty < a`, the left boundary\n b : Real number, :math:`a < b < \\infty`, the right boundary\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import Uniform, density, cdf, E, variance\n >>> from sympy import Symbol, simplify\n\n >>> a = Symbol(\"a\", negative=True)\n >>> b = Symbol(\"b\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = Uniform(\"x\", a, b)\n\n >>> density(X)(z)\n Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True))\n\n >>> cdf(X)(z)\n Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True))\n\n >>> E(X)\n a/2 + b/2\n\n >>> simplify(variance(X))\n a**2/12 - a*b/6 + b**2/12\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29\n .. [2] http://mathworld.wolfram.com/UniformDistribution.html\n\n ",
"language": "en",
"n_whitespaces": 331,
"n_words": 157,
"vocab_size": 111
} | def Uniform(name, left, right):
r
return rv(name, UniformDistribution, (left, right))
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
|
|
56,953 | 223,527 | 71 | python3.10.4/Lib/email/_header_value_parser.py | 29 | 12 | def get_ttext(value):
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext | add python 3.10.4 for windows | get_ttext | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _header_value_parser.py | 12 | 10 | https://github.com/XX-net/XX-Net.git | 2 | 61 | 0 | 23 | 106 | Python | {
"docstring": "ttext = <matches _ttext_matcher>\n\n We allow any non-TOKEN_ENDS in ttext, but add defects to the token's\n defects list if we find non-ttext characters. We also register defects for\n *any* non-printables even though the RFC doesn't exclude all of them,\n because we follow the spirit of RFC 5322.\n\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 47,
"vocab_size": 39
} | def get_ttext(value):
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
|
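A minimal illustration of the tokenization in this record, assuming `get_ttext` is imported from the stdlib's private `email._header_value_parser` module (the module the record comes from); the sample header value is made up:

```
from email._header_value_parser import get_ttext

# ';' is a token-ending special, so matching stops right before it.
token, rest = get_ttext("plain; charset=utf-8")
assert token == "plain"
assert rest == "; charset=utf-8"
```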
|
47,945 | 196,497 | 84 | sympy/codegen/ast.py | 34 | 9 | def kwargs(self, exclude=(), apply=None):
kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude}
| Fixed issues with __slots__ (overlaps and omission in base classes)
Across several modules, two types of slot problems were detected.
1) Overlaps
A class redefines slots already present in a superclass.
This reduces the memory savings from slots, as well as
potentially introduces unpredictable behavior.
2) Omission in base classes
A class defines slots, but one of its superclasses does not.
This reduces the memory savings from slots, as well as allows
`__dict__` to be created and non-slot attributes to be set.
Most of these issues were straightforward to fix, except in the `codegen`
module, which makes use of slots to generate constructors. Here a change
to the constructor logic was needed in order to solve the slots issues. | kwargs | 338775324184a00c6bf50b8339ebd805c2bf4879 | sympy | ast.py | 11 | 17 | https://github.com/sympy/sympy.git | 5 | 67 | 0 | 25 | 103 | Python | {
"docstring": " Get instance's attributes as dict of keyword arguments.\n\n Parameters\n ==========\n\n exclude : collection of str\n Collection of keywords to exclude.\n\n apply : callable, optional\n Function to apply to all values.\n ",
"language": "en",
"n_whitespaces": 88,
"n_words": 30,
"vocab_size": 24
} | def kwargs(self, exclude=(), apply=None):
kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude}
if apply is not None:
return {k: apply(v) for k, v in kwargs.items()}
else:
return kwargs
|
|
31,883 | 140,165 | 22 | python/ray/serve/deployment_executor_node.py | 8 | 6 | def _execute_impl(self, *args, **kwargs) -> RayServeHandle:
return self._deployment_handle
| [Serve][Deployment Graph][Perf] Add minimal executor DAGNode (#24754)
closes #24475
 The current deployment graph has big perf issues compared with using a plain deployment handle, mostly because of the overhead of the DAGNode traversal mechanism. We need this mechanism to empower the DAG API, especially deeply nested objects in args where we rely on pickling; but meanwhile the nature of each execution becomes re-creating and replacing every `DAGNode` instance involved upon each execution, which incurs overhead.
 Some overhead is inevitable due to pickling and executing DAGNode Python code, but it could be quite minimal. As I profiled earlier, pickling itself is quite fast for our benchmarks, at the magnitude of microseconds.
Meanwhile the elephant in the room is DeploymentNode and its relatives are doing too much work in constructor that's beyond necessary, thus slowing everything down. So the fix is as simple as
1) Introduce a new set of executor dag node types that contains absolute minimal information that only preserves the DAG structure with traversal mechanism, and ability to call relevant deployment handles.
2) Add a simple new pass in our build() that generates and replaces nodes with executor dag to produce a final executor dag to run the graph.
Current ray dag -> serve dag mixed a lot of stuff related to deployment generation and init args, in longer term we should remove them but our correctness depends on it so i rather leave it as separate PR.
### Current 10 node chain with deployment graph `.bind()`
```
chain_length: 10, num_clients: 1
latency_mean_ms: 41.05, latency_std_ms: 15.18
throughput_mean_tps: 27.5, throughput_std_tps: 3.2
```
### Using raw deployment handle without dag overhead
```
chain_length: 10, num_clients: 1
latency_mean_ms: 20.39, latency_std_ms: 4.57
throughput_mean_tps: 51.9, throughput_std_tps: 1.04
```
### After this PR:
```
chain_length: 10, num_clients: 1
latency_mean_ms: 20.35, latency_std_ms: 0.87
throughput_mean_tps: 48.4, throughput_std_tps: 1.43
``` | _execute_impl | f27e85cd7df5ca2873ef6231200a1530e16ac35d | ray | deployment_executor_node.py | 6 | 6 | https://github.com/ray-project/ray.git | 1 | 18 | 0 | 8 | 30 | Python | {
"docstring": "Does not call into anything or produce a new value, as the time\n this function gets called, all child nodes are already resolved to\n ObjectRefs.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 25,
"vocab_size": 25
} | def _execute_impl(self, *args, **kwargs) -> RayServeHandle:
return self._deployment_handle
|
|
16,040 | 73,523 | 87 | wagtail/contrib/settings/tests/test_templates.py | 23 | 9 | def test_settings_use_default_site(self):
context = {}
# This should use the default site
template = '{{ settings("tests.testsetting", use_default_site=True).title}}'
self.assertEqual(
self.render(template, context, request_co | Reformat with black | test_settings_use_default_site | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_templates.py | 10 | 7 | https://github.com/wagtail/wagtail.git | 1 | 37 | 0 | 22 | 62 | Python | {
"docstring": "\n Check that the {{ settings(use_default_site=True) }} option works with\n no site in the context\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 13
} | def test_settings_use_default_site(self):
context = {}
# This should use the default site
template = '{{ settings("tests.testsetting", use_default_site=True).title}}'
self.assertEqual(
self.render(template, context, request_context=False),
self.default_site_settings.title,
)
|
|
43,365 | 181,571 | 40 | tests/test_ffmpeg_reader.py | 22 | 6 | def test_stream_square_brackets_and_language():
infos =
d = FFmpegInfosParser(infos, "clip.mp4").parse()
assert d
assert len(d["inputs"][0]["streams"]) == 2
assert d["inputs"][0]["streams"][0]["language"] == "eng"
assert d["inputs"][0]["streams"][1]["language"] is None
| Handle brackets and language in FFMPEG output (#1837)
* Improve regex to handle brackets and language
* Update CHANGELOG.md
* Simplify `if` | test_stream_square_brackets_and_language | 1393889d5bc29c8b7c4ed45bca4736d6dfdfad8d | moviepy | test_ffmpeg_reader.py | 12 | 12 | https://github.com/Zulko/moviepy.git | 1 | 75 | 0 | 16 | 132 | Python | {
"docstring": "\nInput #0, mpeg, from 'clip.mp4':\n Duration: 00:02:15.00, start: 52874.498178, bitrate: 266 kb/s\n Stream #0:0[0x1e0](eng): Video: ..., 25 tbr, 90k tbn, 50 tbc\n Stream #0:1[0x1c0](und): Audio: mp2, 0 channels, s16p\nAt least one output file must be specified",
"language": "en",
"n_whitespaces": 42,
"n_words": 37,
"vocab_size": 36
} | def test_stream_square_brackets_and_language():
infos =
d = FFmpegInfosParser(infos, "clip.mp4").parse()
assert d
assert len(d["inputs"][0]["streams"]) == 2
assert d["inputs"][0]["streams"][0]["language"] == "eng"
assert d["inputs"][0]["streams"][1]["language"] is None
|
|
11,991 | 60,126 | 80 | src/prefect/_internal/concurrency/primitives.py | 19 | 8 | async def wait(self) -> None:
| Add thread-safe async primitives `Event` and `Future` (#7865)
Co-authored-by: Serina Grill <42048900+serinamarie@users.noreply.github.com> | wait | a368874d1b145c1ec5201e5efd3c26ce7c1e8611 | prefect | primitives.py | 10 | 12 | https://github.com/PrefectHQ/prefect.git | 3 | 44 | 0 | 17 | 78 | Python | {
"docstring": "\n Wait until the flag has been set.\n\n If the flag has already been set when this method is called, it returns immediately.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 18
} | async def wait(self) -> None:
if self._is_set:
return
if not self._loop:
self._loop = get_running_loop()
self._event = asyncio.Event()
await self._event.wait()
|
|
11,516 | 56,385 | 570 | src/prefect/agent.py | 134 | 42 | async def get_and_submit_flow_runs(self) -> List[FlowRun]:
if not self.started:
raise RuntimeError("Agent is not started. Use `async with OrionAgent()...`")
self.logger.debug("Checking for flow runs...")
before = pendulum.now("utc").add(
seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value()
)
# Use the work queue id or load one from the name
work_queue_id = self.work_queue_id or await self.work_queue_id_from_name()
if not work_queue_id:
return []
try:
submittable_runs = await self.client.get_runs_in_work_queue(
id=work_queue_id, limit=10, scheduled_before=before
)
except httpx.HTTPStatusError as exc:
if exc.response.status_code == status.HTTP_404_NOT_FOUND:
raise ValueError(
f"No work queue found with id '{work_queue_id}'"
) from None
else:
raise
# Check for a paused work queue for display purposes
if not submittable_runs:
work_queue = await sel | Add message to indicate a work queue is paused
The agent now checks if the work queue is paused when it does not find any submittable runs. We may want to reduce the frequency of this API call in the future, but it seems reasonable as a starting point. | get_and_submit_flow_runs | 78825acff7ee179ddb1e98da6efa6d39e4e3d1bf | prefect | agent.py | 14 | 41 | https://github.com/PrefectHQ/prefect.git | 11 | 202 | 0 | 90 | 362 | Python | {
"docstring": "\n The principle method on agents. Queries for scheduled flow runs and submits\n them for execution in parallel.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 16
} | async def get_and_submit_flow_runs(self) -> List[FlowRun]:
if not self.started:
raise RuntimeError("Agent is not started. Use `async with OrionAgent()...`")
self.logger.debug("Checking for flow runs...")
before = pendulum.now("utc").add(
seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value()
)
# Use the work queue id or load one from the name
work_queue_id = self.work_queue_id or await self.work_queue_id_from_name()
if not work_queue_id:
return []
try:
submittable_runs = await self.client.get_runs_in_work_queue(
id=work_queue_id, limit=10, scheduled_before=before
)
except httpx.HTTPStatusError as exc:
if exc.response.status_code == status.HTTP_404_NOT_FOUND:
raise ValueError(
f"No work queue found with id '{work_queue_id}'"
) from None
else:
raise
# Check for a paused work queue for display purposes
if not submittable_runs:
work_queue = await self.client.read_work_queue(work_queue_id)
if work_queue.is_paused:
self.logger.info(
f"Work queue {work_queue.name!r} ({work_queue.id}) is paused."
)
for flow_run in submittable_runs:
self.logger.info(f"Submitting flow run '{flow_run.id}'")
# don't resubmit a run
if flow_run.id in self.submitting_flow_run_ids:
continue
self.submitting_flow_run_ids.add(flow_run.id)
self.task_group.start_soon(
self.submit_run,
flow_run,
)
return submittable_runs
|
|
81,709 | 276,718 | 120 | keras/utils/conv_utils.py | 68 | 8 | def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
if input_lengt | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | conv_output_length | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | conv_utils.py | 11 | 12 | https://github.com/keras-team/keras.git | 5 | 95 | 0 | 39 | 160 | Python | {
"docstring": "Determines output length of a convolution given input length.\n\n Args:\n input_length: integer.\n filter_size: integer.\n padding: one of \"same\", \"valid\", \"full\", \"causal\"\n stride: integer.\n dilation: dilation rate, integer.\n\n Returns:\n The output length (integer).\n ",
"language": "en",
"n_whitespaces": 83,
"n_words": 32,
"vocab_size": 26
} | def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
if input_length is None:
return None
assert padding in {"same", "valid", "full", "causal"}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ["same", "causal"]:
output_length = input_length
elif padding == "valid":
output_length = input_length - dilated_filter_size + 1
elif padding == "full":
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
|
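A quick worked example of the padding arithmetic in this record (a sketch — the import path is assumed from the record's file path and may differ across Keras versions):

```
from keras.utils.conv_utils import conv_output_length

# Length-10 input, size-3 filter, stride 2, no dilation:
assert conv_output_length(10, 3, "same", 2) == 5    # ceil(10 / 2)
assert conv_output_length(10, 3, "valid", 2) == 4   # ceil((10 - 3 + 1) / 2)
assert conv_output_length(10, 3, "full", 2) == 6    # ceil((10 + 3 - 1) / 2)

# Dilation widens the effective filter: size 3 with dilation 2 behaves like size 5.
assert conv_output_length(10, 3, "valid", 1, dilation=2) == 6
```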
|
77,807 | 264,784 | 28 | netbox/dcim/models/cables.py | 7 | 9 | def get_split_nodes(self):
rearport = path_node_to_object(self._nodes[-1])
return FrontPort.objects.filter(rear_port=rearp | Migrate CablePath to use two-dimensional array | get_split_nodes | 82706eb3a68e963d7ac089478788b87892d4ee79 | netbox | cables.py | 10 | 3 | https://github.com/netbox-community/netbox.git | 1 | 29 | 0 | 7 | 49 | Python | {
"docstring": "\n Return all available next segments in a split cable path.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def get_split_nodes(self):
rearport = path_node_to_object(self._nodes[-1])
return FrontPort.objects.filter(rear_port=rearport)
|
|
51,781 | 206,882 | 391 | django/views/generic/list.py | 113 | 16 | def get_template_names(self):
try:
names = super().get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If the list is a queryset, we'll invent a template name based on the
# app and model name. This name gets put at the end of the template
# name list so that user-supplied names override the automatically-
# generated ones.
if hasattr(self.object_list, "model"):
opts = self.object_list.model._meta
names.append(
"%s/%s%s.html"
% (opts.app_label, opts.model_name, self.template_name_suffix)
)
elif not names:
raise ImproperlyConfigured(
"%(cls)s requires either a 'template_name' attribute "
"or a get_queryset() method that returns a QuerySet." | Refs #33476 -- Reformatted code with Black. | get_template_names | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | list.py | 15 | 20 | https://github.com/django/django.git | 4 | 86 | 0 | 85 | 155 | Python | {
"docstring": "\n Return a list of template names to be used for the request. Must return\n a list. May not be called if render_to_response is overridden.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 24,
"vocab_size": 22
} | def get_template_names(self):
try:
names = super().get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If the list is a queryset, we'll invent a template name based on the
# app and model name. This name gets put at the end of the template
# name list so that user-supplied names override the automatically-
# generated ones.
if hasattr(self.object_list, "model"):
opts = self.object_list.model._meta
names.append(
"%s/%s%s.html"
% (opts.app_label, opts.model_name, self.template_name_suffix)
)
elif not names:
raise ImproperlyConfigured(
"%(cls)s requires either a 'template_name' attribute "
"or a get_queryset() method that returns a QuerySet."
% {
"cls": self.__class__.__name__,
}
)
return names
|
|
45,965 | 189,007 | 352 | scripts/internal/fix_flake8.py | 112 | 26 | def remove_lines(fname, entries):
to_remove = []
for entry in entries:
msg, issue, lineno, pos, descr = entry
# 'module imported but not used'
if issue == 'F401' and handle_f401(fname, lineno):
to_remove.append(lineno)
# 'blank line(s) at end of file'
elif issue == 'W391':
lines = read_lines(fname)
i = len(lines) - 1
while lines[i] == '\n':
| Fix typos | remove_lines | 471b19d2aa799cd73bded23379e864dd35bec2b6 | psutil | fix_flake8.py | 16 | 25 | https://github.com/giampaolo/psutil.git | 11 | 185 | 0 | 80 | 310 | Python | {
"docstring": "Check if we should remove lines, then do it.\n Return the number of lines removed.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 15,
"vocab_size": 15
} | def remove_lines(fname, entries):
to_remove = []
for entry in entries:
msg, issue, lineno, pos, descr = entry
# 'module imported but not used'
if issue == 'F401' and handle_f401(fname, lineno):
to_remove.append(lineno)
# 'blank line(s) at end of file'
elif issue == 'W391':
lines = read_lines(fname)
i = len(lines) - 1
while lines[i] == '\n':
to_remove.append(i + 1)
i -= 1
# 'too many blank lines'
elif issue == 'E303':
howmany = descr.replace('(', '').replace(')', '')
howmany = int(howmany[-1])
for x in range(lineno - howmany, lineno):
to_remove.append(x)
if to_remove:
newlines = []
for i, line in enumerate(read_lines(fname), 1):
if i not in to_remove:
newlines.append(line)
print("removing line(s) from %s" % fname)
write_file(fname, newlines)
return len(to_remove)
|
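For orientation, the `entries` argument unpacked above is a sequence of five-field tuples; the concrete values below are hypothetical, inferred only from how the fields are used inside the function:

```
# Hypothetical flake8 entries as consumed by remove_lines() -- the field order
# (msg, issue, lineno, pos, descr) comes straight from the unpacking above.
entries = [
    ("x.py:3:1: F401 'os' imported but not used", "F401", 3, 1, "'os' imported but not used"),
    ("x.py:40:1: E303 too many blank lines (4)", "E303", 40, 1, "too many blank lines (4)"),
]
# remove_lines("x.py", entries) would mark line 3 for removal (if handle_f401 agrees)
# and delete the 4 blank lines before line 40, returning how many lines were removed.
```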
|
24,770 | 112,855 | 217 | nni/algorithms/hpo/bohb_advisor/bohb_advisor.py | 39 | 12 | def _get_one_trial_job(self):
if not self.generated_hyper_configs:
ret = {
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
self.send(CommandType.NoMoreTrialJobs, nni.dump(ret))
| Support multiple HPO experiments in one process (#4855) | _get_one_trial_job | 98c1a77f61900d486f46d284c49fb65675dbee6a | nni | bohb_advisor.py | 11 | 18 | https://github.com/microsoft/nni.git | 2 | 95 | 0 | 26 | 164 | Python | {
"docstring": "get one trial job, i.e., one hyperparameter configuration.\n\n If this function is called, Command will be sent by BOHB:\n a. If there is a parameter need to run, will return \"NewTrialJob\" with a dict:\n {\n 'parameter_id': id of new hyperparameter\n 'parameter_source': 'algorithm'\n 'parameters': value of new hyperparameter\n }\n b. If BOHB don't have parameter waiting, will return \"NoMoreTrialJobs\" with\n {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n ",
"language": "en",
"n_whitespaces": 189,
"n_words": 67,
"vocab_size": 48
} | def _get_one_trial_job(self):
if not self.generated_hyper_configs:
ret = {
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
self.send(CommandType.NoMoreTrialJobs, nni.dump(ret))
return None
assert self.generated_hyper_configs
params = self.generated_hyper_configs.pop(0)
ret = {
'parameter_id': params[0],
'parameter_source': 'algorithm',
'parameters': params[1]
}
self.parameters[params[0]] = params[1]
return ret
|
|
27,125 | 122,221 | 77 | jax/experimental/pjit.py | 39 | 28 | def global_array_to_host_local_array(global_inputs, global_mesh, pspecs):
def _convert(arr, pspec): | Add `host_local_array_to_global_array` and `global_array_to_host_local_array` for enabling transition to jax.Array.
 Also support `FROM_GDA` for `jax.Array` as a backwards-compatible change so that users can continue to use that until they transition to jax.Array. It's currently required because of usage like `in_axis_resources = (FROM_GDA, FROM_GDA, P('data'), None)` and changing this on the users' side will require input from users, so we as JAX can just support it as a temporary thing since GDA and Array act similarly in pjit.
PiperOrigin-RevId: 479035326 | global_array_to_host_local_array | 4da72cf3988b4918f65b1401e46c40b7c4504963 | jax | pjit.py | 12 | 7 | https://github.com/google/jax.git | 1 | 54 | 0 | 34 | 150 | Python | {
"docstring": "Converts a global `jax.Array` to a host local `jax.Array`.\n\n You can use this function to transition to `jax.Array`. Using `jax.Array` with\n `pjit` has the same semantics of using GDA with pjit i.e. all `jax.Array`\n inputs to pjit should be globally shaped and the output from `pjit` will also\n be globally shaped `jax.Array`s\n\n You can use this function to convert the globally shaped `jax.Array` output\n from pjit to host local values again so that the transition to jax.Array can\n be a mechanical change.\n\n Example usage:\n\n ```\n global_inputs = jax.experimental.pjit.host_local_array_to_global_array(\n host_local_inputs, global_mesh, in_pspecs)\n\n with mesh:\n global_out = pjitted_fun(global_inputs)\n\n host_local_output = jax.experimental.pjit.global_array_to_host_local_array(\n global_out, mesh, out_pspecs)\n ```\n\n Args:\n global_inputs: A Pytree of global `jax.Array`s.\n global_mesh: The global mesh.\n pspecs: A Pytree of PartitionSpecs.\n ",
"language": "en",
"n_whitespaces": 152,
"n_words": 119,
"vocab_size": 73
} | def global_array_to_host_local_array(global_inputs, global_mesh, pspecs):
def _convert(arr, pspec):
local_aval = global_mesh._global_to_local(
pxla._get_array_mapping(pspec), arr.aval)
return array.ArrayImpl(
local_aval, MeshPspecSharding(global_mesh.local_mesh, pspec),
arr._arrays, committed=True)
flattened_inps, out_tree = tree_flatten(global_inputs)
out_pspecs = flatten_axis_resources(
'output pspecs', out_tree, pspecs, tupled_args=True)
out = tree_map(_convert, tuple(flattened_inps), out_pspecs)
return tree_unflatten(out_tree, out)
|
|
53,642 | 213,099 | 29 | samtranslator/utils/py27hash_fix.py | 8 | 8 | def __setitem__(self, key, value):
| fix: Py27hash fix (#2182)
* Add third party py27hash code
* Add Py27UniStr and unit tests
* Add py27hash_fix utils and tests
* Add to_py27_compatible_template and tests
* Apply py27hash fix to wherever it is needed
* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger
* apply py27hash fix in openapi + run black
* remove py27 testing
* remove other py27 references
* black fixes
* fixes/typos
* remove py27 from tox.ini
* refactoring
* third party notice
* black
* Fix py27hash fix to deal with null events
* Fix Py27UniStr repr for unicode literals
* black reformat
* Update _template_has_api_resource to check data type more defensively
* Apply py27Dict in _get_authorizers
* Apply Py27Dict to authorizers and gateway responses which will go into swagger
* Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class
* Rename _convert_to_py27_dict to _convert_to_py27_type
* Apply Py27UniStr to path param name
* Handle HttpApi resource under to_py27_compatible_template
* Fix InvalidDocumentException to not sort different exceptions
* black reformat
* Remove unnecessary test files
Co-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com> | __setitem__ | a5db070f446b7cfebdaa6ad2e3dcf78f6105a272 | serverless-application-model | py27hash_fix.py | 9 | 3 | https://github.com/aws/serverless-application-model.git | 1 | 31 | 0 | 8 | 49 | Python | {
"docstring": "\n Override of __setitem__ to track keys and simulate Python2.7 dict\n\n Parameters\n ----------\n key: hashable\n value: Any\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 16,
"vocab_size": 16
} | def __setitem__(self, key, value):
super(Py27Dict, self).__setitem__(key, value)
self.keylist.add(key)
|
|
3,172 | 20,004 | 75 | pipenv/patched/notpip/_internal/utils/virtualenv.py | 43 | 6 | def virtualenv_no_global() -> bool:
# PEP 405 compliance needs to be checked firs | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
 * exclude pyproject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | virtualenv_no_global | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | virtualenv.py | 9 | 7 | https://github.com/pypa/pipenv.git | 3 | 27 | 0 | 35 | 52 | Python | {
"docstring": "Returns a boolean, whether running in venv with no system site-packages.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def virtualenv_no_global() -> bool:
# PEP 405 compliance needs to be checked first since virtualenv >=20 would
# return True for both checks, but is only able to use the PEP 405 config.
if _running_under_venv():
return _no_global_under_venv()
if _running_under_regular_virtualenv():
return _no_global_under_regular_virtualenv()
return False
|
|
16,409 | 75,478 | 95 | wagtail/search/backends/database/mysql/mysql.py | 22 | 13 | def autocomplete(self):
texts = []
for field in self.search_fields:
for current_field, value in self.prepare_field(self.obj, field):
if isinstance(current_field, | Reformat with black | autocomplete | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | mysql.py | 14 | 7 | https://github.com/wagtail/wagtail.git | 4 | 56 | 0 | 20 | 91 | Python | {
"docstring": "\n Returns all values to index as \"autocomplete\". This is the value of all AutocompleteFields\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | def autocomplete(self):
texts = []
for field in self.search_fields:
for current_field, value in self.prepare_field(self.obj, field):
if isinstance(current_field, AutocompleteField):
texts.append((value))
return " ".join(texts)
|
|
80,856 | 271,833 | 25 | keras/engine/training_utils.py | 9 | 5 | def list_to_tuple(maybe_list):
if isinstance(maybe_list, list):
return tuple | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | list_to_tuple | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training_utils.py | 9 | 4 | https://github.com/keras-team/keras.git | 2 | 21 | 0 | 8 | 36 | Python | {
"docstring": "Datasets will stack the list of tensor, so switch them to tuples.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def list_to_tuple(maybe_list):
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
|
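Behaviour at a glance (assuming the two-line helper above is in scope):

```
assert list_to_tuple([1, 2, 3]) == (1, 2, 3)   # lists become tuples
assert list_to_tuple("abc") == "abc"           # anything else passes through unchanged
```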
|
8,100 | 43,926 | 22 | tests/models/test_taskinstance.py | 8 | 6 | def test_not_recorded_for_unused(self, dag_maker, xcom_value):
| Add TaskMap and TaskInstance.map_id (#20286)
Co-authored-by: Ash Berlin-Taylor <ash_github@firemirror.com> | test_not_recorded_for_unused | d48a3a357fd89ec805d086d5b6c1f1d4daf77b9a | airflow | test_taskinstance.py | 12 | 8 | https://github.com/apache/airflow.git | 1 | 63 | 0 | 8 | 38 | Python | {
"docstring": "A value not used for task-mapping should not be recorded.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def test_not_recorded_for_unused(self, dag_maker, xcom_value):
with dag_maker(dag_id="test_not_recorded_for_unused") as dag:
|
|
@keras_export(
"keras.metrics.mean_absolute_error",
"keras.metrics.mae",
"keras.metrics.MAE",
"keras.losses.mean_absolute_error",
"keras.losses.mae",
"keras.losses.MAE",
)
@tf.__internal__.dispatch.add_dispatch_support | 81,233 | 274,555 | 37 | keras/losses.py | 16 | 10 | def _ragged_tensor_mse(y_true, y_pred):
return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)
@keras_export(
"keras.metrics.mean_abso | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _ragged_tensor_mse | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | losses.py | 7 | 2 | https://github.com/keras-team/keras.git | 1 | 17 | 1 | 16 | 71 | Python | {
"docstring": "Implements support for handling RaggedTensors.\n\n Args:\n y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.\n When the number of dimensions of the batch feature vector [d0, .. dN] is\n greater than one the return value is a RaggedTensor. Otherwise a Dense\n tensor with dimensions [batch_size] is returned.\n ",
"language": "en",
"n_whitespaces": 108,
"n_words": 69,
"vocab_size": 47
} | def _ragged_tensor_mse(y_true, y_pred):
return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)
@keras_export(
"keras.metrics.mean_absolute_error",
"keras.metrics.mae",
"keras.metrics.MAE",
"keras.losses.mean_absolute_error",
"keras.losses.mae",
"keras.losses.MAE",
)
@tf.__internal__.dispatch.add_dispatch_support |
46,496 | 191,358 | 32 | tests/unit_tests/test_formatting.py | 16 | 9 | def test_does_not_allow_extra_kwargs() -> None:
template = "This is a {foo} test."
with pytest.raises(KeyError):
formatter.for | initial commit | test_does_not_allow_extra_kwargs | 18aeb720126a68201c7e3b5a617139c27c779496 | langchain | test_formatting.py | 11 | 5 | https://github.com/hwchase17/langchain.git | 1 | 32 | 0 | 16 | 61 | Python | {
"docstring": "Test formatting does not allow extra key word arguments.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_does_not_allow_extra_kwargs() -> None:
template = "This is a {foo} test."
with pytest.raises(KeyError):
formatter.format(template, foo="good", bar="oops")
|
|
75,342 | 258,632 | 107 | sklearn/neighbors/_lof.py | 33 | 19 | def score_samples(self, X):
check_is_fitted(self)
X = check_array(X, accept_sparse="csr")
distances_X, neighbors_indices_X = self.kneighbors(
X, n_neighbors=self.n_neighbors_
)
X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)
lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
# as bigger is better:
return -np.mean(lrd_r | DOC improve LOF documentation wrt difference of predict and fit_predict (#21878)
* improve LOF documentation
* Update sklearn/neighbors/_lof.py
Co-authored-by: Alexandre Gramfort <alexandre.gramfort@m4x.org>
Co-authored-by: Alexandre Gramfort <alexandre.gramfort@m4x.org> | score_samples | 0dfaaadfe2d0e0b4fd9d2ba22a75b7b1b1903049 | scikit-learn | _lof.py | 10 | 9 | https://github.com/scikit-learn/scikit-learn.git | 1 | 77 | 0 | 30 | 122 | Python | {
"docstring": "Opposite of the Local Outlier Factor of X.\n\n It is the opposite as bigger is better, i.e. large values correspond\n to inliers.\n\n **Only available for novelty detection (when novelty is set to True).**\n The argument X is supposed to contain *new data*: if X contains a\n point from training, it considers the later in its own neighborhood.\n Also, the samples in X are not considered in the neighborhood of any\n point. Because of this, the scores obtained via ``score_samples`` may\n differ from the standard LOF scores.\n The standard LOF scores for the training data is available via the\n ``negative_outlier_factor_`` attribute.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. the training samples.\n\n Returns\n -------\n opposite_lof_scores : ndarray of shape (n_samples,)\n The opposite of the Local Outlier Factor of each input samples.\n The lower, the more abnormal.\n ",
"language": "en",
"n_whitespaces": 311,
"n_words": 148,
"vocab_size": 93
} | def score_samples(self, X):
check_is_fitted(self)
X = check_array(X, accept_sparse="csr")
distances_X, neighbors_indices_X = self.kneighbors(
X, n_neighbors=self.n_neighbors_
)
X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)
lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
|
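A small, self-contained illustration of the scoring above via the public estimator (the sample values are made up; only the relative ordering of the scores is the point):

```
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

# novelty=True is what makes score_samples usable on new, unseen points.
lof = LocalOutlierFactor(n_neighbors=3, novelty=True)
lof.fit(np.array([[0.0], [0.1], [0.2], [0.3]]))

scores = lof.score_samples(np.array([[0.15], [50.0]]))
# The point inside the training cluster gets a score near -1 (inlier);
# the far-away point gets a much more negative score (outlier).
assert scores[0] > scores[1]
```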
|
14,380 | 66,920 | 44 | erpnext/payroll/doctype/payroll_period/payroll_period.py | 63 | 18 | def get_payroll_period_days(start_date, end_date, employee, company=None):
if not company:
company = frappe.db.get_value("Employee", employee, "company")
payroll_period = frappe.db.sql(
,
{"company": company, "st | style: format code with black | get_payroll_period_days | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | payroll_period.py | 16 | 26 | https://github.com/frappe/erpnext.git | 4 | 165 | 0 | 48 | 256 | Python | {
"docstring": "\n\t\tselect name, start_date, end_date\n\t\tfrom `tabPayroll Period`\n\t\twhere\n\t\t\tcompany=%(company)s\n\t\t\tand %(start_date)s between start_date and end_date\n\t\t\tand %(end_date)s between start_date and end_date\n\t",
"language": "en",
"n_whitespaces": 15,
"n_words": 21,
"vocab_size": 14
} | def get_payroll_period_days(start_date, end_date, employee, company=None):
if not company:
company = frappe.db.get_value("Employee", employee, "company")
payroll_period = frappe.db.sql(
,
{"company": company, "start_date": start_date, "end_date": end_date},
)
if len(payroll_period) > 0:
actual_no_of_days = date_diff(getdate(payroll_period[0][2]), getdate(payroll_period[0][1])) + 1
working_days = actual_no_of_days
if not cint(
frappe.db.get_value("Payroll Settings", None, "include_holidays_in_total_working_days")
):
holidays = get_holiday_dates_for_employee(
employee, getdate(payroll_period[0][1]), getdate(payroll_period[0][2])
)
working_days -= len(holidays)
return payroll_period[0][0], working_days, actual_no_of_days
return False, False, False
|
|
42,041 | 176,699 | 120 | networkx/algorithms/bipartite/basic.py | 52 | 12 | def density(B, nodes):
n = len(B)
m = nx.number_of_edges(B)
nb = len(nodes)
nt = n - nb
if m == 0: # includes cases n==0 and n==1
d = 0.0
else:
if B.is_directed():
| Remove redundant py2 numeric conversions (#5661)
* Remove redundant float conversion
* Remove redundant int conversion
* Use integer division
Co-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com> | density | 2a05ccdb07cff88e56661dee8a9271859354027f | networkx | basic.py | 15 | 13 | https://github.com/networkx/networkx.git | 3 | 76 | 0 | 31 | 124 | Python | {
"docstring": "Returns density of bipartite graph B.\n\n Parameters\n ----------\n B : NetworkX graph\n\n nodes: list or container\n Nodes in one node set of the bipartite graph.\n\n Returns\n -------\n d : float\n The bipartite density\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.complete_bipartite_graph(3, 2)\n >>> X = set([0, 1, 2])\n >>> bipartite.density(G, X)\n 1.0\n >>> Y = set([3, 4])\n >>> bipartite.density(G, Y)\n 1.0\n\n Notes\n -----\n The container of nodes passed as argument must contain all nodes\n in one of the two bipartite node sets to avoid ambiguity in the\n case of disconnected graphs.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n See Also\n --------\n color\n ",
"language": "en",
"n_whitespaces": 208,
"n_words": 113,
"vocab_size": 79
} | def density(B, nodes):
n = len(B)
m = nx.number_of_edges(B)
nb = len(nodes)
nt = n - nb
if m == 0: # includes cases n==0 and n==1
d = 0.0
else:
if B.is_directed():
d = m / (2 * nb * nt)
else:
d = m / (nb * nt)
return d
|
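Beyond the complete-graph case in the docstring, a short check of the undirected formula d = m / (nb * nt):

```
import networkx as nx
from networkx.algorithms import bipartite

G = nx.complete_bipartite_graph(3, 2)      # m = 6, nb = 3, nt = 2
assert bipartite.density(G, {0, 1, 2}) == 1.0

G.remove_edge(0, 3)                        # m = 5 now
assert abs(bipartite.density(G, {0, 1, 2}) - 5 / 6) < 1e-12
```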
|
8,956 | 46,701 | 118 | airflow/www/views.py | 27 | 9 | def redirect_or_json(origin, msg, status=""):
if request.headers.get('Accept') == 'application/json':
return {'status': status, 'message': msg}
else:
if status:
flash(msg, status)
else:
flash(msg)
return redirect(origin)
######################################################################################
# Error handlers
################################################################################### | Add details drawer to Grid View (#22123)
* make UI and tree work with mapped tasks
basic slide drawer
reformat grid background colors
improve rendering and add selected dag run
fix hover and extra prop
switch from drawer to details section
add tooltip info to details
use API
make side panel collapsible, useTasks,
dag run actions
dag run actions w/ react-query
task instance links
task actions
remove modals
adjust panel width and use status color
minor details styling
add duration to tooltips
add last scheduling decision and fix tests
* move ref and selection to providers
* fix test with mock providers
* update TI and DR buttons
* download logs and external logs
* add extra links to TI details
* download log bug fixes
* fix extra links, hide local TZ if UTC,
* confirm mark task failed/success
* Update confirm modals for runs and tasks
- async/await on mutations instead of useeffect
- add confirmation for run actions
* Fix dialog scrolling
* Code cleanup and fix task clear
* Fix task/run label, dialog focus, dag details overflow, panel open/close
* Add timezone provider
* Fix TimezoneEvent import
* Improve button UX
- Remove details panel title
- Add button to reset root
- Make "More Details" buttons more specific
- Specify timezone as DAG timezone
* autorefresh dag run details
* auto-refresh task instance details
* revert useTreeData changes
None of these changes were relevant to this PR. Better to be done separately.
* Address PR feedback
- useState vs useDisclosure
- Remove extraneous elements
- Copy changes
- Wire up params for runTask
- Breadcrumb padding
* Handle task/run action sideeffects by separating autorefresh and treeData hooks
* Clean up views.py endpoints
- Pass 'Accept' headers for json returns
- Consolidate more endpoints to return json or redirect
* pass request as arg
* remove request as arg
* Anticipate when the 'Accept' header is not present
* Fix argument count errors
* Replace hard coded urls
* Replace hard coded urls in react components
* Update filter upstream link
* Split TaskInstance details component
* Fix undefined variables in tests
* init_api_connexion in tests
- add readme
- rename context providers to avoid confusion with Airflow Providers
* Fix url params, hide last item breadcrumb links
* Update task run failed copy
* Fix taskinstance/list buttons
Co-authored-by: Tzu-ping Chung <tp@astronomer.io> | redirect_or_json | 2bb26a38070a4b949bfb210ef1d5644e016e373a | airflow | views.py | 13 | 9 | https://github.com/apache/airflow.git | 3 | 56 | 0 | 23 | 103 | Python | {
"docstring": "\n Some endpoints are called by javascript,\n returning json will allow us to more elegantly handle side-effects in-page\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 17
} | def redirect_or_json(origin, msg, status=""):
if request.headers.get('Accept') == 'application/json':
return {'status': status, 'message': msg}
else:
if status:
flash(msg, status)
else:
flash(msg)
return redirect(origin)
######################################################################################
# Error handlers
######################################################################################
|
|
75,649 | 259,212 | 203 | sklearn/preprocessing/_encoders.py | 62 | 12 | def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):
if not self._infrequent_enabled:
return drop_idx
default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]
if default_to_infrequent is None:
return drop_idx
# Raise error when explicitly dropping a category that is infrequent
infrequent_indices = self._infrequent_indices[feature_idx]
if infrequent_indices is not None and drop_idx in infrequent_indices:
categories = self.categories_[feature_idx]
raise ValueError(
f"Unable to drop category {categories[drop_idx]!r} from feature"
f" {feature_idx} because it is infrequent"
)
return default_to_infreq | ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment regarding drop
* BUG Fixes method name
* DOC Clearer docstring
* TST Adds more tests
* FIX Fixes mege
* CLN More pythonic
* CLN Address comments
* STY Flake8
* CLN Address comments
* DOC Fix
* MRG
* WIP
* ENH Address comments
* STY Fix
* ENH Use functiion call instead of property
* ENH Adds counts feature
* CLN Rename variables
* DOC More details
* CLN Remove unneeded line
* CLN Less lines is less complicated
* CLN Less diffs
* CLN Improves readiabilty
* BUG Fix
* CLN Address comments
* TST Fix
* CLN Address comments
* CLN Address comments
* CLN Move docstring to userguide
* DOC Better wrapping
* TST Adds test to handle_unknown='error'
* ENH Spelling error in docstring
* BUG Fixes counter with nan values
* BUG Removes unneeded test
* BUG Fixes issue
* ENH Sync with main
* DOC Correct settings
* DOC Adds docstring
 * DOC Improve user guide
* DOC Move to 1.0
* DOC Update docs
* TST Remove test
* DOC Update docstring
* STY Linting
* DOC Address comments
* ENH Neater code
* DOC Update explaination for auto
* Update sklearn/preprocessing/_encoders.py
Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com>
* TST Uses docstring instead of comments
* TST Remove call to fit
* TST Spelling error
* ENH Adds support for drop + infrequent categories
* ENH Adds infrequent_if_exist option
* DOC Address comments for user guide
* DOC Address comments for whats_new
* DOC Update docstring based on comments
* CLN Update test with suggestions
* ENH Adds computed property infrequent_categories_
* DOC Adds where the infrequent column is located
* TST Adds more test for infrequent_categories_
* DOC Adds docstring for _compute_drop_idx
* CLN Moves _convert_to_infrequent_idx into its own method
* TST Increases test coverage
* TST Adds failing test
* CLN Careful consideration of dropped and inverse_transform
* STY Linting
 * DOC Adds docstring about dropping infrequent
* DOC Uses only
* DOC Numpydoc
* TST Includes test for get_feature_names_out
* DOC Move whats new
* DOC Address docstring comments
* DOC Docstring changes
* TST Better comments
* TST Adds check for handle_unknown='ignore' for infrequent
* CLN Make _infrequent_indices private
* CLN Change min_frequency default to None
* DOC Adds comments
* ENH adds support for max_categories=1
* ENH Describe lexicon ordering for ties
* DOC Better docstring
* STY Fix
 * CLN Error when explicitly dropping an infrequent category
* STY Grammar
Co-authored-by: Joel Nothman <joel.nothman@gmail.com>
Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | _map_drop_idx_to_infrequent | 7f0006c8aad1a09621ad19c3db19c3ff0555a183 | scikit-learn | _encoders.py | 13 | 14 | https://github.com/scikit-learn/scikit-learn.git | 5 | 72 | 0 | 47 | 127 | Python | {
"docstring": "Convert `drop_idx` into the index for infrequent categories.\n\n If there are no infrequent categories, then `drop_idx` is\n returned. This method is called in `_compute_drop_idx` when the `drop`\n parameter is an array-like.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 31,
"vocab_size": 26
} | def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):
if not self._infrequent_enabled:
return drop_idx
default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]
if default_to_infrequent is None:
return drop_idx
# Raise error when explicitly dropping a category that is infrequent
infrequent_indices = self._infrequent_indices[feature_idx]
if infrequent_indices is not None and drop_idx in infrequent_indices:
categories = self.categories_[feature_idx]
raise ValueError(
f"Unable to drop category {categories[drop_idx]!r} from feature"
f" {feature_idx} because it is infrequent"
)
return default_to_infrequent[drop_idx]
|
|
29,076 | 130,020 | 57 | dashboard/tests/test_dashboard.py | 29 | 13 | def test_dashboard_module_decorator(enable_test_module):
head_cls_list = dashboard_utils.get_all_modules(dashboard_utils.DashboardHeadModule)
agent_cls_list = dashboard_utils.get_all_modules(
dashboard_utils.DashboardAgentModule
)
assert any(cls.__name__ == "TestHead" for cls in head_cls_list)
assert any(cls.__name__ | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_dashboard_module_decorator | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_dashboard.py | 9 | 23 | https://github.com/ray-project/ray.git | 3 | 58 | 0 | 21 | 97 | Python | {
"docstring": "\nimport os\nimport ray.dashboard.utils as dashboard_utils\n\nos.environ.pop(\"RAY_DASHBOARD_MODULE_TEST\")\nhead_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardHeadModule)\nagent_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardAgentModule)\nprint(head_cls_list)\nprint(agent_cls_list)\nassert all(cls.__name__ != \"TestHead\" for cls in head_cls_list)\nassert all(cls.__name__ != \"TestAgent\" for cls in agent_cls_list)\nprint(\"success\")\n",
"language": "en",
"n_whitespaces": 38,
"n_words": 34,
"vocab_size": 25
} | def test_dashboard_module_decorator(enable_test_module):
head_cls_list = dashboard_utils.get_all_modules(dashboard_utils.DashboardHeadModule)
agent_cls_list = dashboard_utils.get_all_modules(
dashboard_utils.DashboardAgentModule
)
assert any(cls.__name__ == "TestHead" for cls in head_cls_list)
assert any(cls.__name__ == "TestAgent" for cls in agent_cls_list)
test_code =
run_string_as_driver(test_code)
|
|
57,102 | 223,844 | 21 | python3.10.4/Lib/email/parser.py | 7 | 6 | def parsestr(self, text, headersonly=False):
return self.parse(StringIO(text), headersonly=headers | add python 3.10.4 for windows | parsestr | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | parser.py | 9 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 26 | 0 | 7 | 41 | Python | {
"docstring": "Create a message structure from a string.\n\n Returns the root of the message structure. Optional headersonly is a\n flag specifying whether to stop parsing after reading the headers or\n not. The default is False, meaning it parses the entire contents of\n the file.\n ",
"language": "en",
"n_whitespaces": 80,
"n_words": 43,
"vocab_size": 34
} | def parsestr(self, text, headersonly=False):
return self.parse(StringIO(text), headersonly=headersonly) |
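A short self-contained example of this standard-library method (the message text is made up):

```
from email.parser import Parser

msg = Parser().parsestr("Subject: hi\n\nbody text\n")
assert msg["Subject"] == "hi"
assert msg.get_payload() == "body text\n"

# headersonly=True stops parsing after the header block; the rest stays as the payload.
headers_only = Parser().parsestr("Subject: hi\n\nbody text\n", headersonly=True)
assert headers_only["Subject"] == "hi"
```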
|
78,323 | 266,162 | 69 | netbox/netbox/views/generic/utils.py | 19 | 9 | def get_prerequisite_model(queryset):
if not queryset.exists():
for prereq in getattr(queryset.model, 'prerequisite_models', []):
model = apps.get_model(prereq)
if not model.objects.exists():
return model
| Use strings to specify prerequisite models | get_prerequisite_model | ebf555e1fb1267348ca620c15ce456767d91042a | netbox | utils.py | 13 | 6 | https://github.com/netbox-community/netbox.git | 4 | 49 | 0 | 16 | 83 | Python | {
"docstring": "\n Return any prerequisite model that must be created prior to creating\n an instance of the current model.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 17
} | def get_prerequisite_model(queryset):
if not queryset.exists():
for prereq in getattr(queryset.model, 'prerequisite_models', []):
model = apps.get_model(prereq)
if not model.objects.exists():
return model
|
|
81,435 | 275,623 | 303 | keras/optimizers/optimizer_v2/utils.py | 95 | 15 | def filter_empty_gradients(grads_and_vars):
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
variable = ([v.name for _, v in grads_and_vars],)
raise ValueError(
f"No gradients provided for any variable: {variable}. "
f"Provided `grads_a | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | filter_empty_gradients | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | utils.py | 13 | 28 | https://github.com/keras-team/keras.git | 8 | 118 | 0 | 69 | 203 | Python | {
"docstring": "Filter out `(grad, var)` pairs that have a gradient equal to `None`.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def filter_empty_gradients(grads_and_vars):
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
variable = ([v.name for _, v in grads_and_vars],)
raise ValueError(
f"No gradients provided for any variable: {variable}. "
f"Provided `grads_and_vars` is {grads_and_vars}."
)
if vars_with_empty_grads:
logging.warning(
(
"Gradients do not exist for variables %s when minimizing the loss. "
"If you're using `model.compile()`, did you forget to provide a `loss`"
"argument?"
),
([v.name for v in vars_with_empty_grads]),
)
return filtered
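A small demonstration of the helper above, assuming TensorFlow is installed and the module is importable at the path shown in this record (keras/optimizers/optimizer_v2/utils.py); the import location differs in some Keras releases:

import tensorflow as tf

from keras.optimizers.optimizer_v2 import utils

kernel = tf.Variable(tf.ones((2, 2)), name="kernel")
bias = tf.Variable(tf.zeros(2), name="bias")

# `bias` has no gradient here, so its pair is dropped and a warning is logged.
grads_and_vars = [(tf.ones((2, 2)), kernel), (None, bias)]
filtered = utils.filter_empty_gradients(grads_and_vars)

print([v.name for _, v in filtered])   # ['kernel:0']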
|
|
49,486 | 200,000 | 439 | sympy/physics/wigner.py | 231 | 37 | def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):
r
l_1, l_2, l_3, m_1, m_2, m_3 = [
as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]
# check for quick exits
if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2:
return S.Zero # odd number of negative m
if (l_1 + l_2 + l_3) % 2:
return S.Zero # sum of l is odd
lmax = l_2 + l_3
lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3)))
if (lmin + lmax) % 2:
lmin += 1
if lmin not in range(lmax, lmin - 2, -2):
return S.Zero
kron_del = lambda i, j: 1 if i == j else 0
s = lambda e: -1 if e % 2 else 1 # (-1)**e to give +/-1, avoiding float when e<0
A = lambda a, b: (-kron_del(a, b)*s(a-b) + kron_del(a, -b)*
s(b)) if b < 0 else 0
B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0
C = lambda a, b: kron | Update wigner.py | real_gaunt | f8aedc2fa7434091fc83ff241298534f79047c60 | sympy | wigner.py | 16 | 142 | https://github.com/sympy/sympy.git | 15 | 424 | 0 | 124 | 623 | Python | {
"docstring": "\n Calculate the real Gaunt coefficient.\n\n Explanation\n ===========\n The real Gaunt coefficient is defined as the integral over three\n real spherical harmonics:\n \n .. math::\n \\begin{aligned}\n \\operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3)\n &=\\int Z^{m_1}_{l_1}(\\Omega)\n Z^{m_2}_{l_2}(\\Omega) Z^{m_3}_{l_3}(\\Omega) \\,d\\Omega \\\\\n \\end{aligned}\n\n Alternatively, it can be defined in terms of the standard Gaunt\n coefficient by relating the real spherical harmonics to the standard\n spherical harmonics via a unitary transformation `U`, i.e.\n `Z^{m}_{l}(\\Omega)=\\sum_{m'}U^{m}_{m'}Y^{m'}_{l}(\\Omega)` [Homeier96]_.\n The real Gaunt coefficient is then defined as\n\n .. math::\n \\begin{aligned}\n \\operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3)\n &=\\int Z^{m_1}_{l_1}(\\Omega)\n Z^{m_2}_{l_2}(\\Omega) Z^{m_3}_{l_3}(\\Omega) \\,d\\Omega \\\\\n &=\\sum_{m'_1 m'_2 m'_3} U^{m_1}_{m'_1}U^{m_2}_{m'_2}U^{m_3}_{m'_3}\n \\operatorname{Gaunt}(l_1,l_2,l_3,m'_1,m'_2,m'_3)\n \\end{aligned}\n\n The unitary matrix `U` has components\n\n .. math::\n \\begin{aligned}\n U^m_{m'} = \\delta_{|m||m'|}*(\\delta_{m'0}\\delta_{m0} + \\frac{1}{\\sqrt{2}}\\big[\\Theta(m)\n \\big(\\delta_{m'm}+(-1)^{m'}\\delta_{m'-m}\\big)+i\\Theta(-m)\\big((-1)^{-m}\n \\delta_{m'-m}-\\delta_{m'm}*(-1)^{m'-m}\\big)\\big])\n \\end{aligned}\n\n where `\\delta_{ij}` is the Kronecker delta symbol and `\\Theta` is a step\n function defined as\n\n .. math::\n \\begin{aligned}\n \\Theta(x) = \\begin{cases} 1 \\,\\text{for}\\, x > 0 \\\\ 0 \\,\\text{for}\\, x \\leq 0 \\end{cases}\n \\end{aligned}\n\n Parameters\n ==========\n l_1, l_2, l_3, m_1, m_2, m_3 :\n Integer.\n prec - precision, default: ``None``.\n Providing a precision can\n drastically speed up the calculation.\n\n Returns\n =======\n Rational number times the square root of a rational number.\n\n Examples\n ========\n >>> from sympy.physics.wigner import real_gaunt\n >>> real_gaunt(2,2,4,-1,-1,0)\n -2/(7*sqrt(pi))\n >>> real_gaunt(10,10,20,-9,-9,0).n(64)\n -0.00002480019791932209313156167...\n \n It is an error to use non-integer values for `l` and `m`::\n real_gaunt(2.8,0.5,1.3,0,0,0)\n Traceback (most recent call last):\n ...\n ValueError: l values must be integer\n real_gaunt(2,2,4,0.7,1,-3.4)\n Traceback (most recent call last):\n ...\n ValueError: m values must be integer\n\n Notes\n =====\n The real Gaunt coefficient inherits from the standard Gaunt coefficient,\n the invariance under any permutation of the pairs `(l_i, m_i)` and the\n requirement that the sum of the `l_i` be even to yield a non-zero value.\n It also obeys the following symmetry rules:\n\n - zero for `l_1`, `l_2`, `l_3` not fulfiling the condition\n `l_1 \\in \\{l_{\\text{max}}, l_{\\text{max}}-2, \\ldots, l_{\\text{min}}\\}`,\n where `l_{\\text{max}} = l_2+l_3`,\n \n .. math::\n \\begin{aligned}\n l_{\\text{min}} = \\begin{cases} \\kappa(l_2, l_3, m_2, m_3) & \\text{if}\\,\n \\kappa(l_2, l_3, m_2, m_3) + l_{\\text{max}}\\, \\text{is even} \\\\\n \\kappa(l_2, l_3, m_2, m_3)+1 & \\text{if}\\, \\kappa(l_2, l_3, m_2, m_3) +\n l_{\\text{max}}\\, \\text{is odd}\\end{cases}\n \\end{aligned}\n\n and `\\kappa(l_2, l_3, m_2, m_3) = \\max{\\big(|l_2-l_3|, \\min{\\big(|m_2+m_3|,\n |m_2-m_3|\\big)}\\big)}`\n \n - zero for an odd number of negative `m_i`\n \n Algorithms\n ==========\n This function uses the algorithms of [Homeier96]_ and [Rasch03]_ to\n calculate the value of the real Gaunt coefficient exactly. 
Note that\n the formula used in [Rasch03]_ contains alternating sums over large\n factorials and is therefore unsuitable for finite precision arithmetic\n and only useful for a computer algebra system [Rasch03]_. However, this\n function can in principle use any algorithm that computes the Gaunt\n coefficient, so it is suitable for finite precision arithmetic in so far\n as the algorithm which computes the Gaunt coefficient is.\n ",
"language": "en",
"n_whitespaces": 906,
"n_words": 429,
"vocab_size": 239
} | def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):
r
l_1, l_2, l_3, m_1, m_2, m_3 = [
as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]
# check for quick exits
if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2:
return S.Zero # odd number of negative m
if (l_1 + l_2 + l_3) % 2:
return S.Zero # sum of l is odd
lmax = l_2 + l_3
lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3)))
if (lmin + lmax) % 2:
lmin += 1
if lmin not in range(lmax, lmin - 2, -2):
return S.Zero
kron_del = lambda i, j: 1 if i == j else 0
s = lambda e: -1 if e % 2 else 1 # (-1)**e to give +/-1, avoiding float when e<0
A = lambda a, b: (-kron_del(a, b)*s(a-b) + kron_del(a, -b)*
s(b)) if b < 0 else 0
B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0
C = lambda a, b: kron_del(abs(a), abs(b))*(kron_del(a, 0)*kron_del(b, 0) +
(B(a, b) + I*A(a, b))/sqrt(2))
ugnt = 0
for i in range(-l_1, l_1+1):
U1 = C(i, m_1)
for j in range(-l_2, l_2+1):
U2 = C(j, m_2)
U3 = C(-i-j, m_3)
ugnt = ugnt + re(U1*U2*U3)*gaunt(l_1, l_2, l_3, i, j, -i-j)
if prec is not None:
ugnt = ugnt.n(prec)
return ugnt
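A short check of the behaviour documented above, assuming a SymPy version recent enough to expose `real_gaunt` next to `gaunt` in `sympy.physics.wigner`:

from sympy.physics.wigner import gaunt, real_gaunt

# Value quoted in the docstring above.
print(real_gaunt(2, 2, 4, -1, -1, 0))    # -2/(7*sqrt(pi))

# An odd number of negative m values triggers the first quick exit.
print(real_gaunt(2, 2, 4, -1, 0, 0))     # 0

# With m_1 = m_2 = m_3 = 0 the real and standard spherical harmonics coincide,
# so the real Gaunt coefficient reduces to the ordinary Gaunt coefficient.
print(real_gaunt(2, 2, 4, 0, 0, 0).equals(gaunt(2, 2, 4, 0, 0, 0)))   # True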
|