Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40..40 |
| size | int64 | 1..1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3..239 |
| max_stars_repo_name | stringlengths | 5..130 |
| max_stars_repo_head_hexsha | stringlengths | 40..78 |
| max_stars_repo_licenses | sequencelengths | 1..10 |
| max_stars_count | int64 ⌀ | 1..191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24..24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24..24 |
| max_issues_repo_path | stringlengths | 3..239 |
| max_issues_repo_name | stringlengths | 5..130 |
| max_issues_repo_head_hexsha | stringlengths | 40..78 |
| max_issues_repo_licenses | sequencelengths | 1..10 |
| max_issues_count | int64 ⌀ | 1..67k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24..24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24..24 |
| max_forks_repo_path | stringlengths | 3..239 |
| max_forks_repo_name | stringlengths | 5..130 |
| max_forks_repo_head_hexsha | stringlengths | 40..78 |
| max_forks_repo_licenses | sequencelengths | 1..10 |
| max_forks_count | int64 ⌀ | 1..105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24..24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24..24 |
| content | stringlengths | 1..1.03M |
| avg_line_length | float64 | 1..958k |
| max_line_length | int64 | 1..1.03M |
| alphanum_fraction | float64 | 0..1 |
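
A minimal sketch (not part of the dump) of reading rows with this schema via the Hugging Face `datasets` library; the dataset ID below is a placeholder, since the dump does not name the dataset:

    # Hypothetical loading snippet; "user/this-code-dataset" stands in for the real ID.
    from datasets import load_dataset

    ds = load_dataset("user/this-code-dataset", split="train", streaming=True)
    row = next(iter(ds))
    print(row["max_stars_repo_path"], row["size"], row["content"][:80])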
hexsha: 793e7fffbdd5c89651bd754571e1af6c08b7b976 | size: 820 | ext: py | lang: Python
repo_path: add-binary/solution.py | repo_name: LYZhelloworld/Leetcode | head_hexsha: 1ef3c8d3a75a20755e7474427224ed8757f97932 | licenses: ["MIT"]
(the max_stars/max_issues/max_forks path, name, head and license columns are identical; they are deduplicated here and in the rows below wherever they agree)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)

class Solution:
    def addBinary(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str
        """
        x = [i == '1' for i in a[::-1]]
        y = [i == '1' for i in b[::-1]]
        r = []
        carry = False
        if len(x) > len(y):
            y += [False] * (len(x) - len(y))
        else:
            x += [False] * (len(y) - len(x))
        for d in range(len(x)):
            s, carry = self.full_adder(x[d], y[d], carry)
            r += [s]
        if carry:
            r += [True]
        r.reverse()
        return ''.join(['1' if i else '0' for i in r])

    def half_adder(self, a, b):
        return a ^ b, a & b

    def full_adder(self, a, b, cin):
        s1, c1 = self.half_adder(a, b)
        s2, c2 = self.half_adder(s1, cin)
        return s2, c1 | c2
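
# Quick check (illustrative addition, not in the original solution file):
if __name__ == '__main__':
    assert Solution().addBinary('11', '1') == '100'         # 3 + 1 = 4
    assert Solution().addBinary('1010', '1011') == '10101'  # 10 + 11 = 21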
[row stats] avg_line_length: 24.117647 | max_line_length: 57 | alphanum_fraction: 0.4
hexsha: 793e81ebd98b987ef5fea1e69b37fa789775b1f4 | size: 7,321 | ext: py | lang: Python
repo_path: dp_multiq/csmooth.py | licenses: ["Apache-2.0"]
max_stars: 23,901 on DionysisChristopoulos/google-research @ 7f59ef421beef32ca16c2a7215be74f7eba01a0f (events 2018-10-04T19:48:53.000Z .. 2022-03-31T21:27:42.000Z)
max_issues: 891 on DionysisChristopoulos/google-research @ 7f59ef421beef32ca16c2a7215be74f7eba01a0f (events 2018-11-10T06:16:13.000Z .. 2022-03-31T10:42:34.000Z)
max_forks: 6,047 on admariner/google-research @ 7cee4b22b925581d912e8d993625c180da2a5a4f (events 2018-10-12T06:31:02.000Z .. 2022-03-31T13:59:28.000Z)

# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CDP smooth sensitivity method for computing differentially private quantiles.
The smooth sensitivity method is described in
"Smooth Sensitivity and Sampling in Private Data Analysis" by Nissim,
Raskhodnikova, and Smith
(https://cs-people.bu.edu/ads22/pubs/NRS07/NRS07-full-draft-v1.pdf). Details for
the CDP noise distribution appear in Section 3.1 of "Average-Case Averages:
Private Algorithms for Smooth Sensitivity and Mean Estimation" by Bun and
Steinke (NeurIPS 2019). Details for optimizing t, s, and sigma appear in
Section 3.1.1 of the same paper.
"""
import numpy as np
from dp_multiq import base
from dp_multiq import smooth_utils
def compute_triples(eps, ts):
"""Returns triples of form (t, log(s), sigma) for hyperparameter optimization.
Args:
eps: Privacy parameter epsilon.
ts: Array of possible smooth sensitivity parameters.
"""
triples = np.empty([len(ts), 3])
for t_idx in range(len(ts)):
t = ts[t_idx]
triples[t_idx, 0] = t
sigma = opt_sigma(eps, t)
triples[t_idx, 2] = sigma
triples[t_idx, 1] = -1.5 * (sigma**2) + np.log(eps - (t / sigma))
return triples
def opt_sigma(eps, t):
"""Returns optimal sigma as detailed in Section 3.1.1 of Bun and Steinke.
Args:
eps: Privacy parameter epsilon.
t: Smooth sensitivity parameter.
"""
return np.real(np.roots([5 * eps / t, -5, 0, -1])[0])
def lln(sigma):
"""Returns a sample from the Laplace Log-Normal distribution.
Args:
sigma: Sigma parameter for the Laplace Log-Normal distribution.
"""
return np.random.laplace() * np.exp(sigma * np.random.normal())
def csmooth(sorted_data, data_low, data_high, qs, divided_eps, ts):
"""Returns eps^2/2-CDP quantile estimates for qs.
Args:
sorted_data: Array of data points sorted in increasing order.
data_low: Lower limit for any differentially private quantile output value.
data_high: Upper limit for any differentially private quantile output value.
qs: Increasing array of quantiles in [0,1].
divided_eps: Privacy parameter epsilon. Assumes eps has already been divided
so that the overall desired privacy guarantee is achieved.
ts: Array of smooth sensitivity parameters, one for each q in qs.
"""
sorted_data = np.clip(sorted_data, data_low, data_high)
o = np.empty(len(qs))
triples = compute_triples(divided_eps, ts)
for i in range(len(qs)):
t, log_s, sigma = triples[i]
true_quantile_idx = base.quantile_index(len(sorted_data), qs[i])
true_quantile_value = sorted_data[true_quantile_idx]
laplace_log_normal_noise = lln(sigma)
log_sensitivity = smooth_utils.compute_log_smooth_sensitivity(
sorted_data, data_low, data_high, true_quantile_idx, t)
noise = np.sign(laplace_log_normal_noise) * np.exp(
log_sensitivity + np.log(np.abs(laplace_log_normal_noise)) - log_s)
o[i] = true_quantile_value + noise
o = np.clip(o, data_low, data_high)
return np.sort(o)
def log_choose_triple_idx(triples, eps, log_sensitivities):
"""Returns triple (t, log_s, sigma) that minimizes noisy statistic variance.
Args:
triples: Array with entries of form (t, log_s, sigma).
eps: Privacy parameter epsilon.
log_sensitivities: Log(t smooth sensitivity) for each t in triples.
"""
variances = np.empty(len(triples))
for triple_idx in range(len(triples)):
numerator = 2 * (np.exp(2 * log_sensitivities[triple_idx]))
denominator = np.exp(-5 * (triples[triple_idx][2]**2)) * (
(eps - (triples[triple_idx][0] / triples[triple_idx][2]))**2)
variances[triple_idx] = numerator / denominator
return np.argmin(variances)
def csmooth_tune_and_return_ts(sorted_data, data_low, data_high, qs,
divided_eps, log_t_low, log_t_high, num_t):
"""Returns ts minimizing variance for data and each q under ~eps^2/2-CDP.
Args:
sorted_data: Array of data points sorted in increasing order.
data_low: Lower limit for any differentially private quantile output value.
data_high: Upper limit for any differentially private quantile output value.
qs: Increasing array of quantiles in [0,1].
divided_eps: Privacy parameter epsilon. Assumes eps has already been divided
so that the overall desired privacy guarantee is achieved.
log_t_low: Tuning range for t has lower bound 10^(log_t_low).
log_t_high: Tuning range for t has upper bound 10^(log_t_high).
num_t: Number of logarithmically spaced t used to populate tuning range.
"""
sorted_data = np.clip(sorted_data, data_low, data_high)
triples = compute_triples(divided_eps,
np.logspace(log_t_low, log_t_high, num_t))
num_qs = len(qs)
ts = np.empty(num_qs)
for i in range(num_qs):
true_quantile_idx = base.quantile_index(len(sorted_data), qs[i])
log_sensitivities = np.zeros(len(triples))
for triple_idx in range(len(triples)):
t = triples[triple_idx, 0]
log_sensitivities[
triple_idx] = smooth_utils.compute_log_smooth_sensitivity(
sorted_data, data_low, data_high, true_quantile_idx, t)
ts[i] = triples[log_choose_triple_idx(triples, divided_eps,
log_sensitivities)][0]
return ts
def csmooth_tune_t_experiment(eps, num_samples, num_trials, num_quantiles_range,
data_low, data_high, log_t_low, log_t_high,
num_t):
"""Returns 2-D array of ts, tuned for each (num_quantiles, quantile) pair.
Args:
eps: Privacy parameter epsilon.
num_samples: Number of standard Gaussian samples to draw for each trial.
num_trials: Number of trials to average.
num_quantiles_range: Array of number of quantiles to estimate.
data_low: Lower bound for data, used by CSmooth.
data_high: Upper bound for data, used by CSmooth.
log_t_low: Tuning range for t has lower bound 10^(log_t_low).
log_t_high: Tuning range for t has upper bound 10^(log_t_high).
num_t: Number of logarithmically spaced t used to populate tuning range.
"""
ts = [np.zeros(num_quantiles) for num_quantiles in num_quantiles_range]
num_quantiles_idx = 0
for num_quantiles_idx in range(len(num_quantiles_range)):
num_quantiles = num_quantiles_range[num_quantiles_idx]
divided_eps = eps / np.sqrt(num_quantiles)
for _ in range(num_trials):
sorted_data = base.gen_gaussian(num_samples, 0, 1)
qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]
ts[num_quantiles_idx] += csmooth_tune_and_return_ts(
sorted_data, data_low, data_high, qs, divided_eps, log_t_low,
log_t_high, num_t) / num_trials
print("Finished num_quantiles: {}".format(num_quantiles))
return ts
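
# Added usage sketch (not part of the original module). Assumes dp_multiq is
# importable; the data, bounds, eps and t values are illustrative only:
#
#   import numpy as np
#   from dp_multiq.csmooth import csmooth
#
#   data = np.sort(np.random.normal(size=1000))
#   qs = np.array([0.25, 0.5, 0.75])
#   ts = np.full(len(qs), 0.05)          # one smooth-sensitivity t per quantile
#   estimates = csmooth(data, -5.0, 5.0, qs, divided_eps=0.5, ts=ts)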
[row stats] avg_line_length: 41.129213 | max_line_length: 80 | alphanum_fraction: 0.714656
hexsha: 793e8273f88b0f2b7ce110aa9b57d8e53036e64a | size: 11,313 | ext: py | lang: Python
repo_path: code/python/Publisher/v3/fds/sdk/Publisher/model/account_directories_root.py | repo_name: factset/enterprise-sdk | head_hexsha: 3fd4d1360756c515c9737a0c9a992c7451d7de7e | licenses: ["Apache-2.0"]
max_stars_count: 6 (events 2022-02-07T16:34:18.000Z .. 2022-03-30T08:04:57.000Z) | max_issues_count: 2 (events 2022-02-07T05:25:57.000Z .. 2022-03-07T14:18:04.000Z) | max_forks_count: null

"""
Publisher API
Allow clients to fetch Publisher Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 3
Contact: analytics.api.support@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.Publisher.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.Publisher.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.Publisher.model.account_directories import AccountDirectories
globals()['AccountDirectories'] = AccountDirectories
class AccountDirectoriesRoot(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (AccountDirectories,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AccountDirectoriesRoot - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (AccountDirectories): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AccountDirectoriesRoot - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (AccountDirectories): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
[row stats] avg_line_length: 43.015209 | max_line_length: 121 | alphanum_fraction: 0.574472
hexsha: 793e8409b2a4d12488dff56ea216a81b3efed7f2 | size: 125 | ext: py | lang: Python
repo_path: 6_command/no_command.py | repo_name: hypersport/Head-First-Design-Patterns-Python | head_hexsha: 0c8b831ae89ebbbef8b203b96508deb7e3063590 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

from command import Command
class NoCommand(Command):
    def execute(self):
        pass

    def undo(self):
        pass
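
# Added usage sketch (not in the original file): NoCommand is the null-object
# command used to fill unassigned remote-control slots, assuming command.Command
# declares execute()/undo() as in the book's examples.
if __name__ == '__main__':
    slot = NoCommand()
    slot.execute()  # deliberately does nothing
    slot.undo()     # likewise a no-op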
[row stats] avg_line_length: 12.5 | max_line_length: 27 | alphanum_fraction: 0.616
hexsha: 793e845f73958f35d2ea7faa1847f8a8ad73e2ad | size: 11,688 | ext: py | lang: Python
repo_path: aiida/tools/graph/graph_traversers.py | licenses: ["MIT", "BSD-3-Clause"]
max_stars: null on HaoZeke/aiida-core @ 1a4cada67fe36353326dcebfe888ebc01a6c5b7b
max_issues: 2 on HaoZeke/aiida-core @ 1a4cada67fe36353326dcebfe888ebc01a6c5b7b (events 2019-03-06T11:23:42.000Z .. 2020-03-09T09:34:07.000Z)
max_forks: null on lorisercole/aiida-core @ 84c2098318bf234641219e55795726f99dc25a16

# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for functions to traverse AiiDA graphs."""
from numpy import inf
from aiida.common.links import GraphTraversalRules, LinkType
def get_nodes_delete(starting_pks, get_links=False, **kwargs):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified authorized
links and directions for deletion.
:type starting_pks: list or tuple or set
:param starting_pks: Contains the (valid) pks of the starting nodes.
:param bool get_links:
Pass True to also return the links between all nodes (found + initial).
:param bool create_forward: will traverse CREATE links in the forward direction.
:param bool call_calc_forward: will traverse CALL_CALC links in the forward direction.
:param bool call_work_forward: will traverse CALL_WORK links in the forward direction.
"""
traverse_links = validate_traversal_rules(GraphTraversalRules.DELETE, **kwargs)
traverse_output = traverse_graph(
starting_pks,
get_links=get_links,
links_forward=traverse_links['forward'],
links_backward=traverse_links['backward']
)
function_output = {
'nodes': traverse_output['nodes'],
'links': traverse_output['links'],
'rules': traverse_links['rules_applied']
}
return function_output
def get_nodes_export(starting_pks, get_links=False, **kwargs):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified authorized
links and directions for export. This will also return the links and
the traversal rules parsed.
:type starting_pks: list or tuple or set
:param starting_pks: Contains the (valid) pks of the starting nodes.
:param bool get_links:
Pass True to also return the links between all nodes (found + initial).
:param bool input_calc_forward: will traverse INPUT_CALC links in the forward direction.
:param bool create_backward: will traverse CREATE links in the backward direction.
:param bool return_backward: will traverse RETURN links in the backward direction.
:param bool input_work_forward: will traverse INPUT_WORK links in the forward direction.
:param bool call_calc_backward: will traverse CALL_CALC links in the backward direction.
:param bool call_work_backward: will traverse CALL_WORK links in the backward direction.
"""
traverse_links = validate_traversal_rules(GraphTraversalRules.EXPORT, **kwargs)
traverse_output = traverse_graph(
starting_pks,
get_links=get_links,
links_forward=traverse_links['forward'],
links_backward=traverse_links['backward']
)
function_output = {
'nodes': traverse_output['nodes'],
'links': traverse_output['links'],
'rules': traverse_links['rules_applied']
}
return function_output
def validate_traversal_rules(ruleset=GraphTraversalRules.DEFAULT, **kwargs):
"""
Validates the keywords with a ruleset template and returns a parsed dictionary
ready to be used.
:type ruleset: :py:class:`aiida.common.links.GraphTraversalRules`
:param ruleset: Ruleset template used to validate the set of rules.
:param bool input_calc_forward: will traverse INPUT_CALC links in the forward direction.
:param bool input_calc_backward: will traverse INPUT_CALC links in the backward direction.
:param bool create_forward: will traverse CREATE links in the forward direction.
:param bool create_backward: will traverse CREATE links in the backward direction.
:param bool return_forward: will traverse RETURN links in the forward direction.
:param bool return_backward: will traverse RETURN links in the backward direction.
:param bool input_work_forward: will traverse INPUT_WORK links in the forward direction.
:param bool input_work_backward: will traverse INPUT_WORK links in the backward direction.
:param bool call_calc_forward: will traverse CALL_CALC links in the forward direction.
:param bool call_calc_backward: will traverse CALL_CALC links in the backward direction.
:param bool call_work_forward: will traverse CALL_WORK links in the forward direction.
:param bool call_work_backward: will traverse CALL_WORK links in the backward direction.
"""
from aiida.common import exceptions
if not isinstance(ruleset, GraphTraversalRules):
raise TypeError(
'ruleset input must be of type aiida.common.links.GraphTraversalRules\ninstead, it is: {}'.format(
type(ruleset)
)
)
rules_applied = {}
links_forward = []
links_backward = []
for name, rule in ruleset.value.items():
follow = rule.default
if name in kwargs:
if not rule.toggleable:
raise ValueError('input rule {} is not toggleable for ruleset {}'.format(name, ruleset))
follow = kwargs.pop(name)
if not isinstance(follow, bool):
raise ValueError('the value of rule {} must be boolean, but it is: {}'.format(name, follow))
if follow:
if rule.direction == 'forward':
links_forward.append(rule.link_type)
elif rule.direction == 'backward':
links_backward.append(rule.link_type)
else:
raise exceptions.InternalError(
'unrecognized direction `{}` for graph traversal rule'.format(rule.direction)
)
rules_applied[name] = follow
if kwargs:
error_message = 'unrecognized keywords: {}'.format(', '.join(kwargs.keys()))
raise exceptions.ValidationError(error_message)
valid_output = {
'rules_applied': rules_applied,
'forward': links_forward,
'backward': links_backward,
}
return valid_output
def traverse_graph(starting_pks, max_iterations=None, get_links=False, links_forward=(), links_backward=()):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified links.
Optionally, it may also return the links that connect these nodes.
:type starting_pks: list or tuple or set
:param starting_pks: Contains the (valid) pks of the starting nodes.
:type max_iterations: int or None
:param max_iterations:
The number of iterations to apply the set of rules (a value of 'None' will
iterate until no new nodes are added).
:param bool get_links:
Pass True to also return the links between all nodes (found + initial).
:type links_forward: aiida.common.links.LinkType
:param links_forward:
List with all the links that should be traversed in the forward direction.
:type links_backward: aiida.common.links.LinkType
:param links_backward:
List with all the links that should be traversed in the backward direction.
"""
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
from aiida import orm
from aiida.tools.graph.age_entities import Basket
from aiida.tools.graph.age_rules import UpdateRule, RuleSequence, RuleSaveWalkers, RuleSetWalkers
from aiida.common import exceptions
if max_iterations is None:
max_iterations = inf
elif not (isinstance(max_iterations, int) or max_iterations is inf):
raise TypeError('Max_iterations has to be an integer or infinity')
linktype_list = []
for linktype in links_forward:
if not isinstance(linktype, LinkType):
raise TypeError('links_forward should contain links, but one of them is: {}'.format(type(linktype)))
linktype_list.append(linktype.value)
filters_forwards = {'type': {'in': linktype_list}}
linktype_list = []
for linktype in links_backward:
if not isinstance(linktype, LinkType):
raise TypeError('links_backward should contain links, but one of them is: {}'.format(type(linktype)))
linktype_list.append(linktype.value)
filters_backwards = {'type': {'in': linktype_list}}
if not isinstance(starting_pks, (list, set, tuple)):
raise TypeError('starting_pks must be of type list, set or tuple\ninstead, it is {}'.format(type(starting_pks)))
if not starting_pks:
if get_links:
output = {'nodes': set(), 'links': set()}
else:
output = {'nodes': set(), 'links': None}
return output
if any([not isinstance(pk, int) for pk in starting_pks]):
raise TypeError('one of the starting_pks is not of type int:\n {}'.format(starting_pks))
operational_set = set(starting_pks)
query_nodes = orm.QueryBuilder()
query_nodes.append(orm.Node, project=['id'], filters={'id': {'in': operational_set}})
existing_pks = set(query_nodes.all(flat=True))
missing_pks = operational_set.difference(existing_pks)
if missing_pks:
raise exceptions.NotExistent(
'The following pks are not in the database and must be pruned before this call: {}'.format(missing_pks)
)
rules = []
basket = Basket(nodes=operational_set)
# When max_iterations is finite, the order of traversal may affect the result
# (its not the same to first go backwards and then forwards than vice-versa)
# In order to make it order-independent, the result of the first operation needs
# to be stashed and the second operation must be performed only on the nodes
# that were already in the set at the begining of the iteration: this way, both
# rules are applied on the same set of nodes and the order doesn't matter.
# The way to do this is saving and seting the walkers at the right moments only
# when both forwards and backwards rules are present.
if links_forward and links_backward:
stash = basket.get_template()
rules += [RuleSaveWalkers(stash)]
if links_forward:
query_outgoing = orm.QueryBuilder()
query_outgoing.append(orm.Node, tag='sources')
query_outgoing.append(orm.Node, edge_filters=filters_forwards, with_incoming='sources')
rule_outgoing = UpdateRule(query_outgoing, max_iterations=1, track_edges=get_links)
rules += [rule_outgoing]
if links_forward and links_backward:
rules += [RuleSetWalkers(stash)]
if links_backward:
query_incoming = orm.QueryBuilder()
query_incoming.append(orm.Node, tag='sources')
query_incoming.append(orm.Node, edge_filters=filters_backwards, with_outgoing='sources')
rule_incoming = UpdateRule(query_incoming, max_iterations=1, track_edges=get_links)
rules += [rule_incoming]
rulesequence = RuleSequence(rules, max_iterations=max_iterations)
results = rulesequence.run(basket)
output = {}
output['nodes'] = results.nodes.keyset
output['links'] = None
if get_links:
output['links'] = results['nodes_nodes'].keyset
return output
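
# Added usage sketch (not part of the original module); requires a loaded AiiDA
# profile and existing node pks (1234 below is a placeholder):
#
#   rules = validate_traversal_rules(GraphTraversalRules.DELETE, create_forward=True)
#   result = traverse_graph([1234], get_links=True,
#                           links_forward=rules['forward'],
#                           links_backward=rules['backward'])
#   print(result['nodes'], result['links'])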
[row stats] avg_line_length: 42.194946 | max_line_length: 120 | alphanum_fraction: 0.683864
hexsha: 793e84e38db8b2f0f59fe0fbf6ac97860583e373 | size: 3,245 | ext: py | lang: Python
repo_path: predict.py | repo_name: jhonatantirado/CheXNet-Keras | head_hexsha: 264fd4ba889fe8d9f5dee48b4ba3f7c0018aa393 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

from keras.models import load_model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import os
from configparser import ConfigParser
from models.keras import ModelFactory
import tensorflow as tf
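
# Added note: per the reads in the __main__ block below, config.ini is expected
# to provide at least [DEFAULT] output_dir, base_model_name, class_names,
# image_source_dir; [TRAIN] image_dimension, output_weights_name; and
# [TEST] batch_size, test_steps, use_best_weights.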
def load_image(img_path, show=False):
img = image.load_img(img_path, target_size=(224, 224))
img_tensor = image.img_to_array(img) # (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
img_tensor /= 255. # imshow expects values in the range [0, 1]
if show:
plt.imshow(img_tensor[0])
plt.axis('off')
plt.show()
return img_tensor
if __name__ == "__main__":
# parser config
config_file = "./config.ini"
cp = ConfigParser()
cp.read(config_file)
# default config
output_dir = cp["DEFAULT"].get("output_dir")
base_model_name = cp["DEFAULT"].get("base_model_name")
class_names = cp["DEFAULT"].get("class_names").split(",")
image_source_dir = cp["DEFAULT"].get("image_source_dir")
# train config
image_dimension = cp["TRAIN"].getint("image_dimension")
# test config
batch_size = cp["TEST"].getint("batch_size")
test_steps = cp["TEST"].get("test_steps")
use_best_weights = cp["TEST"].getboolean("use_best_weights")
# parse weights file path
output_weights_name = cp["TRAIN"].get("output_weights_name")
weights_path = os.path.join(output_dir, output_weights_name)
best_weights_path = os.path.join(output_dir, f"best_{output_weights_name}")
print("** load model **")
if use_best_weights:
print("** use best weights **")
model_weights_path = best_weights_path
else:
print("** use last weights **")
model_weights_path = weights_path
model_factory = ModelFactory()
model = model_factory.get_model(
class_names,
model_name=base_model_name,
use_base_weights=False,
weights_path=model_weights_path)
# image path
img_path_001 = 'starter_images/00001698_000.PNG'
img_path_002 = 'starter_images/00003728_000.PNG'
img_path_003 = 'starter_images/00005318_000.PNG'
# load a single image
new_image_001 = load_image(img_path_001)
new_image_002 = load_image(img_path_002)
new_image_003 = load_image(img_path_003)
# check prediction
pred_001 = model.predict(new_image_001)
pred_002 = model.predict(new_image_002)
pred_003 = model.predict(new_image_003)
print (pred_001)
print (pred_002)
print (pred_003)
result_001 = tf.argmax(pred_001, 1)
result_002 = tf.argmax(pred_002, 1)
result_003 = tf.argmax(pred_003, 1)
predicted_class_001 = tf.keras.backend.eval(result_001)
predicted_class_002 = tf.keras.backend.eval(result_002)
predicted_class_003 = tf.keras.backend.eval(result_003)
print (predicted_class_001)
print (predicted_class_002)
print (predicted_class_003)
print (class_names[predicted_class_001[0]])
print (class_names[predicted_class_002[0]])
print (class_names[predicted_class_003[0]])
[row stats] avg_line_length: 33.112245 | max_line_length: 183 | alphanum_fraction: 0.692142
hexsha: 793e86d829c9f48688ff289a9a6698a47d8a31e1 | size: 216 | ext: py | lang: Python
repo_path: Python/Bank/cliente.py | repo_name: GabrielRenan/Projects-to-Learn | head_hexsha: 38bd7e1dfa6ff9ef5ae0e7e5bacaaf23147d71c0 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
class Cliente:
    def __init__(self, nome):
        self.__nome = nome

    @property
    def nome(self):
        return self.__nome.title()

    @nome.setter
    def nome(self, nome):
        self.__nome = nome

[row stats] avg_line_length: 18 | max_line_length: 34 | alphanum_fraction: 0.574074
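
# Added usage sketch for Python/Bank/cliente.py above (illustrative only):
#
#   cliente = Cliente('gabriel renan')
#   print(cliente.nome)          # 'Gabriel Renan' -- the getter title-cases the name
#   cliente.nome = 'outro nome'  # the setter stores the raw value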
hexsha: 793e870b1b532063020eb34c42baea04b0444ddd | size: 369 | ext: py | lang: Python
repo_path: wagtail/wagtailforms/urls.py | repo_name: patphongs/wagtail | head_hexsha: 32555f7a1c599c139e0f26c22907c9612af2e015 | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (events 2019-11-06T10:51:42.000Z .. 2019-11-06T10:51:42.000Z) | max_issues_count: null | max_forks_count: 2 (events 2017-08-08T01:39:02.000Z .. 2018-05-06T06:16:10.000Z)

from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from wagtail.wagtailforms import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^submissions/(\d+)/$', views.list_submissions, name='list_submissions'),
url(r'^submissions/(\d+)/delete/$', views.delete_submissions, name='delete_submissions')
]
[row stats] avg_line_length: 30.75 | max_line_length: 92 | alphanum_fraction: 0.731707
hexsha: 793e87565395cde7d73093136d453cee31661aa7 | size: 14,048 | ext: py | lang: Python
repo_path: docs/conf.py | repo_name: zhengknight/tensorpack | head_hexsha: 726747313fb2f189dd195d32087897b16a23be0a | licenses: ["Apache-2.0"]
max_stars_count: 1 (events 2019-05-07T15:23:33.000Z .. 2019-05-07T15:23:33.000Z) | max_issues_count: null | max_forks_count: null

# -*- coding: utf-8 -*-
# flake8: noqa
# tensorpack documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:41:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
import mock
import inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DOC_BUILDING'] = '1'
ON_RTD = (os.environ.get('READTHEDOCS') == 'True')
MOCK_MODULES = ['tabulate', 'h5py',
'cv2', 'zmq', 'lmdb',
'sklearn', 'sklearn.datasets',
'scipy', 'scipy.misc', 'scipy.io',
'tornado', 'tornado.concurrent',
'horovod', 'horovod.tensorflow',
'pyarrow',
'subprocess32', 'functools32']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock(name=mod_name)
sys.modules['cv2'].__version__ = '3.2.1' # fake version
import tensorpack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
#'sphinx.ext.autosectionlabel',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
if ON_RTD:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# to support markdown
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tensorpack'
copyright = u'2015 - 2018, Yuxin Wu, et al.'
author = u'Yuxin Wu, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tensorpack.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# 'tensorpack.' prefix was removed by js
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tensorpack.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# avoid li fonts being larger
# TODO but li indices fonts are still larger
html_compact_lists = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tensorpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tensorpack.tex', u'tensorpack documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorpack', u'tensorpack documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tensorpack', u'tensorpack documentation',
author, 'tensorpack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
suppress_warnings = ['image.nonlocal_uri']
#autodoc_member_order = 'bysource'
def process_signature(app, what, name, obj, options, signature,
return_annotation):
if signature:
# replace Mock function names
signature = re.sub('<Mock name=\'([^\']+)\'.*>', '\g<1>', signature)
signature = re.sub('tensorflow', 'tf', signature)
# add scope name to layer signatures:
if hasattr(obj, 'use_scope') and hasattr(obj, 'symbolic_function'):
if obj.use_scope:
signature = signature[0] + 'scope_name, ' + signature[1:]
elif obj.use_scope is None:
signature = signature[0] + '[scope_name,] ' + signature[1:]
# signature: arg list
return signature, return_annotation
_DEPRECATED_NAMES = set([
# deprecated stuff:
'TryResumeTraining',
'QueueInputTrainer',
'SimplePredictBuilder',
'LMDBDataPoint',
'TFRecordData',
'dump_dataflow_to_lmdb',
'dump_dataflow_to_tfrecord',
# renamed stuff:
'DumpTensor',
'DumpParamAsImage',
'StagingInputWrapper',
'PeriodicRunHooks',
'get_nr_gpu',
# deprecated or renamed symbolic code
'ImageSample',
'Deconv2D',
'get_scalar_var', 'psnr',
'prediction_incorrect', 'huber_loss',
# internal only
'apply_default_prefetch',
'average_grads',
'aggregate_grads',
'allreduce_grads',
'PrefetchOnGPUs',
])
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, '__HIDE_SPHINX_DOC__', False):
return True
if name == '__init__':
if obj.__doc__ and skip:
# include_init_with_doc doesn't work well for decorated init
# https://github.com/sphinx-doc/sphinx/issues/4258
return False
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
if name in ['get_data', 'size', 'reset_state']:
# skip these methods with empty docstring
if not obj.__doc__ and inspect.isfunction(obj):
# https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3
cls = getattr(inspect.getmodule(obj),
obj.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if issubclass(cls, tensorpack.DataFlow):
return True
return None
def url_resolver(url):
if '.html' not in url:
return "https://github.com/tensorpack/tensorpack/blob/master/" + url
else:
if ON_RTD:
return "http://tensorpack.readthedocs.io/" + url
else:
return '/' + url
def setup(app):
from recommonmark.transform import AutoStructify
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
'recommonmark_config',
{'url_resolver': url_resolver,
'auto_toc_tree_section': 'Contents',
'enable_math': True,
'enable_inline_math': True,
'enable_eval_rst': True
}, True)
app.add_transform(AutoStructify)
[row stats] avg_line_length: 32.518519 | max_line_length: 113 | alphanum_fraction: 0.692981
hexsha: 793e876da4c6cb416ff291ce78dc2293f7f6312a | size: 1,123 | ext: py | lang: Python
repo_path: satflow/run.py | repo_name: lewtun/satflow | head_hexsha: 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

import os
os.environ["HYDRA_FULL_ERROR"] = "1"
import dotenv
import hydra
from omegaconf import DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
# Imports should be nested inside @hydra.main to optimize tab completion
# Read more here: https://github.com/facebookresearch/hydra/issues/934
from satflow.core import utils
from satflow.experiments.pl_train import train
# A couple of optional utilities:
# - disabling python warnings
# - easier access to debug mode
# - forcing debug friendly configuration
# - forcing multi-gpu friendly configuration
# You can safely get rid of this line if you don't want those
utils.extras(config)
#
# Pretty print config using Rich library
if config.get("print_config"):
utils.print_config(config, resolve=True)
# Train model
return train(config)
if __name__ == "__main__":
main()
[row stats] avg_line_length: 27.390244 | max_line_length: 76 | alphanum_fraction: 0.723063
hexsha: 793e87e43756e35447b43ebef01209bf3cb56a58 | size: 946 | ext: py | lang: Python
repo_path: ssepaperless/Organizer/urls.py | repo_name: michaelkressaty/ssepaperless | head_hexsha: d536f9106fd499e664d3c03fb6331b4feb1cc4ca | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^(?P<department_id>[0-9]+)/$', views.index2, name='index2'),
url(r'^(?P<department_id>[0-9]+)/Advisor/(?P<advisor_id>[0-9]+)/$' , views.advisorinfo, name= 'advisorinfo'),
url(r'^(?P<department_id>[0-9]+)/Advisor/(?P<advisor_id>[0-9]+)/Students/$', views.detail, name='detail'),
url(r'^(?P<department_id>[0-9]+)/Advisor/(?P<advisor_id>[0-9]+)/Degrees/$', views.advisordegree, name='advisordegree'),
# ex: /polls/5/results/
url(r'^(?P<department_id>[0-9]+)/Degree/(?P<degree_id>[0-9]+)/$', views.degree, name='degree'),
url(r'^(?P<department_id>[0-9]+)/Certificate/(?P<certificate_id>[0-9]+)/$', views.certificate, name='certificate'),
# ex: /polls/5/vote/
url(r'^Degree/(?P<degree_id>[0-9]+)/Courses/(?P<degree_core_course_structure_id>[0-9]+)/$', views.coursedegree, name='coursedegree'),
]

[row stats] avg_line_length: 59.125 | max_line_length: 137 | alphanum_fraction: 0.634249
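
# Added note for ssepaperless/Organizer/urls.py above: with these patterns, a
# request such as /3/Advisor/7/Students/ resolves to
# views.detail(request, department_id='3', advisor_id='7');
# named regex groups are passed to the view as string keyword arguments.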
hexsha: 793e888b5ae5bf4a2f4ef1efef04be3249969d52 | size: 359 | ext: py | lang: Python
repo_path: cloudstore/apps/api/migrations/0004_auto_20200823_2216.py | repo_name: JonasUJ/cloudstore | head_hexsha: 33dd9eb2a92c75d4f2034c07dc1c6f1d6d8d845d | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (events 2020-10-08T19:53:21.000Z .. 2020-10-08T19:53:21.000Z)

# Generated by Django 3.0.8 on 2020-08-23 20:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20200802_1500'),
]
operations = [
migrations.RenameField(
model_name='folder',
old_name='parent',
new_name='folder',
),
]
| 18.894737 | 47 | 0.579387 |
hexsha: 793e89591d849b89e64dc0b66300d2432fb4e950 | size: 632 | ext: py | lang: Python
repo_path: nyc_data/manage.py | repo_name: nyccto-rapicastillo/nyc-ppe | head_hexsha: e6d5ba45cf2815f7659298103d3b5bc7210ed8cf | licenses: ["MIT"]
max_stars_count: 3 (events 2020-04-16T03:24:17.000Z .. 2020-09-11T22:12:31.000Z) | max_issues_count: 47 (events 2020-04-10T20:02:09.000Z .. 2021-09-08T02:05:09.000Z) | max_forks_count: 1 (events 2020-04-22T19:10:24.000Z .. 2020-04-22T19:10:24.000Z)

#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nyc_data.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| 28.727273 | 76 | 0.683544 |
hexsha: 793e89e1fd4d571e4f2b0c95fc34b2eff7c1130f | size: 978 | ext: py | lang: Python
repo_path: tabpy/tabpy_server/handlers/service_info_handler.py | repo_name: phantomcosmonaut/TabPy | head_hexsha: 43cce449cdcb5c99202d68f1a6af4d355d3e3734 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

import json
from tabpy.tabpy_server.app.SettingsParameters import SettingsParameters
from tabpy.tabpy_server.handlers.management_handler import ManagementHandler
class ServiceInfoHandler(ManagementHandler):
def initialize(self, app):
super(ServiceInfoHandler, self).initialize(app)
def get(self):
# do not check for authentication - this method
# is the only way for client to collect info about
# supported API versions and required features
self._add_CORS_header()
info = {}
info["description"] = self.tabpy_state.get_description()
info["creation_time"] = self.tabpy_state.creation_time
info["state_path"] = self.settings[SettingsParameters.StateFilePath]
info["server_version"] = self.settings[SettingsParameters.ServerVersion]
info["name"] = self.tabpy_state.name
info["versions"] = self.settings[SettingsParameters.ApiVersions]
self.finish(json.dumps(info))
| 42.521739 | 80 | 0.721881 |
hexsha: 793e8b1020aa202f8bae68561a80bba9afe4b12a | size: 63,065 | ext: py | lang: Python
repo_path: mlflow/tracking/fluent.py | repo_name: devlibx/mlflowx | head_hexsha: 291c51161ec26450b1e79c8e4a32af960da79591 | licenses: ["Apache-2.0"]
max_stars_count: 1 (events 2021-12-13T20:52:08.000Z .. 2021-12-13T20:52:08.000Z) | max_issues_count: 9 (events 2021-08-04T06:41:49.000Z .. 2022-01-10T10:10:52.000Z) | max_forks_count: 1 (events 2021-03-01T10:09:32.000Z .. 2021-03-01T10:09:32.000Z)

"""
Internal module implementing the fluent API, allowing management of an active
MLflow run. This module is exposed to users at the top-level :py:mod:`mlflow` module.
"""
import os
import atexit
import time
import logging
import inspect
from packaging.version import Version
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
from mlflow.entities import Experiment, Run, RunInfo, RunStatus, Param, RunTag, Metric, ViewType
from mlflow.entities.lifecycle_stage import LifecycleStage
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from mlflow.tracking.client import MlflowClient
from mlflow.tracking import artifact_utils, _get_store
from mlflow.tracking.context import registry as context_registry
from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT
from mlflow.utils import env
from mlflow.utils.autologging_utils import (
is_testing,
autologging_integration,
AUTOLOGGING_INTEGRATIONS,
autologging_is_disabled,
)
from mlflow.utils.databricks_utils import is_in_databricks_notebook, get_notebook_id
from mlflow.utils.import_hooks import register_post_import_hook
from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_RUN_NAME
from mlflow.utils.validation import _validate_run_id
if TYPE_CHECKING:
import pandas # pylint: disable=unused-import
import matplotlib # pylint: disable=unused-import
import plotly # pylint: disable=unused-import
import numpy # pylint: disable=unused-import
import PIL # pylint: disable=unused-import
_EXPERIMENT_ID_ENV_VAR = "MLFLOW_EXPERIMENT_ID"
_EXPERIMENT_NAME_ENV_VAR = "MLFLOW_EXPERIMENT_NAME"
_RUN_ID_ENV_VAR = "MLFLOW_RUN_ID"
_active_run_stack = []
_active_experiment_id = None
SEARCH_MAX_RESULTS_PANDAS = 100000
NUM_RUNS_PER_PAGE_PANDAS = 10000
_logger = logging.getLogger(__name__)
def set_experiment(experiment_name: str = None, experiment_id: str = None) -> None:
"""
Set the given experiment as the active experiment. The experiment must either be specified by
name via `experiment_name` or by ID via `experiment_id`. The experiment name and ID cannot
both be specified.
:param experiment_name: Case sensitive name of the experiment to be activated. If an experiment
with this name does not exist, a new experiment wth this name is
created.
:param experiment_id: ID of the experiment to be activated. If an experiment with this ID
does not exist, an exception is thrown.
:return: An instance of :py:class:`mlflow.entities.Experiment` representing the new active
experiment.
.. code-block:: python
:caption: Example
import mlflow
# Set an experiment name, which must be unique and case sensitive.
mlflow.set_experiment("Social NLP Experiments")
# Get Experiment Details
experiment = mlflow.get_experiment_by_name("Social NLP Experiments")
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Experiment_id: 1
Artifact Location: file:///.../mlruns/1
Tags: {}
Lifecycle_stage: active
"""
if (experiment_name is not None and experiment_id is not None) or (
experiment_name is None and experiment_id is None
):
raise MlflowException(
message="Must specify exactly one of: `experiment_id` or `experiment_name`.",
error_code=INVALID_PARAMETER_VALUE,
)
client = MlflowClient()
if experiment_id is None:
experiment = client.get_experiment_by_name(experiment_name)
if not experiment:
_logger.info(
"Experiment with name '%s' does not exist. Creating a new experiment.",
experiment_name,
)
# NB: If two simultaneous threads or processes attempt to set the same experiment
# simultaneously, a race condition may be encountered here wherein experiment creation
# fails
experiment_id = client.create_experiment(experiment_name)
experiment = client.get_experiment(experiment_id)
else:
experiment = client.get_experiment(experiment_id)
if experiment is None:
raise MlflowException(
message=f"Experiment with ID '{experiment_id}' does not exist.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException(
message=(
"Cannot set a deleted experiment '%s' as the active experiment."
" You can restore the experiment, or permanently delete the "
" experiment to create a new one." % experiment.name
),
error_code=INVALID_PARAMETER_VALUE,
)
global _active_experiment_id
_active_experiment_id = experiment.experiment_id
return experiment
class ActiveRun(Run): # pylint: disable=W0223
"""Wrapper around :py:class:`mlflow.entities.Run` to enable using Python ``with`` syntax."""
def __init__(self, run):
Run.__init__(self, run.info, run.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
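        # End the run as FINISHED on a clean exit from the ``with`` block, or as
        # FAILED if the block raised an exception.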
status = RunStatus.FINISHED if exc_type is None else RunStatus.FAILED
end_run(RunStatus.to_string(status))
return exc_type is None
def start_run(
run_id: str = None,
experiment_id: Optional[str] = None,
run_name: Optional[str] = None,
nested: bool = False,
tags: Optional[Dict[str, Any]] = None,
) -> ActiveRun:
"""
Start a new MLflow run, setting it as the active run under which metrics and parameters
will be logged. The return value can be used as a context manager within a ``with`` block;
otherwise, you must call ``end_run()`` to terminate the current run.
If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set,
``start_run`` attempts to resume a run with the specified run ID and
other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``.
If resuming an existing run, the run status is set to ``RunStatus.RUNNING``.
MLflow sets a variety of default tags on the run, as defined in
:ref:`MLflow system tags <system_tags>`.
:param run_id: If specified, get the run with the specified UUID and log parameters
and metrics under that run. The run's end time is unset and its status
is set to running, but the run's other attributes (``source_version``,
``source_type``, etc.) are not changed.
:param experiment_id: ID of the experiment under which to create the current run (applicable
only when ``run_id`` is not specified). If ``experiment_id`` argument
is unspecified, will look for valid experiment in the following order:
activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME``
environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable,
or the default experiment as defined by the tracking server.
:param run_name: Name of new run (stored as a ``mlflow.runName`` tag).
Used only when ``run_id`` is unspecified.
:param nested: Controls whether run is nested in parent run. ``True`` creates a nested run.
:param tags: An optional dictionary of string keys and values to set as tags on the run.
If a run is being resumed, these tags are set on the resumed run. If a new run is
being created, these tags are set on the new run.
:return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping
the run's state.
.. code-block:: python
:caption: Example
import mlflow
# Create nested runs
with mlflow.start_run(run_name='PARENT_RUN') as parent_run:
mlflow.log_param("parent", "yes")
with mlflow.start_run(run_name='CHILD_RUN', nested=True) as child_run:
mlflow.log_param("child", "yes")
print("parent run_id: {}".format(parent_run.info.run_id))
print("child run_id : {}".format(child_run.info.run_id))
print("--")
# Search all child runs with a parent id
query = "tags.mlflow.parentRunId = '{}'".format(parent_run.info.run_id)
results = mlflow.search_runs(filter_string=query)
print(results[["run_id", "params.child", "tags.mlflow.runName"]])
.. code-block:: text
:caption: Output
parent run_id: 5ec0e7ae18f54c2694ffb48c2fccf25c
child run_id : 78b3b0d264b44cd29e8dc389749bb4be
--
run_id params.child tags.mlflow.runName
0 78b3b0d264b44cd29e8dc389749bb4be yes CHILD_RUN
"""
global _active_run_stack
# back compat for int experiment_id
experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id
if len(_active_run_stack) > 0 and not nested:
raise Exception(
(
"Run with UUID {} is already active. To start a new run, first end the "
+ "current run with mlflow.end_run(). To start a nested "
+ "run, call start_run with nested=True"
).format(_active_run_stack[0].info.run_id)
)
client = MlflowClient()
if run_id:
existing_run_id = run_id
elif _RUN_ID_ENV_VAR in os.environ:
existing_run_id = os.environ[_RUN_ID_ENV_VAR]
del os.environ[_RUN_ID_ENV_VAR]
else:
existing_run_id = None
if existing_run_id:
_validate_run_id(existing_run_id)
active_run_obj = client.get_run(existing_run_id)
# Check to see if experiment_id from environment matches experiment_id from set_experiment()
if (
_active_experiment_id is not None
and _active_experiment_id != active_run_obj.info.experiment_id
):
raise MlflowException(
"Cannot start run with ID {} because active run ID "
"does not match environment run ID. Make sure --experiment-name "
"or --experiment-id matches experiment set with "
"set_experiment(), or just use command-line "
"arguments".format(existing_run_id)
)
# Check to see if current run isn't deleted
if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED:
raise MlflowException(
"Cannot start run with ID {} because it is in the "
"deleted state.".format(existing_run_id)
)
# Use previous end_time because a value is required for update_run_info
end_time = active_run_obj.info.end_time
_get_store().update_run_info(
existing_run_id, run_status=RunStatus.RUNNING, end_time=end_time
)
if tags:
client.log_batch(
run_id=existing_run_id,
tags=[RunTag(key, str(value)) for key, value in tags.items()],
)
active_run_obj = client.get_run(existing_run_id)
else:
if len(_active_run_stack) > 0:
parent_run_id = _active_run_stack[-1].info.run_id
else:
parent_run_id = None
exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id()
user_specified_tags = tags or {}
if parent_run_id is not None:
user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id
if run_name is not None:
user_specified_tags[MLFLOW_RUN_NAME] = run_name
tags = context_registry.resolve_tags(user_specified_tags)
active_run_obj = client.create_run(experiment_id=exp_id_for_run, tags=tags)
_active_run_stack.append(ActiveRun(active_run_obj))
return _active_run_stack[-1]
def end_run(status: str = RunStatus.to_string(RunStatus.FINISHED)) -> None:
"""End an active MLflow run (if there is one).
.. code-block:: python
:caption: Example
import mlflow
# Start run and get status
mlflow.start_run()
run = mlflow.active_run()
print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
# End run and get status
mlflow.end_run()
run = mlflow.get_run(run.info.run_id)
print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
print("--")
# Check for any active runs
print("Active run: {}".format(mlflow.active_run()))
.. code-block:: text
:caption: Output
run_id: b47ee4563368419880b44ad8535f6371; status: RUNNING
run_id: b47ee4563368419880b44ad8535f6371; status: FINISHED
--
Active run: None
"""
global _active_run_stack
if len(_active_run_stack) > 0:
# Clear out the global existing run environment variable as well.
env.unset_variable(_RUN_ID_ENV_VAR)
run = _active_run_stack.pop()
MlflowClient().set_terminated(run.info.run_id, status)
atexit.register(end_run)
def active_run() -> Optional[ActiveRun]:
"""Get the currently active ``Run``, or None if no such run exists.
**Note**: You cannot access currently-active run attributes
(parameters, metrics, etc.) through the run returned by ``mlflow.active_run``. In order
to access such attributes, use the :py:class:`mlflow.tracking.MlflowClient` as follows:
.. code-block:: python
:caption: Example
import mlflow
mlflow.start_run()
run = mlflow.active_run()
print("Active run_id: {}".format(run.info.run_id))
mlflow.end_run()
.. code-block:: text
:caption: Output
Active run_id: 6f252757005748708cd3aad75d1ff462
"""
return _active_run_stack[-1] if len(_active_run_stack) > 0 else None
def get_run(run_id: str) -> Run:
"""
Fetch the run from backend store. The resulting :py:class:`Run <mlflow.entities.Run>`
contains a collection of run metadata -- :py:class:`RunInfo <mlflow.entities.RunInfo>`,
as well as a collection of run parameters, tags, and metrics --
:py:class:`RunData <mlflow.entities.RunData>`. In the case where multiple metrics with the
same key are logged for the run, the :py:class:`RunData <mlflow.entities.RunData>` contains
the most recently logged value at the largest step for each metric.
:param run_id: Unique identifier for the run.
:return: A single :py:class:`mlflow.entities.Run` object, if the run exists. Otherwise,
raises an exception.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run() as run:
mlflow.log_param("p", 0)
run_id = run.info.run_id
print("run_id: {}; lifecycle_stage: {}".format(run_id,
mlflow.get_run(run_id).info.lifecycle_stage))
.. code-block:: text
:caption: Output
run_id: 7472befefc754e388e8e922824a0cca5; lifecycle_stage: active
"""
return MlflowClient().get_run(run_id)
def log_param(key: str, value: Any) -> None:
"""
Log a parameter under the current run. If no run is active, this method will create
a new active run.
:param key: Parameter name (string). This string may only contain alphanumerics,
underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Parameter value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.log_param("learning_rate", 0.01)
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_param(run_id, key, value)
def set_tag(key: str, value: Any) -> None:
"""
Set a tag under the current run. If no run is active, this method will create a
new active run.
:param key: Tag name (string). This string may only contain alphanumerics, underscores
(_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Tag value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.set_tag("release.version", "2.2.0")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().set_tag(run_id, key, value)
def delete_tag(key: str) -> None:
"""
Delete a tag from a run. This is irreversible. If no run is active, this method
will create a new active run.
:param key: Name of the tag
.. code-block:: python
:caption: Example
import mlflow
tags = {"engineering": "ML Platform",
"engineering_remote": "ML Platform"}
with mlflow.start_run() as run:
mlflow.set_tags(tags)
with mlflow.start_run(run_id=run.info.run_id):
mlflow.delete_tag("engineering_remote")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().delete_tag(run_id, key)
def log_metric(key: str, value: float, step: Optional[int] = None) -> None:
"""
Log a metric under the current run. If no run is active, this method will create
a new active run.
:param key: Metric name (string). This string may only contain alphanumerics, underscores (_),
dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Metric value (float). Note that some special values such as +/- Infinity may be
replaced by other values depending on the store. For example, the
SQLAlchemy store replaces +/- Infinity with max / min float values.
All backend stores will support values up to length 5000, but some
may support larger values.
:param step: Metric step (int). Defaults to zero if unspecified.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.log_metric("mse", 2500.00)
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_metric(run_id, key, value, int(time.time() * 1000), step or 0)
def log_metrics(metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""
Log multiple metrics for the current run. If no run is active, this method will create a new
active run.
:param metrics: Dictionary of metric_name: String -> value: Float. Note that some special
values such as +/- Infinity may be replaced by other values depending on
                    the store. For example, the SQLAlchemy store may replace +/- Infinity with
max / min float values.
:param step: A single integer step at which to log the specified
Metrics. If unspecified, each metric is logged at step zero.
:returns: None
.. code-block:: python
:caption: Example
import mlflow
metrics = {"mse": 2500.00, "rmse": 50.00}
# Log a batch of metrics
with mlflow.start_run():
mlflow.log_metrics(metrics)
"""
run_id = _get_or_start_run().info.run_id
timestamp = int(time.time() * 1000)
metrics_arr = [Metric(key, value, timestamp, step or 0) for key, value in metrics.items()]
MlflowClient().log_batch(run_id=run_id, metrics=metrics_arr, params=[], tags=[])
def log_params(params: Dict[str, Any]) -> None:
"""
Log a batch of params for the current run. If no run is active, this method will create a
new active run.
:param params: Dictionary of param_name: String -> value: (String, but will be string-ified if
not)
:returns: None
.. code-block:: python
:caption: Example
import mlflow
params = {"learning_rate": 0.01, "n_estimators": 10}
# Log a batch of parameters
with mlflow.start_run():
mlflow.log_params(params)
"""
run_id = _get_or_start_run().info.run_id
params_arr = [Param(key, str(value)) for key, value in params.items()]
MlflowClient().log_batch(run_id=run_id, metrics=[], params=params_arr, tags=[])
def set_tags(tags: Dict[str, Any]) -> None:
"""
Log a batch of tags for the current run. If no run is active, this method will create a
new active run.
:param tags: Dictionary of tag_name: String -> value: (String, but will be string-ified if
not)
:returns: None
.. code-block:: python
:caption: Example
import mlflow
tags = {"engineering": "ML Platform",
"release.candidate": "RC1",
"release.version": "2.2.0"}
# Set a batch of tags
with mlflow.start_run():
mlflow.set_tags(tags)
"""
run_id = _get_or_start_run().info.run_id
tags_arr = [RunTag(key, str(value)) for key, value in tags.items()]
MlflowClient().log_batch(run_id=run_id, metrics=[], params=[], tags=tags_arr)
def log_artifact(local_path: str, artifact_path: Optional[str] = None) -> None:
"""
Log a local file or directory as an artifact of the currently active run. If no run is
active, this method will create a new active run.
:param local_path: Path to the file to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
.. code-block:: python
:caption: Example
import mlflow
# Create a features.txt artifact file
features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
f.write(features)
# With artifact_path=None write features.txt under
# root artifact_uri/artifacts directory
with mlflow.start_run():
mlflow.log_artifact("features.txt")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_artifact(run_id, local_path, artifact_path)
def log_artifacts(local_dir: str, artifact_path: Optional[str] = None) -> None:
"""
Log all the contents of a local directory as artifacts of the run. If no run is active,
this method will create a new active run.
:param local_dir: Path to the directory of files to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
.. code-block:: python
:caption: Example
import os
import mlflow
# Create some files to preserve as artifacts
features = "rooms, zipcode, median_price, school_rating, transport"
data = {"state": "TX", "Available": 25, "Type": "Detached"}
# Create couple of artifact files under the directory "data"
os.makedirs("data", exist_ok=True)
with open("data/data.json", 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
with open("data/features.txt", 'w') as f:
f.write(features)
# Write all files in "data" to root artifact_uri/states
with mlflow.start_run():
mlflow.log_artifacts("data", artifact_path="states")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_artifacts(run_id, local_dir, artifact_path)
def log_text(text: str, artifact_file: str) -> None:
"""
Log text as an artifact.
:param text: String containing text to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the text is saved (e.g. "dir/file.txt").
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
# Log text to a file under the run's root artifact directory
mlflow.log_text("text1", "file1.txt")
# Log text in a subdirectory of the run's root artifact directory
mlflow.log_text("text2", "dir/file2.txt")
# Log HTML text
mlflow.log_text("<h1>header</h1>", "index.html")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_text(run_id, text, artifact_file)
def log_dict(dictionary: Any, artifact_file: str) -> None:
"""
Log a JSON/YAML-serializable object (e.g. `dict`) as an artifact. The serialization
format (JSON or YAML) is automatically inferred from the extension of `artifact_file`.
    If the file extension is missing or doesn't match any of [".json", ".yml", ".yaml"],
    JSON format is used.
:param dictionary: Dictionary to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the dictionary is saved (e.g. "dir/data.json").
.. code-block:: python
:caption: Example
import mlflow
dictionary = {"k": "v"}
with mlflow.start_run():
# Log a dictionary as a JSON file under the run's root artifact directory
mlflow.log_dict(dictionary, "data.json")
# Log a dictionary as a YAML file in a subdirectory of the run's root artifact directory
mlflow.log_dict(dictionary, "dir/data.yml")
# If the file extension doesn't exist or match any of [".json", ".yaml", ".yml"],
# JSON format is used.
mlflow.log_dict(dictionary, "data")
mlflow.log_dict(dictionary, "data.txt")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_dict(run_id, dictionary, artifact_file)
def log_figure(
figure: Union["matplotlib.figure.Figure", "plotly.graph_objects.Figure"], artifact_file: str
) -> None:
"""
Log a figure as an artifact. The following figure objects are supported:
- `matplotlib.figure.Figure`_
- `plotly.graph_objects.Figure`_
.. _matplotlib.figure.Figure:
https://matplotlib.org/api/_as_gen/matplotlib.figure.Figure.html
.. _plotly.graph_objects.Figure:
https://plotly.com/python-api-reference/generated/plotly.graph_objects.Figure.html
:param figure: Figure to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the figure is saved (e.g. "dir/file.png").
.. code-block:: python
:caption: Matplotlib Example
import mlflow
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1], [2, 3])
with mlflow.start_run():
mlflow.log_figure(fig, "figure.png")
.. code-block:: python
:caption: Plotly Example
import mlflow
from plotly import graph_objects as go
fig = go.Figure(go.Scatter(x=[0, 1], y=[2, 3]))
with mlflow.start_run():
mlflow.log_figure(fig, "figure.html")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_figure(run_id, figure, artifact_file)
def log_image(image: Union["numpy.ndarray", "PIL.Image.Image"], artifact_file: str) -> None:
"""
Log an image as an artifact. The following image objects are supported:
- `numpy.ndarray`_
- `PIL.Image.Image`_
.. _numpy.ndarray:
https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html
.. _PIL.Image.Image:
https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image
Numpy array support
        - data types (with the valid value range shown in parentheses):
- bool
- integer (0 ~ 255)
- unsigned integer (0 ~ 255)
- float (0.0 ~ 1.0)
.. warning::
- Out-of-range integer values will be **clipped** to [0, 255].
- Out-of-range float values will be **clipped** to [0, 1].
- shape (H: height, W: width):
- H x W (Grayscale)
- H x W x 1 (Grayscale)
- H x W x 3 (an RGB channel order is assumed)
- H x W x 4 (an RGBA channel order is assumed)
:param image: Image to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the image is saved (e.g. "dir/image.png").
.. code-block:: python
:caption: Numpy Example
import mlflow
import numpy as np
image = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
with mlflow.start_run():
mlflow.log_image(image, "image.png")
.. code-block:: python
:caption: Pillow Example
import mlflow
from PIL import Image
image = Image.new("RGB", (100, 100))
with mlflow.start_run():
mlflow.log_image(image, "image.png")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_image(run_id, image, artifact_file)
def _record_logged_model(mlflow_model):
run_id = _get_or_start_run().info.run_id
MlflowClient()._record_logged_model(run_id, mlflow_model)
def get_experiment(experiment_id: str) -> Experiment:
"""
Retrieve an experiment by experiment_id from the backend store
:param experiment_id: The string-ified experiment ID returned from ``create_experiment``.
:return: :py:class:`mlflow.entities.Experiment`
.. code-block:: python
:caption: Example
import mlflow
experiment = mlflow.get_experiment("0")
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: Default
Artifact Location: file:///.../mlruns/0
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().get_experiment(experiment_id)
def get_experiment_by_name(name: str) -> Optional[Experiment]:
"""
Retrieve an experiment by experiment name from the backend store
    :param name: The case sensitive experiment name.
:return: An instance of :py:class:`mlflow.entities.Experiment`
if an experiment with the specified name exists, otherwise None.
.. code-block:: python
:caption: Example
import mlflow
# Case sensitive name
experiment = mlflow.get_experiment_by_name("Default")
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Experiment_id: 0
Artifact Location: file:///.../mlruns/0
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().get_experiment_by_name(name)
def list_experiments(
view_type: int = ViewType.ACTIVE_ONLY,
max_results: Optional[int] = None,
) -> List[Experiment]:
"""
:param view_type: Qualify requested type of experiments.
:param max_results: If passed, specifies the maximum number of experiments desired. If not
passed, all experiments will be returned.
:return: A list of :py:class:`Experiment <mlflow.entities.Experiment>` objects.
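
    A minimal illustrative example (the experiments returned depend on the
    backing tracking server):

    .. code-block:: python

        import mlflow
        from mlflow.entities import ViewType

        for exp in mlflow.list_experiments(view_type=ViewType.ACTIVE_ONLY):
            print("{}: {}".format(exp.experiment_id, exp.name))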
"""
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().list_experiments(
view_type=view_type,
max_results=number_to_get,
page_token=next_page_token,
)
return _paginate(pagination_wrapper_func, SEARCH_MAX_RESULTS_DEFAULT, max_results)
def create_experiment(
name: str,
artifact_location: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
) -> str:
"""
Create an experiment.
:param name: The experiment name, which must be unique and is case sensitive
:param artifact_location: The location to store run artifacts.
If not provided, the server picks an appropriate default.
:param tags: An optional dictionary of string keys and values to set as
tags on the experiment.
:return: String ID of the created experiment.
.. code-block:: python
:caption: Example
import mlflow
# Create an experiment name, which must be unique and case sensitive
experiment_id = mlflow.create_experiment("Social NLP Experiments")
experiment = mlflow.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: Social NLP Experiments
Experiment_id: 1
Artifact Location: file:///.../mlruns/1
        Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().create_experiment(name, artifact_location, tags)
def delete_experiment(experiment_id: str) -> None:
"""
Delete an experiment from the backend store.
    :param experiment_id: The string-ified experiment ID returned from ``create_experiment``.
.. code-block:: python
:caption: Example
import mlflow
experiment_id = mlflow.create_experiment("New Experiment")
mlflow.delete_experiment(experiment_id)
# Examine the deleted experiment details.
experiment = mlflow.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: New Experiment
Artifact Location: file:///.../mlruns/2
Lifecycle_stage: deleted
"""
MlflowClient().delete_experiment(experiment_id)
def delete_run(run_id: str) -> None:
"""
Deletes a run with the given ID.
:param run_id: Unique identifier for the run to delete.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run() as run:
mlflow.log_param("p", 0)
run_id = run.info.run_id
mlflow.delete_run(run_id)
print("run_id: {}; lifecycle_stage: {}".format(run_id,
mlflow.get_run(run_id).info.lifecycle_stage))
.. code-block:: text
:caption: Output
run_id: 45f4af3e6fd349e58579b27fcb0b8277; lifecycle_stage: deleted
"""
MlflowClient().delete_run(run_id)
def get_artifact_uri(artifact_path: Optional[str] = None) -> str:
"""
Get the absolute URI of the specified artifact in the currently active run.
If `path` is not specified, the artifact root URI of the currently active
run will be returned; calls to ``log_artifact`` and ``log_artifacts`` write
artifact(s) to subdirectories of the artifact root URI.
If no run is active, this method will create a new active run.
:param artifact_path: The run-relative artifact path for which to obtain an absolute URI.
For example, "path/to/artifact". If unspecified, the artifact root URI
for the currently active run will be returned.
    :return: An *absolute* URI referring to the specified artifact or the currently active run's
artifact root. For example, if an artifact path is provided and the currently active
run uses an S3-backed store, this may be a uri of the form
``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path
is not provided and the currently active run uses an S3-backed store, this may be a
URI of the form ``s3://<bucket_name>/path/to/artifact/root``.
.. code-block:: python
:caption: Example
import mlflow
features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
f.write(features)
# Log the artifact in a directory "features" under the root artifact_uri/features
with mlflow.start_run():
mlflow.log_artifact("features.txt", artifact_path="features")
# Fetch the artifact uri root directory
artifact_uri = mlflow.get_artifact_uri()
print("Artifact uri: {}".format(artifact_uri))
# Fetch a specific artifact uri
artifact_uri = mlflow.get_artifact_uri(artifact_path="features/features.txt")
print("Artifact uri: {}".format(artifact_uri))
.. code-block:: text
:caption: Output
Artifact uri: file:///.../0/a46a80f1c9644bd8f4e5dd5553fffce/artifacts
Artifact uri: file:///.../0/a46a80f1c9644bd8f4e5dd5553fffce/artifacts/features/features.txt
"""
return artifact_utils.get_artifact_uri(
run_id=_get_or_start_run().info.run_id, artifact_path=artifact_path
)
def search_runs(
experiment_ids: Optional[List[str]] = None,
filter_string: str = "",
run_view_type: int = ViewType.ACTIVE_ONLY,
max_results: int = SEARCH_MAX_RESULTS_PANDAS,
order_by: Optional[List[str]] = None,
output_format: str = "pandas",
) -> Union[List[Run], "pandas.DataFrame"]:
"""
Get a pandas DataFrame of runs that fit the search criteria.
:param experiment_ids: List of experiment IDs. None will default to the active experiment.
:param filter_string: Filter query string, defaults to searching all runs.
:param run_view_type: one of enum values ``ACTIVE_ONLY``, ``DELETED_ONLY``, or ``ALL`` runs
defined in :py:class:`mlflow.entities.ViewType`.
:param max_results: The maximum number of runs to put in the dataframe. Default is 100,000
to avoid causing out-of-memory issues on the user's machine.
:param order_by: List of columns to order by (e.g., "metrics.rmse"). The ``order_by`` column
can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``.
The default ordering is to sort by ``start_time DESC``, then ``run_id``.
:param output_format: The output format to be returned. If ``pandas``, a ``pandas.DataFrame``
is returned and, if ``list``, a list of :py:class:`mlflow.entities.Run`
is returned.
:return: If output_format is ``list``: a list of :py:class:`mlflow.entities.Run`. If
output_format is ``pandas``: ``pandas.DataFrame`` of runs, where each metric,
parameter, and tag is expanded into its own column named metrics.*, params.*, or
tags.* respectively. For runs that don't have a particular metric, parameter, or tag,
             the value for the corresponding column is (NumPy) ``NaN``, ``None``, or ``None``
respectively.
.. code-block:: python
:caption: Example
import mlflow
# Create an experiment and log two runs under it
experiment_id = mlflow.create_experiment("Social NLP Experiments")
with mlflow.start_run(experiment_id=experiment_id):
mlflow.log_metric("m", 1.55)
mlflow.set_tag("s.release", "1.1.0-RC")
with mlflow.start_run(experiment_id=experiment_id):
mlflow.log_metric("m", 2.50)
mlflow.set_tag("s.release", "1.2.0-GA")
# Search all runs in experiment_id
df = mlflow.search_runs([experiment_id], order_by=["metrics.m DESC"])
print(df[["metrics.m", "tags.s.release", "run_id"]])
print("--")
# Search the experiment_id using a filter_string with tag
# that has a case insensitive pattern
filter_string = "tags.s.release ILIKE '%rc%'"
df = mlflow.search_runs([experiment_id], filter_string=filter_string)
print(df[["metrics.m", "tags.s.release", "run_id"]])
.. code-block:: text
:caption: Output
metrics.m tags.s.release run_id
0 2.50 1.2.0-GA 147eed886ab44633902cc8e19b2267e2
1 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
--
metrics.m tags.s.release run_id
0 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
"""
if not experiment_ids:
experiment_ids = _get_experiment_id()
# Using an internal function as the linter doesn't like assigning a lambda, and inlining the
# full thing is a mess
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().search_runs(
experiment_ids,
filter_string,
run_view_type,
number_to_get,
order_by,
next_page_token,
)
runs = _paginate(pagination_wrapper_func, NUM_RUNS_PER_PAGE_PANDAS, max_results)
if output_format == "list":
return runs # List[mlflow.entities.run.Run]
elif output_format == "pandas":
import numpy as np
import pandas as pd
info = {
"run_id": [],
"experiment_id": [],
"status": [],
"artifact_uri": [],
"start_time": [],
"end_time": [],
}
params, metrics, tags = ({}, {}, {})
PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None)
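        # Accumulate one column per param/metric/tag key; when a key first
        # appears at run ``i``, backfill nulls for the ``i`` earlier runs so
        # every column stays the same length.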
for i, run in enumerate(runs):
info["run_id"].append(run.info.run_id)
info["experiment_id"].append(run.info.experiment_id)
info["status"].append(run.info.status)
info["artifact_uri"].append(run.info.artifact_uri)
info["start_time"].append(pd.to_datetime(run.info.start_time, unit="ms", utc=True))
info["end_time"].append(pd.to_datetime(run.info.end_time, unit="ms", utc=True))
# Params
param_keys = set(params.keys())
for key in param_keys:
if key in run.data.params:
params[key].append(run.data.params[key])
else:
params[key].append(PARAM_NULL)
new_params = set(run.data.params.keys()) - param_keys
for p in new_params:
params[p] = [PARAM_NULL] * i # Fill in null values for all previous runs
params[p].append(run.data.params[p])
# Metrics
metric_keys = set(metrics.keys())
for key in metric_keys:
if key in run.data.metrics:
metrics[key].append(run.data.metrics[key])
else:
metrics[key].append(METRIC_NULL)
new_metrics = set(run.data.metrics.keys()) - metric_keys
for m in new_metrics:
metrics[m] = [METRIC_NULL] * i
metrics[m].append(run.data.metrics[m])
# Tags
tag_keys = set(tags.keys())
for key in tag_keys:
if key in run.data.tags:
tags[key].append(run.data.tags[key])
else:
tags[key].append(TAG_NULL)
new_tags = set(run.data.tags.keys()) - tag_keys
for t in new_tags:
tags[t] = [TAG_NULL] * i
tags[t].append(run.data.tags[t])
data = {}
data.update(info)
for key in metrics:
data["metrics." + key] = metrics[key]
for key in params:
data["params." + key] = params[key]
for key in tags:
data["tags." + key] = tags[key]
return pd.DataFrame(data)
else:
raise ValueError(
"Unsupported output format: %s. Supported string values are 'pandas' or 'list'"
% output_format
)
def list_run_infos(
experiment_id: str,
run_view_type: int = ViewType.ACTIVE_ONLY,
max_results: int = SEARCH_MAX_RESULTS_DEFAULT,
order_by: Optional[List[str]] = None,
) -> List[RunInfo]:
"""
Return run information for runs which belong to the experiment_id.
:param experiment_id: The experiment id which to search
:param run_view_type: ACTIVE_ONLY, DELETED_ONLY, or ALL runs
:param max_results: Maximum number of results desired.
    :param order_by: List of order_by clauses. Currently supported values
                     are ``metric.key``, ``parameter.key``, ``tag.key``, ``attribute.key``.
For example, ``order_by=["tag.release ASC", "metric.click_rate DESC"]``.
:return: A list of :py:class:`RunInfo <mlflow.entities.RunInfo>` objects that satisfy the
search expressions.
.. code-block:: python
:caption: Example
import mlflow
from mlflow.entities import ViewType
# Create two runs
with mlflow.start_run() as run1:
mlflow.log_param("p", 0)
with mlflow.start_run() as run2:
mlflow.log_param("p", 1)
# Delete the last run
mlflow.delete_run(run2.info.run_id)
def print_run_infos(run_infos):
for r in run_infos:
print("- run_id: {}, lifecycle_stage: {}".format(r.run_id, r.lifecycle_stage))
print("Active runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.ACTIVE_ONLY))
print("Deleted runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.DELETED_ONLY))
print("All runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.ALL))
.. code-block:: text
:caption: Output
Active runs:
- run_id: 4937823b730640d5bed9e3e5057a2b34, lifecycle_stage: active
Deleted runs:
- run_id: b13f1badbed842cf9975c023d23da300, lifecycle_stage: deleted
All runs:
- run_id: b13f1badbed842cf9975c023d23da300, lifecycle_stage: deleted
- run_id: 4937823b730640d5bed9e3e5057a2b34, lifecycle_stage: active
"""
# Using an internal function as the linter doesn't like assigning a lambda, and inlining the
# full thing is a mess
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().list_run_infos(
experiment_id, run_view_type, number_to_get, order_by, next_page_token
)
return _paginate(pagination_wrapper_func, SEARCH_MAX_RESULTS_DEFAULT, max_results)
def _paginate(paginated_fn, max_results_per_page, max_results=None):
"""
Intended to be a general use pagination utility.
    :param paginated_fn: A function that takes the number of results to retrieve per
                         page and a pagination token, and returns a PagedList of results.
    :param max_results_per_page: The maximum number of results to retrieve per page.
    :param max_results: The maximum number of results to retrieve overall. If unspecified,
                        all results will be retrieved.
    :return: A list of entities, as determined by the ``paginated_fn`` parameter, with no
             more entities than specified by ``max_results``.
    :rtype: list[object]
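
    A minimal sketch of the expected ``paginated_fn`` contract (``FakePage`` is
    a hypothetical stand-in for a PagedList):

    .. code-block:: python

        class FakePage(list):
            # A list-like page of results carrying an optional continuation token
            def __init__(self, items, token=None):
                super().__init__(items)
                self.token = token

        def fake_paginated_fn(number_to_get, next_page_token):
            start = int(next_page_token or 0)
            end = min(start + number_to_get, 25)
            return FakePage(range(start, end), token=str(end) if end < 25 else None)

        # Retrieves 25 items across three pages of at most 10 items each
        _paginate(fake_paginated_fn, max_results_per_page=10, max_results=25)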
"""
all_results = []
next_page_token = None
returns_all = max_results is None
while returns_all or len(all_results) < max_results:
num_to_get = max_results_per_page if returns_all else max_results - len(all_results)
if num_to_get < max_results_per_page:
page_results = paginated_fn(num_to_get, next_page_token)
else:
page_results = paginated_fn(max_results_per_page, next_page_token)
all_results.extend(page_results)
if hasattr(page_results, "token") and page_results.token:
next_page_token = page_results.token
else:
break
return all_results
def _get_or_start_run():
if len(_active_run_stack) > 0:
return _active_run_stack[-1]
return start_run()
def _get_experiment_id_from_env():
experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)
if experiment_name is not None:
exp = MlflowClient().get_experiment_by_name(experiment_name)
return exp.experiment_id if exp else None
return env.get_env(_EXPERIMENT_ID_ENV_VAR)
def _get_experiment_id():
# TODO: Replace with None for 1.0, leaving for 0.9.1 release backcompat with existing servers
deprecated_default_exp_id = "0"
return (
_active_experiment_id
or _get_experiment_id_from_env()
or (is_in_databricks_notebook() and get_notebook_id())
) or deprecated_default_exp_id
@autologging_integration("mlflow")
def autolog(
log_input_examples: bool = False,
log_model_signatures: bool = True,
log_models: bool = True,
disable: bool = False,
exclusive: bool = False,
disable_for_unsupported_versions: bool = False,
silent: bool = False,
# pylint: disable=unused-argument
) -> None:
"""
Enables (or disables) and configures autologging for all supported integrations.
The parameters are passed to any autologging integrations that support them.
See the :ref:`tracking docs <automatic-logging>` for a list of supported autologging
integrations.
Note that framework-specific configurations set at any point will take precedence over
any configurations set by this function. For example:
.. code-block:: python
mlflow.autolog(log_models=False, exclusive=True)
import sklearn
would enable autologging for `sklearn` with `log_models=False` and `exclusive=True`,
but
.. code-block:: python
mlflow.autolog(log_models=False, exclusive=True)
import sklearn
mlflow.sklearn.autolog(log_models=True)
would enable autologging for `sklearn` with `log_models=True` and `exclusive=False`,
the latter resulting from the default value for `exclusive` in `mlflow.sklearn.autolog`;
other framework autolog functions (e.g. `mlflow.tensorflow.autolog`) would use the
configurations set by `mlflow.autolog` (in this instance, `log_models=False`, `exclusive=True`),
until they are explicitly called by the user.
:param log_input_examples: If ``True``, input examples from training datasets are collected and
logged along with model artifacts during training. If ``False``,
input examples are not logged.
Note: Input examples are MLflow model attributes
and are only collected if ``log_models`` is also ``True``.
:param log_model_signatures: If ``True``,
:py:class:`ModelSignatures <mlflow.models.ModelSignature>`
describing model inputs and outputs are collected and logged along
with model artifacts during training. If ``False``, signatures are
not logged. Note: Model signatures are MLflow model attributes
and are only collected if ``log_models`` is also ``True``.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
Input examples and model signatures, which are attributes of MLflow models,
are also omitted when ``log_models`` is ``False``.
:param disable: If ``True``, disables all supported autologging integrations. If ``False``,
enables all supported autologging integrations.
:param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run,
which may be user-created.
:param disable_for_unsupported_versions: If ``True``, disable autologging for versions of
all integration libraries that have not been tested against this version
of the MLflow client or are incompatible.
:param silent: If ``True``, suppress all event logs and warnings from MLflow during autologging
setup and training execution. If ``False``, show all events and warnings during
autologging setup and training execution.
.. code-block:: python
:caption: Example
import numpy as np
import mlflow.sklearn
from mlflow.tracking import MlflowClient
from sklearn.linear_model import LinearRegression
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
print("run_id: {}".format(r.info.run_id))
print("artifacts: {}".format(artifacts))
print("params: {}".format(r.data.params))
print("metrics: {}".format(r.data.metrics))
print("tags: {}".format(tags))
# prepare training data
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
# Auto log all the parameters, metrics, and artifacts
mlflow.autolog()
model = LinearRegression()
with mlflow.start_run() as run:
model.fit(X, y)
# fetch the auto logged parameters and metrics for ended run
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
.. code-block:: text
:caption: Output
run_id: fd10a17d028c47399a55ab8741721ef7
artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/model.pkl']
params: {'copy_X': 'True',
'normalize': 'False',
'fit_intercept': 'True',
'n_jobs': 'None'}
metrics: {'training_score': 1.0,
'training_rmse': 4.440892098500626e-16,
'training_r2_score': 1.0,
'training_mae': 2.220446049250313e-16,
'training_mse': 1.9721522630525295e-31}
tags: {'estimator_class': 'sklearn.linear_model._base.LinearRegression',
'estimator_name': 'LinearRegression'}
"""
from mlflow import (
tensorflow,
keras,
gluon,
xgboost,
lightgbm,
pyspark,
statsmodels,
spark,
sklearn,
fastai,
pytorch,
)
locals_copy = locals().items()
# Mapping of library module name to specific autolog function
# eg: mxnet.gluon is the actual library, mlflow.gluon.autolog is our autolog function for it
LIBRARY_TO_AUTOLOG_FN = {
"tensorflow": tensorflow.autolog,
"keras": keras.autolog,
"mxnet.gluon": gluon.autolog,
"xgboost": xgboost.autolog,
"lightgbm": lightgbm.autolog,
"statsmodels": statsmodels.autolog,
"sklearn": sklearn.autolog,
"fastai": fastai.autolog,
"pyspark": spark.autolog,
"pyspark.ml": pyspark.ml.autolog,
# TODO: Broaden this beyond pytorch_lightning as we add autologging support for more
# Pytorch frameworks under mlflow.pytorch.autolog
"pytorch_lightning": pytorch.autolog,
}
CONF_KEY_IS_GLOBALLY_CONFIGURED = "globally_configured"
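    # Forward only the subset of ``mlflow.autolog`` arguments that the target
    # integration's autolog function accepts in its signature.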
def get_autologging_params(autolog_fn):
try:
needed_params = list(inspect.signature(autolog_fn).parameters.keys())
return {k: v for k, v in locals_copy if k in needed_params}
except Exception:
return {}
def setup_autologging(module):
        autologging_params = None
        try:
autolog_fn = LIBRARY_TO_AUTOLOG_FN[module.__name__]
# Only call integration's autolog function with `mlflow.autolog` configs
# if the integration's autolog function has not already been called by the user.
# Logic is as follows:
# - if a previous_config exists, that means either `mlflow.autolog` or
# `mlflow.integration.autolog` was called.
# - if the config contains `CONF_KEY_IS_GLOBALLY_CONFIGURED`, the configuration
# was set by `mlflow.autolog`, and so we can safely call `autolog_fn` with
# `autologging_params`.
# - if the config doesn't contain this key, the configuration was set by an
# `mlflow.integration.autolog` call, so we should not call `autolog_fn` with
# new configs.
prev_config = AUTOLOGGING_INTEGRATIONS.get(autolog_fn.integration_name)
if prev_config and not prev_config.get(CONF_KEY_IS_GLOBALLY_CONFIGURED, False):
return
autologging_params = get_autologging_params(autolog_fn)
autolog_fn(**autologging_params)
AUTOLOGGING_INTEGRATIONS[autolog_fn.integration_name][
CONF_KEY_IS_GLOBALLY_CONFIGURED
] = True
if not autologging_is_disabled(
autolog_fn.integration_name
) and not autologging_params.get("silent", False):
_logger.info("Autologging successfully enabled for %s.", module.__name__)
except Exception as e:
if is_testing():
# Raise unexpected exceptions in test mode in order to detect
# errors within dependent autologging integrations
raise
            elif autologging_params is None or not autologging_params.get("silent", False):
_logger.warning(
"Exception raised while enabling autologging for %s: %s",
module.__name__,
str(e),
)
# for each autolog library (except pyspark), register a post-import hook.
# this way, we do not send any errors to the user until we know they are using the library.
# the post-import hook also retroactively activates for previously-imported libraries.
for module in list(
set(LIBRARY_TO_AUTOLOG_FN.keys()) - set(["tensorflow", "keras", "pyspark", "pyspark.ml"])
):
register_post_import_hook(setup_autologging, module, overwrite=True)
FULLY_IMPORTED_KERAS = False
TF_AUTOLOG_SETUP_CALLED = False
def conditionally_set_up_keras_autologging(keras_module):
nonlocal FULLY_IMPORTED_KERAS, TF_AUTOLOG_SETUP_CALLED
FULLY_IMPORTED_KERAS = True
if Version(keras_module.__version__) >= Version("2.6.0"):
# NB: Keras unconditionally depends on TensorFlow beginning with Version 2.6.0, and
# many classes defined in the `keras` module are aliases of classes in the `tf.keras`
# module. Accordingly, TensorFlow autologging serves as a replacement for Keras
# autologging in Keras >= 2.6.0
try:
import tensorflow
setup_autologging(tensorflow)
TF_AUTOLOG_SETUP_CALLED = True
except Exception as e:
_logger.debug(
"Failed to set up TensorFlow autologging for tf.keras models upon"
" Keras library import: %s",
str(e),
)
raise
else:
setup_autologging(keras_module)
register_post_import_hook(conditionally_set_up_keras_autologging, "keras", overwrite=True)
def set_up_tensorflow_autologging(tensorflow_module):
import sys
nonlocal FULLY_IMPORTED_KERAS, TF_AUTOLOG_SETUP_CALLED
if "keras" in sys.modules and not FULLY_IMPORTED_KERAS:
# In Keras >= 2.6.0, importing Keras imports the TensorFlow library, which can
# trigger this autologging import hook for TensorFlow before the entire Keras import
# procedure is completed. Attempting to set up autologging before the Keras import
# procedure has completed will result in a failure due to the unavailability of
# certain modules. In this case, we terminate the TensorFlow autologging import hook
# and rely on the Keras autologging import hook to successfully set up TensorFlow
# autologging for tf.keras models once the Keras import procedure has completed
return
# By design, in Keras >= 2.6.0, Keras needs to enable tensorflow autologging so that
# tf.keras models always use tensorflow autologging, rather than vanilla keras autologging.
# As a result, Keras autologging must call `mlflow.tensorflow.autolog()` in Keras >= 2.6.0.
# Accordingly, we insert this check to ensure that importing tensorflow, which may import
# keras, does not enable tensorflow autologging twice.
if not TF_AUTOLOG_SETUP_CALLED:
setup_autologging(tensorflow_module)
register_post_import_hook(set_up_tensorflow_autologging, "tensorflow", overwrite=True)
# for pyspark, we activate autologging immediately, without waiting for a module import.
# this is because on Databricks a SparkSession already exists and the user can directly
# interact with it, and this activity should be logged.
try:
import pyspark as pyspark_module
import pyspark.ml as pyspark_ml_module
setup_autologging(pyspark_module)
setup_autologging(pyspark_ml_module)
except ImportError as ie:
# if pyspark isn't installed, a user could potentially install it in the middle
# of their session so we want to enable autologging once they do
if "pyspark" in str(ie):
register_post_import_hook(setup_autologging, "pyspark", overwrite=True)
register_post_import_hook(setup_autologging, "pyspark.ml", overwrite=True)
except Exception as e:
if is_testing():
# Raise unexpected exceptions in test mode in order to detect
# errors within dependent autologging integrations
raise
else:
_logger.warning("Exception raised while enabling autologging for spark: %s", str(e))
| 39.049536 | 100 | 0.639864 |
793e8b7da49659a2fbffd8ae180d597ce3814940 | 20,365 | py | Python | pandas/tests/series/test_rank.py | sofiane87/pandas | 0de99558b497c5611cbe5d35d504763bd7692275 | [
"BSD-3-Clause"
] | 2 | 2019-11-13T18:20:29.000Z | 2020-04-18T02:58:39.000Z | pandas/tests/series/methods/test_rank.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/series/methods/test_rank.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | 2 | 2019-12-21T21:17:43.000Z | 2019-12-26T10:34:36.000Z | from itertools import chain, product
import numpy as np
import pytest
from pandas._libs.algos import Infinity, NegInfinity
from pandas._libs.tslib import iNaT
import pandas.util._test_decorators as td
from pandas import NaT, Series, Timestamp, date_range
from pandas.api.types import CategoricalDtype
import pandas.util.testing as tm
class TestSeriesRank:
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
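    # Expected ranks of ``s`` for each tie-breaking method (default
    # na_option="keep"): tied values share a rank according to the method,
    # and NaNs stay NaN.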
results = {
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
def test_rank(self, datetime_series):
pytest.importorskip("scipy.stats.special")
rankdata = pytest.importorskip("scipy.stats.rankdata")
datetime_series[::2] = np.nan
datetime_series[:10][::3] = 4.0
ranks = datetime_series.rank()
oranks = datetime_series.astype("O").rank()
tm.assert_series_equal(ranks, oranks)
mask = np.isnan(datetime_series)
filled = datetime_series.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled), index=filled.index, name="ts")
exp[mask] = np.nan
tm.assert_series_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
tm.assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
rng = date_range("1/1/1990", periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.iloc[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
# GH 5968
iseries = Series(["3 day", "1 day 10m", "-2 day", NaT], dtype="m8[ns]")
exp = Series([3, 2, 1, np.nan])
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
values = np.array(
[-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40],
dtype="float64",
)
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype="float64")
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
def test_rank_categorical(self):
# GH issue #15420 rank incorrectly orders ordered categories
# Test ascending/descending ranking for ordered categoricals
exp = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
exp_desc = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0])
ordered = Series(
["first", "second", "third", "fourth", "fifth", "sixth"]
).astype(
CategoricalDtype(
categories=["first", "second", "third", "fourth", "fifth", "sixth"],
ordered=True,
)
)
tm.assert_series_equal(ordered.rank(), exp)
tm.assert_series_equal(ordered.rank(ascending=False), exp_desc)
# Unordered categoricals should be ranked as objects
unordered = Series(
["first", "second", "third", "fourth", "fifth", "sixth"]
).astype(
CategoricalDtype(
categories=["first", "second", "third", "fourth", "fifth", "sixth"],
ordered=False,
)
)
exp_unordered = Series([2.0, 4.0, 6.0, 3.0, 1.0, 5.0])
res = unordered.rank()
tm.assert_series_equal(res, exp_unordered)
unordered1 = Series([1, 2, 3, 4, 5, 6]).astype(
CategoricalDtype([1, 2, 3, 4, 5, 6], False)
)
exp_unordered1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
res1 = unordered1.rank()
tm.assert_series_equal(res1, exp_unordered1)
# Test na_option for rank data
na_ser = Series(
["first", "second", "third", "fourth", "fifth", "sixth", np.NaN]
).astype(
CategoricalDtype(
["first", "second", "third", "fourth", "fifth", "sixth", "seventh"],
True,
)
)
exp_top = Series([2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.0])
exp_bot = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
exp_keep = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, np.NaN])
tm.assert_series_equal(na_ser.rank(na_option="top"), exp_top)
tm.assert_series_equal(na_ser.rank(na_option="bottom"), exp_bot)
tm.assert_series_equal(na_ser.rank(na_option="keep"), exp_keep)
# Test na_option for rank data with ascending False
exp_top = Series([7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0])
exp_bot = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 7.0])
exp_keep = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, np.NaN])
tm.assert_series_equal(na_ser.rank(na_option="top", ascending=False), exp_top)
tm.assert_series_equal(
na_ser.rank(na_option="bottom", ascending=False), exp_bot
)
tm.assert_series_equal(na_ser.rank(na_option="keep", ascending=False), exp_keep)
# Test invalid values for na_option
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
na_ser.rank(na_option="bad", ascending=False)
# invalid type
with pytest.raises(ValueError, match=msg):
na_ser.rank(na_option=True, ascending=False)
# Test with pct=True
na_ser = Series(["first", "second", "third", "fourth", np.NaN]).astype(
CategoricalDtype(["first", "second", "third", "fourth"], True)
)
exp_top = Series([0.4, 0.6, 0.8, 1.0, 0.2])
exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.0])
exp_keep = Series([0.25, 0.5, 0.75, 1.0, np.NaN])
tm.assert_series_equal(na_ser.rank(na_option="top", pct=True), exp_top)
tm.assert_series_equal(na_ser.rank(na_option="bottom", pct=True), exp_bot)
tm.assert_series_equal(na_ser.rank(na_option="keep", pct=True), exp_keep)
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method="average")
msg = (
"No axis named average for object type"
" <class 'pandas.core.series.Series'>"
)
with pytest.raises(ValueError, match=msg):
s.rank("average")
@pytest.mark.parametrize(
"contents,dtype",
[
(
[
-np.inf,
-50,
-1,
-1e-20,
-1e-25,
-1e-50,
0,
1e-40,
1e-20,
1e-10,
2,
40,
np.inf,
],
"float64",
),
(
[
-np.inf,
-50,
-1,
-1e-20,
-1e-25,
-1e-45,
0,
1e-40,
1e-20,
1e-10,
2,
40,
np.inf,
],
"float32",
),
([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"),
pytest.param(
[
np.iinfo(np.int64).min,
-100,
0,
1,
9999,
100000,
1e10,
np.iinfo(np.int64).max,
],
"int64",
marks=pytest.mark.xfail(
reason="iNaT is equivalent to minimum value of dtype"
"int64 pending issue GH#16674"
),
),
([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"),
],
)
def test_rank_inf(self, contents, dtype):
dtype_na_map = {
"float64": np.nan,
"float32": np.nan,
"int64": iNaT,
"object": None,
}
        # Insert nans at random positions if the underlying dtype has a
        # missing value. Then adjust the expected order by adding nans
        # accordingly. This tests whether the rank calculation is affected
        # when values are intertwined with nan values.
values = np.array(contents, dtype=dtype)
exp_order = np.array(range(len(values)), dtype="float64") + 1.0
if dtype in dtype_na_map:
na_value = dtype_na_map[dtype]
nan_indices = np.random.choice(range(len(values)), 5)
values = np.insert(values, nan_indices, na_value)
exp_order = np.insert(exp_order, nan_indices, np.nan)
# shuffle the testing array and expected results in the same way
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(exp_order[random_order], dtype="float64")
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
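
    # Every tie-breaking method should reproduce the precomputed expected
    # ranks in self.results, both for the raw series and its object cast.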
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method="average"):
result = s.rank(method=method)
tm.assert_series_equal(result, Series(expected))
dtypes = [None, object]
disabled = {(object, "first")}
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
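
    # Exercise each tie method x na_option x sort order on a series built
    # from three equal-sized chunks: -inf, missing values, +inf.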
@td.skip_if_no_scipy
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize("na_option", ["top", "bottom", "keep"])
def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending):
dtypes = [
("object", None, Infinity(), NegInfinity()),
("float64", np.nan, np.inf, -np.inf),
]
chunk = 3
disabled = {("object", "first")}
def _check(s, method, na_option, ascending):
exp_ranks = {
"average": ([2, 2, 2], [5, 5, 5], [8, 8, 8]),
"min": ([1, 1, 1], [4, 4, 4], [7, 7, 7]),
"max": ([3, 3, 3], [6, 6, 6], [9, 9, 9]),
"first": ([1, 2, 3], [4, 5, 6], [7, 8, 9]),
"dense": ([1, 1, 1], [2, 2, 2], [3, 3, 3]),
}
ranks = exp_ranks[method]
if na_option == "top":
order = [ranks[1], ranks[0], ranks[2]]
elif na_option == "bottom":
order = [ranks[0], ranks[2], ranks[1]]
else:
order = [ranks[0], [np.nan] * chunk, ranks[1]]
expected = order if ascending else order[::-1]
expected = list(chain.from_iterable(expected))
result = s.rank(method=method, na_option=na_option, ascending=ascending)
tm.assert_series_equal(result, Series(expected, dtype="float64"))
for dtype, na_value, pos_inf, neg_inf in dtypes:
in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk
iseries = Series(in_arr, dtype=dtype)
if (dtype, method) in disabled:
continue
_check(iseries, method, na_option, ascending)
def test_rank_desc_mix_nans_infs(self):
# GH 19538
# check descending ranking when mix nans and infs
iseries = Series([1, np.nan, np.inf, -np.inf, 25])
result = iseries.rank(ascending=False)
exp = Series([3, np.nan, 1, 4, 2], dtype="float64")
tm.assert_series_equal(result, exp)
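
    # Cross-check Series.rank against scipy.stats.rankdata for each method,
    # including shifted and scaled copies of the data.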
def test_rank_methods_series(self):
pytest.importorskip("scipy.stats.special")
rankdata = pytest.importorskip("scipy.stats.rankdata")
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
np.random.shuffle(xs)
index = [chr(ord("a") + i) for i in range(len(xs))]
for vals in [xs, xs + 1e6, xs * 1e-6]:
ts = Series(vals, index=index)
for m in ["average", "min", "max", "first", "dense"]:
result = ts.rank(method=m)
sprank = rankdata(vals, m if m != "first" else "ordinal")
expected = Series(sprank, index=index).astype("float64")
tm.assert_series_equal(result, expected)
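
    # method="dense": tied values share a rank and ranks increase by one
    # between distinct values, leaving no gaps.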
def test_rank_dense_method(self):
dtypes = ["O", "f8", "i8"]
in_out = [
([1], [1]),
([2], [1]),
([0], [1]),
([2, 2], [1, 1]),
([1, 2, 3], [1, 2, 3]),
([4, 2, 1], [3, 2, 1]),
([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),
([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5]),
]
for ser, exp in in_out:
for dtype in dtypes:
s = Series(ser).astype(dtype)
result = s.rank(method="dense")
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
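
    # Descending ranks should equal the ascending ranks of the reflected
    # values (s.max() - s), for every dtype and tie method.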
def test_rank_descending(self):
dtypes = ["O", "f8", "i8"]
for dtype, method in product(dtypes, self.results):
if "i" in dtype:
s = self.s.dropna()
else:
s = self.s.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
tm.assert_series_equal(res, expected)
if method == "first" and dtype == "O":
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
tm.assert_series_equal(res2, expected)
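
    # Integer input should yield the same ranks as the float fixture once
    # missing values are dropped.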
def test_rank_int(self):
s = self.s.dropna().astype("i8")
for method, res in self.results.items():
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
tm.assert_series_equal(result, expected)
def test_rank_object_bug(self):
# GH 13445
# smoke tests
Series([np.nan] * 32).astype(object).rank(ascending=True)
Series([np.nan] * 32).astype(object).rank(ascending=False)
def test_rank_modify_inplace(self):
# GH 18521
# Check rank does not mutate series
s = Series([Timestamp("2017-01-05 10:20:27.569000"), NaT])
expected = s.copy()
s.rank()
result = s
tm.assert_series_equal(result, expected)
# GH15630, pct should be on 100% basis when method='dense'
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0, 1.0]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 2, 2.0 / 2, 2.0 / 2]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.0 / 3, 1.0 / 3, 3.0 / 3, 3.0 / 3, 2.0 / 3]),
([1, 1, 3, 3, 5, 5], [1.0 / 3, 1.0 / 3, 2.0 / 3, 2.0 / 3, 3.0 / 3, 3.0 / 3]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_dense_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="dense", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
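
# method="min" with pct=True: ties take the lowest rank in their group,
# and every rank is divided by the total number of observations.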
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0 / 2, 1.0 / 2]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 2.0 / 3, 2.0 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.0 / 5, 1.0 / 5, 4.0 / 5, 4.0 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [1.0 / 6, 1.0 / 6, 3.0 / 6, 3.0 / 6, 5.0 / 6, 5.0 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_min_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="min", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
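
# method="max" with pct=True: ties take the highest rank in their group,
# so a fully tied series ranks as 1.0 throughout.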
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0, 1.0]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 3.0 / 3, 3.0 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [2.0 / 5, 2.0 / 5, 5.0 / 5, 5.0 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [2.0 / 6, 2.0 / 6, 4.0 / 6, 4.0 / 6, 6.0 / 6, 6.0 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_max_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="max", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
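
# method="average" with pct=True: ties take the mean of their ranks,
# producing fractional numerators such as 1.5 and 2.5.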
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.5 / 2, 1.5 / 2]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 2.5 / 3, 2.5 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_average_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="average", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
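
# method="first" with pct=True: ties are broken by order of appearance;
# object dtype is omitted since method="first" does not support it.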
@pytest.mark.parametrize("dtype", ["f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0 / 2, 2.0 / 2.0]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.0 / 5, 2.0 / 5, 4.0 / 5, 5.0 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [1.0 / 6, 2.0 / 6, 3.0 / 6, 4.0 / 6, 5.0 / 6, 6.0 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_first_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="first", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.single
@pytest.mark.high_memory
def test_pct_max_many_rows():
# GH 18271
s = Series(np.arange(2 ** 24 + 1))
result = s.rank(pct=True).max()
assert result == 1
avg_line_length: 35.917108 | max_line_length: 88 | alphanum_fraction: 0.497569
5b gpt2 tokens