Unnamed: 0 (int64, 0-2.93k) | code (stringlengths 101-62.2k) | docs (stringlengths 51-10.7k) | doc_len (int64, 4-1.74k) | words (int64, 4-4.82k) | lang (stringclasses, 1 value) | prompt (stringlengths 320-71.2k) |
---|---|---|---|---|---|---|
2,900 | def get_sympy_dir():
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return os.path.normcase(sympy_dir)
|
Returns the root SymPy directory and sets the global value
indicating whether the system is case sensitive or not.
| 19 | 15 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_sympy_dir():
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return os.path.normcase(sympy_dir)
```
###Assistant :
Returns the root SymPy directory and sets the global value
indicating whether the system is case sensitive or not.
|
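A minimal sketch of the path arithmetic in the row above, using a hypothetical file location (the path below is an assumption for illustration, not from the dataset):
```Python
import os.path

# Hypothetical file two directories below the repository root.
this_file = "/home/user/sympy/sympy/testing/runtests.py"
root = os.path.normpath(os.path.join(os.path.dirname(this_file), "..", ".."))
print(root)                    # /home/user/sympy
print(os.path.normcase(root))  # lower-cased only on case-insensitive platforms
```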
2,901 | def CheckCaffeRandom(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
for function in c_random_function_list:
ix = line.find(function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'caffe/random_fn', 2,
'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
+ function +
') to ensure results are deterministic for a fixed Caffe seed.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
| Checks for calls to C random functions (rand, rand_r, random, ...).
Caffe code should (almost) always use the caffe_rng_* functions rather
than these, as the internal state of these C functions is independent of the
native Caffe RNG system which should produce deterministic results for a
fixed Caffe seed set using Caffe::set_random_seed(...).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
| 84 | 99 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def CheckCaffeRandom(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
for function in c_random_function_list:
ix = line.find(function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'caffe/random_fn', 2,
'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
+ function +
') to ensure results are deterministic for a fixed Caffe seed.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
```
###Assistant : Checks for calls to C random functions (rand, rand_r, random, ...).
Caffe code should (almost) always use the caffe_rng_* functions rather
than these, as the internal state of these C functions is independent of the
native Caffe RNG system which should produce deterministic results for a
fixed Caffe seed set using Caffe::set_random_seed(...).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
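A minimal sketch of the token-boundary test the row above relies on: a hit only counts when the preceding character cannot belong to an identifier, a member access (`.`), or an arrow operator (detected via `>`). The entries in `c_random_function_list` appear to end with `(`, which is why the error message closes with a bare `)`:
```Python
def is_call_boundary(line, function):
    ix = line.find(function)
    return ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum()
                                    and line[ix - 1] not in ('_', '.', '>')))

print(is_call_boundary("x = rand();", "rand("))            # True: bare call
print(is_call_boundary("x = caffe_rng_rand();", "rand("))  # False: part of another name
print(is_call_boundary("x = obj.rand();", "rand("))        # False: member call
```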
2,902 | def test_mapped_dag(self, dag_id, executor_name, session):
# This test needs a real executor to run, so that the `make_list` task can write out the TaskMap
from airflow.executors.executor_loader import ExecutorLoader
self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))
dag = self.dagbag.get_dag(dag_id)
when = datetime.datetime(2022, 1, 1)
job = BackfillJob(
dag=dag,
start_date=when,
end_date=when,
donot_pickle=True,
executor=ExecutorLoader.load_executor(executor_name),
)
job.run()
dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0]
assert dr
assert dr.state == DagRunState.SUCCESS
# Check that every task has a start and end date
for ti in dr.task_instances:
assert ti.state == TaskInstanceState.SUCCESS
assert ti.start_date is not None
assert ti.end_date is not None
|
End-to-end test of a simple mapped dag.
We test with multiple executors as they have different "execution environments" -- for instance
DebugExecutor runs a lot more in the same process than other Executors.
| 33 | 89 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_mapped_dag(self, dag_id, executor_name, session):
# This test needs a real executor to run, so that the `make_list` task can write out the TaskMap
from airflow.executors.executor_loader import ExecutorLoader
self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))
dag = self.dagbag.get_dag(dag_id)
when = datetime.datetime(2022, 1, 1)
job = BackfillJob(
dag=dag,
start_date=when,
end_date=when,
donot_pickle=True,
executor=ExecutorLoader.load_executor(executor_name),
)
job.run()
dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0]
assert dr
assert dr.state == DagRunState.SUCCESS
# Check that every task has a start and end date
for ti in dr.task_instances:
assert ti.state == TaskInstanceState.SUCCESS
assert ti.start_date is not None
assert ti.end_date is not None
```
###Assistant :
End-to-end test of a simple mapped dag.
We test with multiple executors as they have different "execution environments" -- for instance
DebugExecutor runs a lot more in the same process than other Executors.
|
2,903 | async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):
cert_path, key_path = await hass.async_add_executor_job(
_setup_broken_ssl_pem_files, tmpdir
)
hass.config.safe_mode = True
assert (
await async_setup_component(
hass,
"http",
{
"http": {"ssl_certificate": cert_path, "ssl_key": key_path},
},
)
is True
)
await hass.async_start()
await hass.async_block_till_done()
assert (
"Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable"
in caplog.text
)
assert hass.http.site is not None
| Test http can startup with an emergency self signed cert when the current one is broken. | 16 | 69 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):
cert_path, key_path = await hass.async_add_executor_job(
_setup_broken_ssl_pem_files, tmpdir
)
hass.config.safe_mode = True
assert (
await async_setup_component(
hass,
"http",
{
"http": {"ssl_certificate": cert_path, "ssl_key": key_path},
},
)
is True
)
await hass.async_start()
await hass.async_block_till_done()
assert (
"Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable"
in caplog.text
)
assert hass.http.site is not None
```
###Assistant : Test http can startup with an emergency self signed cert when the current one is broken.
|
2,904 | def get_safe_request_meta(self, request):
if not hasattr(request, "META"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}
|
Return a dictionary of request.META with sensitive values redacted.
| 9 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_safe_request_meta(self, request):
if not hasattr(request, "META"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}
```
###Assistant :
Return a dictionary of request.META with sensitive values redacted.
|
2,905 | def cuda(self, *args, **kwargs) -> nn.Module:
return self.data_preprocessor.cuda(*args, **kwargs)
| Overrides this method to set the :attr:`device`
Returns:
nn.Module: The model itself.
| 12 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cuda(self, *args, **kwargs) -> nn.Module:
return self.data_preprocessor.cuda(*args, **kwargs)
```
###Assistant : Overrides this method to set the :attr:`device`
Returns:
nn.Module: The model itself.
|
2,906 | def get_cache_attr_name(cls):
return "_{}.{}".format(cls._meta.app_label, cls._meta.model_name).lower()
|
Returns the name of the attribute that should be used to store
a reference to the fetched/created object on a request.
| 21 | 5 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_cache_attr_name(cls):
return "_{}.{}".format(cls._meta.app_label, cls._meta.model_name).lower()
```
###Assistant :
Returns the name of the attribute that should be used to store
a reference to the fetched/created object on a request.
|
2,907 | def get_instance(cls, info, **data):
object_id = data.get("id")
object_sku = data.get("sku")
attributes = data.get("attributes")
if attributes:
# Prefetches needed by AttributeAssignmentMixin and
# associate_attribute_values_to_instance
qs = cls.Meta.model.objects.prefetch_related(
"product__product_type__variant_attributes__values",
"product__product_type__attributevariant",
)
else:
# Use the default queryset.
qs = models.ProductVariant.objects.all()
if object_id:
return cls.get_node_or_error(
info, object_id, only_type="ProductVariant", qs=qs
)
elif object_sku:
instance = qs.filter(sku=object_sku).first()
if not instance:
raise ValidationError(
{
"sku": ValidationError(
"Couldn't resolve to a node: %s" % object_sku,
code="not_found",
)
}
)
return instance
else:
return cls._meta.model()
| Prefetch related fields that are needed to process the mutation.
If we are updating an instance and want to update its attributes,
prefetch them.
| 25 | 77 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_instance(cls, info, **data):
object_id = data.get("id")
object_sku = data.get("sku")
attributes = data.get("attributes")
if attributes:
# Prefetches needed by AttributeAssignmentMixin and
# associate_attribute_values_to_instance
qs = cls.Meta.model.objects.prefetch_related(
"product__product_type__variant_attributes__values",
"product__product_type__attributevariant",
)
else:
# Use the default queryset.
qs = models.ProductVariant.objects.all()
if object_id:
return cls.get_node_or_error(
info, object_id, only_type="ProductVariant", qs=qs
)
elif object_sku:
instance = qs.filter(sku=object_sku).first()
if not instance:
raise ValidationError(
{
"sku": ValidationError(
"Couldn't resolve to a node: %s" % object_sku,
code="not_found",
)
}
)
return instance
else:
return cls._meta.model()
```
###Assistant : Prefetch related fields that are needed to process the mutation.
If we are updating an instance and want to update its attributes,
prefetch them.
|
2,908 | def _can_use_libjoin(self) -> bool:
if type(self) is Index:
# excludes EAs
return isinstance(self.dtype, np.dtype)
return not is_interval_dtype(self.dtype)
# --------------------------------------------------------------------
# Uncategorized Methods
|
Whether we can use the fastpaths implemented in _libs.join
| 9 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _can_use_libjoin(self) -> bool:
if type(self) is Index:
# excludes EAs
return isinstance(self.dtype, np.dtype)
return not is_interval_dtype(self.dtype)
# --------------------------------------------------------------------
# Uncategorized Methods
```
###Assistant :
Whether we can use the fastpaths implemented in _libs.join
|
2,909 | def sub_syllables(self, from_i, to_j):
if not isinstance(from_i, int) or not isinstance(to_j, int):
raise ValueError("both arguments should be integers")
group = self.group
if to_j <= from_i:
return group.identity
else:
r = tuple(self.array_form[from_i: to_j])
return group.dtype(r)
|
`sub_syllables` returns the subword of the associative word `self` that
consists of syllables from positions `from_i` to `to_j`, where
`from_i` and `to_j` must be positive integers and indexing is done
with origin 0.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a, b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.sub_syllables(1, 2)
b
>>> w.sub_syllables(3, 3)
<identity>
| 59 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def sub_syllables(self, from_i, to_j):
if not isinstance(from_i, int) or not isinstance(to_j, int):
raise ValueError("both arguments should be integers")
group = self.group
if to_j <= from_i:
return group.identity
else:
r = tuple(self.array_form[from_i: to_j])
return group.dtype(r)
```
###Assistant :
`sub_syllables` returns the subword of the associative word `self` that
consists of syllables from positions `from_i` to `to_j`, where
`from_i` and `to_j` must be positive integers and indexing is done
with origin 0.
Examples
========
>>> from sympy.combinatorics import free_group
>>> f, a, b = free_group("a, b")
>>> w = a**5*b*a**2*b**-4*a
>>> w.sub_syllables(1, 2)
b
>>> w.sub_syllables(3, 3)
<identity>
|
2,910 | def seterr(all=None, divide=None, over=None, under=None, invalid=None):
pyvals = umath.geterrobj()
old = geterr()
if divide is None:
divide = all or old['divide']
if over is None:
over = all or old['over']
if under is None:
under = all or old['under']
if invalid is None:
invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
@set_module('numpy') |
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] https://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.seterr(**old_settings) # reset to default
{'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in scalar multiply
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
>>> np.int16(32000) * np.int16(3)
30464
| 336 | 72 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
pyvals = umath.geterrobj()
old = geterr()
if divide is None:
divide = all or old['divide']
if over is None:
over = all or old['over']
if under is None:
under = all or old['under']
if invalid is None:
invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
@set_module('numpy')
```
###Assistant :
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] https://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.seterr(**old_settings) # reset to default
{'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in scalar multiply
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
>>> np.int16(32000) * np.int16(3)
30464
|
2,911 | def _create_drawables(self, tokensource):
lineno = charno = maxcharno = 0
maxlinelength = linelength = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(linelength, lineno),
temp,
font = self._get_style_font(style),
text_fg = self._get_text_color(style),
text_bg = self._get_text_bg_color(style),
)
temp_width, temp_height = self.fonts.get_text_size(temp)
linelength += temp_width
maxlinelength = max(maxlinelength, linelength)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
linelength = 0
charno = 0
lineno += 1
self.maxlinelength = maxlinelength
self.maxcharno = maxcharno
self.maxlineno = lineno
|
Create drawables for the token content.
| 6 | 144 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _create_drawables(self, tokensource):
lineno = charno = maxcharno = 0
maxlinelength = linelength = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(linelength, lineno),
temp,
font = self._get_style_font(style),
text_fg = self._get_text_color(style),
text_bg = self._get_text_bg_color(style),
)
temp_width, temp_height = self.fonts.get_text_size(temp)
linelength += temp_width
maxlinelength = max(maxlinelength, linelength)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
linelength = 0
charno = 0
lineno += 1
self.maxlinelength = maxlinelength
self.maxcharno = maxcharno
self.maxlineno = lineno
```
###Assistant :
Create drawables for the token content.
|
2,912 | def __monotonic_time_coarse() -> float:
return time.clock_gettime(CLOCK_MONOTONIC_COARSE)
monotonic_time_coarse = time.monotonic
with suppress(Exception):
if (
platform.system() == "Linux"
and abs(time.monotonic() - __monotonic_time_coarse()) < 1
):
monotonic_time_coarse = __monotonic_time_coarse
| Return a monotonic time in seconds.
This is the coarse version of time_monotonic, which is faster but less accurate.
Since many arm64 and 32-bit platforms don't support VDSO with time.monotonic
because of errata, we can't rely on the kernel to provide a fast
monotonic time.
https://lore.kernel.org/lkml/20170404171826.25030-1-marc.zyngier@arm.com/
| 46 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __monotonic_time_coarse() -> float:
return time.clock_gettime(CLOCK_MONOTONIC_COARSE)
monotonic_time_coarse = time.monotonic
with suppress(Exception):
if (
platform.system() == "Linux"
and abs(time.monotonic() - __monotonic_time_coarse()) < 1
):
monotonic_time_coarse = __monotonic_time_coarse
```
###Assistant : Return a monotonic time in seconds.
This is the coarse version of time_monotonic, which is faster but less accurate.
Since many arm64 and 32-bit platforms don't support VDSO with time.monotonic
because of errata, we can't rely on the kernel to provide a fast
monotonic time.
https://lore.kernel.org/lkml/20170404171826.25030-1-marc.zyngier@arm.com/
|
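A minimal sketch of the same probing idea, assuming a Linux host: use the coarse clock only when the platform exposes it and it agrees with `time.monotonic()` to within a second.
```Python
import time

monotonic = time.monotonic
if hasattr(time, "CLOCK_MONOTONIC_COARSE"):  # Linux-specific clock
    def coarse() -> float:
        return time.clock_gettime(time.CLOCK_MONOTONIC_COARSE)
    if abs(time.monotonic() - coarse()) < 1:
        monotonic = coarse

print(monotonic())
```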
2,913 | def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
image_inputs = []
for i in range(feature_extract_tester.batch_size):
image_inputs.append(
np.random.randint(
255,
size=(
feature_extract_tester.num_channels,
feature_extract_tester.max_resolution,
feature_extract_tester.max_resolution,
),
dtype=np.uint8,
)
)
else:
image_inputs = []
# To avoid getting image width/height 0
min_resolution = feature_extract_tester.min_resolution
if getattr(feature_extract_tester, "size_divisor", None):
# If `size_divisor` is defined, the image needs to have width/height >= `size_divisor`
min_resolution = max(feature_extract_tester.size_divisor, min_resolution)
for i in range(feature_extract_tester.batch_size):
width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2)
image_inputs.append(
np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8)
)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
if torchify:
image_inputs = [torch.from_numpy(x) for x in image_inputs]
return image_inputs
| This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
| 28 | 129 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
image_inputs = []
for i in range(feature_extract_tester.batch_size):
image_inputs.append(
np.random.randint(
255,
size=(
feature_extract_tester.num_channels,
feature_extract_tester.max_resolution,
feature_extract_tester.max_resolution,
),
dtype=np.uint8,
)
)
else:
image_inputs = []
# To avoid getting image width/height 0
min_resolution = feature_extract_tester.min_resolution
if getattr(feature_extract_tester, "size_divisor", None):
# If `size_divisor` is defined, the image needs to have width/height >= `size_divisor`
min_resolution = max(feature_extract_tester.size_divisor, min_resolution)
for i in range(feature_extract_tester.batch_size):
width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2)
image_inputs.append(
np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8)
)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
if torchify:
image_inputs = [torch.from_numpy(x) for x in image_inputs]
return image_inputs
```
###Assistant : This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
|
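A minimal sketch of the channel-order handling in the row above: the synthetic arrays are channel-first `(C, H, W)`, while PIL expects channel-last `(H, W, C)`, hence the `np.moveaxis` before `Image.fromarray`.
```Python
import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 32, 48), dtype=np.uint8)  # (C, H, W)
img = Image.fromarray(np.moveaxis(arr, 0, -1))                  # now (H, W, C)
print(img.size)  # (48, 32): PIL reports (width, height)
```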
2,914 | def _update_step_xla(self, gradient, variable, key):
return self._update_step(gradient, variable)
| A wrapper of `update_step` to enable XLA acceleration.
Due to the `tf.function` tracing mechanism, for (gradient, variable) pairs of
the same shape and dtype, the execution graph always invokes the first
pair it has seen. Thus, we need a `key` argument to make each
(gradient, variable) pair unique. In addition, XLA cannot understand
string input, so the key is an integer.
Args:
gradient: backpropagated gradient of the given variable.
variable: variable whose value needs to be updated.
key (int): a unique key that identifies the variable.
Returns:
An `Operation` that applies the specified gradients.
| 93 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _update_step_xla(self, gradient, variable, key):
return self._update_step(gradient, variable)
```
###Assistant : A wrapper of `update_step` to enable XLA acceleration.
Due to the `tf.function` tracing mechanism, for (gradient, variable) pairs of
the same shape and dtype, the execution graph always invokes the first
pair it has seen. Thus, we need a `key` argument to make each
(gradient, variable) pair unique. In addition, XLA cannot understand
string input, so the key is an integer.
Args:
gradient: backpropagated gradient of the given variable.
variable: variable whose value needs to be updated.
key (int): a unique key that identifies the variable.
Returns:
An `Operation` that applies the specified gradients.
|
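A hedged, standalone sketch of the retracing behaviour the docstring describes (not the Keras internals themselves): because `key` is a plain Python int, `tf.function` traces one concrete function per key, so same-shape (gradient, variable) pairs no longer collapse onto the first traced graph.
```Python
import tensorflow as tf

@tf.function(jit_compile=True)  # XLA-compiled, as in the wrapper above
def update_step(gradient, variable, key):
    variable.assign_sub(gradient)

v1 = tf.Variable([1.0, 2.0])
v2 = tf.Variable([3.0, 4.0])
update_step(tf.constant([0.1, 0.1]), v1, 0)
update_step(tf.constant([0.1, 0.1]), v2, 1)  # key differs, so a fresh trace
```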
2,915 | async def log_in(self, request):
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
return {"success": False, "error": "Unknown Error"}
|
Logs in the wallet with a specific key.
| 8 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def log_in(self, request):
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
return {"success": False, "error": "Unknown Error"}
```
###Assistant :
Logs in the wallet with a specific key.
|
2,916 | def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:
commit_info = "* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n"
expected_message = f
self.check_webhook(
"push_multiple_committers_with_others", TOPIC_BRANCH_EVENTS, expected_message
)
| Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by Tomasz (4), James (3), Brendon (2) and others (1).\n\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed)) | 21 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:
commit_info = "* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n"
expected_message = f
self.check_webhook(
"push_multiple_committers_with_others", TOPIC_BRANCH_EVENTS, expected_message
)
```
###Assistant : Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by Tomasz (4), James (3), Brendon (2) and others (1).\n\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))
|
2,917 | def drawControl(self, element, opt, p, widget=None):
if element not in [QStyle.ControlElement.CE_TabBarTab, QStyle.ControlElement.CE_TabBarTabShape,
QStyle.ControlElement.CE_TabBarTabLabel]:
# Let the real style draw it.
self._style.drawControl(element, opt, p, widget)
return
layouts = self._tab_layout(opt)
if layouts is None:
log.misc.warning("Could not get layouts for tab!")
return
if element == QStyle.ControlElement.CE_TabBarTab:
# We override this so we can control TabBarTabShape/TabBarTabLabel.
self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget)
elif element == QStyle.ControlElement.CE_TabBarTabShape:
p.fillRect(opt.rect, opt.palette.window())
self._draw_indicator(layouts, opt, p)
# We use super() rather than self._style here because we don't want
# any sophisticated drawing.
super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
elif element == QStyle.ControlElement.CE_TabBarTabLabel:
if not opt.icon.isNull() and layouts.icon.isValid():
self._draw_icon(layouts, opt, p)
alignment = (config.cache['tabs.title.alignment'] |
Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextHideMnemonic)
self._style.drawItemText(p,
layouts.text,
int(alignment),
opt.palette,
bool(opt.state & QStyle.StateFlag.State_Enabled),
opt.text,
QPalette.ColorRole.WindowText)
else:
raise ValueError("Invalid element {!r}".format(element))
| Override drawControl to draw odd tabs in a different color.
Draws the given element with the provided painter, using the style
options specified by opt.
Args:
element: ControlElement
opt: QStyleOption
p: QPainter
widget: QWidget
| 34 | 122 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def drawControl(self, element, opt, p, widget=None):
if element not in [QStyle.ControlElement.CE_TabBarTab, QStyle.ControlElement.CE_TabBarTabShape,
QStyle.ControlElement.CE_TabBarTabLabel]:
# Let the real style draw it.
self._style.drawControl(element, opt, p, widget)
return
layouts = self._tab_layout(opt)
if layouts is None:
log.misc.warning("Could not get layouts for tab!")
return
if element == QStyle.ControlElement.CE_TabBarTab:
# We override this so we can control TabBarTabShape/TabBarTabLabel.
self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget)
elif element == QStyle.ControlElement.CE_TabBarTabShape:
p.fillRect(opt.rect, opt.palette.window())
self._draw_indicator(layouts, opt, p)
# We use super() rather than self._style here because we don't want
# any sophisticated drawing.
super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)
elif element == QStyle.ControlElement.CE_TabBarTabLabel:
if not opt.icon.isNull() and layouts.icon.isValid():
self._draw_icon(layouts, opt, p)
alignment = (config.cache['tabs.title.alignment'] |
Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextHideMnemonic)
self._style.drawItemText(p,
layouts.text,
int(alignment),
opt.palette,
bool(opt.state & QStyle.StateFlag.State_Enabled),
opt.text,
QPalette.ColorRole.WindowText)
else:
raise ValueError("Invalid element {!r}".format(element))
```
###Assistant : Override drawControl to draw odd tabs in a different color.
Draws the given element with the provided painter, using the style
options specified by opt.
Args:
element: ControlElement
opt: QStyleOption
p: QPainter
widget: QWidget
|
2,918 | def _get_mask(length, max_length):
length = length.unsqueeze(-1)
B = paddle.shape(length)[0]
grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1])
zero_mask = paddle.zeros([B, max_length], dtype='float32')
inf_mask = paddle.full([B, max_length], '-inf', dtype='float32')
diag_mask = paddle.diag(
paddle.full(
[max_length], '-inf', dtype=paddle.float32),
offset=0,
name=None)
mask = paddle.where(grid >= length, inf_mask, zero_mask)
mask = mask.unsqueeze(1) + diag_mask
return mask.unsqueeze(1)
| Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
| 20 | 48 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_mask(length, max_length):
length = length.unsqueeze(-1)
B = paddle.shape(length)[0]
grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1])
zero_mask = paddle.zeros([B, max_length], dtype='float32')
inf_mask = paddle.full([B, max_length], '-inf', dtype='float32')
diag_mask = paddle.diag(
paddle.full(
[max_length], '-inf', dtype=paddle.float32),
offset=0,
name=None)
mask = paddle.where(grid >= length, inf_mask, zero_mask)
mask = mask.unsqueeze(1) + diag_mask
return mask.unsqueeze(1)
```
###Assistant : Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
|
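A minimal NumPy sketch of the same masking logic (NumPy stands in for Paddle here; shapes follow the code above): `0.0` marks attendable positions, `-inf` marks padding and the diagonal.
```Python
import numpy as np

def get_mask_np(lengths, max_length):
    B = lengths.shape[0]
    grid = np.tile(np.arange(max_length), (B, 1))
    mask = np.where(grid >= lengths[:, None], -np.inf, 0.0)        # pad positions
    mask = mask[:, None, :] + np.diag(np.full(max_length, -np.inf))  # diagonal
    return mask[:, None, :, :]                                     # (B, 1, L, L)

print(get_mask_np(np.array([2, 3]), 4)[0, 0])
```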
2,919 | def test_config_options_removed_on_reparse(self):
global_config_path = "/mock/home/folder/.streamlit/config.toml"
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path == global_config_path
global_config =
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
self.assertEqual("dark", config.get_option("theme.base"))
self.assertEqual("sans serif", config.get_option("theme.font"))
global_config =
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(force_reparse=True)
self.assertEqual("dark", config.get_option("theme.base"))
self.assertEqual(None, config.get_option("theme.font"))
| Test that config options that are removed in a file are also removed
from our _config_options dict.
[theme]
base = "dark"
font = "sans serif"
[theme]
base = "dark"
| 29 | 52 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_config_options_removed_on_reparse(self):
global_config_path = "/mock/home/folder/.streamlit/config.toml"
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path == global_config_path
global_config =
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options()
self.assertEqual("dark", config.get_option("theme.base"))
self.assertEqual("sans serif", config.get_option("theme.font"))
global_config =
open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(force_reparse=True)
self.assertEqual("dark", config.get_option("theme.base"))
self.assertEqual(None, config.get_option("theme.font"))
```
###Assistant : Test that config options that are removed in a file are also removed
from our _config_options dict.
[theme]
base = "dark"
font = "sans serif"
[theme]
base = "dark"
|
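A minimal sketch of the patching pattern the test above uses: `mock_open` makes any `open()` call return the supplied text, so no real config file is needed. The test targets `streamlit.config.open` (the module under test); `builtins.open` below is just the simplest self-contained form.
```Python
from unittest.mock import mock_open, patch

config_text = '[theme]\nbase = "dark"\n'
with patch("builtins.open", mock_open(read_data=config_text)):
    with open("config.toml") as f:
        print(f.read())  # prints the mocked TOML; no file is touched
```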
2,920 | def __getitem__(self, key):
getitem = self._data.__getitem__
if is_integer(key) or is_float(key):
# GH#44051 exclude bool, which would return a 2d ndarray
key = com.cast_scalar_indexer(key, warn_float=True)
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization com.is_bool_indexer and ndim checks.
result = getitem(key)
# Going through simple_new for performance.
return type(self)._simple_new(result, name=self._name)
if com.is_bool_indexer(key):
# if we have list[bools, length=1e5] then doing this check+convert
# takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
# time below from 3.8 ms to 496 µs
# if we already have ndarray[bool], the overhead is 1.4 µs or .25%
key = np.asarray(key, dtype=bool)
result = getitem(key)
# Because we ruled out integer above, we always get an arraylike here
if result.ndim > 1:
deprecate_ndim_indexing(result)
if hasattr(result, "_ndarray"):
# error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr]
# i.e. NDArrayBackedExtensionArray
# Unpack to ndarray for MPL compat
return result._ndarray # type: ignore[union-attr]
return result
# NB: Using _constructor._simple_new would break if MultiIndex
# didn't override __getitem__
return self._constructor._simple_new(result, name=self._name)
|
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only support ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
| 38 | 178 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __getitem__(self, key):
getitem = self._data.__getitem__
if is_integer(key) or is_float(key):
# GH#44051 exclude bool, which would return a 2d ndarray
key = com.cast_scalar_indexer(key, warn_float=True)
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization com.is_bool_indexer and ndim checks.
result = getitem(key)
# Going through simple_new for performance.
return type(self)._simple_new(result, name=self._name)
if com.is_bool_indexer(key):
# if we have list[bools, length=1e5] then doing this check+convert
# takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
# time below from 3.8 ms to 496 µs
# if we already have ndarray[bool], the overhead is 1.4 µs or .25%
key = np.asarray(key, dtype=bool)
result = getitem(key)
# Because we ruled out integer above, we always get an arraylike here
if result.ndim > 1:
deprecate_ndim_indexing(result)
if hasattr(result, "_ndarray"):
# error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr]
# i.e. NDArrayBackedExtensionArray
# Unpack to ndarray for MPL compat
return result._ndarray # type: ignore[union-attr]
return result
# NB: Using _constructor._simple_new would break if MultiIndex
# didn't override __getitem__
return self._constructor._simple_new(result, name=self._name)
```
###Assistant :
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only support ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
|
2,921 | def get_data(conditions, filters):
data = frappe.db.sql(
.format(
conditions=conditions
),
filters,
as_dict=1,
)
return data
|
SELECT
so.transaction_date as date,
soi.delivery_date as delivery_date,
so.name as sales_order,
so.status, so.customer, soi.item_code,
DATEDIFF(CURDATE(), soi.delivery_date) as delay_days,
IF(so.status in ('Completed','To Bill'), 0, (SELECT delay_days)) as delay,
soi.qty, soi.delivered_qty,
(soi.qty - soi.delivered_qty) AS pending_qty,
IF((SELECT pending_qty) = 0, (TO_SECONDS(Max(dn.posting_date))-TO_SECONDS(so.transaction_date)), 0) as time_taken_to_deliver,
IFNULL(SUM(sii.qty), 0) as billed_qty,
soi.base_amount as amount,
(soi.delivered_qty * soi.base_rate) as delivered_qty_amount,
(soi.billed_amt * IFNULL(so.conversion_rate, 1)) as billed_amount,
(soi.base_amount - (soi.billed_amt * IFNULL(so.conversion_rate, 1))) as pending_amount,
soi.warehouse as warehouse,
so.company, soi.name,
soi.description as description
FROM
`tabSales Order` so,
(`tabSales Order Item` soi
LEFT JOIN `tabSales Invoice Item` sii
ON sii.so_detail = soi.name and sii.docstatus = 1)
LEFT JOIN `tabDelivery Note Item` dni
on dni.so_detail = soi.name
RIGHT JOIN `tabDelivery Note` dn
on dni.parent = dn.name and dn.docstatus = 1
WHERE
soi.parent = so.name
and so.status not in ('Stopped', 'Closed', 'On Hold')
and so.docstatus = 1
{conditions}
GROUP BY soi.name
ORDER BY so.transaction_date ASC, soi.item_code ASC
| 146 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_data(conditions, filters):
data = frappe.db.sql(
.format(
conditions=conditions
),
filters,
as_dict=1,
)
return data
```
###Assistant :
SELECT
so.transaction_date as date,
soi.delivery_date as delivery_date,
so.name as sales_order,
so.status, so.customer, soi.item_code,
DATEDIFF(CURDATE(), soi.delivery_date) as delay_days,
IF(so.status in ('Completed','To Bill'), 0, (SELECT delay_days)) as delay,
soi.qty, soi.delivered_qty,
(soi.qty - soi.delivered_qty) AS pending_qty,
IF((SELECT pending_qty) = 0, (TO_SECONDS(Max(dn.posting_date))-TO_SECONDS(so.transaction_date)), 0) as time_taken_to_deliver,
IFNULL(SUM(sii.qty), 0) as billed_qty,
soi.base_amount as amount,
(soi.delivered_qty * soi.base_rate) as delivered_qty_amount,
(soi.billed_amt * IFNULL(so.conversion_rate, 1)) as billed_amount,
(soi.base_amount - (soi.billed_amt * IFNULL(so.conversion_rate, 1))) as pending_amount,
soi.warehouse as warehouse,
so.company, soi.name,
soi.description as description
FROM
`tabSales Order` so,
(`tabSales Order Item` soi
LEFT JOIN `tabSales Invoice Item` sii
ON sii.so_detail = soi.name and sii.docstatus = 1)
LEFT JOIN `tabDelivery Note Item` dni
on dni.so_detail = soi.name
RIGHT JOIN `tabDelivery Note` dn
on dni.parent = dn.name and dn.docstatus = 1
WHERE
soi.parent = so.name
and so.status not in ('Stopped', 'Closed', 'On Hold')
and so.docstatus = 1
{conditions}
GROUP BY soi.name
ORDER BY so.transaction_date ASC, soi.item_code ASC
|
2,922 | def getcoroutinelocals(coroutine):
frame = getattr(coroutine, "cr_frame", None)
if frame is not None:
return frame.f_locals
else:
return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
_ClassMethodWrapper,
types.BuiltinFunctionType)
|
Get the mapping of coroutine local variables to their current values.
A dict is returned, with the keys the local variable names and values the
bound values. | 27 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def getcoroutinelocals(coroutine):
frame = getattr(coroutine, "cr_frame", None)
if frame is not None:
return frame.f_locals
else:
return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
_ClassMethodWrapper,
types.BuiltinFunctionType)
```
###Assistant :
Get the mapping of coroutine local variables to their current values.
A dict is returned, with the keys the local variable names and values the
bound values.
|
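A minimal usage sketch for the row above: before a coroutine is first awaited, its `cr_frame.f_locals` already holds the bound arguments.
```Python
import inspect

async def greet(name):
    message = "hello " + name
    return message

coro = greet("world")
print(inspect.getcoroutinelocals(coro))  # {'name': 'world'}
coro.close()  # avoid the "coroutine was never awaited" warning
```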
2,923 | def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
return _vectorize_parameters(
functools.partial(_hvp, f, params),
params,
use_pfor=use_pfor,
dtype=dtype,
)
| Computes the full Hessian matrix for the scalar-valued f(*params).
Args:
f: A function taking `params` and returning a scalar.
params: A possibly nested structure of tensors.
use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.
dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes
(e.g. `tf.float32`) matching the structure of `f`'s returns.
Returns:
A possibly nested structure of matrix slices corresponding to `params`. Each
slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)
in the corresponding element of `params` and `P` is the total number of
parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating
along the second axis.
| 105 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
return _vectorize_parameters(
functools.partial(_hvp, f, params),
params,
use_pfor=use_pfor,
dtype=dtype,
)
```
###Assistant : Computes the full Hessian matrix for the scalar-valued f(*params).
Args:
f: A function taking `params` and returning a scalar.
params: A possibly nested structure of tensors.
use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.
dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes
(e.g. `tf.float32`) matching the structure of `f`'s returns.
Returns:
A possibly nested structure of matrix slices corresponding to `params`. Each
slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)
in the corresponding element of `params` and `P` is the total number of
parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating
along the second axis.
|
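A hedged sketch of the forward-over-back Hessian-vector product that `_hvp` presumably computes (the helper itself is not shown in the row): a `ForwardAccumulator` (JVP) wrapped around a `GradientTape` (VJP) yields `H @ v` in one pass.
```Python
import tensorflow as tf

def hvp(f, x, v):
    with tf.autodiff.ForwardAccumulator(primals=x, tangents=v) as acc:
        with tf.GradientTape() as tape:
            tape.watch(x)
            y = f(x)
        grad = tape.gradient(y, x)   # backward pass: dy/dx
    return acc.jvp(grad)             # forward pass through the gradient: H @ v

x = tf.constant([1.0, 2.0])
v = tf.constant([1.0, 0.0])
print(hvp(lambda t: tf.reduce_sum(t ** 3), x, v))  # [6. 0.]: first Hessian column
```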
2,924 | def predict_proba(self, X):
check_is_fitted(self)
# TODO(1.3): Remove "log"
if self.loss in ("log_loss", "log"):
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = len(self.classes_) == 2
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.0
prob /= 2.0
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = prob_sum == 0
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError(
"predict_(log_)proba only supported when"
" loss='log_loss' or loss='modified_huber' "
"(%r given)"
% self.loss
)
| Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
https://dl.acm.org/doi/pdf/10.1145/775047.775151
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
| 138 | 125 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def predict_proba(self, X):
check_is_fitted(self)
# TODO(1.3): Remove "log"
if self.loss in ("log_loss", "log"):
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = len(self.classes_) == 2
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.0
prob /= 2.0
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = prob_sum == 0
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError(
"predict_(log_)proba only supported when"
" loss='log_loss' or loss='modified_huber' "
"(%r given)"
% self.loss
)
```
###Assistant : Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
https://dl.acm.org/doi/pdf/10.1145/775047.775151
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
|
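A worked example of the `modified_huber` mapping in the binary case, as described in the docstring: positive-class probabilities are `(clip(score, -1, 1) + 1) / 2`.
```Python
import numpy as np

scores = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
p_pos = (np.clip(scores, -1, 1) + 1) / 2
print(p_pos)  # [0.   0.25 0.5  0.75 1.  ]
```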
2,925 | def fetch_jwks(jwks_url) -> Optional[dict]:
response = None
try:
response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
response.raise_for_status()
jwks = response.json()
except requests.exceptions.RequestException:
logger.exception("Unable to fetch jwks from %s", jwks_url)
raise AuthenticationError("Unable to finalize the authentication process.")
except json.JSONDecodeError:
content = response.content if response else "Unable to find the response"
logger.exception(
"Unable to decode the response from auth service with jwks. "
"Response: %s",
content,
)
raise AuthenticationError("Unable to finalize the authentication process.")
keys = jwks.get("keys", [])
if not keys:
logger.warning("List of JWKS keys is empty")
cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)
return keys
| Fetch JSON Web Key Sets from a provider.
Fetched keys will be stored in the cache to reduce the number of possible
requests.
:raises AuthenticationError
| 25 | 86 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fetch_jwks(jwks_url) -> Optional[dict]:
response = None
try:
response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
response.raise_for_status()
jwks = response.json()
except requests.exceptions.RequestException:
logger.exception("Unable to fetch jwks from %s", jwks_url)
raise AuthenticationError("Unable to finalize the authentication process.")
except json.JSONDecodeError:
content = response.content if response else "Unable to find the response"
logger.exception(
"Unable to decode the response from auth service with jwks. "
"Response: %s",
content,
)
raise AuthenticationError("Unable to finalize the authentication process.")
keys = jwks.get("keys", [])
if not keys:
logger.warning("List of JWKS keys is empty")
cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)
return keys
```
###Assistant : Fetch JSON Web Key Sets from a provider.
Fetched keys will be stored in the cache to reduce the number of possible
requests.
:raises AuthenticationError
|
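A hedged sketch of what the fetched JWKS keys are typically used for, verifying an incoming token. This uses PyJWT's built-in JWKS client (PyJWT >= 2.0) rather than the custom fetcher above; the URL and algorithm are assumptions.
```Python
import jwt  # PyJWT >= 2.0

def verify(token: str, jwks_url: str) -> dict:
    # PyJWKClient fetches and caches the key set, much like fetch_jwks above.
    signing_key = jwt.PyJWKClient(jwks_url).get_signing_key_from_jwt(token)
    return jwt.decode(token, signing_key.key, algorithms=["RS256"])
```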