code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def add_route(app, fn, context=default_context):
"""
a decorator that adds a transmute route to the application.
"""
transmute_func = TransmuteFunction(
fn,
args_not_from_request=["request"]
)
handler = create_handler(transmute_func, context=context)
get_swagger_spec(app).add_func(transmute_func, context)
for p in transmute_func.paths:
aiohttp_path = _convert_to_aiohttp_path(p)
resource = app.router.add_resource(aiohttp_path)
for method in transmute_func.methods:
resource.add_route(method, handler) | a decorator that adds a transmute route to the application. | Below is the instruction that describes the task:
### Input:
a decorator that adds a transmute route to the application.
### Response:
def add_route(app, fn, context=default_context):
"""
a decorator that adds a transmute route to the application.
"""
transmute_func = TransmuteFunction(
fn,
args_not_from_request=["request"]
)
handler = create_handler(transmute_func, context=context)
get_swagger_spec(app).add_func(transmute_func, context)
for p in transmute_func.paths:
aiohttp_path = _convert_to_aiohttp_path(p)
resource = app.router.add_resource(aiohttp_path)
for method in transmute_func.methods:
resource.add_route(method, handler) |
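A minimal usage sketch of the registration flow above (hypothetical handler; assumes an aiohttp web.Application and that the function's paths/methods have been declared in the usual transmute way, e.g. with a describe-style decorator, before add_route inspects it):

from aiohttp import web

async def multiply(request, left: int, right: int) -> int:
    # "request" is listed in args_not_from_request, so it is injected as-is
    return left * right

app = web.Application()
add_route(app, multiply)   # registers the swagger entry plus one resource per declared path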
def get_roles_for_permission(permission, brain_or_object):
"""Return the roles of the permission that is granted on the object
Code extracted from `IRoleManager.rolesOfPermission`
:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission
"""
obj = api.get_object(brain_or_object)
valid_roles = get_valid_roles_for(obj)
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
# found the requested permission
if name == permission:
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
roles = permission.getRoles()
# return only valid roles that have the permission granted
return filter(lambda r: r in valid_roles, roles)
# Raise an error if the permission is invalid
raise ValueError("The permission {} is invalid.".format(permission)) | Return the roles of the permission that is granted on the object
Code extracted from `IRoleManager.rolesOfPermission`
:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission | Below is the instruction that describes the task:
### Input:
Return the roles of the permission that is granted on the object
Code extracted from `IRoleManager.rolesOfPermission`
:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission
### Response:
def get_roles_for_permission(permission, brain_or_object):
"""Return the roles of the permission that is granted on the object
Code extracted from `IRoleManager.rolesOfPermission`
:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission
"""
obj = api.get_object(brain_or_object)
valid_roles = get_valid_roles_for(obj)
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
# found the requested permission
if name == permission:
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
roles = permission.getRoles()
# return only valid roles that have the permission granted
return filter(lambda r: r in valid_roles, roles)
# Raise an error if the permission is invalid
raise ValueError("The permission {} is invalid.".format(permission)) |
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,)) | Remove item from six.moves. | Below is the the instruction that describes the task:
### Input:
Remove item from six.moves.
### Response:
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,)) |
def from_start_and_end(cls, start, end, aa=None, helix_type='alpha'):
"""Creates a `Helix` between `start` and `end`.
Parameters
----------
start : 3D Vector (tuple or list or numpy.array)
The coordinate of the start of the helix primitive.
end : 3D Vector (tuple or list or numpy.array)
The coordinate of the end of the helix primitive.
aa : int, optional
Number of amino acids in the `Helix`. If `None`, an
appropriate number of residues are added.
helix_type : str, optional
Type of helix, can be: 'alpha', 'pi', '3-10',
'PPI', 'PPII', 'collagen'.
"""
start = numpy.array(start)
end = numpy.array(end)
if aa is None:
rise_per_residue = _helix_parameters[helix_type][1]
aa = int((numpy.linalg.norm(end - start) / rise_per_residue) + 1)
instance = cls(aa=aa, helix_type=helix_type)
instance.move_to(start=start, end=end)
return instance | Creates a `Helix` between `start` and `end`.
Parameters
----------
start : 3D Vector (tuple or list or numpy.array)
The coordinate of the start of the helix primitive.
end : 3D Vector (tuple or list or numpy.array)
The coordinate of the end of the helix primitive.
aa : int, optional
Number of amino acids in the `Helix`. If `None`, an
appropriate number of residues are added.
helix_type : str, optional
Type of helix, can be: 'alpha', 'pi', '3-10',
'PPI', 'PPII', 'collagen'. | Below is the instruction that describes the task:
### Input:
Creates a `Helix` between `start` and `end`.
Parameters
----------
start : 3D Vector (tuple or list or numpy.array)
The coordinate of the start of the helix primitive.
end : 3D Vector (tuple or list or numpy.array)
The coordinate of the end of the helix primitive.
aa : int, optional
Number of amino acids in the `Helix`. If `None`, an
appropriate number of residues are added.
helix_type : str, optional
Type of helix, can be: 'alpha', 'pi', '3-10',
'PPI', 'PPII', 'collagen'.
### Response:
def from_start_and_end(cls, start, end, aa=None, helix_type='alpha'):
"""Creates a `Helix` between `start` and `end`.
Parameters
----------
start : 3D Vector (tuple or list or numpy.array)
The coordinate of the start of the helix primitive.
end : 3D Vector (tuple or list or numpy.array)
The coordinate of the end of the helix primitive.
aa : int, optional
Number of amino acids in the `Helix`. If `None`, an
appropriate number of residues are added.
helix_type : str, optional
Type of helix, can be: 'alpha', 'pi', '3-10',
'PPI', 'PPII', 'collagen'.
"""
start = numpy.array(start)
end = numpy.array(end)
if aa is None:
rise_per_residue = _helix_parameters[helix_type][1]
aa = int((numpy.linalg.norm(end - start) / rise_per_residue) + 1)
instance = cls(aa=aa, helix_type=helix_type)
instance.move_to(start=start, end=end)
return instance |
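A rough usage sketch (hypothetical: assumes Helix is the ISAMBARD-style class this classmethod belongs to, and that _helix_parameters stores rise per residue as the second tuple element, roughly 1.5 Å for an alpha helix):

import numpy

start = numpy.array([0.0, 0.0, 0.0])
end = numpy.array([0.0, 0.0, 15.0])
# aa is omitted, so roughly 15 / 1.5 + 1 = 11 residues are generated
helix = Helix.from_start_and_end(start, end, helix_type='alpha')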
def liftover(self, intersecting_region):
"""
Lift a region that overlaps the genomic occurrence of the retrotransposon
to consensus sequence co-ordinates. This method will behave differently
depending on whether this retrotransposon occurrence contains a full
alignment or not. If it does, the alignment is used to do the liftover and
an exact result is provided. If it does not, the coordinates are used to
do the liftover, padding either the genomic region or consensus sequence
(whichever is shorter) with equally spaced gaps to make the size of both
match.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
"""
# a little sanity check here to make sure intersecting_region really does..
if not self.intersects(intersecting_region):
raise RetrotransposonError("trying to lift " + str(intersecting_region) +
" from genomic to transposon coordinates " +
"in " + str(self) + ", but it doesn't " +
"intersect!")
if self.pairwise_alignment is not None:
return self.pairwise_alignment.liftover(self.chrom, self.repeat_name(),
intersecting_region.start,
intersecting_region.end,
trim=True)
return self.liftover_coordinates(intersecting_region) | Lift a region that overlaps the genomic occurrence of the retrotransposon
to consensus sequence co-ordinates. This method will behave differently
depending on whether this retrotransposon occurrence contains a full
alignment or not. If it does, the alignment is used to do the liftover and
an exact result is provided. If it does not, the coordinates are used to
do the liftover, padding either the genomic region or consensus sequence
(whichever is shorter) with equally spaced gaps to make the size of both
match.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval. | Below is the instruction that describes the task:
### Input:
Lift a region that overlaps the genomic occurrence of the retrotransposon
to consensus sequence co-ordinates. This method will behave differently
depending on whether this retrotransposon occurrence contains a full
alignment or not. If it does, the alignment is used to do the liftover and
an exact result is provided. If it does not, the coordinates are used to
do the liftover, padding either the genomic region or consensus sequence
(whichever is shorter) with equally spaced gaps to make the size of both
match.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
### Response:
def liftover(self, intersecting_region):
"""
Lift a region that overlaps the genomic occurrence of the retrotransposon
to consensus sequence co-ordinates. This method will behave differently
depending on whether this retrotransposon occurrence contains a full
alignment or not. If it does, the alignment is used to do the liftover and
an exact result is provided. If it does not, the coordinates are used to
do the liftover, padding either the genomic region or consensus sequence
(whichever is shorter) with equally spaced gaps to make the size of both
match.
:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
"""
# a little sanity check here to make sure intersecting_region really does..
if not self.intersects(intersecting_region):
raise RetrotransposonError("trying to lift " + str(intersecting_region) +
" from genomic to transposon coordinates " +
"in " + str(self) + ", but it doesn't " +
"intersect!")
if self.pairwise_alignment is not None:
return self.pairwise_alignment.liftover(self.chrom, self.repeat_name(),
intersecting_region.start,
intersecting_region.end,
trim=True)
return self.liftover_coordinates(intersecting_region) |
def otu_iter_nexson_proxy(nexson_proxy, otu_sort=None):
"""otu_sort can be None (not sorted or stable), True (sorted by ID lexigraphically)
or a key function for a sort function on list of otuIDs
Note that if there are multiple OTU groups, the NexSON specifies the order of sorting
of the groups (so the sort argument here only refers to the sorting of OTUs within
a group)
"""
nexml_el = nexson_proxy._nexml_el
og_order = nexml_el['^ot:otusElementOrder']
ogd = nexml_el['otusById']
for og_id in og_order:
og = ogd[og_id]
if otu_sort is None:
for k, v in og.items():
yield nexson_proxy._create_otu_proxy(k, v)
else:
key_list = list(og.keys())
if otu_sort is True:
key_list.sort()
else:
key_list.sort(key=otu_sort)
for k in key_list:
v = og[k]
yield nexson_proxy._create_otu_proxy(k, v) | otu_sort can be None (not sorted or stable), True (sorted by ID lexicographically)
or a key function for a sort function on list of otuIDs
Note that if there are multiple OTU groups, the NexSON specifies the order of sorting
of the groups (so the sort argument here only refers to the sorting of OTUs within
a group) | Below is the instruction that describes the task:
### Input:
otu_sort can be None (not sorted or stable), True (sorted by ID lexicographically)
or a key function for a sort function on list of otuIDs
Note that if there are multiple OTU groups, the NexSON specifies the order of sorting
of the groups (so the sort argument here only refers to the sorting of OTUs within
a group)
### Response:
def otu_iter_nexson_proxy(nexson_proxy, otu_sort=None):
"""otu_sort can be None (not sorted or stable), True (sorted by ID lexigraphically)
or a key function for a sort function on list of otuIDs
Note that if there are multiple OTU groups, the NexSON specifies the order of sorting
of the groups (so the sort argument here only refers to the sorting of OTUs within
a group)
"""
nexml_el = nexson_proxy._nexml_el
og_order = nexml_el['^ot:otusElementOrder']
ogd = nexml_el['otusById']
for og_id in og_order:
og = ogd[og_id]
if otu_sort is None:
for k, v in og.items():
yield nexson_proxy._create_otu_proxy(k, v)
else:
key_list = list(og.keys())
if otu_sort is True:
key_list.sort()
else:
key_list.sort(key=otu_sort)
for k in key_list:
v = og[k]
yield nexson_proxy._create_otu_proxy(k, v) |
def get_cds_ranges_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=cds".format(transcript_id)
r = self.ensembl_request(ext, headers)
cds_ranges = []
for cds_range in json.loads(r):
if cds_range["Parent"] != transcript_id:
continue
start = cds_range["start"]
end = cds_range["end"]
cds_ranges.append((start, end))
return cds_ranges | obtain the sequence for a transcript from ensembl | Below is the instruction that describes the task:
### Input:
obtain the sequence for a transcript from ensembl
### Response:
def get_cds_ranges_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=cds".format(transcript_id)
r = self.ensembl_request(ext, headers)
cds_ranges = []
for cds_range in json.loads(r):
if cds_range["Parent"] != transcript_id:
continue
start = cds_range["start"]
end = cds_range["end"]
cds_ranges.append((start, end))
return cds_ranges |
def last_available_business_date(self, asset_manager_id, asset_ids, page_no=None, page_size=None):
"""
Returns the last available business date for the assets so we know the
starting date for new data which needs to be downloaded from data providers.
This method can only be invoked by system user
"""
self.logger.info('Retrieving last available business dates for assets')
url = '%s/last-available-business-date' % self.endpoint
params = {'asset_manager_ids': [asset_manager_id],
'asset_ids': ','.join(asset_ids)}
if page_no:
params['page_no'] = page_no
if page_size:
params['page_size'] = page_size
response = self.session.get(url, params=params)
if response.ok:
self.logger.info("Received %s assets' last available business date", len(response.json()))
return response.json()
else:
self.logger.error(response.text)
response.raise_for_status() | Returns the last available business date for the assets so we know the
starting date for new data which needs to be downloaded from data providers.
This method can only be invoked by system user | Below is the instruction that describes the task:
### Input:
Returns the last available business date for the assets so we know the
starting date for new data which needs to be downloaded from data providers.
This method can only be invoked by system user
### Response:
def last_available_business_date(self, asset_manager_id, asset_ids, page_no=None, page_size=None):
"""
Returns the last available business date for the assets so we know the
starting date for new data which needs to be downloaded from data providers.
This method can only be invoked by system user
"""
self.logger.info('Retrieving last available business dates for assets')
url = '%s/last-available-business-date' % self.endpoint
params = {'asset_manager_ids': [asset_manager_id],
'asset_ids': ','.join(asset_ids)}
if page_no:
params['page_no'] = page_no
if page_size:
params['page_size'] = page_size
response = self.session.get(url, params=params)
if response.ok:
self.logger.info("Received %s assets' last available business date", len(response.json()))
return response.json()
else:
self.logger.error(response.text)
response.raise_for_status() |
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern | returns the pattern specified in a response schema | Below is the instruction that describes the task:
### Input:
returns the pattern specified in a response schema
### Response:
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern |
def release(self, conn):
"""Release a previously acquired connection.
The connection is put back into the pool."""
self._pool_lock.acquire()
self._pool.put(ConnectionWrapper(self._pool, conn))
self._current_acquired -= 1
self._pool_lock.release() | Release a previously acquired connection.
The connection is put back into the pool. | Below is the instruction that describes the task:
### Input:
Release a previously acquired connection.
The connection is put back into the pool.
### Response:
def release(self, conn):
"""Release a previously acquired connection.
The connection is put back into the pool."""
self._pool_lock.acquire()
self._pool.put(ConnectionWrapper(self._pool, conn))
self._current_acquired -= 1
self._pool_lock.release() |
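A self-contained sketch of the same queue-plus-lock pattern (simplified: no ConnectionWrapper, plain strings stand in for connections, and the matching acquire() is assumed to mirror release()):

import queue
import threading

class TinyPool:
    def __init__(self, conns):
        self._pool = queue.Queue()
        self._pool_lock = threading.Lock()
        self._current_acquired = 0
        for c in conns:
            self._pool.put(c)

    def acquire(self):
        with self._pool_lock:
            conn = self._pool.get()
            self._current_acquired += 1
            return conn

    def release(self, conn):
        with self._pool_lock:
            self._pool.put(conn)
            self._current_acquired -= 1

pool = TinyPool(["conn-a", "conn-b"])
c = pool.acquire()
pool.release(c)
print(pool._current_acquired)   # -> 0, the connection is back in the queue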
def cli(env, volume_id, reason, immediate):
"""Cancel existing snapshot space for a given volume."""
file_storage_manager = SoftLayer.FileStorageManager(env.client)
if not (env.skip_confirmations or formatting.no_going_back(volume_id)):
raise exceptions.CLIAbort('Aborted')
cancelled = file_storage_manager.cancel_snapshot_space(
volume_id, reason, immediate)
if cancelled:
if immediate:
click.echo('File volume with id %s has been marked'
' for immediate snapshot cancellation' % volume_id)
else:
click.echo('File volume with id %s has been marked'
' for snapshot cancellation' % volume_id)
else:
click.echo('Unable to cancel snapshot space for file volume %s'
% volume_id) | Cancel existing snapshot space for a given volume. | Below is the instruction that describes the task:
### Input:
Cancel existing snapshot space for a given volume.
### Response:
def cli(env, volume_id, reason, immediate):
"""Cancel existing snapshot space for a given volume."""
file_storage_manager = SoftLayer.FileStorageManager(env.client)
if not (env.skip_confirmations or formatting.no_going_back(volume_id)):
raise exceptions.CLIAbort('Aborted')
cancelled = file_storage_manager.cancel_snapshot_space(
volume_id, reason, immediate)
if cancelled:
if immediate:
click.echo('File volume with id %s has been marked'
' for immediate snapshot cancellation' % volume_id)
else:
click.echo('File volume with id %s has been marked'
' for snapshot cancellation' % volume_id)
else:
click.echo('Unable to cancel snapshot space for file volume %s'
% volume_id) |
def request_add_sensor(self, sock, msg):
""" add a sensor
"""
self.add_sensor(Sensor(int, 'int_sensor%d' % len(self._sensors),
'descr', 'unit', params=[-10, 10]))
return Message.reply('add-sensor', 'ok') | add a sensor | Below is the instruction that describes the task:
### Input:
add a sensor
### Response:
def request_add_sensor(self, sock, msg):
""" add a sensor
"""
self.add_sensor(Sensor(int, 'int_sensor%d' % len(self._sensors),
'descr', 'unit', params=[-10, 10]))
return Message.reply('add-sensor', 'ok') |
def fullName(self):
""" Returns a reliable full name (firstName lastName) for every
member (as of the writing of this comment.)
"""
if self.givenName and self.sn:
return "{0} {1}".format(self.givenName, self.sn)
if self.givenName:
return self.givenName
if self.sn:
return self.sn
return self.uid | Returns a reliable full name (firstName lastName) for every
member (as of the writing of this comment.) | Below is the instruction that describes the task:
### Input:
Returns a reliable full name (firstName lastName) for every
member (as of the writing of this comment.)
### Response:
def fullName(self):
""" Returns a reliable full name (firstName lastName) for every
member (as of the writing of this comment.)
"""
if self.givenName and self.sn:
return "{0} {1}".format(self.givenName, self.sn)
if self.givenName:
return self.givenName
if self.sn:
return self.sn
return self.uid |
def custom_parser(cards: list, parser: Optional[Callable[[list], Optional[list]]]=None) -> Optional[list]:
'''parser for CUSTOM [1] issue mode,
please provide your custom parser as argument'''
if not parser:
return cards
else:
return parser(cards) | parser for CUSTOM [1] issue mode,
please provide your custom parser as argument | Below is the instruction that describes the task:
### Input:
parser for CUSTOM [1] issue mode,
please provide your custom parser as argument
### Response:
def custom_parser(cards: list, parser: Optional[Callable[[list], Optional[list]]]=None) -> Optional[list]:
'''parser for CUSTOM [1] issue mode,
please provide your custom parser as argument'''
if not parser:
return cards
else:
return parser(cards) |
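A small illustration of passing a custom parser callable (hypothetical: real card objects are not plain dicts, but only the filtering idea matters here):

def only_issues(cards: list) -> list:
    # keep cards whose (assumed) "type" key marks them as issuance cards
    return [c for c in cards if c.get("type") == "CardIssue"]

cards = [{"type": "CardIssue", "amount": 5}, {"type": "CardTransfer", "amount": 2}]
print(custom_parser(cards, parser=only_issues))   # -> only the CardIssue entry
print(custom_parser(cards))                       # no parser: list returned unchanged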
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._file_data is None or self._current_offset >= self._size:
return b''
if size is None:
size = self._size
if self._current_offset + size > self._size:
size = self._size - self._current_offset
start_offset = self._current_offset
self._current_offset += size
return self._file_data[start_offset:self._current_offset] | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | Below is the instruction that describes the task:
### Input:
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
### Response:
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._file_data is None or self._current_offset >= self._size:
return b''
if size is None:
size = self._size
if self._current_offset + size > self._size:
size = self._size - self._current_offset
start_offset = self._current_offset
self._current_offset += size
return self._file_data[start_offset:self._current_offset] |
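A self-contained sketch of the same clamping rules the method applies (a hypothetical minimal reader, not the original file-like class, and without the open/offset error checks):

class BytesReader:
    def __init__(self, data):
        self._data = data
        self._offset = 0

    def read(self, size=None):
        # bound the requested size by the data that remains after the offset
        if size is None or self._offset + size > len(self._data):
            size = len(self._data) - self._offset
        start = self._offset
        self._offset += size
        return self._data[start:self._offset]

r = BytesReader(b"abcdef")
assert r.read(4) == b"abcd"   # bounded read advances the offset
assert r.read() == b"ef"      # size=None reads the remainder
assert r.read(10) == b""      # at EOF every further read returns b''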
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
"""
most_frequent_list = self._get_most_frequent_values(rows, column)
if not most_frequent_list:
return 0.0 # type: ignore
most_frequent_value = most_frequent_list[0]
if not isinstance(most_frequent_value, Number):
raise ExecutionError(f"Invalid valus for mode_number: {most_frequent_value}")
return most_frequent_value | Takes a list of rows and a column and returns the most frequent value under
that column in those rows. | Below is the instruction that describes the task:
### Input:
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
### Response:
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
"""
most_frequent_list = self._get_most_frequent_values(rows, column)
if not most_frequent_list:
return 0.0 # type: ignore
most_frequent_value = most_frequent_list[0]
if not isinstance(most_frequent_value, Number):
raise ExecutionError(f"Invalid valus for mode_number: {most_frequent_value}")
return most_frequent_value |
def wploader(self):
'''per-sysid wploader'''
if self.target_system not in self.wploader_by_sysid:
self.wploader_by_sysid[self.target_system] = mavwp.MAVWPLoader()
return self.wploader_by_sysid[self.target_system] | per-sysid wploader | Below is the instruction that describes the task:
### Input:
per-sysid wploader
### Response:
def wploader(self):
'''per-sysid wploader'''
if self.target_system not in self.wploader_by_sysid:
self.wploader_by_sysid[self.target_system] = mavwp.MAVWPLoader()
return self.wploader_by_sysid[self.target_system] |
def transform_qubits(self: TSelf_Operation,
func: Callable[[Qid], Qid]) -> TSelf_Operation:
"""Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
"""
return self.with_qubits(*(func(q) for q in self.qubits)) | Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function. | Below is the instruction that describes the task:
### Input:
Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
### Response:
def transform_qubits(self: TSelf_Operation,
func: Callable[[Qid], Qid]) -> TSelf_Operation:
"""Returns the same operation, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving operation but with qubits transformed by the given
function.
"""
return self.with_qubits(*(func(q) for q in self.qubits)) |
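A hedged usage sketch (assumes a Cirq-style environment where LineQubit, CNOT, and transform_qubits are available as in the cirq package):

import cirq

q0, q1 = cirq.LineQubit.range(2)
op = cirq.CNOT(q0, q1)
# shift every qubit of the operation by ten positions
shifted = op.transform_qubits(lambda q: cirq.LineQubit(q.x + 10))
print(shifted.qubits)   # -> (cirq.LineQubit(10), cirq.LineQubit(11))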
def make_name(self):
"""Autogenerates a :attr:`name` from :attr:`title_for_name`"""
if self.title:
self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__)) | Autogenerates a :attr:`name` from :attr:`title_for_name` | Below is the instruction that describes the task:
### Input:
Autogenerates a :attr:`name` from :attr:`title_for_name`
### Response:
def make_name(self):
"""Autogenerates a :attr:`name` from :attr:`title_for_name`"""
if self.title:
self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__)) |
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif ((self.is_float or self.is_complex) and
(is_integer_dtype(dtype) or is_float_dtype(dtype))):
# don't coerce float/complex to int
return self
elif (self.is_datetime or
is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)):
# not a datetime
if not ((is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)) and self.is_datetime):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, 'tz', None)
othertz = getattr(dtype, 'tz', None)
if str(mytz) != str(othertz):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
elif (self.is_timedelta or is_timedelta64_dtype(dtype)):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
try:
return self.astype(dtype)
except (ValueError, TypeError, OverflowError):
pass
return self.astype(object) | coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block | Below is the instruction that describes the task:
### Input:
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
### Response:
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif ((self.is_float or self.is_complex) and
(is_integer_dtype(dtype) or is_float_dtype(dtype))):
# don't coerce float/complex to int
return self
elif (self.is_datetime or
is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)):
# not a datetime
if not ((is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)) and self.is_datetime):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, 'tz', None)
othertz = getattr(dtype, 'tz', None)
if str(mytz) != str(othertz):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
elif (self.is_timedelta or is_timedelta64_dtype(dtype)):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
try:
return self.astype(dtype)
except (ValueError, TypeError, OverflowError):
pass
return self.astype(object) |
def LinearContrast(alpha=1, per_channel=False, name=None, deterministic=False, random_state=None):
"""Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or invert (<0.0) the
difference between each pixel value and the center value, e.g. ``127`` for ``uint8``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform contrast adjustment by linearly scaling the distance to 128.
"""
params1d = [
iap.handle_continuous_param(alpha, "alpha", value_range=None, tuple_to_uniform=True, list_to_choice=True)
]
func = adjust_contrast_linear
return _ContrastFuncWrapper(
func, params1d, per_channel,
dtypes_allowed=["uint8", "uint16", "uint32",
"int8", "int16", "int32",
"float16", "float32", "float64"],
dtypes_disallowed=["uint64", "int64", "float96", "float128", "float256", "bool"],
name=name if name is not None else ia.caller_name(),
deterministic=deterministic,
random_state=random_state
) | Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or invert (<0.0) the
difference between each pixel value and the center value, e.g. ``127`` for ``uint8``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform contrast adjustment by linearly scaling the distance to 128. | Below is the instruction that describes the task:
### Input:
Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or invert (<0.0) the
difference between each pixel value and the center value, e.g. ``127`` for ``uint8``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform contrast adjustment by linearly scaling the distance to 128.
### Response:
def LinearContrast(alpha=1, per_channel=False, name=None, deterministic=False, random_state=None):
"""Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or invert (<0.0) the
difference between each pixel value and the center value, e.g. ``127`` for ``uint8``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform contrast adjustment by linearly scaling the distance to 128.
"""
params1d = [
iap.handle_continuous_param(alpha, "alpha", value_range=None, tuple_to_uniform=True, list_to_choice=True)
]
func = adjust_contrast_linear
return _ContrastFuncWrapper(
func, params1d, per_channel,
dtypes_allowed=["uint8", "uint16", "uint32",
"int8", "int16", "int32",
"float16", "float32", "float64"],
dtypes_disallowed=["uint64", "int64", "float96", "float128", "float256", "bool"],
name=name if name is not None else ia.caller_name(),
deterministic=deterministic,
random_state=random_state
) |
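To make the scaling rule concrete, a short standalone numpy illustration of 127 + alpha*(I_ij - 127) on a handful of uint8 values (hypothetical numbers, independent of imgaug; in imgaug itself the augmenter is typically instantiated as iaa.LinearContrast((0.75, 1.5)) and applied to a batch of images):

import numpy as np

pixels = np.array([0, 64, 127, 190, 255], dtype=np.float64)
alpha = 1.5
adjusted = np.clip(127 + alpha * (pixels - 127), 0, 255).astype(np.uint8)
print(adjusted)   # values near 127 barely move; values far from 127 are pushed outward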
def declare_list(self, name, sep=os.pathsep):
"""
Declare an environment variable as a list-like special variable.
This can be used even if the environment variable is not
present.
:param name: The name of the environment variable that should
be considered list-like.
:param sep: The separator to be used. Defaults to the value
of ``os.pathsep``.
"""
self._declare_special(name, sep, ListVariable) | Declare an environment variable as a list-like special variable.
This can be used even if the environment variable is not
present.
:param name: The name of the environment variable that should
be considered list-like.
:param sep: The separator to be used. Defaults to the value
of ``os.pathsep``. | Below is the instruction that describes the task:
### Input:
Declare an environment variable as a list-like special variable.
This can be used even if the environment variable is not
present.
:param name: The name of the environment variable that should
be considered list-like.
:param sep: The separator to be used. Defaults to the value
of ``os.pathsep``.
### Response:
def declare_list(self, name, sep=os.pathsep):
"""
Declare an environment variable as a list-like special variable.
This can be used even if the environment variable is not
present.
:param name: The name of the environment variable that should
be considered list-like.
:param sep: The separator to be used. Defaults to the value
of ``os.pathsep``.
"""
self._declare_special(name, sep, ListVariable) |
def _restore_stdout(self):
"""Unhook stdout and stderr if buffering is enabled.
"""
if self.buffer:
if self._mirror_output:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate() | Unhook stdout and stderr if buffering is enabled. | Below is the instruction that describes the task:
### Input:
Unhook stdout and stderr if buffering is enabled.
### Response:
def _restore_stdout(self):
"""Unhook stdout and stderr if buffering is enabled.
"""
if self.buffer:
if self._mirror_output:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate() |
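A self-contained sketch of the swap-and-restore idea the method undoes (simplified: one stream only, and no mirroring of captured output back to the real terminal):

import io
import sys

original_stdout = sys.stdout
sys.stdout = io.StringIO()            # start buffering, as the test runner does
print("captured during the test")     # lands in the buffer, not on the terminal
buffered = sys.stdout.getvalue()
sys.stdout = original_stdout          # restore, mirroring what _restore_stdout does
print(repr(buffered))                 # -> 'captured during the test\n'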
def prompt_save_images(args):
"""Prompt user to save images when crawling (for pdf and HTML formats)."""
if args['images'] or args['no_images']:
return
if (args['pdf'] or args['html']) and (args['crawl'] or args['crawl_all']):
save_msg = ('Choosing to save images will greatly slow the'
' crawling process.\nSave images anyways? (y/n): ')
try:
save_images = utils.confirm_input(input(save_msg))
except (KeyboardInterrupt, EOFError):
return
args['images'] = save_images
args['no_images'] = not save_images | Prompt user to save images when crawling (for pdf and HTML formats). | Below is the instruction that describes the task:
### Input:
Prompt user to save images when crawling (for pdf and HTML formats).
### Response:
def prompt_save_images(args):
"""Prompt user to save images when crawling (for pdf and HTML formats)."""
if args['images'] or args['no_images']:
return
if (args['pdf'] or args['html']) and (args['crawl'] or args['crawl_all']):
save_msg = ('Choosing to save images will greatly slow the'
' crawling process.\nSave images anyways? (y/n): ')
try:
save_images = utils.confirm_input(input(save_msg))
except (KeyboardInterrupt, EOFError):
return
args['images'] = save_images
args['no_images'] = not save_images |
def IV(abf,T1,T2,plotToo=True,color='b'):
"""
Given two time points (seconds) return IV data.
Optionally plots a fancy graph (with errorbars)
Returns [[AV],[SD]] for the given range.
"""
rangeData=abf.average_data([[T1,T2]]) #get the average data per sweep
AV,SD=rangeData[:,0,0],rangeData[:,0,1] #separate by average and SD
Xs=abf.clampValues(T1) #get clamp values at time point T1
if plotToo:
new(abf) #do this so it's the right shape and size
# plot the original sweep
pylab.subplot(221)
pylab.title("sweep data")
pylab.xlabel("time (s)")
pylab.ylabel("Measurement (%s)"%abf.units)
sweep(abf,'all',protocol=False)
pylab.axis([None,None,np.min(rangeData)-50,np.max(rangeData)+50])
pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
pylab.margins(0,.1)
# plot the data zoomed in
pylab.subplot(223)
pylab.title("measurement region")
pylab.xlabel("time (s)")
pylab.ylabel("Measurement (%s)"%abf.units)
sweep(abf,'all',protocol=False)
pylab.axis([T1-.05,T2+.05,np.min(rangeData)-50,np.max(rangeData)+50])
pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
pylab.margins(0,.1)
# plot the protocol
pylab.subplot(222)
pylab.title("protocol")
pylab.xlabel("time (s)")
pylab.ylabel("Command (%s)"%abf.unitsCommand)
sweep(abf,'all',protocol=True)
pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
pylab.margins(0,.1)
# plot the I/V
pylab.subplot(224)
pylab.grid(alpha=.5)
pylab.title("command / measure relationship")
pylab.xlabel("Command (%s)"%abf.unitsCommand)
pylab.ylabel("Measurement (%s)"%abf.units)
pylab.errorbar(Xs,AV,SD,capsize=0,marker='.',color=color)
if abf.units=="pA":
pylab.axhline(0,alpha=.5,lw=2,color='r',ls="--")
pylab.axvline(-70,alpha=.5,lw=2,color='r',ls="--")
else:
pylab.axhline(-70,alpha=.5,lw=2,color='r',ls="--")
pylab.axvline(0,alpha=.5,lw=2,color='r',ls="--")
pylab.margins(.1,.1)
annotate(abf)
return AV,SD | Given two time points (seconds) return IV data.
Optionally plots a fancy graph (with errorbars)
Returns [[AV],[SD]] for the given range. | Below is the instruction that describes the task:
### Input:
Given two time points (seconds) return IV data.
Optionally plots a fancy graph (with errorbars)
Returns [[AV],[SD]] for the given range.
### Response:
def IV(abf,T1,T2,plotToo=True,color='b'):
"""
Given two time points (seconds) return IV data.
Optionally plots a fancy graph (with errorbars)
Returns [[AV],[SD]] for the given range.
"""
rangeData=abf.average_data([[T1,T2]]) #get the average data per sweep
AV,SD=rangeData[:,0,0],rangeData[:,0,1] #separate by average and SD
Xs=abf.clampValues(T1) #get clamp values at time point T1
if plotToo:
new(abf) #do this so it's the right shape and size
# plot the original sweep
pylab.subplot(221)
pylab.title("sweep data")
pylab.xlabel("time (s)")
pylab.ylabel("Measurement (%s)"%abf.units)
sweep(abf,'all',protocol=False)
pylab.axis([None,None,np.min(rangeData)-50,np.max(rangeData)+50])
pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
pylab.margins(0,.1)
# plot the data zoomed in
pylab.subplot(223)
pylab.title("measurement region")
pylab.xlabel("time (s)")
pylab.ylabel("Measurement (%s)"%abf.units)
sweep(abf,'all',protocol=False)
pylab.axis([T1-.05,T2+.05,np.min(rangeData)-50,np.max(rangeData)+50])
pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
pylab.margins(0,.1)
# plot the protocol
pylab.subplot(222)
pylab.title("protocol")
pylab.xlabel("time (s)")
pylab.ylabel("Command (%s)"%abf.unitsCommand)
sweep(abf,'all',protocol=True)
pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
pylab.margins(0,.1)
# plot the I/V
pylab.subplot(224)
pylab.grid(alpha=.5)
pylab.title("command / measure relationship")
pylab.xlabel("Command (%s)"%abf.unitsCommand)
pylab.ylabel("Measurement (%s)"%abf.units)
pylab.errorbar(Xs,AV,SD,capsize=0,marker='.',color=color)
if abf.units=="pA":
pylab.axhline(0,alpha=.5,lw=2,color='r',ls="--")
pylab.axvline(-70,alpha=.5,lw=2,color='r',ls="--")
else:
pylab.axhline(-70,alpha=.5,lw=2,color='r',ls="--")
pylab.axvline(0,alpha=.5,lw=2,color='r',ls="--")
pylab.margins(.1,.1)
annotate(abf)
return AV,SD |
def show_worst_drawdown_periods(returns, top=5):
"""
Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdown periods to plot (default 5).
"""
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
utils.print_table(
drawdown_df.sort_values('Net drawdown in %', ascending=False),
name='Worst drawdown periods',
float_format='{0:.2f}'.format,
) | Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdown periods to plot (default 5). | Below is the instruction that describes the task:
### Input:
Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdown periods to plot (default 5).
### Response:
def show_worst_drawdown_periods(returns, top=5):
"""
Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdown periods to plot (default 5).
"""
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
utils.print_table(
drawdown_df.sort_values('Net drawdown in %', ascending=False),
name='Worst drawdown periods',
float_format='{0:.2f}'.format,
) |
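A hedged usage sketch (assumes this function is exposed as in pyfolio and that the timeseries/utils helpers it calls are importable; the returns series here is synthetic):

import numpy as np
import pandas as pd

idx = pd.date_range("2020-01-01", periods=252, freq="B")
returns = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
show_worst_drawdown_periods(returns, top=3)   # prints a 3-row table of the deepest drawdowns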
def DeleteAttachment(self, attachment_link, options=None):
"""Deletes an attachment.
:param str attachment_link:
The link to the attachment.
:param dict options:
The request options for the request.
:return:
The deleted Attachment.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(attachment_link)
attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
return self.DeleteResource(path,
'attachments',
attachment_id,
None,
options) | Deletes an attachment.
:param str attachment_link:
The link to the attachment.
:param dict options:
The request options for the request.
:return:
The deleted Attachment.
:rtype:
dict | Below is the instruction that describes the task:
### Input:
Deletes an attachment.
:param str attachment_link:
The link to the attachment.
:param dict options:
The request options for the request.
:return:
The deleted Attachment.
:rtype:
dict
### Response:
def DeleteAttachment(self, attachment_link, options=None):
"""Deletes an attachment.
:param str attachment_link:
The link to the attachment.
:param dict options:
The request options for the request.
:return:
The deleted Attachment.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(attachment_link)
attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
return self.DeleteResource(path,
'attachments',
attachment_id,
None,
options) |
def read_index_iter(self):
"""Generator function that reads the file index from the vpk file
yields (file_path, metadata)
"""
with fopen(self.vpk_path, 'rb') as f:
f.seek(self.header_length)
while True:
if self.version > 0 and f.tell() > self.tree_length + self.header_length:
raise ValueError("Error parsing index (out of bounds)")
ext = _read_cstring(f)
if ext == '':
break
while True:
path = _read_cstring(f)
if path == '':
break
if path != ' ':
path = os.path.join(path, '')
else:
path = ''
while True:
name = _read_cstring(f)
if name == '':
break
(crc32,
preload_length,
archive_index,
archive_offset,
file_length,
suffix,
) = metadata = list(struct.unpack("IHHIIH", f.read(18)))
if suffix != 0xffff:
raise ValueError("Error while parsing index")
if archive_index == 0x7fff:
metadata[3] = self.header_length + self.tree_length + archive_offset
metadata = (f.read(preload_length),) + tuple(metadata[:-1])
yield path + name + '.' + ext, metadata | Generator function that reads the file index from the vpk file
yields (file_path, metadata) | Below is the instruction that describes the task:
### Input:
Generator function that reads the file index from the vpk file
yields (file_path, metadata)
### Response:
def read_index_iter(self):
"""Generator function that reads the file index from the vpk file
yeilds (file_path, metadata)
"""
with fopen(self.vpk_path, 'rb') as f:
f.seek(self.header_length)
while True:
if self.version > 0 and f.tell() > self.tree_length + self.header_length:
raise ValueError("Error parsing index (out of bounds)")
ext = _read_cstring(f)
if ext == '':
break
while True:
path = _read_cstring(f)
if path == '':
break
if path != ' ':
path = os.path.join(path, '')
else:
path = ''
while True:
name = _read_cstring(f)
if name == '':
break
(crc32,
preload_length,
archive_index,
archive_offset,
file_length,
suffix,
) = metadata = list(struct.unpack("IHHIIH", f.read(18)))
if suffix != 0xffff:
raise ValueError("Error while parsing index")
if archive_index == 0x7fff:
metadata[3] = self.header_length + self.tree_length + archive_offset
metadata = (f.read(preload_length),) + tuple(metadata[:-1])
yield path + name + '.' + ext, metadata |
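A hedged usage sketch (hypothetical: assumes a VPK class wraps a _dir.vpk path and exposes this generator, roughly as in the python-vpk package; the metadata tuple order follows the code above):

pak = VPK("pak01_dir.vpk")
for path, metadata in pak.read_index_iter():
    preload, crc32, preload_len, archive_index, archive_offset, file_length = metadata
    print(path, file_length)   # one line per file recorded in the index tree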
def load_from_namespace(module_name):
"""
Load a py3status bundled module.
"""
class_inst = None
name = "py3status.modules.{}".format(module_name)
py_mod = __import__(name)
components = name.split(".")
for comp in components[1:]:
py_mod = getattr(py_mod, comp)
class_inst = py_mod.Py3status()
return class_inst | Load a py3status bundled module. | Below is the instruction that describes the task:
### Input:
Load a py3status bundled module.
### Response:
def load_from_namespace(module_name):
"""
Load a py3status bundled module.
"""
class_inst = None
name = "py3status.modules.{}".format(module_name)
py_mod = __import__(name)
components = name.split(".")
for comp in components[1:]:
py_mod = getattr(py_mod, comp)
class_inst = py_mod.Py3status()
return class_inst |
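The same __import__-then-getattr walk, shown standalone with a stdlib module instead of a py3status module (illustrative only):

name = "os.path"
mod = __import__(name)              # returns the top-level package, i.e. "os"
for comp in name.split(".")[1:]:
    mod = getattr(mod, comp)        # descend to the leaf module
print(mod.join("a", "b"))           # os.path.join -> "a/b" (platform separator)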
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op | Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses. | Below is the instruction that describes the task:
### Input:
Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
### Response:
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op |
def _download_libraries(self, libname):
""" download enrichr libraries."""
self._logger.info("Downloading and generating Enrichr library gene sets......")
s = retry(5)
# query string
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
query_string = '?mode=text&libraryName=%s'
# get
response = s.get( ENRICHR_URL + query_string % libname, timeout=None)
if not response.ok:
raise Exception('Error fetching enrichment results, check internet connection first.')
# reformat to dict and save to disk
mkdirs(DEFAULT_CACHE_PATH)
genesets_dict = {}
outname = "enrichr.%s.gmt"%libname
gmtout = open(os.path.join(DEFAULT_CACHE_PATH, outname), "w")
for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8'):
line=line.strip()
k = line.split("\t")[0]
v = list(map(lambda x: x.split(",")[0], line.split("\t")[2:]))
genesets_dict.update({ k: v})
outline = "%s\t\t%s\n"%(k, "\t".join(v))
gmtout.write(outline)
gmtout.close()
return genesets_dict | download enrichr libraries. | Below is the instruction that describes the task:
### Input:
download enrichr libraries.
### Response:
def _download_libraries(self, libname):
""" download enrichr libraries."""
self._logger.info("Downloading and generating Enrichr library gene sets......")
s = retry(5)
# query string
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
query_string = '?mode=text&libraryName=%s'
# get
response = s.get( ENRICHR_URL + query_string % libname, timeout=None)
if not response.ok:
raise Exception('Error fetching enrichment results, check internet connection first.')
# reformat to dict and save to disk
mkdirs(DEFAULT_CACHE_PATH)
genesets_dict = {}
outname = "enrichr.%s.gmt"%libname
gmtout = open(os.path.join(DEFAULT_CACHE_PATH, outname), "w")
for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8'):
line=line.strip()
k = line.split("\t")[0]
v = list(map(lambda x: x.split(",")[0], line.split("\t")[2:]))
genesets_dict.update({ k: v})
outline = "%s\t\t%s\n"%(k, "\t".join(v))
gmtout.write(outline)
gmtout.close()
return genesets_dict |
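For the Enrichr entry above, an illustrative note on what the method returns and caches (the owning instance, library name and gene names are assumptions, not part of the row):

# genesets_dict maps each gene-set name to its member genes, and each cached
# GMT line is written as "<set name>\t\t<gene1>\t<gene2>...".
genesets = enr._download_libraries("KEGG_2016")  # enr: assumed owner exposing this method
# e.g. genesets["KEGG_GLYCOLYSIS"] -> ["HK1", "PFKM", "PKM", ...]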
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs | Set gl configuration | Below is the instruction that describes the task:
### Input:
Set gl configuration
### Response:
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs |
def error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str
"""
status_code, error_msg, payload = self.check_error()
if status_code != 200 and not error_msg and not payload:
return "Async error (%s). Status code: %r" % (self.async_id, status_code)
return error_msg | Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str | Below is the instruction that describes the task:
### Input:
Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str
### Response:
def error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str
"""
status_code, error_msg, payload = self.check_error()
if status_code != 200 and not error_msg and not payload:
return "Async error (%s). Status code: %r" % (self.async_id, status_code)
return error_msg |
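A hedged usage sketch for the async-response entry above: the docstring requires checking is_done before reading error. The surrounding object name is an assumption, and is_done/error are treated as properties as the SDK docstring implies.

import time

while not async_resp.is_done:   # async_resp: assumed async-consumer object from the SDK
    time.sleep(0.1)             # poll until the request completes
if async_resp.error:
    print("request failed:", async_resp.error)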
def fluxfrac(*mags):
"""Returns fraction of total flux in first argument, assuming all are magnitudes.
"""
Ftot = 0
for mag in mags:
Ftot += 10**(-0.4*mag)
F1 = 10**(-0.4*mags[0])
return F1/Ftot | Returns fraction of total flux in first argument, assuming all are magnitudes. | Below is the instruction that describes the task:
### Input:
Returns fraction of total flux in first argument, assuming all are magnitudes.
### Response:
def fluxfrac(*mags):
"""Returns fraction of total flux in first argument, assuming all are magnitudes.
"""
Ftot = 0
for mag in mags:
Ftot += 10**(-0.4*mag)
F1 = 10**(-0.4*mags[0])
return F1/Ftot |
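A small worked example for fluxfrac (magnitudes chosen only for illustration): with m1=10 and m2=12, F1 = 1e-4 and F2 is about 1.585e-5, so the first source carries about 86% of the combined flux.

frac = fluxfrac(10.0, 12.0)
print(round(frac, 3))  # -> 0.863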
def GetUserInfo(knowledge_base, user):
# TODO: This docstring cannot be a raw literal because there are
# issues with raw unicode literals on Python 2. Once support for Python 2 is
# dropped, it can be made raw again.
# pylint: disable=g-docstring-has-escape
"""Get a User protobuf for a specific user.
Args:
knowledge_base: An rdf_client.KnowledgeBase object.
user: Username as string. May contain domain like DOMAIN\\user.
Returns:
A User rdfvalue or None
"""
# pylint: enable=g-docstring-has-escape
if "\\" in user:
domain, user = user.split("\\", 1)
users = [
u for u in knowledge_base.users
if u.username == user and u.userdomain == domain
]
else:
users = [u for u in knowledge_base.users if u.username == user]
if not users:
return
else:
return users[0] | Get a User protobuf for a specific user.
Args:
knowledge_base: An rdf_client.KnowledgeBase object.
user: Username as string. May contain domain like DOMAIN\\user.
Returns:
A User rdfvalue or None | Below is the instruction that describes the task:
### Input:
Get a User protobuf for a specific user.
Args:
knowledge_base: An rdf_client.KnowledgeBase object.
user: Username as string. May contain domain like DOMAIN\\user.
Returns:
A User rdfvalue or None
### Response:
def GetUserInfo(knowledge_base, user):
# TODO: This docstring cannot be a raw literal because there are
# issues with raw unicode literals on Python 2. Once support for Python 2 is
# dropped, it can be made raw again.
# pylint: disable=g-docstring-has-escape
"""Get a User protobuf for a specific user.
Args:
knowledge_base: An rdf_client.KnowledgeBase object.
user: Username as string. May contain domain like DOMAIN\\user.
Returns:
A User rdfvalue or None
"""
# pylint: enable=g-docstring-has-escape
if "\\" in user:
domain, user = user.split("\\", 1)
users = [
u for u in knowledge_base.users
if u.username == user and u.userdomain == domain
]
else:
users = [u for u in knowledge_base.users if u.username == user]
if not users:
return
else:
return users[0] |
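A short hedged sketch of calling GetUserInfo above; kb stands in for an rdf_client.KnowledgeBase whose users list is already populated, and the domain/username are made up.

user = GetUserInfo(kb, r"CORP\jsmith")  # domain-qualified lookup matches username + userdomain
if user is None:
    print("user not found in knowledge base")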
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if arcpyFound and isinstance(geom, arcpy.Multipoint):
feature_geom = []
fPart = []
for part in geom:
fPart = []
for pnt in part:
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=geom.spatialReference.factoryCode,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom | converts a geometry object to a common.Geometry object | Below is the instruction that describes the task:
### Input:
converts a geometry object to a common.Geometry object
### Response:
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if arcpyFound and isinstance(geom, arcpy.Multipoint):
feature_geom = []
fPart = []
for part in geom:
fPart = []
for pnt in part:
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=geom.spatialReference.factoryCode,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom |
def get_hash(self):
"""
Returns the associated hash for this template version
Returns:
str: Hash for this version
"""
if self._hash is None:
self._hash = self._source.get_hash(self._handle).strip()
return self._hash | Returns the associated hash for this template version
Returns:
str: Hash for this version | Below is the instruction that describes the task:
### Input:
Returns the associated hash for this template version
Returns:
str: Hash for this version
### Response:
def get_hash(self):
"""
Returns the associated hash for this template version
Returns:
str: Hash for this version
"""
if self._hash is None:
self._hash = self._source.get_hash(self._handle).strip()
return self._hash |
def RGB_color_picker(obj):
"""Build a color representation from the string representation of an object
This allows to quickly get a color from some data, with the
additional benefit that the color will be the same as long as the
(string representation of the) data is the same::
>>> from colour import RGB_color_picker, Color
Same inputs produce the same result::
>>> RGB_color_picker("Something") == RGB_color_picker("Something")
True
... but different inputs produce different colors::
>>> RGB_color_picker("Something") != RGB_color_picker("Something else")
True
In any case, we still get a ``Color`` object::
>>> isinstance(RGB_color_picker("Something"), Color)
True
"""
## Turn the input into a string whose length is divisible by 3. SHA-384 is good because it
## divides into 3 components of the same size, which will be used to
## represent the RGB values of the color.
digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest()
## Split the digest into 3 sub-strings of equivalent size.
subsize = int(len(digest) / 3)
splitted_digest = [digest[i * subsize: (i + 1) * subsize]
for i in range(3)]
## Convert those hexadecimal sub-strings into integer and scale them down
## to the 0..1 range.
max_value = float(int("f" * subsize, 16))
components = (
int(d, 16) ## Make a number from a list with hex digits
/ max_value ## Scale it down to [0.0, 1.0]
for d in splitted_digest)
return Color(rgb2hex(components)) | Build a color representation from the string representation of an object
This allows to quickly get a color from some data, with the
additional benefit that the color will be the same as long as the
(string representation of the) data is the same::
>>> from colour import RGB_color_picker, Color
Same inputs produce the same result::
>>> RGB_color_picker("Something") == RGB_color_picker("Something")
True
... but different inputs produce different colors::
>>> RGB_color_picker("Something") != RGB_color_picker("Something else")
True
In any case, we still get a ``Color`` object::
>>> isinstance(RGB_color_picker("Something"), Color)
True | Below is the instruction that describes the task:
### Input:
Build a color representation from the string representation of an object
This allows to quickly get a color from some data, with the
additional benefit that the color will be the same as long as the
(string representation of the) data is the same::
>>> from colour import RGB_color_picker, Color
Same inputs produce the same result::
>>> RGB_color_picker("Something") == RGB_color_picker("Something")
True
... but different inputs produce different colors::
>>> RGB_color_picker("Something") != RGB_color_picker("Something else")
True
In any case, we still get a ``Color`` object::
>>> isinstance(RGB_color_picker("Something"), Color)
True
### Response:
def RGB_color_picker(obj):
"""Build a color representation from the string representation of an object
This allows to quickly get a color from some data, with the
additional benefit that the color will be the same as long as the
(string representation of the) data is the same::
>>> from colour import RGB_color_picker, Color
Same inputs produce the same result::
>>> RGB_color_picker("Something") == RGB_color_picker("Something")
True
... but different inputs produce different colors::
>>> RGB_color_picker("Something") != RGB_color_picker("Something else")
True
In any case, we still get a ``Color`` object::
>>> isinstance(RGB_color_picker("Something"), Color)
True
"""
## Turn the input into a string whose length is divisible by 3. SHA-384 is good because it
## divides into 3 components of the same size, which will be used to
## represent the RGB values of the color.
digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest()
## Split the digest into 3 sub-strings of equivalent size.
subsize = int(len(digest) / 3)
splitted_digest = [digest[i * subsize: (i + 1) * subsize]
for i in range(3)]
## Convert those hexadecimal sub-strings into integer and scale them down
## to the 0..1 range.
max_value = float(int("f" * subsize, 16))
components = (
int(d, 16) ## Make a number from a list with hex digits
/ max_value ## Scale it down to [0.0, 1.0]
for d in splitted_digest)
return Color(rgb2hex(components)) |
def build_dependencies(self):
"""
Recursively build the dependencies for sub-modules and sub-packages.
Iterate on node's modules then packages and call their
build_dependencies methods.
"""
for m in self.modules:
m.build_dependencies()
for p in self.packages:
p.build_dependencies() | Recursively build the dependencies for sub-modules and sub-packages.
Iterate on node's modules then packages and call their
build_dependencies methods. | Below is the instruction that describes the task:
### Input:
Recursively build the dependencies for sub-modules and sub-packages.
Iterate on node's modules then packages and call their
build_dependencies methods.
### Response:
def build_dependencies(self):
"""
Recursively build the dependencies for sub-modules and sub-packages.
Iterate on node's modules then packages and call their
build_dependencies methods.
"""
for m in self.modules:
m.build_dependencies()
for p in self.packages:
p.build_dependencies() |
def __setup():
"""Will be executed in the first time someone calls classes_*() """
global __collaborators, __flag_first
import f311
__flag_first = False
for pkgname in f311.COLLABORATORS_C:
try:
pkg = importlib.import_module(pkgname)
a99.get_python_logger().info("Imported collaborator package '{}'".format(pkgname))
try:
if hasattr(pkg, "_setup_filetypes"):
pkg._setup_filetypes()
else:
_collect_classes(pkg)
__collaborators[pkgname] = pkg
except:
a99.get_python_logger().exception(
"Actually, package '{}' gave error".format(pkgname))
raise
except:
a99.get_python_logger().warning("Failed to import package '{}".format(pkgname)) | Will be executed in the first time someone calls classes_*() | Below is the the instruction that describes the task:
### Input:
Will be executed the first time someone calls classes_*()
### Response:
def __setup():
"""Will be executed in the first time someone calls classes_*() """
global __collaborators, __flag_first
import f311
__flag_first = False
for pkgname in f311.COLLABORATORS_C:
try:
pkg = importlib.import_module(pkgname)
a99.get_python_logger().info("Imported collaborator package '{}'".format(pkgname))
try:
if hasattr(pkg, "_setup_filetypes"):
pkg._setup_filetypes()
else:
_collect_classes(pkg)
__collaborators[pkgname] = pkg
except:
a99.get_python_logger().exception(
"Actually, package '{}' gave error".format(pkgname))
raise
except:
a99.get_python_logger().warning("Failed to import package '{}".format(pkgname)) |
def createPolyline(self, points, strokewidth=1, stroke='black'):
"""
Creates a Polyline
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@return: a polyline object
"""
style_dict = {'fill':'none', 'stroke-width':strokewidth, 'stroke':stroke}
myStyle = StyleBuilder(style_dict)
p = Polyline(points=points)
p.set_style(myStyle.getStyle())
return p | Creates a Polyline
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@return: a polyline object | Below is the instruction that describes the task:
### Input:
Creates a Polyline
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@return: a polyline object
### Response:
def createPolyline(self, points, strokewidth=1, stroke='black'):
"""
Creates a Polyline
@type points: string in the form "x1,y1 x2,y2 x3,y3"
@param points: all points relevant to the polygon
@type strokewidth: string or int
@param strokewidth: width of the pen used to draw
@type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
@param stroke: color with which to draw the outer limits
@return: a polyline object
"""
style_dict = {'fill':'none', 'stroke-width':strokewidth, 'stroke':stroke}
myStyle = StyleBuilder(style_dict)
p = Polyline(points=points)
p.set_style(myStyle.getStyle())
return p |
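An illustrative call of the polyline builder above (the host class and the SVG container are assumptions; the point-string format follows the docstring):

builder = ShapeBuilder()  # assumed pysvg-style class that defines createPolyline
line = builder.createPolyline("0,0 50,30 100,0", strokewidth=2, stroke='red')
svg_doc.addElement(line)  # svg_doc: assumed svg document object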
def remove_programmer(programmer_id):
"""remove programmer.
:param programmer_id: programmer id (e.g. 'avrisp')
:rtype: None
"""
log.debug('remove %s', programmer_id)
lines = programmers_txt().lines()
lines = filter(
lambda x: not x.strip().startswith(programmer_id + '.'), lines)
programmers_txt().write_lines(lines) | remove programmer.
:param programmer_id: programmer id (e.g. 'avrisp')
:rtype: None | Below is the instruction that describes the task:
### Input:
remove programmer.
:param programmer_id: programmer id (e.g. 'avrisp')
:rtype: None
### Response:
def remove_programmer(programmer_id):
"""remove programmer.
:param programmer_id: programmer id (e.g. 'avrisp')
:rtype: None
"""
log.debug('remove %s', programmer_id)
lines = programmers_txt().lines()
lines = filter(
lambda x: not x.strip().startswith(programmer_id + '.'), lines)
programmers_txt().write_lines(lines) |
def logistic_regression(X, y, coef_only=False, alpha=0.05,
as_dataframe=True, remove_na=False, **kwargs):
"""(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are present in either the target
or predictor variables. Please remove them before running the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
5. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from sklearn.linear_model import LogisticRegression
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist()
elif isinstance(X, pd.Series):
names = [X.name]
else:
names = []
assert 0 < alpha < 1
assert y.ndim == 1, 'y must be one-dimensional.'
# Convert to numpy array
X = np.asarray(X)
y = np.asarray(y)
# Add axis if only one-dimensional array
if X.ndim == 1:
X = X[..., np.newaxis]
# Check for NaN / Inf
if remove_na:
X, y = rm_na(X, y[..., np.newaxis], paired=True, axis='rows')
y = np.squeeze(y)
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, 'Target variable contains NaN or Inf. Please remove them.'
assert X_gd, 'Predictors contain NaN or Inf. Please remove them.'
# Check that X and y have same length
assert y.shape[0] == X.shape[0], 'X and y must have same number of samples'
# Check that y is binary
if np.unique(y).size != 2:
raise ValueError('Dependent variable must be binary.')
if not names:
names = ['x' + str(i + 1) for i in range(X.shape[1])]
# Add intercept in names
names.insert(0, "Intercept")
# Initialize and fit
if 'solver' not in kwargs:
kwargs['solver'] = 'lbfgs'
if 'multi_class' not in kwargs:
kwargs['multi_class'] = 'auto'
lom = LogisticRegression(**kwargs)
lom.fit(X, y)
coef = np.append(lom.intercept_, lom.coef_)
if coef_only:
return coef
# Design matrix -- add intercept
X_design = np.column_stack((np.ones(X.shape[0]), X))
n, p = X_design.shape
# Fisher Information Matrix
denom = (2 * (1 + np.cosh(lom.decision_function(X))))
denom = np.tile(denom, (p, 1)).T
fim = np.dot((X_design / denom).T, X_design)
crao = np.linalg.inv(fim)
# Standard error and Z-scores
se = np.sqrt(np.diag(crao))
z_scores = coef / se
# Two-tailed p-values
pval = np.array([2 * norm.sf(abs(z)) for z in z_scores])
# Confidence intervals
crit = norm.ppf(1 - alpha / 2)
ll = coef - crit * se
ul = coef + crit * se
# Rename CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Create dict
stats = {'names': names, 'coef': coef, 'se': se, 'z': z_scores,
'pval': pval, ll_name: ll, ul_name: ul}
if as_dataframe:
return pd.DataFrame.from_dict(stats)
else:
return stats | (Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are present in either the target
or predictor variables. Please remove them before running the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
5. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064] | Below is the instruction that describes the task:
### Input:
(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are present in either the target
or predictor variables. Please remove them before running the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
5. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
### Response:
def logistic_regression(X, y, coef_only=False, alpha=0.05,
as_dataframe=True, remove_na=False, **kwargs):
"""(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are present in either the target
or predictor variables. Please remove them before running the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
5. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from sklearn.linear_model import LogisticRegression
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist()
elif isinstance(X, pd.Series):
names = [X.name]
else:
names = []
assert 0 < alpha < 1
assert y.ndim == 1, 'y must be one-dimensional.'
# Convert to numpy array
X = np.asarray(X)
y = np.asarray(y)
# Add axis if only one-dimensional array
if X.ndim == 1:
X = X[..., np.newaxis]
# Check for NaN / Inf
if remove_na:
X, y = rm_na(X, y[..., np.newaxis], paired=True, axis='rows')
y = np.squeeze(y)
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, 'Target variable contains NaN or Inf. Please remove them.'
assert X_gd, 'Predictors contain NaN or Inf. Please remove them.'
# Check that X and y have same length
assert y.shape[0] == X.shape[0], 'X and y must have same number of samples'
# Check that y is binary
if np.unique(y).size != 2:
raise ValueError('Dependent variable must be binary.')
if not names:
names = ['x' + str(i + 1) for i in range(X.shape[1])]
# Add intercept in names
names.insert(0, "Intercept")
# Initialize and fit
if 'solver' not in kwargs:
kwargs['solver'] = 'lbfgs'
if 'multi_class' not in kwargs:
kwargs['multi_class'] = 'auto'
lom = LogisticRegression(**kwargs)
lom.fit(X, y)
coef = np.append(lom.intercept_, lom.coef_)
if coef_only:
return coef
# Design matrix -- add intercept
X_design = np.column_stack((np.ones(X.shape[0]), X))
n, p = X_design.shape
# Fisher Information Matrix
denom = (2 * (1 + np.cosh(lom.decision_function(X))))
denom = np.tile(denom, (p, 1)).T
fim = np.dot((X_design / denom).T, X_design)
crao = np.linalg.inv(fim)
# Standard error and Z-scores
se = np.sqrt(np.diag(crao))
z_scores = coef / se
# Two-tailed p-values
pval = np.array([2 * norm.sf(abs(z)) for z in z_scores])
# Confidence intervals
crit = norm.ppf(1 - alpha / 2)
ll = coef - crit * se
ul = coef + crit * se
# Rename CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Create dict
stats = {'names': names, 'coef': coef, 'se': se, 'z': z_scores,
'pval': pval, ll_name: ll, ul_name: ul}
if as_dataframe:
return pd.DataFrame.from_dict(stats)
else:
return stats |
def _generate_iam_invoke_role_policy(self):
"""
Generate the policy for the IAM role used by API Gateway to invoke
the lambda function.
Terraform name: aws_iam_role.invoke_role
"""
invoke_pol = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Resource": ["*"],
"Action": ["lambda:InvokeFunction"]
}
]
}
self.tf_conf['resource']['aws_iam_role_policy']['invoke_policy'] = {
'name': self.resource_name + '-invoke',
'role': '${aws_iam_role.invoke_role.id}',
'policy': json.dumps(invoke_pol)
} | Generate the policy for the IAM role used by API Gateway to invoke
the lambda function.
Terraform name: aws_iam_role.invoke_role | Below is the instruction that describes the task:
### Input:
Generate the policy for the IAM role used by API Gateway to invoke
the lambda function.
Terraform name: aws_iam_role.invoke_role
### Response:
def _generate_iam_invoke_role_policy(self):
"""
Generate the policy for the IAM role used by API Gateway to invoke
the lambda function.
Terraform name: aws_iam_role.invoke_role
"""
invoke_pol = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Resource": ["*"],
"Action": ["lambda:InvokeFunction"]
}
]
}
self.tf_conf['resource']['aws_iam_role_policy']['invoke_policy'] = {
'name': self.resource_name + '-invoke',
'role': '${aws_iam_role.invoke_role.id}',
'policy': json.dumps(invoke_pol)
} |
def _dusty_hosts_config(hosts_specs):
"""Return a string of all host rules required to match
the given spec. This string is wrapped in the Dusty hosts
header and footer so it can be easily removed later."""
rules = ''.join(['{} {}\n'.format(spec['forwarded_ip'], spec['host_address']) for spec in hosts_specs])
return config_file.create_config_section(rules) | Return a string of all host rules required to match
the given spec. This string is wrapped in the Dusty hosts
header and footer so it can be easily removed later. | Below is the instruction that describes the task:
### Input:
Return a string of all host rules required to match
the given spec. This string is wrapped in the Dusty hosts
header and footer so it can be easily removed later.
### Response:
def _dusty_hosts_config(hosts_specs):
"""Return a string of all host rules required to match
the given spec. This string is wrapped in the Dusty hosts
header and footer so it can be easily removed later."""
rules = ''.join(['{} {}\n'.format(spec['forwarded_ip'], spec['host_address']) for spec in hosts_specs])
return config_file.create_config_section(rules) |
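A quick illustration of the rule string the hosts-config helper above builds before wrapping it in the Dusty section header/footer (the spec values are invented):

specs = [{'forwarded_ip': '172.17.0.2', 'host_address': 'local.example.com'}]
section = _dusty_hosts_config(specs)
# rules passed to create_config_section: "172.17.0.2 local.example.com\n"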
def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['maximum'] = self.maximum_value
if self.exclusive:
field_schema['exclusiveMaximum'] = True | Modify field schema. | Below is the instruction that describes the task:
### Input:
Modify field schema.
### Response:
def modify_schema(self, field_schema):
"""Modify field schema."""
field_schema['maximum'] = self.maximum_value
if self.exclusive:
field_schema['exclusiveMaximum'] = True |
def normalize_url(url):
"""Return a normalized url with trailing and without leading slash.
>>> normalize_url(None)
'/'
>>> normalize_url('/')
'/'
>>> normalize_url('/foo/bar')
'/foo/bar'
>>> normalize_url('foo/bar')
'/foo/bar'
>>> normalize_url('/foo/bar/')
'/foo/bar'
"""
if not url or len(url) == 0:
return '/'
if not url.startswith('/'):
url = '/' + url
if len(url) > 1 and url.endswith('/'):
url = url[0:len(url) - 1]
return url | Return a normalized url with a leading and without a trailing slash.
>>> normalize_url(None)
'/'
>>> normalize_url('/')
'/'
>>> normalize_url('/foo/bar')
'/foo/bar'
>>> normalize_url('foo/bar')
'/foo/bar'
>>> normalize_url('/foo/bar/')
'/foo/bar' | Below is the instruction that describes the task:
### Input:
Return a normalized url with a leading and without a trailing slash.
>>> normalize_url(None)
'/'
>>> normalize_url('/')
'/'
>>> normalize_url('/foo/bar')
'/foo/bar'
>>> normalize_url('foo/bar')
'/foo/bar'
>>> normalize_url('/foo/bar/')
'/foo/bar'
### Response:
def normalize_url(url):
"""Return a normalized url with trailing and without leading slash.
>>> normalize_url(None)
'/'
>>> normalize_url('/')
'/'
>>> normalize_url('/foo/bar')
'/foo/bar'
>>> normalize_url('foo/bar')
'/foo/bar'
>>> normalize_url('/foo/bar/')
'/foo/bar'
"""
if not url or len(url) == 0:
return '/'
if not url.startswith('/'):
url = '/' + url
if len(url) > 1 and url.endswith('/'):
url = url[0:len(url) - 1]
return url |
def visualize(G, settings, filename="dependencies", no_graphviz=False):
"""
Uses networkX to draw a graphviz dot file either (a) calls the
graphviz command "dot" to turn it into a SVG and remove the
dotfile (default), or (b) if no_graphviz is True, just output
the graphviz dot file
Args:
a NetworkX DiGraph
the settings dictionary
a filename (a default is provided)
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure
"""
error = settings["error"]
if no_graphviz:
write_dot_file(G, filename)
return 0
write_dot_file(G, "tempdot")
renderer = "svg"
if re.search("\.jpg$", filename, re.IGNORECASE):
renderer = "jpg"
elif re.search("\.jpeg$", filename, re.IGNORECASE):
renderer = "jpg"
elif re.search("\.svg$", filename, re.IGNORECASE):
renderer = "svg"
elif re.search("\.png$", filename, re.IGNORECASE):
renderer = "png"
elif re.search("\.gif$", filename, re.IGNORECASE):
renderer = "gif"
elif re.search("\.ps$", filename, re.IGNORECASE):
renderer = "ps"
elif re.search("\.pdf$", filename, re.IGNORECASE):
renderer = "pdf"
else:
renderer = "svg"
filename += ".svg"
command = "dot -T{} tempdot -o {}".format(renderer, filename)
p = Popen(command, shell=True)
p.communicate()
if p.returncode:
errmes = "Either graphviz is not installed, or its not on PATH"
os.remove("tempdot")
error(errmes)
sys.exit(1)
os.remove("tempdot")
return 0 | Uses networkX to draw a graphviz dot file either (a) calls the
graphviz command "dot" to turn it into a SVG and remove the
dotfile (default), or (b) if no_graphviz is True, just output
the graphviz dot file
Args:
a NetworkX DiGraph
the settings dictionary
a filename (a default is provided)
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure | Below is the instruction that describes the task:
### Input:
Uses networkX to draw a graphviz dot file either (a) calls the
graphviz command "dot" to turn it into a SVG and remove the
dotfile (default), or (b) if no_graphviz is True, just output
the graphviz dot file
Args:
a NetworkX DiGraph
the settings dictionary
a filename (a default is provided)
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure
### Response:
def visualize(G, settings, filename="dependencies", no_graphviz=False):
"""
Uses networkX to draw a graphviz dot file either (a) calls the
graphviz command "dot" to turn it into a SVG and remove the
dotfile (default), or (b) if no_graphviz is True, just output
the graphviz dot file
Args:
a NetworkX DiGraph
the settings dictionary
a filename (a default is provided)
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure
"""
error = settings["error"]
if no_graphviz:
write_dot_file(G, filename)
return 0
write_dot_file(G, "tempdot")
renderer = "svg"
if re.search("\.jpg$", filename, re.IGNORECASE):
renderer = "jpg"
elif re.search("\.jpeg$", filename, re.IGNORECASE):
renderer = "jpg"
elif re.search("\.svg$", filename, re.IGNORECASE):
renderer = "svg"
elif re.search("\.png$", filename, re.IGNORECASE):
renderer = "png"
elif re.search("\.gif$", filename, re.IGNORECASE):
renderer = "gif"
elif re.search("\.ps$", filename, re.IGNORECASE):
renderer = "ps"
elif re.search("\.pdf$", filename, re.IGNORECASE):
renderer = "pdf"
else:
renderer = "svg"
filename += ".svg"
command = "dot -T{} tempdot -o {}".format(renderer, filename)
p = Popen(command, shell=True)
p.communicate()
if p.returncode:
errmes = "Either graphviz is not installed, or its not on PATH"
os.remove("tempdot")
error(errmes)
sys.exit(1)
os.remove("tempdot")
return 0 |
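A hedged usage sketch for visualize above (the graph, settings and filename are placeholders; graphviz's dot must be on PATH unless no_graphviz=True, and "error" only needs to be a callable that reports the message):

import sys
import networkx as nx

G = nx.DiGraph([("build", "test"), ("test", "deploy")])
settings = {"error": lambda msg: print(msg, file=sys.stderr)}
visualize(G, settings, filename="pipeline.svg")  # renders pipeline.svg via dot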
def check_row(state, index, missing_msg=None, expand_msg=None):
"""Zoom in on a particular row in the query result, by index.
After zooming in on a row, which is represented as a single-row query result,
you can use ``has_equal_value()`` to verify whether all columns in the zoomed in solution
query result have a match in the student query result.
Args:
index: index of the row to zoom in on (zero-based indexed).
missing_msg: if specified, this overrides the automatically generated feedback
message in case the row is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists LIMIT 5``
* student : ``SELECT artist_id, name FROM artists LIMIT 2``
We can write the following SCTs: ::
# fails, since row 3 at index 2 is not in the student result
Ex().check_row(2)
# passes, since row 1 at index 0 is in the student result
Ex().check_row(0)
"""
if missing_msg is None:
missing_msg = "The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look."
if expand_msg is None:
expand_msg = "Have another look at row {{index + 1}} in your query result. "
msg_kwargs = {"index": index}
# check that query returned something
has_result(state)
stu_res = state.student_result
sol_res = state.solution_result
n_sol = len(next(iter(sol_res.values())))
n_stu = len(next(iter(stu_res.values())))
if index >= n_sol:
raise BaseException(
"There are only {} rows in the solution query result, and you're trying to fetch the row at index {}".format(
n_sol, index
)
)
if index >= n_stu:
_msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": msg_kwargs},
student_result={k: [v[index]] for k, v in stu_res.items()},
solution_result={k: [v[index]] for k, v in sol_res.items()},
) | Zoom in on a particular row in the query result, by index.
After zooming in on a row, which is represented as a single-row query result,
you can use ``has_equal_value()`` to verify whether all columns in the zoomed in solution
query result have a match in the student query result.
Args:
index: index of the row to zoom in on (zero-based indexed).
missing_msg: if specified, this overrides the automatically generated feedback
message in case the row is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists LIMIT 5``
* student : ``SELECT artist_id, name FROM artists LIMIT 2``
We can write the following SCTs: ::
# fails, since row 3 at index 2 is not in the student result
Ex().check_row(2)
# passes, since row 1 at index 0 is in the student result
Ex().check_row(0) | Below is the instruction that describes the task:
### Input:
Zoom in on a particular row in the query result, by index.
After zooming in on a row, which is represented as a single-row query result,
you can use ``has_equal_value()`` to verify whether all columns in the zoomed in solution
query result have a match in the student query result.
Args:
index: index of the row to zoom in on (zero-based indexed).
missing_msg: if specified, this overrides the automatically generated feedback
message in case the row is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists LIMIT 5``
* student : ``SELECT artist_id, name FROM artists LIMIT 2``
We can write the following SCTs: ::
# fails, since row 3 at index 2 is not in the student result
Ex().check_row(2)
# passes, since row 1 at index 0 is in the student result
Ex().check_row(0)
### Response:
def check_row(state, index, missing_msg=None, expand_msg=None):
"""Zoom in on a particular row in the query result, by index.
After zooming in on a row, which is represented as a single-row query result,
you can use ``has_equal_value()`` to verify whether all columns in the zoomed in solution
query result have a match in the student query result.
Args:
index: index of the row to zoom in on (zero-based indexed).
missing_msg: if specified, this overrides the automatically generated feedback
message in case the row is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists LIMIT 5``
* student : ``SELECT artist_id, name FROM artists LIMIT 2``
We can write the following SCTs: ::
# fails, since row 3 at index 2 is not in the student result
Ex().check_row(2)
# passes, since row 1 at index 0 is in the student result
Ex().check_row(0)
"""
if missing_msg is None:
missing_msg = "The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look."
if expand_msg is None:
expand_msg = "Have another look at row {{index + 1}} in your query result. "
msg_kwargs = {"index": index}
# check that query returned something
has_result(state)
stu_res = state.student_result
sol_res = state.solution_result
n_sol = len(next(iter(sol_res.values())))
n_stu = len(next(iter(stu_res.values())))
if index >= n_sol:
raise BaseException(
"There are only {} rows in the solution query result, and you're trying to fetch the row at index {}".format(
n_sol, index
)
)
if index >= n_stu:
_msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": msg_kwargs},
student_result={k: [v[index]] for k, v in stu_res.items()},
solution_result={k: [v[index]] for k, v in sol_res.items()},
) |
def replace(self, **kwargs):
"""
Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing
their corresponding values.
Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs.
Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs.
"""
# Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through.
return SlashSeparatedCourseKey(
kwargs.pop('org', self.org),
kwargs.pop('course', self.course),
kwargs.pop('run', self.run),
**kwargs
) | Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing
their corresponding values.
Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs.
Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs. | Below is the instruction that describes the task:
### Input:
Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing
their corresponding values.
Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs.
Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs.
### Response:
def replace(self, **kwargs):
"""
Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing
their corresponding values.
Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs.
Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs.
"""
# Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through.
return SlashSeparatedCourseKey(
kwargs.pop('org', self.org),
kwargs.pop('course', self.course),
kwargs.pop('run', self.run),
**kwargs
) |
def dtype(self):
"""Pixel data type."""
try:
return self.data.dtype
except AttributeError:
return numpy.dtype('%s%d' % (self._sample_type, self._sample_bytes)) | Pixel data type. | Below is the instruction that describes the task:
### Input:
Pixel data type.
### Response:
def dtype(self):
"""Pixel data type."""
try:
return self.data.dtype
except AttributeError:
return numpy.dtype('%s%d' % (self._sample_type, self._sample_bytes)) |
def reverse(view, *args, **kwargs):
'''
User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse`
as `args` and `kwargs` arguments, respectively.
The special optional keyword argument `query` is a dictionary of query (or GET) parameters
that can be appended to the `reverse`d URL.
Example:
reverse('products:category', categoryId = 5, query = {'page': 2})
is equivalent to
django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2'
'''
if 'query' in kwargs:
query = kwargs.pop('query')
else:
query = None
base = urlresolvers.reverse(view, args = args, kwargs = kwargs)
if query:
return '{}?{}'.format(base, django.utils.http.urlencode(query))
else:
return base | User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse`
as `args` and `kwargs` arguments, respectively.
The special optional keyword argument `query` is a dictionary of query (or GET) parameters
that can be appended to the `reverse`d URL.
Example:
reverse('products:category', categoryId = 5, query = {'page': 2})
is equivalent to
django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2' | Below is the instruction that describes the task:
### Input:
User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse`
as `args` and `kwargs` arguments, respectively.
The special optional keyword argument `query` is a dictionary of query (or GET) parameters
that can be appended to the `reverse`d URL.
Example:
reverse('products:category', categoryId = 5, query = {'page': 2})
is equivalent to
django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2'
### Response:
def reverse(view, *args, **kwargs):
'''
User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse`
as `args` and `kwargs` arguments, respectively.
The special optional keyword argument `query` is a dictionary of query (or GET) parameters
that can be appended to the `reverse`d URL.
Example:
reverse('products:category', categoryId = 5, query = {'page': 2})
is equivalent to
django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2'
'''
if 'query' in kwargs:
query = kwargs.pop('query')
else:
query = None
base = urlresolvers.reverse(view, args = args, kwargs = kwargs)
if query:
return '{}?{}'.format(base, django.utils.http.urlencode(query))
else:
return base |
def hacking_assert_equal(logical_line, noqa):
r"""Check that self.assertEqual and self.assertNotEqual are used.
Okay: self.assertEqual(x, y)
Okay: self.assertNotEqual(x, y)
H204: self.assertTrue(x == y)
H204: self.assertTrue(x != y)
H204: self.assertFalse(x == y)
H204: self.assertFalse(x != y)
"""
if noqa:
return
methods = ['assertTrue', 'assertFalse']
for method in methods:
start = logical_line.find('.%s' % method) + 1
if start != 0:
break
else:
return
comparisons = [ast.Eq, ast.NotEq]
checker = AssertTrueFalseChecker(methods, comparisons)
checker.visit(ast.parse(logical_line))
if checker.error:
yield start, 'H204: Use assert(Not)Equal()' | r"""Check that self.assertEqual and self.assertNotEqual are used.
Okay: self.assertEqual(x, y)
Okay: self.assertNotEqual(x, y)
H204: self.assertTrue(x == y)
H204: self.assertTrue(x != y)
H204: self.assertFalse(x == y)
H204: self.assertFalse(x != y) | Below is the instruction that describes the task:
### Input:
r"""Check that self.assertEqual and self.assertNotEqual are used.
Okay: self.assertEqual(x, y)
Okay: self.assertNotEqual(x, y)
H204: self.assertTrue(x == y)
H204: self.assertTrue(x != y)
H204: self.assertFalse(x == y)
H204: self.assertFalse(x != y)
### Response:
def hacking_assert_equal(logical_line, noqa):
r"""Check that self.assertEqual and self.assertNotEqual are used.
Okay: self.assertEqual(x, y)
Okay: self.assertNotEqual(x, y)
H204: self.assertTrue(x == y)
H204: self.assertTrue(x != y)
H204: self.assertFalse(x == y)
H204: self.assertFalse(x != y)
"""
if noqa:
return
methods = ['assertTrue', 'assertFalse']
for method in methods:
start = logical_line.find('.%s' % method) + 1
if start != 0:
break
else:
return
comparisons = [ast.Eq, ast.NotEq]
checker = AssertTrueFalseChecker(methods, comparisons)
checker.visit(ast.parse(logical_line))
if checker.error:
yield start, 'H204: Use assert(Not)Equal()' |
def delete_feed(self, pid):
"""Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete
"""
logger.info("delete_feed(pid=\"%s\") [lid=%s]", pid, self.__lid)
return self.__delete_point(R_FEED, pid) | Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete | Below is the instruction that describes the task:
### Input:
Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete
### Response:
def delete_feed(self, pid):
"""Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete
"""
logger.info("delete_feed(pid=\"%s\") [lid=%s]", pid, self.__lid)
return self.__delete_point(R_FEED, pid) |
def send(self, url, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (url, data, headers)) | Spawn an async request to a remote webserver. | Below is the instruction that describes the task:
### Input:
Spawn an async request to a remote webserver.
### Response:
def send(self, url, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (url, data, headers)) |
def team(self, name=None, id=None, is_hidden=False, **kwargs):
"""
Team of KE-chain.
Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.
:param name: (optional) team name to filter
:type name: basestring or None
:param id: (optional) id of the user to filter
:type id: basestring or None
:param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
:type is_hidden: bool or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Team`
:raises NotFoundError: when a user could not be found
:raises MultipleFoundError: when more than a single user can be found
"""
_teams = self.teams(name=name, id=id, **kwargs)
if len(_teams) == 0:
raise NotFoundError("No team criteria matches")
if len(_teams) != 1:
raise MultipleFoundError("Multiple teams fit criteria")
return _teams[0] | Team of KE-chain.
Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.
:param name: (optional) team name to filter
:type name: basestring or None
:param id: (optional) id of the user to filter
:type id: basestring or None
:param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
:type is_hidden: bool or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Team`
:raises NotFoundError: when a user could not be found
:raises MultipleFoundError: when more than a single user can be found | Below is the the instruction that describes the task:
### Input:
Team of KE-chain.
Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.
:param name: (optional) team name to filter
:type name: basestring or None
:param id: (optional) id of the user to filter
:type id: basestring or None
:param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
:type is_hidden: bool or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Team`
:raises NotFoundError: when a user could not be found
:raises MultipleFoundError: when more than a single user can be found
### Response:
def team(self, name=None, id=None, is_hidden=False, **kwargs):
"""
Team of KE-chain.
Provides a team of :class:`Team` of KE-chain. You can filter on team name or provide id.
:param name: (optional) team name to filter
:type name: basestring or None
:param id: (optional) id of the user to filter
:type id: basestring or None
:param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
:type is_hidden: bool or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Team`
:raises NotFoundError: when a user could not be found
:raises MultipleFoundError: when more than a single user can be found
"""
_teams = self.teams(name=name, id=id, **kwargs)
if len(_teams) == 0:
raise NotFoundError("No team criteria matches")
if len(_teams) != 1:
raise MultipleFoundError("Multiple teams fit criteria")
return _teams[0] |
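A hedged usage sketch, assuming `client` is an authenticated KE-chain Client object exposing the method above:
team = client.team(name='First Team')   # must match exactly one team
print(team)
# zero matches raise NotFoundError, several matches raise MultipleFoundError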
def _extend(self, newsub):
'''
Append a subclass (extension) after the base class. For parser internal use.
'''
current = self
while hasattr(current, '_sub'):
current = current._sub
_set(current, '_sub', newsub)
try:
object.__delattr__(self, '_extra')
except:
pass | Append a subclass (extension) after the base class. For parser internal use. | Below is the the instruction that describes the task:
### Input:
Append a subclass (extension) after the base class. For parser internal use.
### Response:
def _extend(self, newsub):
'''
Append a subclass (extension) after the base class. For parser internal use.
'''
current = self
while hasattr(current, '_sub'):
current = current._sub
_set(current, '_sub', newsub)
try:
object.__delattr__(self, '_extra')
except:
pass |
def subclass(self, klass):
"""True if the Class is a subclass of the given one."""
return bool(lib.EnvSubclassP(self._env, self._cls, klass._cls)) | True if the Class is a subclass of the given one. | Below is the the instruction that describes the task:
### Input:
True if the Class is a subclass of the given one.
### Response:
def subclass(self, klass):
"""True if the Class is a subclass of the given one."""
return bool(lib.EnvSubclassP(self._env, self._cls, klass._cls)) |
def put(self, item, *args, **kwargs):
"""Put an item into the cache, for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments. If ``timeout`` is specified as one
of the keyword arguments, the item will remain available
for retrieval for ``timeout`` seconds. If ``timeout`` is
`None` or not specified, the ``default_timeout`` for this
cache will be used. Specify a ``timeout`` of 0 (or ensure that
the ``default_timeout`` for this cache is 0) if this item is
not to be cached.
"""
if not self.enabled:
return
# Check for a timeout keyword, store and remove it.
timeout = kwargs.pop('timeout', None)
if timeout is None:
timeout = self.default_timeout
cache_key = self.make_key(args, kwargs)
# Store the item, along with the time at which it will expire
with self._cache_lock:
self._cache[cache_key] = (time() + timeout, item) | Put an item into the cache, for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments. If ``timeout`` is specified as one
of the keyword arguments, the item will remain available
for retrieval for ``timeout`` seconds. If ``timeout`` is
`None` or not specified, the ``default_timeout`` for this
cache will be used. Specify a ``timeout`` of 0 (or ensure that
the ``default_timeout`` for this cache is 0) if this item is
not to be cached. | Below is the the instruction that describes the task:
### Input:
Put an item into the cache, for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments. If ``timeout`` is specified as one
of the keyword arguments, the item will remain available
for retrieval for ``timeout`` seconds. If ``timeout`` is
`None` or not specified, the ``default_timeout`` for this
cache will be used. Specify a ``timeout`` of 0 (or ensure that
the ``default_timeout`` for this cache is 0) if this item is
not to be cached.
### Response:
def put(self, item, *args, **kwargs):
"""Put an item into the cache, for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments. If ``timeout`` is specified as one
of the keyword arguments, the item will remain available
for retrieval for ``timeout`` seconds. If ``timeout`` is
`None` or not specified, the ``default_timeout`` for this
cache will be used. Specify a ``timeout`` of 0 (or ensure that
the ``default_timeout`` for this cache is 0) if this item is
not to be cached.
"""
if not self.enabled:
return
# Check for a timeout keyword, store and remove it.
timeout = kwargs.pop('timeout', None)
if timeout is None:
timeout = self.default_timeout
cache_key = self.make_key(args, kwargs)
# Store the item, along with the time at which it will expire
with self._cache_lock:
self._cache[cache_key] = (time() + timeout, item) |
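A hedged usage sketch; `cache` stands for an instance of the (assumed) cache class that defines put(), make_key() and a matching get():
cache.put({'answer': 42}, 'expensive-call', user='alice', timeout=60)  # kept for 60 seconds
cache.put('discarded', 'other-call', timeout=0)                        # timeout 0: expires immediately, effectively not cached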
def add_arguments(self, parser):
"""
Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html.
"""
parser.add_argument(
'--dry-run',
action='store_true',
default=False,
help="Output what we're going to do, but don't actually do it."
)
parser.add_argument(
'--task-name', '-t',
default=None,
help=u"Restrict cleanup to tasks matching the named task.",
)
parser.add_argument(
'--age', '-a',
type=int,
default=30,
help=u"Only delete tasks that have been resolved for at least the specified number of days (default: 30)",
) | Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html. | Below is the the instruction that describes the task:
### Input:
Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html.
### Response:
def add_arguments(self, parser):
"""
Add arguments to the command parser.
Uses argparse syntax. See documentation at
https://docs.python.org/3/library/argparse.html.
"""
parser.add_argument(
'--dry-run',
action='store_true',
default=False,
help="Output what we're going to do, but don't actually do it."
)
parser.add_argument(
'--task-name', '-t',
default=None,
help=u"Restrict cleanup to tasks matching the named task.",
)
parser.add_argument(
'--age', '-a',
type=int,
default=30,
help=u"Only delete tasks that have been resolved for at least the specified number of days (default: 30)",
) |
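A standalone sketch of the resulting command-line surface using a bare argparse parser; `Command` is assumed to be the Django management command class that defines add_arguments() above (normally this is driven via manage.py):
import argparse

parser = argparse.ArgumentParser()
Command().add_arguments(parser)
opts = parser.parse_args(['--dry-run', '--task-name', 'cleanup', '--age', '7'])
print(opts.dry_run, opts.task_name, opts.age)   # True cleanup 7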
def render_mail_template(subject_template, body_template, context):
"""
Renders both the subject and body templates in the given context.
Returns a tuple (subject, body) of the result.
"""
try:
subject = strip_spaces(render_to_string(subject_template, context))
body = render_to_string(body_template, context)
finally:
pass
return subject, body | Renders both the subject and body templates in the given context.
Returns a tuple (subject, body) of the result. | Below is the the instruction that describes the task:
### Input:
Renders both the subject and body templates in the given context.
Returns a tuple (subject, body) of the result.
### Response:
def render_mail_template(subject_template, body_template, context):
"""
Renders both the subject and body templates in the given context.
Returns a tuple (subject, body) of the result.
"""
try:
subject = strip_spaces(render_to_string(subject_template, context))
body = render_to_string(body_template, context)
finally:
pass
return subject, body |
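A hedged usage sketch inside a Django project; the template paths and addresses below are made up for illustration:
from django.core.mail import send_mail

subject, body = render_mail_template(
    'emails/welcome_subject.txt',      # hypothetical template path
    'emails/welcome_body.txt',         # hypothetical template path
    {'username': 'alice'})
send_mail(subject, body, 'noreply@example.com', ['alice@example.com'])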
def use_strategy(new_strategy):
"""Force the use of a different strategy.
This is an alternative to setting default_strategy in the class definition.
"""
def wrapped_class(klass):
klass._meta.strategy = new_strategy
return klass
return wrapped_class | Force the use of a different strategy.
This is an alternative to setting default_strategy in the class definition. | Below is the the instruction that describes the task:
### Input:
Force the use of a different strategy.
This is an alternative to setting default_strategy in the class definition.
### Response:
def use_strategy(new_strategy):
"""Force the use of a different strategy.
This is an alternative to setting default_strategy in the class definition.
"""
def wrapped_class(klass):
klass._meta.strategy = new_strategy
return klass
return wrapped_class |
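A hedged usage sketch with factory_boy; the factory and stand-in model below are hypothetical:
import factory

@use_strategy(factory.BUILD_STRATEGY)
class UserFactory(factory.Factory):
    class Meta:
        model = dict        # stand-in model for the sketch
    name = 'alice'

obj = UserFactory()         # uses BUILD_STRATEGY instead of the default CREATE_STRATEGY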
def cart_create(self, items, **kwargs):
"""CartCreate.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
It is not possible to create an empty cart!
example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:return:
An :class:`~.AmazonCart`.
"""
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
offer_id_key_template = 'Item.{0}.OfferListingId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[offer_id_key_template.format(i)] = item['offer_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartCreate(**kwargs)
root = objectify.fromstring(response)
return AmazonCart(root) | CartCreate.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
It is not possible to create an empty cart!
example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:return:
An :class:`~.AmazonCart`. | Below is the the instruction that describes the task:
### Input:
CartCreate.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
It is not possible to create an empty cart!
example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:return:
An :class:`~.AmazonCart`.
### Response:
def cart_create(self, items, **kwargs):
"""CartCreate.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
It is not possible to create an empty cart!
example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:return:
An :class:`~.AmazonCart`.
"""
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
offer_id_key_template = 'Item.{0}.OfferListingId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[offer_id_key_template.format(i)] = item['offer_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartCreate(**kwargs)
root = objectify.fromstring(response)
return AmazonCart(root) |
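A hedged usage sketch, assuming `amazon` is an authenticated wrapper (e.g. an AmazonAPI instance) exposing cart_create(); the offer listing id is a placeholder:
items = [{'offer_id': 'EXAMPLEOFFERLISTINGID', 'quantity': 1}]
cart = amazon.cart_create(items)       # returns an AmazonCart
# passing more than 10 item dicts raises CartException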
def mel_to_hz(mels, htk=False):
"""Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
"""
mels = np.asanyarray(mels)
if htk:
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if mels.ndim:
# If we have vector data, vectorize
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
elif mels >= min_log_mel:
# If we have scalar data, check directly
freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))
return freqs | Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel | Below is the the instruction that describes the task:
### Input:
Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
### Response:
def mel_to_hz(mels, htk=False):
"""Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
"""
mels = np.asanyarray(mels)
if htk:
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if mels.ndim:
# If we have vector data, vectorize
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
elif mels >= min_log_mel:
# If we have scalar data, check directly
freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))
return freqs |
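A small round-trip check using librosa's public API (mel_to_hz and its inverse hz_to_mel):
import numpy as np
import librosa

mels = np.array([1.0, 2.0, 3.0])
hz = librosa.mel_to_hz(mels)                       # Slaney-style scale by default (htk=False)
print(np.allclose(librosa.hz_to_mel(hz), mels))    # True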
def prepare_data(self):
"""Prepare widget data for template."""
result = {}
for field in self.fields:
data = self.data.get(field.name)
result[field.name] = field.prepare_data(data)
return result | Prepare widget data for template. | Below is the the instruction that describes the task:
### Input:
Prepare widget data for template.
### Response:
def prepare_data(self):
"""Prepare widget data for template."""
result = {}
for field in self.fields:
data = self.data.get(field.name)
result[field.name] = field.prepare_data(data)
return result |
def compute_ssm(X, metric="seuclidean"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
D /= D.max()
return 1 - D | Computes the self-similarity matrix of X. | Below is the the instruction that describes the task:
### Input:
Computes the self-similarity matrix of X.
### Response:
def compute_ssm(X, metric="seuclidean"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
D /= D.max()
return 1 - D |
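A minimal sketch: the self-similarity of a small random feature matrix (rows are frames); assumes compute_ssm and its scipy dependency are importable:
import numpy as np

X = np.random.rand(20, 4)       # 20 frames, 4 features
S = compute_ssm(X)              # (20, 20) matrix, values in [0, 1], 1.0 on the diagonal
print(S.shape, S.diagonal().min())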
def id_request(self, device_id):
"""Get the device for the ID. ID request can return device type (cat/subcat),
firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']"""
self.logger.info("\nid_request for device %s", device_id)
device_id = device_id.upper()
self.direct_command(device_id, '10', '00')
sleep(2)
status = self.get_buffer_status(device_id)
if not status:
sleep(1)
status = self.get_buffer_status(device_id)
return status | Get the device for the ID. ID request can return device type (cat/subcat),
firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid'] | Below is the the instruction that describes the task:
### Input:
Get the device for the ID. ID request can return device type (cat/subcat),
firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']
### Response:
def id_request(self, device_id):
"""Get the device for the ID. ID request can return device type (cat/subcat),
firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']"""
self.logger.info("\nid_request for device %s", device_id)
device_id = device_id.upper()
self.direct_command(device_id, '10', '00')
sleep(2)
status = self.get_buffer_status(device_id)
if not status:
sleep(1)
status = self.get_buffer_status(device_id)
return status |
def on_send(self, frame):
"""
Add the heartbeat header to the frame when connecting, and bump
next outbound heartbeat timestamp.
:param Frame frame: the Frame object
"""
if frame.cmd == CMD_CONNECT or frame.cmd == CMD_STOMP:
if self.heartbeats != (0, 0):
frame.headers[HDR_HEARTBEAT] = '%s,%s' % self.heartbeats
if self.next_outbound_heartbeat is not None:
self.next_outbound_heartbeat = monotonic() + self.send_sleep | Add the heartbeat header to the frame when connecting, and bump
next outbound heartbeat timestamp.
:param Frame frame: the Frame object | Below is the the instruction that describes the task:
### Input:
Add the heartbeat header to the frame when connecting, and bump
next outbound heartbeat timestamp.
:param Frame frame: the Frame object
### Response:
def on_send(self, frame):
"""
Add the heartbeat header to the frame when connecting, and bump
next outbound heartbeat timestamp.
:param Frame frame: the Frame object
"""
if frame.cmd == CMD_CONNECT or frame.cmd == CMD_STOMP:
if self.heartbeats != (0, 0):
frame.headers[HDR_HEARTBEAT] = '%s,%s' % self.heartbeats
if self.next_outbound_heartbeat is not None:
self.next_outbound_heartbeat = monotonic() + self.send_sleep |
def create_udf_node(name, fields):
"""Create a new UDF node type.
Parameters
----------
name : str
Then name of the UDF node
fields : OrderedDict
Mapping of class member name to definition
Returns
-------
result : type
A new BigQueryUDFNode subclass
"""
definition = next(_udf_name_cache[name])
external_name = '{}_{:d}'.format(name, definition)
return type(external_name, (BigQueryUDFNode,), fields) | Create a new UDF node type.
Parameters
----------
name : str
Then name of the UDF node
fields : OrderedDict
Mapping of class member name to definition
Returns
-------
result : type
A new BigQueryUDFNode subclass | Below is the the instruction that describes the task:
### Input:
Create a new UDF node type.
Parameters
----------
name : str
Then name of the UDF node
fields : OrderedDict
Mapping of class member name to definition
Returns
-------
result : type
A new BigQueryUDFNode subclass
### Response:
def create_udf_node(name, fields):
"""Create a new UDF node type.
Parameters
----------
name : str
Then name of the UDF node
fields : OrderedDict
Mapping of class member name to definition
Returns
-------
result : type
A new BigQueryUDFNode subclass
"""
definition = next(_udf_name_cache[name])
external_name = '{}_{:d}'.format(name, definition)
return type(external_name, (BigQueryUDFNode,), fields) |
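An illustrative sketch of the factory; the empty fields mapping is purely for demonstration (real callers pass ibis argument rules), and BigQueryUDFNode is assumed importable from the same module:
from collections import OrderedDict

node_type = create_udf_node('my_udf', OrderedDict())
print(node_type.__name__)                        # e.g. 'my_udf_0'
print(issubclass(node_type, BigQueryUDFNode))    # True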
def visit_Module(self, node):
""" Build a compilation unit. """
# build all types
deps = sorted(self.dependencies)
headers = [Include(os.path.join("pythonic", "include", *t) + ".hpp")
for t in deps]
headers += [Include(os.path.join("pythonic", *t) + ".hpp")
for t in deps]
decls_n_defns = [self.visit(stmt) for stmt in node.body]
decls, defns = zip(*[s for s in decls_n_defns if s])
nsbody = [s for ls in decls + defns for s in ls]
ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody)
self.result = CompilationUnit(headers + [ns]) | Build a compilation unit. | Below is the the instruction that describes the task:
### Input:
Build a compilation unit.
### Response:
def visit_Module(self, node):
""" Build a compilation unit. """
# build all types
deps = sorted(self.dependencies)
headers = [Include(os.path.join("pythonic", "include", *t) + ".hpp")
for t in deps]
headers += [Include(os.path.join("pythonic", *t) + ".hpp")
for t in deps]
decls_n_defns = [self.visit(stmt) for stmt in node.body]
decls, defns = zip(*[s for s in decls_n_defns if s])
nsbody = [s for ls in decls + defns for s in ls]
ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody)
self.result = CompilationUnit(headers + [ns]) |
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added | Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none) | Below is the the instruction that describes the task:
### Input:
Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
### Response:
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added |
def has_method(obj, name):
"""
Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Method name cannot be null")
name = name.lower()
for method_name in dir(obj):
if method_name.lower() != name:
continue
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
return True
return False | Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't. | Below is the the instruction that describes the task:
### Input:
Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't.
### Response:
def has_method(obj, name):
"""
Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Method name cannot be null")
name = name.lower()
for method_name in dir(obj):
if method_name.lower() != name:
continue
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
return True
return False |
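A minimal sketch of the case-insensitive lookup, assuming has_method is exposed on a MethodReflector helper as the body suggests:
class Greeter:
    def say_hello(self):
        return 'hi'

print(MethodReflector.has_method(Greeter(), 'Say_Hello'))   # True: the match ignores case
print(MethodReflector.has_method(Greeter(), 'wave'))        # False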
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self) | Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object | Below is the the instruction that describes the task:
### Input:
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
### Response:
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self) |
def Shell(device, *command):
"""Runs a command on the device and prints the stdout.
Args:
command: Command to run on the target.
"""
if command:
return device.StreamingShell(' '.join(command))
else:
# Retrieve the initial terminal prompt to use as a delimiter for future reads
terminal_prompt = device.InteractiveShell()
print(terminal_prompt.decode('utf-8'))
# Accept user input in a loop and write that into the interactive shells stdin, then print output
while True:
cmd = input('> ')
if not cmd:
continue
elif cmd == 'exit':
break
else:
stdout = device.InteractiveShell(cmd, strip_cmd=True, delim=terminal_prompt, strip_delim=True)
if stdout:
if isinstance(stdout, bytes):
stdout = stdout.decode('utf-8')
print(stdout)
device.Close() | Runs a command on the device and prints the stdout.
Args:
command: Command to run on the target. | Below is the the instruction that describes the task:
### Input:
Runs a command on the device and prints the stdout.
Args:
command: Command to run on the target.
### Response:
def Shell(device, *command):
"""Runs a command on the device and prints the stdout.
Args:
command: Command to run on the target.
"""
if command:
return device.StreamingShell(' '.join(command))
else:
# Retrieve the initial terminal prompt to use as a delimiter for future reads
terminal_prompt = device.InteractiveShell()
print(terminal_prompt.decode('utf-8'))
# Accept user input in a loop and write that into the interactive shells stdin, then print output
while True:
cmd = input('> ')
if not cmd:
continue
elif cmd == 'exit':
break
else:
stdout = device.InteractiveShell(cmd, strip_cmd=True, delim=terminal_prompt, strip_delim=True)
if stdout:
if isinstance(stdout, bytes):
stdout = stdout.decode('utf-8')
print(stdout)
device.Close() |
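A hedged usage sketch with python-adb, assuming `device` is an already-connected handle exposing StreamingShell()/InteractiveShell():
for chunk in Shell(device, 'ls', '/sdcard'):   # one-shot command, streamed output
    print(chunk)
Shell(device)                                   # no command: interactive loop until 'exit'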
def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
"""
if not length.shape[0].value:
raise ValueError('Batch size of length tensor must be set.')
num_sequences = length.shape[0].value
sequences = dict(sequence=sequences, length=length)
dataset = tf.data.Dataset.from_tensor_slices(sequences)
dataset = dataset.repeat(num_epochs)
if chunk_length:
dataset = dataset.map(remove_padding).flat_map(
# pylint: disable=g-long-lambda
lambda x: tf.data.Dataset.from_tensor_slices(
chunk_sequence(x, chunk_length, padding_value)))
num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
else:
num_chunks = num_sequences
if batch_size:
dataset = dataset.shuffle(num_sequences // 2)
dataset = dataset.batch(batch_size or num_sequences)
dataset = dataset.prefetch(num_epochs)
iterator = dataset.make_initializable_iterator()
with tf.control_dependencies([iterator.initializer]):
num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
return tf.scan(
# pylint: disable=g-long-lambda
lambda _1, index: consumer_fn(iterator.get_next()),
tf.range(num_batches), output_template, parallel_iterations=1) | Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer. | Below is the the instruction that describes the task:
### Input:
Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
### Response:
def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
"""
if not length.shape[0].value:
raise ValueError('Batch size of length tensor must be set.')
num_sequences = length.shape[0].value
sequences = dict(sequence=sequences, length=length)
dataset = tf.data.Dataset.from_tensor_slices(sequences)
dataset = dataset.repeat(num_epochs)
if chunk_length:
dataset = dataset.map(remove_padding).flat_map(
# pylint: disable=g-long-lambda
lambda x: tf.data.Dataset.from_tensor_slices(
chunk_sequence(x, chunk_length, padding_value)))
num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
else:
num_chunks = num_sequences
if batch_size:
dataset = dataset.shuffle(num_sequences // 2)
dataset = dataset.batch(batch_size or num_sequences)
dataset = dataset.prefetch(num_epochs)
iterator = dataset.make_initializable_iterator()
with tf.control_dependencies([iterator.initializer]):
num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
return tf.scan(
# pylint: disable=g-long-lambda
lambda _1, index: consumer_fn(iterator.get_next()),
tf.range(num_batches), output_template, parallel_iterations=1) |
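An illustrative graph-mode sketch (TF1 style); the shapes and the consumer are made up, and the helpers referenced above (remove_padding, chunk_sequence) are assumed to live in the same module:
import tensorflow as tf

def consumer(batch):
    return tf.reduce_sum(batch['sequence'])     # one scalar per processed batch

sequences = tf.zeros([4, 10, 3])                # 4 sequences, 10 steps, 3 features
length = tf.constant([10, 7, 10, 5])
out = iterate_sequences(consumer, tf.zeros([]), sequences, length,
                        batch_size=2, num_epochs=1)
# `out` stacks the consumer results; evaluate it inside a tf.Session to run the epochs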
def applyIndex(self, lst, right):
"""Apply a list to something else."""
if len(right) != 1:
raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right)) | Apply a list to something else. | Below is the the instruction that describes the task:
### Input:
Apply a list to something else.
### Response:
def applyIndex(self, lst, right):
"""Apply a list to something else."""
if len(right) != 1:
raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right)) |
def print_user(user):
"""
Prints information about the current user.
"""
email = user['email']
domain = user['account']['domain']
role = user['role']
print('You are logged-in to the "{0}" domain '
'as {1} with role {2}.'
.format(domain, email, role)) | Prints information about the current user. | Below is the the instruction that describes the task:
### Input:
Prints information about the current user.
### Response:
def print_user(user):
"""
Prints information about the current user.
"""
email = user['email']
domain = user['account']['domain']
role = user['role']
print('You are logged-in to the "{0}" domain '
'as {1} with role {2}.'
.format(domain, email, role)) |
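A minimal sketch with the nested dict shape the function expects:
user = {'email': 'jane@example.com',
        'role': 'admin',
        'account': {'domain': 'acme'}}
print_user(user)
# You are logged-in to the "acme" domain as jane@example.com with role admin.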
def list_vms(self, allow_clone=False):
"""
Gets VirtualBox VM list.
"""
vbox_vms = []
result = yield from self.execute("list", ["vms"])
for line in result:
if len(line) == 0 or line[0] != '"' or line[-1:] != "}":
continue # Broken output (perhaps a carriage return in VM name)
vmname, _ = line.rsplit(' ', 1)
vmname = vmname.strip('"')
if vmname == "<inaccessible>":
continue # ignore inaccessible VMs
extra_data = yield from self.execute("getextradata", [vmname, "GNS3/Clone"])
if allow_clone or len(extra_data) == 0 or not extra_data[0].strip() == "Value: yes":
# get the amount of RAM
info_results = yield from self.execute("showvminfo", [vmname, "--machinereadable"])
ram = 0
for info in info_results:
try:
name, value = info.split('=', 1)
if name.strip() == "memory":
ram = int(value.strip())
break
except ValueError:
continue
vbox_vms.append({"vmname": vmname, "ram": ram})
return vbox_vms | Gets VirtualBox VM list. | Below is the the instruction that describes the task:
### Input:
Gets VirtualBox VM list.
### Response:
def list_vms(self, allow_clone=False):
"""
Gets VirtualBox VM list.
"""
vbox_vms = []
result = yield from self.execute("list", ["vms"])
for line in result:
if len(line) == 0 or line[0] != '"' or line[-1:] != "}":
continue # Broken output (perhaps a carriage return in VM name)
vmname, _ = line.rsplit(' ', 1)
vmname = vmname.strip('"')
if vmname == "<inaccessible>":
continue # ignore inaccessible VMs
extra_data = yield from self.execute("getextradata", [vmname, "GNS3/Clone"])
if allow_clone or len(extra_data) == 0 or not extra_data[0].strip() == "Value: yes":
# get the amount of RAM
info_results = yield from self.execute("showvminfo", [vmname, "--machinereadable"])
ram = 0
for info in info_results:
try:
name, value = info.split('=', 1)
if name.strip() == "memory":
ram = int(value.strip())
break
except ValueError:
continue
vbox_vms.append({"vmname": vmname, "ram": ram})
return vbox_vms |
def getReflexRuleSetup(self):
"""
Return a json dict with all the setup data necessary to build the
relations:
- Relations between methods and analysis services options.
- The current saved data
the functions returns:
{'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [{
'ResultText': 'Failed',
'ResultValue': '1', 'value': ''},
...
]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'saved_actions': {'rules': [
{'actions': [{'act_row_idx': 0,
'action': 'repeat',
'an_result_id': '',
'analyst': '',
'otherWS': current,
'setresultdiscrete': '',
'setresulton': 'original',
'setresultvalue': '',
'worksheettemplate': '70d48adfb34c4231a145f76a858e94cf',}],
'conditions': [{'analysisservice': 'd802cdbf1f4742c094d45997b1038f9c',
'and_or': 'no',
'cond_row_idx': 0,
'discreteresult': '',
'range0': '12',
'range1': '12'}],
'rulenumber': '1',
'trigger': 'submit'},...],
'method_id': '<method_uid>',
'method_tile': '<method_tile>',
'method_uid': '<method_uid>'
}
}
"""
relations = {}
# Getting all the methods from the system
pc = getToolByName(self, 'portal_catalog')
methods = [obj.getObject() for obj in pc(
portal_type='Method',
is_active=True)]
bsc = getToolByName(self, 'bika_setup_catalog')
for method in methods:
# Get the analysis services related to each method
an_servs_brains = bsc(
portal_type='AnalysisService',
getMethodUIDs={
"query": method.UID(),
"operator": "or"
})
analysiservices = {}
for analysiservice in an_servs_brains:
analysiservice = analysiservice.getObject()
# Getting the worksheet templates that could be used with the
# analysis, those worksheet templates are the ones without
# method and the ones with a method shared with the
# analysis service.
service_methods_uid = analysiservice.getAvailableMethodUIDs()
query_dict = {
'portal_type': 'WorksheetTemplate',
'is_active': True,
'sort_on': 'sortable_title',
'getMethodUID': {
"query": service_methods_uid + [''],
"operator": "or"
}
}
wst_brains = bsc(query_dict)
analysiservices[analysiservice.UID()] = {
'as_id': analysiservice.getId(),
'as_title': analysiservice.Title(),
'resultoptions':
analysiservice.getResultOptions()
if analysiservice.getResultOptions()
else [],
'wstoptions': [
(brain.UID, brain.Title) for brain in wst_brains]
}
# Make the json dict
relations[method.UID()] = {
'method_id': method.getId(),
'method_tile': method.Title(),
'analysisservices': analysiservices,
'as_keys': analysiservices.keys(),
}
# Get the data saved in the object
reflex_rule = self.aq_parent.aq_inner
saved_method = reflex_rule.getMethod()
relations['saved_actions'] = {
'method_uid': saved_method.UID() if
saved_method else '',
'method_id': saved_method.getId() if
saved_method else '',
'method_tile': saved_method.Title() if
saved_method else '',
'rules': reflex_rule.getReflexRules(),
}
return json.dumps(relations) | Return a json dict with all the setup data necessary to build the
relations:
- Relations between methods and analysis services options.
- The current saved data
the functions returns:
{'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [{
'ResultText': 'Failed',
'ResultValue': '1', 'value': ''},
...
]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'saved_actions': {'rules': [
{'actions': [{'act_row_idx': 0,
'action': 'repeat',
'an_result_id': '',
'analyst': '',
'otherWS': current,
'setresultdiscrete': '',
'setresulton': 'original',
'setresultvalue': '',
'worksheettemplate': '70d48adfb34c4231a145f76a858e94cf',}],
'conditions': [{'analysisservice': 'd802cdbf1f4742c094d45997b1038f9c',
'and_or': 'no',
'cond_row_idx': 0,
'discreteresult': '',
'range0': '12',
'range1': '12'}],
'rulenumber': '1',
'trigger': 'submit'},...],
'method_id': '<method_uid>',
'method_tile': '<method_tile>',
'method_uid': '<method_uid>'
}
} | Below is the the instruction that describes the task:
### Input:
Return a json dict with all the setup data necessary to build the
relations:
- Relations between methods and analysis services options.
- The current saved data
the functions returns:
{'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [{
'ResultText': 'Failed',
'ResultValue': '1', 'value': ''},
...
]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'saved_actions': {'rules': [
{'actions': [{'act_row_idx': 0,
'action': 'repeat',
'an_result_id': '',
'analyst': '',
'otherWS': current,
'setresultdiscrete': '',
'setresulton': 'original',
'setresultvalue': '',
'worksheettemplate': '70d48adfb34c4231a145f76a858e94cf',}],
'conditions': [{'analysisservice': 'd802cdbf1f4742c094d45997b1038f9c',
'and_or': 'no',
'cond_row_idx': 0,
'discreteresult': '',
'range0': '12',
'range1': '12'}],
'rulenumber': '1',
'trigger': 'submit'},...],
'method_id': '<method_uid>',
'method_tile': '<method_tile>',
'method_uid': '<method_uid>'
}
}
### Response:
def getReflexRuleSetup(self):
"""
Return a json dict with all the setup data necessary to build the
relations:
- Relations between methods and analysis services options.
- The current saved data
the functions returns:
{'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [{
'ResultText': 'Failed',
'ResultValue': '1', 'value': ''},
...
]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'<method_uid>': {
'analysisservices': {
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
'<as_uid>': {'as_id': '<as_id>',
'as_title':'<as_title>',
'resultoptions': [,,]}
},
'as_keys': ['<as_uid>', '<as_uid>'],
'method_id': '<method_id>',
'method_tile': '<method_tile>'
},
'saved_actions': {'rules': [
{'actions': [{'act_row_idx': 0,
'action': 'repeat',
'an_result_id': '',
'analyst': '',
'otherWS': current,
'setresultdiscrete': '',
'setresulton': 'original',
'setresultvalue': '',
'worksheettemplate': '70d48adfb34c4231a145f76a858e94cf',}],
'conditions': [{'analysisservice': 'd802cdbf1f4742c094d45997b1038f9c',
'and_or': 'no',
'cond_row_idx': 0,
'discreteresult': '',
'range0': '12',
'range1': '12'}],
'rulenumber': '1',
'trigger': 'submit'},...],
'method_id': '<method_uid>',
'method_tile': '<method_tile>',
'method_uid': '<method_uid>'
}
}
"""
relations = {}
# Getting all the methods from the system
pc = getToolByName(self, 'portal_catalog')
methods = [obj.getObject() for obj in pc(
portal_type='Method',
is_active=True)]
bsc = getToolByName(self, 'bika_setup_catalog')
for method in methods:
# Get the analysis services related to each method
an_servs_brains = bsc(
portal_type='AnalysisService',
getMethodUIDs={
"query": method.UID(),
"operator": "or"
})
analysiservices = {}
for analysiservice in an_servs_brains:
analysiservice = analysiservice.getObject()
# Getting the worksheet templates that could be used with the
# analysis, those worksheet templates are the ones without
# method and the ones with a method shared with the
# analysis service.
service_methods_uid = analysiservice.getAvailableMethodUIDs()
query_dict = {
'portal_type': 'WorksheetTemplate',
'is_active': True,
'sort_on': 'sortable_title',
'getMethodUID': {
"query": service_methods_uid + [''],
"operator": "or"
}
}
wst_brains = bsc(query_dict)
analysiservices[analysiservice.UID()] = {
'as_id': analysiservice.getId(),
'as_title': analysiservice.Title(),
'resultoptions':
analysiservice.getResultOptions()
if analysiservice.getResultOptions()
else [],
'wstoptions': [
(brain.UID, brain.Title) for brain in wst_brains]
}
# Make the json dict
relations[method.UID()] = {
'method_id': method.getId(),
'method_tile': method.Title(),
'analysisservices': analysiservices,
'as_keys': analysiservices.keys(),
}
# Get the data saved in the object
reflex_rule = self.aq_parent.aq_inner
saved_method = reflex_rule.getMethod()
relations['saved_actions'] = {
'method_uid': saved_method.UID() if
saved_method else '',
'method_id': saved_method.getId() if
saved_method else '',
'method_tile': saved_method.Title() if
saved_method else '',
'rules': reflex_rule.getReflexRules(),
}
return json.dumps(relations) |
def affine_respective_zoom_matrix(w_range=0.8, h_range=1.1):
"""Get affine transform matrix for zooming/scaling that height and width are changed independently.
OpenCV format, x is width.
Parameters
-----------
w_range : float or tuple of 2 floats
The zooming/scaling ratio of width, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
h_range : float or tuple of 2 floats
The zooming/scaling ratio of height, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
Returns
-------
numpy.array
An affine transform matrix.
"""
if isinstance(h_range, (float, int)):
zy = h_range
elif isinstance(h_range, tuple):
zy = np.random.uniform(h_range[0], h_range[1])
else:
raise Exception("h_range: float or tuple of 2 floats")
if isinstance(w_range, (float, int)):
zx = w_range
elif isinstance(w_range, tuple):
zx = np.random.uniform(w_range[0], w_range[1])
else:
raise Exception("w_range: float or tuple of 2 floats")
zoom_matrix = np.array([[zx, 0, 0], \
[0, zy, 0], \
[0, 0, 1]])
return zoom_matrix | Get affine transform matrix for zooming/scaling that height and width are changed independently.
OpenCV format, x is width.
Parameters
-----------
w_range : float or tuple of 2 floats
The zooming/scaling ratio of width, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
h_range : float or tuple of 2 floats
The zooming/scaling ratio of height, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
Returns
-------
numpy.array
        An affine transform matrix. | Below is the instruction that describes the task:
### Input:
Get affine transform matrix for zooming/scaling so that height and width are changed independently.
OpenCV format, x is width.
Parameters
-----------
w_range : float or tuple of 2 floats
The zooming/scaling ratio of width, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
h_range : float or tuple of 2 floats
The zooming/scaling ratio of height, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
Returns
-------
numpy.array
An affine transform matrix.
### Response:
def affine_respective_zoom_matrix(w_range=0.8, h_range=1.1):
"""Get affine transform matrix for zooming/scaling that height and width are changed independently.
OpenCV format, x is width.
Parameters
-----------
w_range : float or tuple of 2 floats
The zooming/scaling ratio of width, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
h_range : float or tuple of 2 floats
The zooming/scaling ratio of height, greater than 1 means larger.
- float, a fixed ratio.
- tuple of 2 floats, randomly sample a value as the ratio between 2 values.
Returns
-------
numpy.array
An affine transform matrix.
"""
if isinstance(h_range, (float, int)):
zy = h_range
elif isinstance(h_range, tuple):
zy = np.random.uniform(h_range[0], h_range[1])
else:
raise Exception("h_range: float or tuple of 2 floats")
if isinstance(w_range, (float, int)):
zx = w_range
elif isinstance(w_range, tuple):
zx = np.random.uniform(w_range[0], w_range[1])
else:
raise Exception("w_range: float or tuple of 2 floats")
zoom_matrix = np.array([[zx, 0, 0], \
[0, zy, 0], \
[0, 0, 1]])
return zoom_matrix |
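
A quick usage sketch for affine_respective_zoom_matrix. It only relies on numpy (already required by the function body); the warp step assumes OpenCV is installed and uses a placeholder image, so treat it as illustrative rather than part of the original record.

import numpy as np
import cv2  # assumption: OpenCV available, only needed for the warp step

# Fixed ratios: shrink width to 0.8x, stretch height to 1.1x
M = affine_respective_zoom_matrix(w_range=0.8, h_range=1.1)    # 3x3 matrix

# Random ratios, sampled independently for each axis
M_rand = affine_respective_zoom_matrix(w_range=(0.8, 1.2), h_range=(0.9, 1.1))

# cv2.warpAffine expects a 2x3 matrix, so drop the homogeneous row
img = np.zeros((100, 200, 3), dtype=np.uint8)                  # placeholder image
warped = cv2.warpAffine(img, M[:2, :], (img.shape[1], img.shape[0]))
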
def parent_suite(self):
"""Get the current parent suite.
A parent suite exists when a context within a suite is active. That is,
during execution of a tool within a suite, or after a user has entered
an interactive shell in a suite context, for example via the command-
line syntax 'tool +i', where 'tool' is an alias in a suite.
Returns:
`Suite` object, or None if there is no current parent suite.
"""
if self.context and self.context.parent_suite_path:
return Suite.load(self.context.parent_suite_path)
return None | Get the current parent suite.
A parent suite exists when a context within a suite is active. That is,
during execution of a tool within a suite, or after a user has entered
an interactive shell in a suite context, for example via the command-
line syntax 'tool +i', where 'tool' is an alias in a suite.
Returns:
        `Suite` object, or None if there is no current parent suite. | Below is the instruction that describes the task:
### Input:
Get the current parent suite.
A parent suite exists when a context within a suite is active. That is,
during execution of a tool within a suite, or after a user has entered
an interactive shell in a suite context, for example via the command-
line syntax 'tool +i', where 'tool' is an alias in a suite.
Returns:
`Suite` object, or None if there is no current parent suite.
### Response:
def parent_suite(self):
"""Get the current parent suite.
A parent suite exists when a context within a suite is active. That is,
during execution of a tool within a suite, or after a user has entered
an interactive shell in a suite context, for example via the command-
line syntax 'tool +i', where 'tool' is an alias in a suite.
Returns:
`Suite` object, or None if there is no current parent suite.
"""
if self.context and self.context.parent_suite_path:
return Suite.load(self.context.parent_suite_path)
return None |
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]:
"""
Sort equations by dependence
"""
J = ca.jacobian(f, x)
nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf()
return {
'J': J,
'nblock': nblock,
'rowperm': rowperm,
'colperm': colperm,
'rowblock': rowblock,
'colblock': colblock,
'coarserow': coarserow,
'coarsecol': coarsecol
    } | Sort equations by dependence | Below is the instruction that describes the task:
### Input:
Sort equations by dependence
### Response:
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]:
"""
Sort equations by dependence
"""
J = ca.jacobian(f, x)
nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf()
return {
'J': J,
'nblock': nblock,
'rowperm': rowperm,
'colperm': colperm,
'rowblock': rowblock,
'colblock': colblock,
'coarserow': coarserow,
'coarsecol': coarsecol
} |
def get(self, instance, **kw):
"""Get the value of the field
"""
# Gracefully avoid programming errors in Computed fields
try:
return self._get(instance, **kw)
except AttributeError:
logger.error("Could not get the value of the computed field '{}'"
.format(self.get_field_name()))
        return None | Get the value of the field | Below is the instruction that describes the task:
### Input:
Get the value of the field
### Response:
def get(self, instance, **kw):
"""Get the value of the field
"""
# Gracefully avoid programming errors in Computed fields
try:
return self._get(instance, **kw)
except AttributeError:
logger.error("Could not get the value of the computed field '{}'"
.format(self.get_field_name()))
return None |
def validate_request(self, path, action, body=None, query=None):
"""Check if the given request is valid.
Validates the body and the query
# Rules to validate the BODY:
# Let's limit this to mime types that either contain 'text' or 'json'
# 1. if body is None, there must not be any required parameters in
# the given schema
# 2. if the mime type contains 'json', body must not be '', but can
# be {}
# 3. if the mime type contains 'text', body can be any string
# 4. if no mime type ('consumes') is given.. DISALLOW
# 5. if the body is empty ('' or {}), there must not be any required parameters
# 6. if there is something in the body, it must adhere to the given schema
# -> will call the validate body function
Args:
path: path of the request.
action: action of the request(get, post, delete...).
body: body of the request.
query: dict with the query parameters.
Returns:
True if the request is valid, False otherwise.
TODO:
- For every http method, we might want to have some general checks
before we go deeper into the parameters
- Check form data parameters
"""
path_name, path_spec = self.get_path_spec(path)
if path_spec is None: # reject unknown path
logging.warn("there is no path")
return False
if action not in path_spec.keys(): # reject unknown http method
logging.warn("this http method is unknown '{0}'".format(action))
return False
action_spec = path_spec[action]
# check general post body guidelines (body + mime type)
if action == 'post':
is_ok, msg = _validate_post_body(body, action_spec)
if not is_ok:
logging.warn("the general post body did not validate due to '{0}'".format(msg))
return False
# If the body is empty and it validated so far, we can return here
# unless there is something in the query parameters we need to check
body_is_empty = body in [None, {}, '']
if body_is_empty and query is None:
return True
# Check body parameters
is_ok, msg = self._validate_body_parameters(body, action_spec)
if not is_ok:
logging.warn("the parameters in the body did not validate due to '{0}'".format(msg))
return False
# Check query parameters
if query is not None and not self._validate_query_parameters(query, action_spec):
return False
return True | Check if the given request is valid.
Validates the body and the query
# Rules to validate the BODY:
# Let's limit this to mime types that either contain 'text' or 'json'
# 1. if body is None, there must not be any required parameters in
# the given schema
# 2. if the mime type contains 'json', body must not be '', but can
# be {}
# 3. if the mime type contains 'text', body can be any string
# 4. if no mime type ('consumes') is given.. DISALLOW
# 5. if the body is empty ('' or {}), there must not be any required parameters
# 6. if there is something in the body, it must adhere to the given schema
# -> will call the validate body function
Args:
path: path of the request.
action: action of the request(get, post, delete...).
body: body of the request.
query: dict with the query parameters.
Returns:
True if the request is valid, False otherwise.
TODO:
- For every http method, we might want to have some general checks
before we go deeper into the parameters
        - Check form data parameters | Below is the instruction that describes the task:
### Input:
Check if the given request is valid.
Validates the body and the query
# Rules to validate the BODY:
# Let's limit this to mime types that either contain 'text' or 'json'
# 1. if body is None, there must not be any required parameters in
# the given schema
# 2. if the mime type contains 'json', body must not be '', but can
# be {}
# 3. if the mime type contains 'text', body can be any string
# 4. if no mime type ('consumes') is given.. DISALLOW
# 5. if the body is empty ('' or {}), there must not be any required parameters
# 6. if there is something in the body, it must adhere to the given schema
# -> will call the validate body function
Args:
path: path of the request.
action: action of the request(get, post, delete...).
body: body of the request.
query: dict with the query parameters.
Returns:
True if the request is valid, False otherwise.
TODO:
- For every http method, we might want to have some general checks
before we go deeper into the parameters
- Check form data parameters
### Response:
def validate_request(self, path, action, body=None, query=None):
"""Check if the given request is valid.
Validates the body and the query
# Rules to validate the BODY:
# Let's limit this to mime types that either contain 'text' or 'json'
# 1. if body is None, there must not be any required parameters in
# the given schema
# 2. if the mime type contains 'json', body must not be '', but can
# be {}
# 3. if the mime type contains 'text', body can be any string
# 4. if no mime type ('consumes') is given.. DISALLOW
# 5. if the body is empty ('' or {}), there must not be any required parameters
# 6. if there is something in the body, it must adhere to the given schema
# -> will call the validate body function
Args:
path: path of the request.
action: action of the request(get, post, delete...).
body: body of the request.
query: dict with the query parameters.
Returns:
True if the request is valid, False otherwise.
TODO:
- For every http method, we might want to have some general checks
before we go deeper into the parameters
- Check form data parameters
"""
path_name, path_spec = self.get_path_spec(path)
if path_spec is None: # reject unknown path
logging.warn("there is no path")
return False
if action not in path_spec.keys(): # reject unknown http method
logging.warn("this http method is unknown '{0}'".format(action))
return False
action_spec = path_spec[action]
# check general post body guidelines (body + mime type)
if action == 'post':
is_ok, msg = _validate_post_body(body, action_spec)
if not is_ok:
logging.warn("the general post body did not validate due to '{0}'".format(msg))
return False
# If the body is empty and it validated so far, we can return here
# unless there is something in the query parameters we need to check
body_is_empty = body in [None, {}, '']
if body_is_empty and query is None:
return True
# Check body parameters
is_ok, msg = self._validate_body_parameters(body, action_spec)
if not is_ok:
logging.warn("the parameters in the body did not validate due to '{0}'".format(msg))
return False
# Check query parameters
if query is not None and not self._validate_query_parameters(query, action_spec):
return False
return True |
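
A hedged calling sketch for validate_request. The record only shows the method body, so the SwaggerValidator class name, its constructor, and the /pets path below are hypothetical placeholders; only the (path, action, body, query) signature comes from the code above.

validator = SwaggerValidator('swagger.yaml')   # hypothetical class and constructor

# POST with a JSON body, no query string
ok = validator.validate_request('/pets', 'post', body={'name': 'rex', 'tag': 'dog'})

# GET with query parameters only
ok = validator.validate_request('/pets', 'get', query={'limit': '10'})
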
def _find_rule_no(self, mac):
"""Find rule number associated with a given mac."""
ipt_cmd = ['iptables', '-L', '--line-numbers']
cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False)
for o in cmdo.split('\n'):
if mac in o.lower():
rule_no = o.split()[0]
LOG.info('Found rule %(rule)s for %(mac)s.',
{'rule': rule_no, 'mac': mac})
                return rule_no | Find rule number associated with a given mac. | Below is the instruction that describes the task:
### Input:
Find rule number associated with a given mac.
### Response:
def _find_rule_no(self, mac):
"""Find rule number associated with a given mac."""
ipt_cmd = ['iptables', '-L', '--line-numbers']
cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False)
for o in cmdo.split('\n'):
if mac in o.lower():
rule_no = o.split()[0]
LOG.info('Found rule %(rule)s for %(mac)s.',
{'rule': rule_no, 'mac': mac})
return rule_no |
def modelsClearAll(self):
""" Delete all models from the models table
Parameters:
----------------------------------------------------------------
"""
self._logger.info('Deleting all rows from models table %r',
self.modelsTableName)
with ConnectionFactory.get() as conn:
query = 'DELETE FROM %s' % (self.modelsTableName)
conn.cursor.execute(query) | Delete all models from the models table
Parameters:
    ---------------------------------------------------------------- | Below is the instruction that describes the task:
### Input:
Delete all models from the models table
Parameters:
----------------------------------------------------------------
### Response:
def modelsClearAll(self):
""" Delete all models from the models table
Parameters:
----------------------------------------------------------------
"""
self._logger.info('Deleting all rows from models table %r',
self.modelsTableName)
with ConnectionFactory.get() as conn:
query = 'DELETE FROM %s' % (self.modelsTableName)
conn.cursor.execute(query) |
def source(self, format='xml', accessible=False):
"""
Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json'
"""
if accessible:
return self.http.get('/wda/accessibleSource').value
return self.http.get('source?format='+format).value | Args:
format (str): only 'xml' and 'json' source types are supported
      accessible (bool): when set to true, format is always 'json' | Below is the instruction that describes the task:
### Input:
Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json'
### Response:
def source(self, format='xml', accessible=False):
"""
Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json'
"""
if accessible:
return self.http.get('/wda/accessibleSource').value
return self.http.get('source?format='+format).value |
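
A short usage sketch for source(). The method clearly lives on an HTTP client wrapper (it calls self.http), so the session object below is an assumed instance of that surrounding class; only the format and accessible arguments come from the code above.

xml_source = session.source()                  # default: XML page source
json_source = session.source(format='json')    # JSON page source
a11y_source = session.source(accessible=True)  # accessibility tree, always JSON
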
def _init_metadata(self):
"""stub"""
super(LabelOrthoFacesAnswerFormRecord, self)._init_metadata()
self._face_values_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'face_values'),
'element_label': 'Orthographic Face Values',
'instructions': '',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
        } | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def _init_metadata(self):
"""stub"""
super(LabelOrthoFacesAnswerFormRecord, self)._init_metadata()
self._face_values_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'face_values'),
'element_label': 'Orthographic Face Values',
'instructions': '',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
} |
def enable_apt_repositories(prefix, url, version, repositories):
""" adds an apt repository """
with settings(hide('warnings', 'running', 'stdout'),
warn_only=False, capture=True):
sudo('apt-add-repository "%s %s %s %s"' % (prefix,
url,
version,
repositories))
with hide('running', 'stdout'):
output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update")
if 'Some index files failed to download' in output:
raise SystemExit(1)
else:
# if we didn't abort above, we should return True
            return True | adds an apt repository | Below is the instruction that describes the task:
### Input:
adds an apt repository
### Response:
def enable_apt_repositories(prefix, url, version, repositories):
""" adds an apt repository """
with settings(hide('warnings', 'running', 'stdout'),
warn_only=False, capture=True):
sudo('apt-add-repository "%s %s %s %s"' % (prefix,
url,
version,
repositories))
with hide('running', 'stdout'):
output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update")
if 'Some index files failed to download' in output:
raise SystemExit(1)
else:
# if we didn't abort above, we should return True
return True |
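
A minimal call sketch for enable_apt_repositories. It assumes a Fabric environment already pointed at a reachable host with sudo rights; the Ubuntu mirror and suite names are placeholders. The four arguments are pasted verbatim into apt-add-repository "<prefix> <url> <version> <repositories>".

enable_apt_repositories(
    'deb',
    'http://archive.ubuntu.com/ubuntu',
    'xenial',
    'main universe restricted multiverse',
)
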
def plotres(psr,deleted=False,group=None,**kwargs):
"""Plot residuals, compute unweighted rms residual."""
res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
if (not deleted) and N.any(psr.deleted != 0):
res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))
meanres = math.sqrt(N.mean(res**2)) / 1e-6
if group is None:
i = N.argsort(t)
P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
else:
if (not deleted) and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = (flagmask == flagval)
flagres, flagt, flagerrs = res[f], t[f], errs[f]
i = N.argsort(flagt)
P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))
P.xlabel('MJD'); P.ylabel('res [us]')
    P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres)) | Plot residuals, compute unweighted rms residual. | Below is the instruction that describes the task:
### Input:
Plot residuals, compute unweighted rms residual.
### Response:
def plotres(psr,deleted=False,group=None,**kwargs):
"""Plot residuals, compute unweighted rms residual."""
res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
if (not deleted) and N.any(psr.deleted != 0):
res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))
meanres = math.sqrt(N.mean(res**2)) / 1e-6
if group is None:
i = N.argsort(t)
P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
else:
if (not deleted) and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = (flagmask == flagval)
flagres, flagt, flagerrs = res[f], t[f], errs[f]
i = N.argsort(flagt)
P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))
P.xlabel('MJD'); P.ylabel('res [us]')
P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres)) |
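
A usage sketch for plotres. It assumes libstempo is installed and that the .par/.tim file names exist; the -sys flag used for grouping is likewise only an example. Extra keyword arguments are forwarded to the underlying errorbar call.

import libstempo as T   # assumption: libstempo installed; file names are placeholders

psr = T.tempopulsar(parfile='B1855+09.par', timfile='B1855+09.tim')

plotres(psr)                          # all non-deleted TOAs in a single series
plotres(psr, group='sys', alpha=0.5)  # one series per value of the -sys flag
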
def has_activity(graph: BELGraph, node: BaseEntity) -> bool:
"""Return true if over any of the node's edges, it has a molecular activity."""
    return _node_has_modifier(graph, node, ACTIVITY) | Return true if over any of the node's edges, it has a molecular activity. | Below is the instruction that describes the task:
### Input:
Return true if over any of the node's edges, it has a molecular activity.
### Response:
def has_activity(graph: BELGraph, node: BaseEntity) -> bool:
"""Return true if over any of the node's edges, it has a molecular activity."""
return _node_has_modifier(graph, node, ACTIVITY) |
def thread( mafs, species ):
"""
    Restrict a list of alignments to a given list of species by:
1) Removing components for any other species
2) Remove any columns containing all gaps
Example:
>>> import bx.align.maf
>>> block1 = bx.align.maf.from_string( '''
... a score=4964.0
... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA
... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT
... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc
... ''' )
>>> block2 = bx.align.maf.from_string( '''
... a score=9151.0
... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC
... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG
... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT
... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa
... ''' )
>>> mafs = [ block1, block2 ]
>>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ]
>>> len( threaded )
2
>>> print(threaded[0])
a score=0.0
s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
<BLANKLINE>
>>> print(threaded[1])
a score=0.0
s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
<BLANKLINE>
"""
for m in mafs:
new_maf = deepcopy( m )
new_components = get_components_for_species( new_maf, species )
if new_components:
remove_all_gap_columns( new_components )
new_maf.components = new_components
new_maf.score = 0.0
new_maf.text_size = len(new_components[0].text)
            yield new_maf | Restrict a list of alignments to a given list of species by:
1) Removing components for any other species
2) Remove any columns containing all gaps
Example:
>>> import bx.align.maf
>>> block1 = bx.align.maf.from_string( '''
... a score=4964.0
... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA
... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT
... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc
... ''' )
>>> block2 = bx.align.maf.from_string( '''
... a score=9151.0
... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC
... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG
... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT
... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa
... ''' )
>>> mafs = [ block1, block2 ]
>>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ]
>>> len( threaded )
2
>>> print(threaded[0])
a score=0.0
s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
<BLANKLINE>
>>> print(threaded[1])
a score=0.0
s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
    <BLANKLINE> | Below is the instruction that describes the task:
### Input:
Restrict a list of alignments to a given list of species by:
1) Removing components for any other species
2) Remove any columns containing all gaps
Example:
>>> import bx.align.maf
>>> block1 = bx.align.maf.from_string( '''
... a score=4964.0
... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA
... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT
... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc
... ''' )
>>> block2 = bx.align.maf.from_string( '''
... a score=9151.0
... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC
... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG
... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT
... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa
... ''' )
>>> mafs = [ block1, block2 ]
>>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ]
>>> len( threaded )
2
>>> print(threaded[0])
a score=0.0
s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
<BLANKLINE>
>>> print(threaded[1])
a score=0.0
s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
<BLANKLINE>
### Response:
def thread( mafs, species ):
"""
    Restrict a list of alignments to a given list of species by:
1) Removing components for any other species
2) Remove any columns containing all gaps
Example:
>>> import bx.align.maf
>>> block1 = bx.align.maf.from_string( '''
... a score=4964.0
... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA
... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT
... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc
... ''' )
>>> block2 = bx.align.maf.from_string( '''
... a score=9151.0
... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC
... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG
... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT
... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa
... ''' )
>>> mafs = [ block1, block2 ]
>>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ]
>>> len( threaded )
2
>>> print(threaded[0])
a score=0.0
s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
<BLANKLINE>
>>> print(threaded[1])
a score=0.0
s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
<BLANKLINE>
"""
for m in mafs:
new_maf = deepcopy( m )
new_components = get_components_for_species( new_maf, species )
if new_components:
remove_all_gap_columns( new_components )
new_maf.components = new_components
new_maf.score = 0.0
new_maf.text_size = len(new_components[0].text)
yield new_maf |
def get_component_tasks(self, component_id):
"""Returns the task ids allocated for the given component id"""
ret = []
for task_id, comp_id in self.task_to_component_map.items():
if comp_id == component_id:
ret.append(task_id)
    return ret | Returns the task ids allocated for the given component id | Below is the instruction that describes the task:
### Input:
Returns the task ids allocated for the given component id
### Response:
def get_component_tasks(self, component_id):
"""Returns the task ids allocated for the given component id"""
ret = []
for task_id, comp_id in self.task_to_component_map.items():
if comp_id == component_id:
ret.append(task_id)
return ret |
def check_stop_times(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stop_times``.
"""
table = "stop_times"
problems = []
# Preliminary checks
if feed.stop_times is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stop_times.copy().sort_values(["trip_id", "stop_sequence"])
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check arrival_time and departure_time
v = lambda x: pd.isnull(x) or valid_time(x)
for col in ["arrival_time", "departure_time"]:
problems = check_column(problems, table, f, col, v)
# Check that arrival and departure times exist for the first and last
# stop of each trip and for each timepoint.
# For feeds with many trips, iterating through the stop time rows is
    # faster than using groupby.
if "timepoint" not in f.columns:
f["timepoint"] = np.nan # This will not mess up later timepoint check
indices = []
prev_tid = None
prev_atime = 1
prev_dtime = 1
for i, tid, atime, dtime, tp in f[
["trip_id", "arrival_time", "departure_time", "timepoint"]
].itertuples():
if tid != prev_tid:
# Check last stop of previous trip
if pd.isnull(prev_atime) or pd.isnull(prev_dtime):
indices.append(i - 1)
# Check first stop of current trip
if pd.isnull(atime) or pd.isnull(dtime):
indices.append(i)
elif tp == 1 and (pd.isnull(atime) or pd.isnull(dtime)):
# Failure at timepoint
indices.append(i)
prev_tid = tid
prev_atime = atime
prev_dtime = dtime
if indices:
problems.append(
[
"error",
"First/last/time point arrival/departure time missing",
table,
indices,
]
)
# Check stop_id
problems = check_column_linked_id(
problems, table, f, "stop_id", feed.stops
)
# Check for duplicated (trip_id, stop_sequence) pairs
cond = f[["trip_id", "stop_sequence"]].dropna().duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (trip_id, stop_sequence)"
)
# Check stop_headsign
problems = check_column(
problems, table, f, "stop_headsign", valid_str, column_required=False
)
# Check pickup_type and drop_off_type
for col in ["pickup_type", "drop_off_type"]:
v = lambda x: x in range(4)
problems = check_column(
problems, table, f, col, v, column_required=False
)
# Check if shape_dist_traveled decreases on a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_tid = None
prev_dist = -1
for i, tid, dist in g[["trip_id", "shape_dist_traveled"]].itertuples():
if tid == prev_tid and dist < prev_dist:
indices.append(i)
prev_tid = tid
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
# Check timepoint
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "timepoint", v, column_required=False
)
if include_warnings:
# Check for duplicated (trip_id, departure_time) pairs
cond = f[["trip_id", "departure_time"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (trip_id, departure_time)",
"warning",
)
    return format_problems(problems, as_df=as_df) | Analog of :func:`check_agency` for ``feed.stop_times``. | Below is the instruction that describes the task:
### Input:
Analog of :func:`check_agency` for ``feed.stop_times``.
### Response:
def check_stop_times(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stop_times``.
"""
table = "stop_times"
problems = []
# Preliminary checks
if feed.stop_times is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stop_times.copy().sort_values(["trip_id", "stop_sequence"])
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check arrival_time and departure_time
v = lambda x: pd.isnull(x) or valid_time(x)
for col in ["arrival_time", "departure_time"]:
problems = check_column(problems, table, f, col, v)
# Check that arrival and departure times exist for the first and last
# stop of each trip and for each timepoint.
# For feeds with many trips, iterating through the stop time rows is
    # faster than using groupby.
if "timepoint" not in f.columns:
f["timepoint"] = np.nan # This will not mess up later timepoint check
indices = []
prev_tid = None
prev_atime = 1
prev_dtime = 1
for i, tid, atime, dtime, tp in f[
["trip_id", "arrival_time", "departure_time", "timepoint"]
].itertuples():
if tid != prev_tid:
# Check last stop of previous trip
if pd.isnull(prev_atime) or pd.isnull(prev_dtime):
indices.append(i - 1)
# Check first stop of current trip
if pd.isnull(atime) or pd.isnull(dtime):
indices.append(i)
elif tp == 1 and (pd.isnull(atime) or pd.isnull(dtime)):
# Failure at timepoint
indices.append(i)
prev_tid = tid
prev_atime = atime
prev_dtime = dtime
if indices:
problems.append(
[
"error",
"First/last/time point arrival/departure time missing",
table,
indices,
]
)
# Check stop_id
problems = check_column_linked_id(
problems, table, f, "stop_id", feed.stops
)
# Check for duplicated (trip_id, stop_sequence) pairs
cond = f[["trip_id", "stop_sequence"]].dropna().duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (trip_id, stop_sequence)"
)
# Check stop_headsign
problems = check_column(
problems, table, f, "stop_headsign", valid_str, column_required=False
)
# Check pickup_type and drop_off_type
for col in ["pickup_type", "drop_off_type"]:
v = lambda x: x in range(4)
problems = check_column(
problems, table, f, col, v, column_required=False
)
# Check if shape_dist_traveled decreases on a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_tid = None
prev_dist = -1
for i, tid, dist in g[["trip_id", "shape_dist_traveled"]].itertuples():
if tid == prev_tid and dist < prev_dist:
indices.append(i)
prev_tid = tid
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
# Check timepoint
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "timepoint", v, column_required=False
)
if include_warnings:
# Check for duplicated (trip_id, departure_time) pairs
cond = f[["trip_id", "departure_time"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (trip_id, departure_time)",
"warning",
)
return format_problems(problems, as_df=as_df) |
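
A hedged example of running the validator above on a feed. It assumes a gtfs_kit-style Feed object; the read_feed call and the gtfs.zip path are illustrative placeholders, while the keyword-only as_df/include_warnings flags come from the signature above.

import gtfs_kit as gk   # assumption: the Feed object comes from gtfs_kit

feed = gk.read_feed('gtfs.zip', dist_units='km')
problems = check_stop_times(feed, as_df=True, include_warnings=True)
print(problems)   # one entry per (severity, message, table, row indices)
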
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
if 'decorate' in config:
depr("The 'decorate' parameter was renamed to 'apply'") # 0.9
plugins += makelist(config.pop('decorate'))
if config.pop('no_hooks', False):
depr("The no_hooks parameter is no longer used. Add 'hooks' to the"\
" list of skipped plugins instead.") # 0.9
skiplist.append('hooks')
static = config.get('static', False) # depr 0.9
def decorator(callback):
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
cfg = dict(rule=rule, method=verb, callback=callback,
name=name, app=self, config=config,
apply=plugins, skip=skiplist)
self.routes.append(cfg)
cfg['id'] = self.routes.index(cfg)
self.router.add(rule, verb, cfg['id'], name=name, static=static)
if DEBUG: self.ccache[cfg['id']] = self._build_callback(cfg)
return callback
return decorator(callback) if callback else decorator | A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
        configuration and passed to plugins (see :meth:`Plugin.apply`). | Below is the instruction that describes the task:
### Input:
A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
### Response:
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
if 'decorate' in config:
depr("The 'decorate' parameter was renamed to 'apply'") # 0.9
plugins += makelist(config.pop('decorate'))
if config.pop('no_hooks', False):
depr("The no_hooks parameter is no longer used. Add 'hooks' to the"\
" list of skipped plugins instead.") # 0.9
skiplist.append('hooks')
static = config.get('static', False) # depr 0.9
def decorator(callback):
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
cfg = dict(rule=rule, method=verb, callback=callback,
name=name, app=self, config=config,
apply=plugins, skip=skiplist)
self.routes.append(cfg)
cfg['id'] = self.routes.index(cfg)
self.router.add(rule, verb, cfg['id'], name=name, static=static)
if DEBUG: self.ccache[cfg['id']] = self._build_callback(cfg)
return callback
return decorator(callback) if callback else decorator |
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
        return image | Returns the QImage stored as the ImageResource with 'name'. | Below is the instruction that describes the task:
### Input:
Returns the QImage stored as the ImageResource with 'name'.
### Response:
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image |
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
"""
Retrieves a METAR string from an online database
Args:
station_icao: ICAO of the station
Returns:
tuple of error, metar_str
"""
url = _BASE_METAR_URL.format(station=station_icao)
with requests.get(url) as resp:
if not resp.ok:
return f'unable to obtain METAR for station {station_icao}\n' \
                   f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \
f'for a list of valid stations', None
return None, resp.content.decode().split('\n')[1] | Retrieves a METAR string from an online database
Args:
station_icao: ICAO of the station
Returns:
        tuple of error, metar_str | Below is the instruction that describes the task:
### Input:
Retrieves a METAR string from an online database
Args:
station_icao: ICAO of the station
Returns:
tuple of error, metar_str
### Response:
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
"""
Retrieves a METAR string from an online database
Args:
station_icao: ICAO of the station
Returns:
tuple of error, metar_str
"""
url = _BASE_METAR_URL.format(station=station_icao)
with requests.get(url) as resp:
if not resp.ok:
return f'unable to obtain METAR for station {station_icao}\n' \
                   f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \
f'for a list of valid stations', None
return None, resp.content.decode().split('\n')[1] |
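
A small usage sketch for retrieve_metar. The function returns an (error, metar) pair where exactly one element is None; the station code below is just an example.

error, metar = retrieve_metar('KJFK')
if error:
    print(error)
else:
    print(metar)   # raw METAR string for the requested station
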
def _add_command(self, name, **parameters):
"""
Add a new command to the blueprint.
:param name: The command name
:type name: str
:param parameters: The command parameters
:type parameters: dict
:rtype: Fluent
"""
command = self._create_command(name, **parameters)
self._commands.append(command)
return command | Add a new command to the blueprint.
:param name: The command name
:type name: str
:param parameters: The command parameters
:type parameters: dict
    :rtype: Fluent | Below is the instruction that describes the task:
### Input:
Add a new command to the blueprint.
:param name: The command name
:type name: str
:param parameters: The command parameters
:type parameters: dict
:rtype: Fluent
### Response:
def _add_command(self, name, **parameters):
"""
Add a new command to the blueprint.
:param name: The command name
:type name: str
:param parameters: The command parameters
:type parameters: dict
:rtype: Fluent
"""
command = self._create_command(name, **parameters)
self._commands.append(command)
return command |
def browser(i):
"""
Input: {
(template) - use this web template
(repo_uoa) -
(module_uoa) -
(data_uoa) - view a given entry
(extra_url) - extra URL
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Check if ck-web is installed
r=find({'module_uoa':'module',
'data_uoa':'wfe'})
if r['return']>0:
if r['return']!=16: return r
out('Seems like ck-web repository is not installed (can\'t find wfe module)!')
out('Please, install it via "ck pull repo:ck-web" and try again!')
return {'return':0}
t=i.get('template','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
cid=''
if duoa!='' or muoa!='' or ruoa!='':
if ruoa!='': cid=ruoa+':'
if muoa!='': cid+=muoa+':'
if duoa!='': cid+=duoa
# Starting web service and asking to open page
return access({'action':'start', 'module_uoa':'web', 'browser':'yes',
'template':t, 'cid':cid, 'extra_url':i.get('extra_url','')}) | Input: {
(template) - use this web template
(repo_uoa) -
(module_uoa) -
(data_uoa) - view a given entry
(extra_url) - extra URL
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
    } | Below is the instruction that describes the task:
### Input:
Input: {
(template) - use this web template
(repo_uoa) -
(module_uoa) -
(data_uoa) - view a given entry
(extra_url) - extra URL
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
### Response:
def browser(i):
"""
Input: {
(template) - use this web template
(repo_uoa) -
(module_uoa) -
(data_uoa) - view a given entry
(extra_url) - extra URL
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Check if ck-web is installed
r=find({'module_uoa':'module',
'data_uoa':'wfe'})
if r['return']>0:
if r['return']!=16: return r
out('Seems like ck-web repository is not installed (can\'t find wfe module)!')
out('Please, install it via "ck pull repo:ck-web" and try again!')
return {'return':0}
t=i.get('template','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
cid=''
if duoa!='' or muoa!='' or ruoa!='':
if ruoa!='': cid=ruoa+':'
if muoa!='': cid+=muoa+':'
if duoa!='': cid+=duoa
# Starting web service and asking to open page
return access({'action':'start', 'module_uoa':'web', 'browser':'yes',
'template':t, 'cid':cid, 'extra_url':i.get('extra_url','')}) |
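
A hedged sketch of invoking this action programmatically. It assumes the standard CK layout in which kernel actions are reachable through ck.kernel.access; the module/data names are placeholders for an entry you want to open in the web front-end.

import ck.kernel as ck   # assumption: the 'ck' package is installed

r = ck.access({'action': 'browser',
               'module_uoa': 'experiment',    # placeholder entry to open
               'data_uoa': 'my-experiment'})
if r['return'] > 0:
    print(r.get('error', ''))
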
def read_file(filename):
"""Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
The contents of the file, split by line
"""
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
return lines | Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
        The contents of the file, split by line | Below is the instruction that describes the task:
### Input:
Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
The contents of the file, split by line
### Response:
def read_file(filename):
"""Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
The contents of the file, split by line
"""
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
return lines |
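
A trivial usage sketch: read_file returns the raw lines including their trailing newlines, so strip them before printing. The file name is a placeholder.

lines = read_file('observations.txt')   # placeholder path
for line in lines:
    print(line.rstrip('\n'))
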