code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def attention_lm_moe_memory_efficient():
"""Memory-efficient version."""
hparams = attention_lm_moe_large()
hparams.diet_experts = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.0
hparams.memory_efficient_ffn = True
hparams.attention_type = AttentionType.MEMORY_EFFICIENT
hparams.num_heads = 8
hparams.factored_logits = True
return hparams | Memory-efficient version. | Below is the instruction that describes the task:
### Input:
Memory-efficient version.
### Response:
def attention_lm_moe_memory_efficient():
"""Memory-efficient version."""
hparams = attention_lm_moe_large()
hparams.diet_experts = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.0
hparams.memory_efficient_ffn = True
hparams.attention_type = AttentionType.MEMORY_EFFICIENT
hparams.num_heads = 8
hparams.factored_logits = True
return hparams |
def figure_rst(figure_list, sources_dir):
"""Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document
"""
figure_paths = [os.path.relpath(figure_path, sources_dir)
.replace(os.sep, '/').lstrip('/')
for figure_path in figure_list]
images_rst = ""
if len(figure_paths) == 1:
figure_name = figure_paths[0]
images_rst = SINGLE_IMAGE % figure_name
elif len(figure_paths) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_paths:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name
return images_rst | Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document | Below is the instruction that describes the task:
### Input:
Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document
### Response:
def figure_rst(figure_list, sources_dir):
"""Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document
"""
figure_paths = [os.path.relpath(figure_path, sources_dir)
.replace(os.sep, '/').lstrip('/')
for figure_path in figure_list]
images_rst = ""
if len(figure_paths) == 1:
figure_name = figure_paths[0]
images_rst = SINGLE_IMAGE % figure_name
elif len(figure_paths) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_paths:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name
return images_rst |
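A hedged usage sketch for `figure_rst` follows. The real `SINGLE_IMAGE`, `HLIST_HEADER` and `HLIST_IMAGE_TEMPLATE` templates are module-level constants that do not appear in this row, so simplified stand-ins are defined here, and the paths are illustrative.

```python
# Hedged sketch; the templates below are stand-ins, not the real module constants.
import os

SINGLE_IMAGE = ".. image:: /%s\n    :align: center\n"      # stand-in template
HLIST_HEADER = ".. rst-class:: sphx-glr-horizontal\n\n"    # stand-in template
HLIST_IMAGE_TEMPLATE = "    * .. image:: /%s\n"            # stand-in template

figures = ["/docs/source/auto_examples/images/plot_demo_001.png"]
print(figure_rst(figures, "/docs/source"))
# .. image:: /auto_examples/images/plot_demo_001.png
#     :align: center
```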
def _stack_bands(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two rasters with non overlapping bands by stacking the bands.
"""
assert set(one.band_names).intersection(set(other.band_names)) == set()
# We raise an error if the bands are the same. See above.
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
# Apply "or" to the mask in the same way rasterio does, see
# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks
# In other words, mask the values that are already masked in either
# of the two rasters, since one mask per band is not supported
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
# Concatenate the data along the band axis and apply the mask
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
# We don't copy image and mask here due to performance issues;
# this output should not be used without eventually being copied.
# In this context we copy the object at the end of merge_all, merge_first and merge.
return _Raster(image=new_image, band_names=new_bands) | Merges two rasters with non overlapping bands by stacking the bands. | Below is the instruction that describes the task:
### Input:
Merges two rasters with non overlapping bands by stacking the bands.
### Response:
def _stack_bands(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two rasters with non overlapping bands by stacking the bands.
"""
assert set(one.band_names).intersection(set(other.band_names)) == set()
# We raise an error if the bands are the same. See above.
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
# Apply "or" to the mask in the same way rasterio does, see
# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks
# In other words, mask the values that are already masked in either
# of the two rasters, since one mask per band is not supported
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
# Concatenate the data along the band axis and apply the mask
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
# We don't copy image and mask here due to performance issues;
# this output should not be used without eventually being copied.
# In this context we copy the object at the end of merge_all, merge_first and merge.
return _Raster(image=new_image, band_names=new_bands) |
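A small self-contained numpy illustration of the mask handling described in the comments above (plain numpy, not the library's `_Raster` type): each raster contributes the mask of its first band, the two masks are OR-ed, and the combined mask is repeated for every stacked band.

```python
# Toy numpy illustration of the single-shared-mask behaviour used above.
import numpy as np

one = np.ma.masked_array(np.arange(4).reshape(1, 2, 2),
                         mask=[[[True, False], [False, False]]])
other = np.ma.masked_array(np.arange(4, 8).reshape(1, 2, 2),
                           mask=[[[False, False], [False, True]]])

# OR the per-raster masks (band 0 of each), then repeat for all stacked bands.
new_mask = np.ma.getmaskarray(one)[0] | np.ma.getmaskarray(other)[0]
stacked = np.ma.masked_array(np.concatenate([one.data, other.data]),
                             mask=[new_mask] * 2)
print(stacked.shape)    # (2, 2, 2) -- two stacked bands
print(stacked.mask[0])  # band 0 is masked wherever either input was masked
```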
def handle_abs(self):
"""Gets the state as the raw abolute numbers."""
# pylint: disable=no-member
x_raw = self.microbit.accelerometer.get_x()
y_raw = self.microbit.accelerometer.get_y()
x_abs = ('Absolute', 0x00, x_raw)
y_abs = ('Absolute', 0x01, y_raw)
return x_abs, y_abs | Gets the state as the raw absolute numbers. | Below is the instruction that describes the task:
### Input:
Gets the state as the raw absolute numbers.
### Response:
def handle_abs(self):
"""Gets the state as the raw abolute numbers."""
# pylint: disable=no-member
x_raw = self.microbit.accelerometer.get_x()
y_raw = self.microbit.accelerometer.get_y()
x_abs = ('Absolute', 0x00, x_raw)
y_abs = ('Absolute', 0x01, y_raw)
return x_abs, y_abs |
def walk_processes(top, topname='top', topdown=True, ignoreFlag=False):
"""Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether geneterate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation
"""
if not ignoreFlag:
flag = topdown
else:
flag = True
proc = top
level = 0
if flag:
yield topname, proc, level
if len(proc.subprocess) > 0: # there are sub-processes
level += 1
for name, subproc in proc.subprocess.items():
for name2, subproc2, level2 in walk_processes(subproc,
topname=name,
topdown=subproc.topdown,
ignoreFlag=ignoreFlag):
yield name2, subproc2, level+level2
if not flag:
yield topname, proc, level | Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether geneterate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation | Below is the instruction that describes the task:
### Input:
Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether geneterate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation
### Response:
def walk_processes(top, topname='top', topdown=True, ignoreFlag=False):
"""Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether geneterate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation
"""
if not ignoreFlag:
flag = topdown
else:
flag = True
proc = top
level = 0
if flag:
yield topname, proc, level
if len(proc.subprocess) > 0: # there are sub-processes
level += 1
for name, subproc in proc.subprocess.items():
for name2, subproc2, level2 in walk_processes(subproc,
topname=name,
topdown=subproc.topdown,
ignoreFlag=ignoreFlag):
yield name2, subproc2, level+level2
if not flag:
yield topname, proc, level |
def get_audit_log(self, begin_time=None, end_time=None):
"""
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = self._time_query_parms(begin_time, end_time)
uri = self.uri + '/operations/get-audit-log' + query_parms
result = self.manager.session.post(uri)
return result | Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | Below is the instruction that describes the task:
### Input:
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
def get_audit_log(self, begin_time=None, end_time=None):
"""
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = self._time_query_parms(begin_time, end_time)
uri = self.uri + '/operations/get-audit-log' + query_parms
result = self.manager.session.post(uri)
return result |
def locks(self):
"""
Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records
"""
return self.execute(
sql.LOCKS.format(
pid_column=self.pid_column,
query_column=self.query_column
)
) | Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records | Below is the instruction that describes the task:
### Input:
Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records
### Response:
def locks(self):
"""
Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records
"""
return self.execute(
sql.LOCKS.format(
pid_column=self.pid_column,
query_column=self.query_column
)
) |
def read_csr(csr):
'''
Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr
'''
csr = _get_request_obj(csr)
ret = {
# X509 Version 3 has a value of 2 in the field.
# Version 2 has a value of 1.
# https://tools.ietf.org/html/rfc5280#section-4.1.2.1
'Version': csr.get_version() + 1,
# Get size returns in bytes. The world thinks of key sizes in bits.
'Subject': _parse_subject(csr.get_subject()),
'Subject Hash': _dec2hex(csr.get_subject().as_hash()),
'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
'X509v3 Extensions': _get_csr_extensions(csr),
}
return ret | Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr | Below is the instruction that describes the task:
### Input:
Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr
### Response:
def read_csr(csr):
'''
Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr
'''
csr = _get_request_obj(csr)
ret = {
# X509 Version 3 has a value of 2 in the field.
# Version 2 has a value of 1.
# https://tools.ietf.org/html/rfc5280#section-4.1.2.1
'Version': csr.get_version() + 1,
# Get size returns in bytes. The world thinks of key sizes in bits.
'Subject': _parse_subject(csr.get_subject()),
'Subject Hash': _dec2hex(csr.get_subject().as_hash()),
'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
'X509v3 Extensions': _get_csr_extensions(csr),
}
return ret |
def authenticate_credentials(self, request, access_token):
"""
Authenticate the request, given the access token.
"""
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('user')
# provider_now switches to timezone aware datetime when
# the oauth2_provider version supports it.
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (user, token) | Authenticate the request, given the access token. | Below is the instruction that describes the task:
### Input:
Authenticate the request, given the access token.
### Response:
def authenticate_credentials(self, request, access_token):
"""
Authenticate the request, given the access token.
"""
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('user')
# provider_now switches to timezone aware datetime when
# the oauth2_provider version supports it.
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (user, token) |
def encoded_datastream(self):
'''Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content
'''
# return a generator of data to be uploaded to fedora
size = 0
if self.verify:
md5 = hashlib.md5()
leftover = None
while self.within_file:
content = self.get_next_section()
if content == BINARY_CONTENT_END:
if self.verify:
logger.info('Decoded content size %s (%s) MD5 %s',
size, humanize_file_size(size), md5.hexdigest())
self.within_file = False
elif self.within_file:
# if there was leftover binary content from the last chunk,
# add it to the content now
if leftover is not None:
content = b''.join([leftover, content])
leftover = None
try:
# decode method used by base64.decode
decoded_content = binascii.a2b_base64(content)
except binascii.Error:
# decoding can fail with a padding error when
# a line of encoded content runs across a read chunk
lines = content.split(b'\n')
# decode and yield all but the last line of encoded content
decoded_content = binascii.a2b_base64(b''.join(lines[:-1]))
# store the leftover to be decoded with the next chunk
leftover = lines[-1]
if decoded_content is not None:
if self.verify:
md5.update(decoded_content)
size += len(decoded_content)
yield decoded_content | Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content | Below is the instruction that describes the task:
### Input:
Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content
### Response:
def encoded_datastream(self):
'''Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content
'''
# return a generator of data to be uploaded to fedora
size = 0
if self.verify:
md5 = hashlib.md5()
leftover = None
while self.within_file:
content = self.get_next_section()
if content == BINARY_CONTENT_END:
if self.verify:
logger.info('Decoded content size %s (%s) MD5 %s',
size, humanize_file_size(size), md5.hexdigest())
self.within_file = False
elif self.within_file:
# if there was leftover binary content from the last chunk,
# add it to the content now
if leftover is not None:
content = b''.join([leftover, content])
leftover = None
try:
# decode method used by base64.decode
decoded_content = binascii.a2b_base64(content)
except binascii.Error:
# decoding can fail with a padding error when
# a line of encoded content runs across a read chunk
lines = content.split(b'\n')
# decode and yield all but the last line of encoded content
decoded_content = binascii.a2b_base64(b''.join(lines[:-1]))
# store the leftover to be decoded with the next chunk
leftover = lines[-1]
if decoded_content is not None:
if self.verify:
md5.update(decoded_content)
size += len(decoded_content)
yield decoded_content |
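The `except binascii.Error` branch is the subtle part: a read chunk can end in the middle of a base64 line, so only the complete lines are decoded and the partial tail is carried into the next chunk. Below is a stand-alone sketch of that idea on toy data, independent of the Fedora export code.

```python
# Stand-alone sketch of decoding base64 across chunk boundaries (toy data).
import binascii

chunks = [b"aGVsbG8g\nd29ybG", b"Q=\n"]  # "hello world", split mid-line
decoded, leftover = b"", None
for chunk in chunks:
    if leftover is not None:
        chunk = leftover + chunk
        leftover = None
    try:
        decoded += binascii.a2b_base64(chunk)
    except binascii.Error:
        # Decode the complete lines; keep the partial last line for next time.
        lines = chunk.split(b"\n")
        decoded += binascii.a2b_base64(b"".join(lines[:-1]))
        leftover = lines[-1]
print(decoded)  # b'hello world'
```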
def set_transfer_spec(self):
''' run the function to set the transfer spec; on error, set the associated exception '''
_ret = False
try:
self._args.transfer_spec_func(self._args)
_ret = True
except Exception as ex:
self.notify_exception(AsperaTransferSpecError(ex), False)
return _ret | run the function to set the transfer spec; on error, set the associated exception | Below is the instruction that describes the task:
### Input:
run the function to set the transfer spec; on error, set the associated exception
### Response:
def set_transfer_spec(self):
''' run the function to set the transfer spec; on error, set the associated exception '''
_ret = False
try:
self._args.transfer_spec_func(self._args)
_ret = True
except Exception as ex:
self.notify_exception(AsperaTransferSpecError(ex), False)
return _ret |
def _determine_timeout(default_timeout, specified_timeout, retry):
"""Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
"""
if specified_timeout is DEFAULT:
specified_timeout = default_timeout
if specified_timeout is default_timeout:
# If timeout is the default and the default timeout is exponential and
# a non-default retry is specified, make sure the timeout's deadline
# matches the retry's. This handles the case where the user leaves
# the timeout default but specifies a lower deadline via the retry.
if (
retry
and retry is not DEFAULT
and isinstance(default_timeout, timeout.ExponentialTimeout)
):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
# If timeout is specified as a number instead of a Timeout instance,
# convert it to a ConstantTimeout.
if isinstance(specified_timeout, (int, float)):
return timeout.ConstantTimeout(specified_timeout)
else:
return specified_timeout | Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``. | Below is the instruction that describes the task:
### Input:
Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
### Response:
def _determine_timeout(default_timeout, specified_timeout, retry):
"""Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
"""
if specified_timeout is DEFAULT:
specified_timeout = default_timeout
if specified_timeout is default_timeout:
# If timeout is the default and the default timeout is exponential and
# a non-default retry is specified, make sure the timeout's deadline
# matches the retry's. This handles the case where the user leaves
# the timeout default but specifies a lower deadline via the retry.
if (
retry
and retry is not DEFAULT
and isinstance(default_timeout, timeout.ExponentialTimeout)
):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
# If timeout is specified as a number instead of a Timeout instance,
# convert it to a ConstantTimeout.
if isinstance(specified_timeout, (int, float)):
return timeout.ConstantTimeout(specified_timeout)
else:
return specified_timeout |
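A hedged usage sketch. This helper looks like the one from `google.api_core.gapic_v1.method`, which relies on a module-level `DEFAULT` sentinel and the `google.api_core.timeout` module; the sentinel is stubbed below, and the function above is assumed to be defined in the same namespace as the stub.

```python
# Hedged sketch; assumes google-api-core is installed and that the function
# above shares a namespace with this stand-in DEFAULT sentinel.
from google.api_core import timeout

DEFAULT = object()  # stand-in for the module's sentinel value

# A plain number specified at call time is wrapped in a ConstantTimeout.
print(type(_determine_timeout(None, 30.0, retry=None)).__name__)    # ConstantTimeout

# Leaving the timeout as DEFAULT falls back to the method's default object.
default = timeout.ExponentialTimeout(deadline=120.0)
print(_determine_timeout(default, DEFAULT, retry=None) is default)  # True
```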
def cli(ctx, board, fpga, pack, type, size, project_dir,
verbose, verbose_yosys, verbose_arachne):
"""Bitstream timing analysis."""
# Run scons
exit_code = SCons(project_dir).time({
'board': board,
'fpga': fpga,
'size': size,
'type': type,
'pack': pack,
'verbose': {
'all': verbose,
'yosys': verbose_yosys,
'arachne': verbose_arachne
}
})
ctx.exit(exit_code) | Bitstream timing analysis. | Below is the instruction that describes the task:
### Input:
Bitstream timing analysis.
### Response:
def cli(ctx, board, fpga, pack, type, size, project_dir,
verbose, verbose_yosys, verbose_arachne):
"""Bitstream timing analysis."""
# Run scons
exit_code = SCons(project_dir).time({
'board': board,
'fpga': fpga,
'size': size,
'type': type,
'pack': pack,
'verbose': {
'all': verbose,
'yosys': verbose_yosys,
'arachne': verbose_arachne
}
})
ctx.exit(exit_code) |
def set_level(self, level):
"""
Set the logging level of this logger.
:param level: must be an int or a str.
"""
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level) | Set the logging level of this logger.
:param level: must be an int or a str. | Below is the instruction that describes the task:
### Input:
Set the logging level of this logger.
:param level: must be an int or a str.
### Response:
def set_level(self, level):
"""
Set the logging level of this logger.
:param level: must be an int or a str.
"""
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level) |
def _is_path(instance, attribute, s, exists=True):
"Validator for path-yness"
if not s:
# allow False as a default
return
if exists:
if os.path.exists(s):
return
else:
raise OSError("path does not exist")
else:
# how do we tell if it's a path if it doesn't exist?
raise TypeError("Not a path?") | Validator for path-yness | Below is the the instruction that describes the task:
### Input:
Validator for path-yness
### Response:
def _is_path(instance, attribute, s, exists=True):
"Validator for path-yness"
if not s:
# allow False as a default
return
if exists:
if os.path.exists(s):
return
else:
raise OSError("path does not exist")
else:
# how do we tell if it's a path if it doesn't exist?
raise TypeError("Not a path?") |
def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
'''Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists'''
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords | Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists | Below is the instruction that describes the task:
### Input:
Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists
### Response:
def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
'''Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists'''
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords |
def _get_capabilities(self):
"""Get the servers NETCONF capabilities.
:return: List of server capabilities.
"""
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities | Get the server's NETCONF capabilities.
:return: List of server capabilities. | Below is the instruction that describes the task:
### Input:
Get the server's NETCONF capabilities.
:return: List of server capabilities.
### Response:
def _get_capabilities(self):
"""Get the servers NETCONF capabilities.
:return: List of server capabilities.
"""
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities |
def set_loop_points(self, start_sample=-1, end_sample=0):
'''Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at
'''
lib.SetVoiceLoopPoints(self._handle, start_sample, end_sample) | Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at | Below is the instruction that describes the task:
### Input:
Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at
### Response:
def set_loop_points(self, start_sample=-1, end_sample=0):
'''Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at
'''
lib.SetVoiceLoopPoints(self._handle, start_sample, end_sample) |
def _compute_all_features(self):
"""Computes all the features (beatsync, framesync) from the audio."""
# Read actual audio waveform
self._audio, _ = librosa.load(self.file_struct.audio_file,
sr=self.sr)
# Get duration of audio file
self.dur = len(self._audio) / float(self.sr)
# Compute actual features
self._framesync_features = self.compute_features()
# Compute framesync times
self._compute_framesync_times()
# Compute/Read beats
self._est_beats_times, self._est_beats_frames = self.estimate_beats()
self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats()
# Beat-Synchronize
pad = True # Always append to the end of the features
self._est_beatsync_features, self._est_beatsync_times = \
self.compute_beat_sync_features(self._est_beats_frames,
self._est_beats_times, pad)
self._ann_beatsync_features, self._ann_beatsync_times = \
self.compute_beat_sync_features(self._ann_beats_frames,
self._ann_beats_times, pad) | Computes all the features (beatsync, framesync) from the audio. | Below is the instruction that describes the task:
### Input:
Computes all the features (beatsync, framesync) from the audio.
### Response:
def _compute_all_features(self):
"""Computes all the features (beatsync, framesync) from the audio."""
# Read actual audio waveform
self._audio, _ = librosa.load(self.file_struct.audio_file,
sr=self.sr)
# Get duration of audio file
self.dur = len(self._audio) / float(self.sr)
# Compute actual features
self._framesync_features = self.compute_features()
# Compute framesync times
self._compute_framesync_times()
# Compute/Read beats
self._est_beats_times, self._est_beats_frames = self.estimate_beats()
self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats()
# Beat-Synchronize
pad = True # Always append to the end of the features
self._est_beatsync_features, self._est_beatsync_times = \
self.compute_beat_sync_features(self._est_beats_frames,
self._est_beats_times, pad)
self._ann_beatsync_features, self._ann_beatsync_times = \
self.compute_beat_sync_features(self._ann_beats_frames,
self._ann_beats_times, pad) |
def aggregate_by_index(self, function, level=0):
"""
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
"""
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index) | Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int. | Below is the instruction that describes the task:
### Input:
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
### Response:
def aggregate_by_index(self, function, level=0):
"""
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
"""
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index) |
def association(self, group_xid):
"""Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
"""
association = {'groupXid': group_xid}
self._indicator_data.setdefault('associatedGroups', []).append(association) | Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate. | Below is the instruction that describes the task:
### Input:
Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
### Response:
def association(self, group_xid):
"""Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
"""
association = {'groupXid': group_xid}
self._indicator_data.setdefault('associatedGroups', []).append(association) |
def _get_timezone(self, root):
"""Find timezone informatation on bottom of the page."""
tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60) | Find timezone information at the bottom of the page. | Below is the instruction that describes the task:
### Input:
Find timezone information at the bottom of the page.
### Response:
def _get_timezone(self, root):
"""Find timezone informatation on bottom of the page."""
tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60) |
def _erase_card(self, number):
"""Destroy cards with this or higher number."""
with self._lock:
if number < (len(self.cards) - 1):
self._erase_card(number + 1)
if number > (len(self.cards) - 1):
return
max_cards_horiz = int(curses.COLS / 35)
obliterate = curses.newwin(
6,
35,
7 + 6 * (number // max_cards_horiz),
35 * (number % max_cards_horiz),
)
obliterate.erase()
obliterate.noutrefresh()
del self.cards[number] | Destroy cards with this or higher number. | Below is the instruction that describes the task:
### Input:
Destroy cards with this or higher number.
### Response:
def _erase_card(self, number):
"""Destroy cards with this or higher number."""
with self._lock:
if number < (len(self.cards) - 1):
self._erase_card(number + 1)
if number > (len(self.cards) - 1):
return
max_cards_horiz = int(curses.COLS / 35)
obliterate = curses.newwin(
6,
35,
7 + 6 * (number // max_cards_horiz),
35 * (number % max_cards_horiz),
)
obliterate.erase()
obliterate.noutrefresh()
del self.cards[number] |
def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True) | Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the instruction that describes the task:
### Input:
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True) |
def config_name_from_full_name(full_name):
"""Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
"""
projects, _, configs, result = full_name.split("/")
if projects != "projects" or configs != "configs":
raise ValueError(
"Unexpected format of resource",
full_name,
'Expected "projects/{proj}/configs/{cfg}"',
)
return result | Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format | Below is the instruction that describes the task:
### Input:
Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
### Response:
def config_name_from_full_name(full_name):
"""Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
"""
projects, _, configs, result = full_name.split("/")
if projects != "projects" or configs != "configs":
raise ValueError(
"Unexpected format of resource",
full_name,
'Expected "projects/{proj}/configs/{cfg}"',
)
return result |
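Beyond the doctest in the docstring, the validation branch is worth a quick look; this sketch uses only the function above and illustrative resource names.

```python
# The happy path and the validation error (illustrative resource names).
print(config_name_from_full_name("projects/my-proj/configs/my-config"))  # my-config
try:
    config_name_from_full_name("projects/my-proj/variables/my-var")
except ValueError as err:
    print(err.args[0])  # Unexpected format of resource
```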
def golfclap(rest):
"Clap for something"
clapv = random.choice(phrases.clapvl)
adv = random.choice(phrases.advl)
adj = random.choice(phrases.adjl)
if rest:
clapee = rest.strip()
karma.Karma.store.change(clapee, 1)
return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
return "/me claps %s, %s %s." % (clapv, adv, adj) | Clap for something | Below is the the instruction that describes the task:
### Input:
Clap for something
### Response:
def golfclap(rest):
"Clap for something"
clapv = random.choice(phrases.clapvl)
adv = random.choice(phrases.advl)
adj = random.choice(phrases.adjl)
if rest:
clapee = rest.strip()
karma.Karma.store.change(clapee, 1)
return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
return "/me claps %s, %s %s." % (clapv, adv, adj) |
def exc_thrown_by_descriptor():
"""Return True if the last exception was thrown by a
Descriptor instance.
"""
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
# relying on naming convention to get the object that threw
# the exception
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False | Return True if the last exception was thrown by a
Descriptor instance. | Below is the instruction that describes the task:
### Input:
Return True if the last exception was thrown by a
Descriptor instance.
### Response:
def exc_thrown_by_descriptor():
"""Return True if the last exception was thrown by a
Descriptor instance.
"""
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
# relying on naming convention to get the object that threw
# the exception
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False |
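A toy demonstration of the frame inspection: `tb_frame` on the first traceback entry is the frame containing the `try` statement, so in this sketch the exception is raised and handled inside a method of a `Descriptor` subclass. `Descriptor` here is a minimal stand-in for the library's base class, and the function above is assumed to live in the same namespace.

```python
# Toy demo; ``Descriptor`` is a minimal stand-in for the library's base class.
import sys

class Descriptor(object):
    pass

class Guarded(Descriptor):
    def fetch(self):
        try:
            raise ValueError("boom")
        except ValueError:
            # 'self' in the frame holding the try block is a Descriptor.
            return exc_thrown_by_descriptor()

print(Guarded().fetch())  # True
```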
def AnnotateBED(bed, GTF, genome_file, bedcols=None, promoter=[1000,200]):
"""
Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
:param genome_file: /path/to/file.genome - a tab-separated values file of chr name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
:returns: a Pandas dataframe with the annotated bed file. exons and promoters will be reported as well in the annotated_gene_features column.
"""
if type(bed) == type("string"):
bed=pd.read_table(bed,header=None)
bed.columns=bedcols.split(",")
print("Reading GTF file.")
sys.stdout.flush()
GTF=readGTF(GTF)
GTF["gene_name"]=retrieve_GTF_field("gene_name", GTF)
GTF["gene_id"]=retrieve_GTF_field("gene_id", GTF)
GTF["gene_name"]=GTF["gene_name"]+"/"+GTF["gene_id"]
GTF=GTF.drop(["gene_id"],axis=1)
print("Generating promoters annotation.")
sys.stdout.flush()
promoters=GTF[GTF["feature"]=="transcript"]
promoters_plus=promoters[promoters["strand"]=="+"]
promoters_minus=promoters[promoters["strand"]=="-"]
upstream=promoter[0]
downstream=promoter[1]
promoters_plus.loc[:,"promoter_start"]=promoters_plus.loc[:,"start"].astype(int)-upstream
promoters_plus.loc[:,"promoter_end"]=promoters_plus.loc[:,"start"].astype(int)+downstream
promoters_minus.loc[:,"promoter_start"]=promoters_minus["end"].astype(int)-downstream
promoters_minus.loc[:,"promoter_end"]=promoters_minus["end"].astype(int)+upstream
promoters=pd.concat([promoters_plus,promoters_minus])
promoters=promoters[["seqname","feature","promoter_start","promoter_end","gene_name"]]
promoters.columns=["seqname","feature","start","end","gene_name"]
promoters.loc[:,"feature"]="promoter"
promoters.drop_duplicates(inplace=True)
promoters.reset_index(inplace=True, drop=True)
chr_sizes=pd.read_table(genome_file,header=None)
chr_sizes.columns=["seqname","size"]
chr_sizes.loc[:,"seqname"]=chr_sizes["seqname"].astype(str)
promoters.loc[:,"seqname"]=promoters["seqname"].astype(str)
promoters=pd.merge(promoters,chr_sizes,how="left",on=["seqname"])
def CorrectStart(df):
s=df["start"]
if s < 0:
s=0
return s
def CorrectEnd(df):
s=df["end"]
e=df["size"]
if s > e:
s=e
return s
promoters.loc[:,"start"]=promoters.apply(CorrectStart,axis=1)
promoters.loc[:,"end"]=promoters.apply(CorrectEnd,axis=1)
promoters.drop(["size"],axis=1, inplace=True)
GTFs=GTF[["seqname","feature","start","end","gene_name"]]
GTFs=GTFs[ GTFs["feature"]!= "gene"]
GTFs.drop_duplicates(inplace=True)
GTFs.reset_index(inplace=True, drop=True)
GTFs=pd.concat([GTFs,promoters])
def NewName(df):
name=df["gene_name"]
feature=df["feature"]
if feature == "transcript":
res=name
else:
res=name+":"+feature
return res
GTFs.loc[:,"gene_name"]=GTFs.apply(NewName, axis=1)
GTFs=GTFs[["seqname","start","end","gene_name"]]
print( "Intersecting annotation tables and bed." )
sys.stdout.flush()
refGTF=dfTObedtool(GTFs)
pos=dfTObedtool(bed)
colsGTF=GTFs.columns.tolist()
newCols=bed.columns.tolist()
for f in colsGTF:
newCols.append(f+"_")
newCols_=[ s for s in newCols if s not in ["seqname_","start_", "end_"]]
pos=pos.intersect(refGTF, loj=True)
pos=pd.read_table(pos.fn , names=newCols)
pos=pos[newCols_]
print("Merging features.")
sys.stdout.flush()
def GetFeature(x):
if ":" in x:
res=x.split(":")[1]
else:
res=np.nan
return res
def GetName(x):
if ":" in x:
res=x.split(":")[0]
elif type(x) == type("string"):
if x != ".":
res=x
else:
res=np.nan
else:
res=np.nan
return res
pos["gene_feature_"]=pos["gene_name_"].apply( lambda x: GetFeature(x) )
pos["gene_name_"]=pos["gene_name_"].apply( lambda x: GetName(x) )
refcol=pos.columns.tolist()
refcol=[ s for s in refcol if s != "gene_feature_" ]
def CombineAnn(df):
def JOIN(x):
return ', '.join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( gene_feature_ = JOIN("gene_feature_") ) )
pos_=pos.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
def MergeNameFeatures(df):
name=df["gene_name_"]
feature=df["gene_feature_"]
if (type(name) == type("string")) & (name != ".") :
if type(feature) == type("string"):
if len(feature) > 0:
res=name+": "+feature
else:
res=name
else:
res=name
else:
res=np.nan
return res
pos_["annotated_gene_features"]=pos_.apply(MergeNameFeatures,axis=1)
pos_=pos_.drop(["gene_name_","gene_feature_"],axis=1)
def CombineAnn(df):
def JOIN(x):
return '; '.join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( annotated_gene_features = JOIN("annotated_gene_features") ) )
refcol=[ s for s in refcol if s != "gene_name_" ]
pos_=pos_.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
return pos_ | Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
:param genome_file: /path/to/file.genome - a tab separated values of chr name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
    :returns: a Pandas dataframe with the annotated bed file. exons and promoters will be reported as well in the annotated_gene_features column. | Below is the instruction that describes the task:
### Input:
Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
:param genome_file: /path/to/file.genome - a tab separated values of chr name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
:returns: a Pandas dataframe with the annotated bed file. exons and promoters will be reported as well in the annotated_gene_features column.
### Response:
def AnnotateBED(bed, GTF, genome_file, bedcols=None, promoter=[1000,200]):
"""
Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
:param genome_file: /path/to/file.genome - a tab separated values of chr name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
:returns: a Pandas dataframe with the annotated bed file. exons and promoters will be reported as well in the annotated_gene_features column.
"""
if type(bed) == type("string"):
bed=pd.read_table(bed,header=None)
bed.columns=bedcols.split(",")
print("Reading GTF file.")
sys.stdout.flush()
GTF=readGTF(GTF)
GTF["gene_name"]=retrieve_GTF_field("gene_name", GTF)
GTF["gene_id"]=retrieve_GTF_field("gene_id", GTF)
GTF["gene_name"]=GTF["gene_name"]+"/"+GTF["gene_id"]
GTF=GTF.drop(["gene_id"],axis=1)
print("Generating promoters annotation.")
sys.stdout.flush()
promoters=GTF[GTF["feature"]=="transcript"]
promoters_plus=promoters[promoters["strand"]=="+"]
promoters_minus=promoters[promoters["strand"]=="-"]
upstream=promoter[0]
downstream=promoter[1]
promoters_plus.loc[:,"promoter_start"]=promoters_plus.loc[:,"start"].astype(int)-upstream
promoters_plus.loc[:,"promoter_end"]=promoters_plus.loc[:,"start"].astype(int)+downstream
promoters_minus.loc[:,"promoter_start"]=promoters_minus["end"].astype(int)-downstream
promoters_minus.loc[:,"promoter_end"]=promoters_minus["end"].astype(int)+upstream
promoters=pd.concat([promoters_plus,promoters_minus])
promoters=promoters[["seqname","feature","promoter_start","promoter_end","gene_name"]]
promoters.columns=["seqname","feature","start","end","gene_name"]
promoters.loc[:,"feature"]="promoter"
promoters.drop_duplicates(inplace=True)
promoters.reset_index(inplace=True, drop=True)
chr_sizes=pd.read_table(genome_file,header=None)
chr_sizes.columns=["seqname","size"]
chr_sizes.loc[:,"seqname"]=chr_sizes["seqname"].astype(str)
promoters.loc[:,"seqname"]=promoters["seqname"].astype(str)
promoters=pd.merge(promoters,chr_sizes,how="left",on=["seqname"])
def CorrectStart(df):
s=df["start"]
if s < 0:
s=0
return s
def CorrectEnd(df):
s=df["end"]
e=df["size"]
if s > e:
s=e
return s
promoters.loc[:,"start"]=promoters.apply(CorrectStart,axis=1)
promoters.loc[:,"end"]=promoters.apply(CorrectEnd,axis=1)
promoters.drop(["size"],axis=1, inplace=True)
GTFs=GTF[["seqname","feature","start","end","gene_name"]]
GTFs=GTFs[ GTFs["feature"]!= "gene"]
GTFs.drop_duplicates(inplace=True)
GTFs.reset_index(inplace=True, drop=True)
GTFs=pd.concat([GTFs,promoters])
def NewName(df):
name=df["gene_name"]
feature=df["feature"]
if feature == "transcript":
res=name
else:
res=name+":"+feature
return res
GTFs.loc[:,"gene_name"]=GTFs.apply(NewName, axis=1)
GTFs=GTFs[["seqname","start","end","gene_name"]]
print( "Intersecting annotation tables and bed." )
sys.stdout.flush()
refGTF=dfTObedtool(GTFs)
pos=dfTObedtool(bed)
colsGTF=GTFs.columns.tolist()
newCols=bed.columns.tolist()
for f in colsGTF:
newCols.append(f+"_")
newCols_=[ s for s in newCols if s not in ["seqname_","start_", "end_"]]
pos=pos.intersect(refGTF, loj=True)
pos=pd.read_table(pos.fn , names=newCols)
pos=pos[newCols_]
print("Merging features.")
sys.stdout.flush()
def GetFeature(x):
if ":" in x:
res=x.split(":")[1]
else:
res=np.nan
return res
def GetName(x):
if ":" in x:
res=x.split(":")[0]
elif type(x) == type("string"):
if x != ".":
res=x
else:
res=np.nan
else:
res=np.nan
return res
pos["gene_feature_"]=pos["gene_name_"].apply( lambda x: GetFeature(x) )
pos["gene_name_"]=pos["gene_name_"].apply( lambda x: GetName(x) )
refcol=pos.columns.tolist()
refcol=[ s for s in refcol if s != "gene_feature_" ]
def CombineAnn(df):
def JOIN(x):
return ', '.join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( gene_feature_ = JOIN("gene_feature_") ) )
pos_=pos.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
def MergeNameFeatures(df):
name=df["gene_name_"]
feature=df["gene_feature_"]
if (type(name) == type("string")) & (name != ".") :
if type(feature) == type("string"):
if len(feature) > 0:
res=name+": "+feature
else:
res=name
else:
res=name
else:
res=np.nan
return res
pos_["annotated_gene_features"]=pos_.apply(MergeNameFeatures,axis=1)
pos_=pos_.drop(["gene_name_","gene_feature_"],axis=1)
def CombineAnn(df):
def JOIN(x):
return '; '.join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( annotated_gene_features = JOIN("annotated_gene_features") ) )
refcol=[ s for s in refcol if s != "gene_name_" ]
pos_=pos_.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
return pos_ |
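A minimal usage sketch for the AnnotateBED function above. The file paths, bed column names, and promoter window below are hypothetical placeholders, and the call assumes the function's pandas/pybedtools helpers used above are importable.

# Hypothetical usage of AnnotateBED; every path below is a placeholder.
annotated = AnnotateBED(
    "peaks.bed",                      # bed file whose columns are named in bedcols
    "annotation.gtf",                 # gene annotation in GTF format
    "genome.chrom.sizes",             # tab separated chromosome name / size table
    bedcols="chr,start,end,name",
    promoter=[1000, 200],             # 1 kb upstream and 200 bp downstream of the TSS
)
print(annotated.head())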
def process_file(self, path, dryrun):
"""
Print files path.
"""
# if dryrun just return files
if dryrun:
return path
# scan file and match lines
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
# if found matches return list of lines, else return None
        return ret if len(ret) > 0 else None | Print files path. | Below is the instruction that describes the task:
### Input:
Print files path.
### Response:
def process_file(self, path, dryrun):
"""
Print files path.
"""
# if dryrun just return files
if dryrun:
return path
# scan file and match lines
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
# if found matches return list of lines, else return None
return ret if len(ret) > 0 else None |
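The method above matches lines against a regular expression stored on the instance (self.__exp). A standalone sketch of the same scan-and-match loop follows; the pattern and file path are made-up placeholders.

# Standalone sketch of the line-matching loop; pattern and path are placeholders.
import re

pattern = re.compile(r"TODO|FIXME")
matches = []
with open("example.py", "r") as infile:
    for line in infile:
        if re.search(pattern, line):
            matches.append(line)
result = matches if len(matches) > 0 else None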
def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
"""tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
        The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel.
"""
if isinstance(connection_info, basestring):
# it's a path, unpack it
with open(connection_info) as f:
connection_info = json.loads(f.read())
cf = connection_info
lports = tunnel.select_random_ports(4)
rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']
remote_ip = cf['ip']
if tunnel.try_passwordless_ssh(sshserver, sshkey):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
for lp,rp in zip(lports, rports):
tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)
return tuple(lports) | tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
        The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
        The four ports on localhost that have been forwarded to the kernel. | Below is the instruction that describes the task:
### Input:
tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
The ssh sever to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel.
### Response:
def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
"""tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
        The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel.
"""
if isinstance(connection_info, basestring):
# it's a path, unpack it
with open(connection_info) as f:
connection_info = json.loads(f.read())
cf = connection_info
lports = tunnel.select_random_ports(4)
rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']
remote_ip = cf['ip']
if tunnel.try_passwordless_ssh(sshserver, sshkey):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
for lp,rp in zip(lports, rports):
tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)
return tuple(lports) |
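A hedged usage sketch for tunnel_to_kernel above; the connection file name, ssh host, and key path are placeholders rather than values from the original source.

# Hypothetical call; "kernel-1234.json" must be a real kernel connection file and
# "user@gateway.example.com" a reachable ssh host for this to actually run.
shell, iopub, stdin, hb = tunnel_to_kernel(
    "kernel-1234.json",
    "user@gateway.example.com",
    sshkey="~/.ssh/id_rsa",          # optional if the ssh config already names a key
)
print("shell channel forwarded to local port", shell)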
def get_domain_config(self, domain):
"""Makes a discovery of domain name and resolves configuration of DNS provider
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
"""
domain_root = self.identify_domain_root(domain)
host = ''
if len(domain_root) != len(domain):
host = domain.replace('.' + domain_root, '')
domain_connect_api = self._identify_domain_connect_api(domain_root)
ret = self._get_domain_config_for_root(domain_root, domain_connect_api)
return DomainConnectConfig(domain, domain_root, host, ret) | Makes a discovery of domain name and resolves configuration of DNS provider
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
        when settings are not found | Below is the instruction that describes the task:
### Input:
Makes a discovery of domain name and resolves configuration of DNS provider
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
### Response:
def get_domain_config(self, domain):
"""Makes a discovery of domain name and resolves configuration of DNS provider
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
"""
domain_root = self.identify_domain_root(domain)
host = ''
if len(domain_root) != len(domain):
host = domain.replace('.' + domain_root, '')
domain_connect_api = self._identify_domain_connect_api(domain_root)
ret = self._get_domain_config_for_root(domain_root, domain_connect_api)
return DomainConnectConfig(domain, domain_root, host, ret) |
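A usage sketch for get_domain_config above. It assumes dc is an instance of the (unnamed here) class that defines the method and that the queried domain actually publishes Domain Connect settings; both are assumptions, not facts from the source.

# Sketch only: `dc` stands in for an instance of the class defining get_domain_config.
try:
    config = dc.get_domain_config("www.example.com")
    print(config)                    # a DomainConnectConfig for example.com
except Exception as exc:             # e.g. NoDomainConnectRecordException
    print("Domain Connect discovery failed:", exc)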
def my_protocol_parser(out, buf):
"""Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
more details in asyncio/parsers.py
existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py
"""
while True:
tp = yield from buf.read(5)
if tp in (MSG_PING, MSG_PONG):
# skip line
yield from buf.skipuntil(b'\r\n')
out.feed_data(Message(tp, None))
elif tp == MSG_STOP:
out.feed_data(Message(tp, None))
elif tp == MSG_TEXT:
# read text
text = yield from buf.readuntil(b'\r\n')
out.feed_data(Message(tp, text.strip().decode('utf-8')))
else:
raise ValueError('Unknown protocol prefix.') | Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
more details in asyncio/parsers.py
existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
    * websocket parser asyncio/http/websocket.py | Below is the instruction that describes the task:
### Input:
Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
more details in asyncio/parsers.py
existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py
### Response:
def my_protocol_parser(out, buf):
"""Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
more details in asyncio/parsers.py
existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py
"""
while True:
tp = yield from buf.read(5)
if tp in (MSG_PING, MSG_PONG):
# skip line
yield from buf.skipuntil(b'\r\n')
out.feed_data(Message(tp, None))
elif tp == MSG_STOP:
out.feed_data(Message(tp, None))
elif tp == MSG_TEXT:
# read text
text = yield from buf.readuntil(b'\r\n')
out.feed_data(Message(tp, text.strip().decode('utf-8')))
else:
raise ValueError('Unknown protocol prefix.') |
def hash_data(data, hasher=NoParam, base=NoParam, types=False,
hashlen=NoParam, convert=False):
"""
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=True):
            if True, try and convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hashdata with base=sha1 will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
"""
if convert and isinstance(data, six.string_types): # nocover
try:
data = json.dumps(data)
except TypeError as ex:
# import warnings
# warnings.warn('Unable to encode input as json due to: {!r}'.format(ex))
pass
base = _rectify_base(base)
hashlen = _rectify_hashlen(hashlen)
hasher = _rectify_hasher(hasher)()
# Feed the data into the hasher
_update_hasher(hasher, data, types=types)
# Get the hashed representation
text = _digest_hasher(hasher, hashlen, base)
return text | Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=True):
            if True, try and convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hashdata with base=sha1 will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
        hsrgqvfiuxvvhcdnypivhhthmrolkzej | Below is the instruction that describes the task:
### Input:
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=True):
            if True, try and convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hashdata with base=sha1 will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
### Response:
def hash_data(data, hasher=NoParam, base=NoParam, types=False,
hashlen=NoParam, convert=False):
"""
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
convert (bool, optional, default=True):
            if True, try and convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
This means the output of hashdata with base=sha1 will be the same as
the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
"""
if convert and isinstance(data, six.string_types): # nocover
try:
data = json.dumps(data)
except TypeError as ex:
# import warnings
# warnings.warn('Unable to encode input as json due to: {!r}'.format(ex))
pass
base = _rectify_base(base)
hashlen = _rectify_hashlen(hashlen)
hasher = _rectify_hasher(hasher)()
# Feed the data into the hasher
_update_hasher(hasher, data, types=types)
# Get the hashed representation
text = _digest_hasher(hasher, hashlen, base)
return text |
def haiz(obj, chart):
""" Returns if an object is in Haiz. """
objGender = obj.gender()
objFaction = obj.faction()
if obj.id == const.MERCURY:
# Gender and faction of mercury depends on orientality
sun = chart.getObject(const.SUN)
orientalityM = orientality(obj, sun)
if orientalityM == ORIENTAL:
objGender = const.MASCULINE
objFaction = const.DIURNAL
else:
objGender = const.FEMININE
objFaction = const.NOCTURNAL
# Object gender match sign gender?
signGender = props.sign.gender[obj.sign]
genderConformity = (objGender == signGender)
# Match faction
factionConformity = False
diurnalChart = chart.isDiurnal()
if obj.id == const.SUN and not diurnalChart:
# Sun is in conformity only when above horizon
factionConformity = False
else:
# Get list of houses in the chart's diurnal faction
if diurnalChart:
diurnalFaction = props.house.aboveHorizon
nocturnalFaction = props.house.belowHorizon
else:
diurnalFaction = props.house.belowHorizon
nocturnalFaction = props.house.aboveHorizon
# Get the object's house and match factions
objHouse = chart.houses.getObjectHouse(obj)
if (objFaction == const.DIURNAL and objHouse.id in diurnalFaction or
objFaction == const.NOCTURNAL and objHouse.id in nocturnalFaction):
factionConformity = True
# Match things
if (genderConformity and factionConformity):
return HAIZ
elif (not genderConformity and not factionConformity):
return CHAIZ
else:
        return None | Returns if an object is in Haiz. | Below is the instruction that describes the task:
### Input:
Returns if an object is in Haiz.
### Response:
def haiz(obj, chart):
""" Returns if an object is in Haiz. """
objGender = obj.gender()
objFaction = obj.faction()
if obj.id == const.MERCURY:
# Gender and faction of mercury depends on orientality
sun = chart.getObject(const.SUN)
orientalityM = orientality(obj, sun)
if orientalityM == ORIENTAL:
objGender = const.MASCULINE
objFaction = const.DIURNAL
else:
objGender = const.FEMININE
objFaction = const.NOCTURNAL
# Object gender match sign gender?
signGender = props.sign.gender[obj.sign]
genderConformity = (objGender == signGender)
# Match faction
factionConformity = False
diurnalChart = chart.isDiurnal()
if obj.id == const.SUN and not diurnalChart:
# Sun is in conformity only when above horizon
factionConformity = False
else:
# Get list of houses in the chart's diurnal faction
if diurnalChart:
diurnalFaction = props.house.aboveHorizon
nocturnalFaction = props.house.belowHorizon
else:
diurnalFaction = props.house.belowHorizon
nocturnalFaction = props.house.aboveHorizon
# Get the object's house and match factions
objHouse = chart.houses.getObjectHouse(obj)
if (objFaction == const.DIURNAL and objHouse.id in diurnalFaction or
objFaction == const.NOCTURNAL and objHouse.id in nocturnalFaction):
factionConformity = True
# Match things
if (genderConformity and factionConformity):
return HAIZ
elif (not genderConformity and not factionConformity):
return CHAIZ
else:
return None |
def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts) | Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
    @param category: The wikinews category name | Below is the instruction that describes the task:
### Input:
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
### Response:
def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts) |
def _send_textmetrics(metrics):
'''
Format metrics for the carbon plaintext protocol
'''
data = [' '.join(map(six.text_type, metric)) for metric in metrics] + ['']
    return '\n'.join(data) | Format metrics for the carbon plaintext protocol | Below is the instruction that describes the task:
### Input:
Format metrics for the carbon plaintext protocol
### Response:
def _send_textmetrics(metrics):
'''
Format metrics for the carbon plaintext protocol
'''
data = [' '.join(map(six.text_type, metric)) for metric in metrics] + ['']
return '\n'.join(data) |
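A self-contained illustration of the carbon plaintext format that _send_textmetrics produces; the metric path, value, and timestamp are made-up examples.

# Each metric is a (path, value, unix_timestamp) tuple; the wire format is
# space separated fields, one metric per line, with a trailing newline.
metrics = [("servers.web01.load", 1.42, 1700000000)]
payload = "\n".join(" ".join(str(field) for field in metric) for metric in metrics) + "\n"
print(repr(payload))                  # 'servers.web01.load 1.42 1700000000\n'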
def get_citation_by_reference(self, type: str, reference: str) -> Optional[Citation]:
"""Get a citation object by its type and reference."""
citation_hash = hash_citation(type=type, reference=reference)
        return self.get_citation_by_hash(citation_hash) | Get a citation object by its type and reference. | Below is the instruction that describes the task:
### Input:
Get a citation object by its type and reference.
### Response:
def get_citation_by_reference(self, type: str, reference: str) -> Optional[Citation]:
"""Get a citation object by its type and reference."""
citation_hash = hash_citation(type=type, reference=reference)
return self.get_citation_by_hash(citation_hash) |
def entropy(args):
"""
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
"""
p = OptionParser(entropy.__doc__)
p.add_option("--threshold", default=0, type="int",
help="Complexity needs to be above")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
kmc_out, = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score)))) | %prog entropy kmc_dump.out
kmc_dump.out contains two columns:
    AAAAAAAAAAAGAAGAAAGAAA 34 | Below is the instruction that describes the task:
### Input:
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
### Response:
def entropy(args):
"""
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
"""
p = OptionParser(entropy.__doc__)
p.add_option("--threshold", default=0, type="int",
help="Complexity needs to be above")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
kmc_out, = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score)))) |
def load_tabs(self):
"""Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
"""
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
self._table_dict[table._meta.name] = {'table': table,
'tab': tab} | Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
    This step is necessary before processing any tab or table actions. | Below is the instruction that describes the task:
### Input:
Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
### Response:
def load_tabs(self):
"""Loads the tab group.
It compiles the table instances for each table attached to
any :class:`horizon.tabs.TableTab` instances on the tab group.
This step is necessary before processing any tab or table actions.
"""
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
self._table_dict[table._meta.name] = {'table': table,
'tab': tab} |
def abort(*args, **kwargs):
"""
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
"""
code = kwargs.pop("code", 1)
logger = kwargs.pop("logger", LOG.error if code else LOG.info)
fatal = kwargs.pop("fatal", True)
return_value = fatal
if isinstance(fatal, tuple) and len(fatal) == 2:
fatal, return_value = fatal
if logger and fatal is not None and args:
if logging.root.handlers:
logger(*args, **kwargs)
else:
sys.stderr.write("%s\n" % formatted_string(*args))
if fatal:
if isinstance(fatal, type) and issubclass(fatal, BaseException):
raise fatal(code)
if AbortException is not None:
if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
raise AbortException(code)
return AbortException(code)
return return_value | Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
    :return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers | Below is the instruction that describes the task:
### Input:
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
### Response:
def abort(*args, **kwargs):
"""
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
"""
code = kwargs.pop("code", 1)
logger = kwargs.pop("logger", LOG.error if code else LOG.info)
fatal = kwargs.pop("fatal", True)
return_value = fatal
if isinstance(fatal, tuple) and len(fatal) == 2:
fatal, return_value = fatal
if logger and fatal is not None and args:
if logging.root.handlers:
logger(*args, **kwargs)
else:
sys.stderr.write("%s\n" % formatted_string(*args))
if fatal:
if isinstance(fatal, type) and issubclass(fatal, BaseException):
raise fatal(code)
if AbortException is not None:
if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
raise AbortException(code)
return AbortException(code)
return return_value |
def sigma_cached(self, psd):
""" Cache sigma calculate for use in tandem with the FilterBank class
"""
if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
# If possible, we precalculate the sigmasq vector for all possible waveforms
if pycbc.waveform.waveform_norm_exists(self.approximant):
if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm(
self.approximant, psd, len(psd), psd.delta_f, self.f_lower)
if not hasattr(self, 'sigma_scale'):
                    # Get an amplitude normalization (mass dependent constant norm)
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
self._sigmasq[key] = self.sigma_scale * \
psd.sigmasq_vec[self.approximant][self.end_idx-1]
else:
if not hasattr(self, 'sigma_view'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd[self.sslice]
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt)
        return self._sigmasq[key] | Cache sigma calculate for use in tandem with the FilterBank class | Below is the instruction that describes the task:
### Input:
Cache sigma calculate for use in tandem with the FilterBank class
### Response:
def sigma_cached(self, psd):
""" Cache sigma calculate for use in tandem with the FilterBank class
"""
if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
# If possible, we precalculate the sigmasq vector for all possible waveforms
if pycbc.waveform.waveform_norm_exists(self.approximant):
if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm(
self.approximant, psd, len(psd), psd.delta_f, self.f_lower)
if not hasattr(self, 'sigma_scale'):
                    # Get an amplitude normalization (mass dependent constant norm)
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
self._sigmasq[key] = self.sigma_scale * \
psd.sigmasq_vec[self.approximant][self.end_idx-1]
else:
if not hasattr(self, 'sigma_view'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd[self.sslice]
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt)
return self._sigmasq[key] |
def depends (self, d):
""" Adds additional instances of 'VirtualTarget' that this
one depends on.
"""
self.dependencies_ = unique (self.dependencies_ + d).sort () | Adds additional instances of 'VirtualTarget' that this
        one depends on. | Below is the instruction that describes the task:
### Input:
Adds additional instances of 'VirtualTarget' that this
one depends on.
### Response:
def depends (self, d):
""" Adds additional instances of 'VirtualTarget' that this
one depends on.
"""
self.dependencies_ = unique (self.dependencies_ + d).sort () |
def add_module(self, module, cython=False):
"""Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
        Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
"""
name_module = module.__name__.split('.')[-1]
short = ('|%s|'
% name_module)
long = (':mod:`~%s`'
% module.__name__)
self._short2long[short] = long
for (name_member, member) in vars(module).items():
if self.consider_member(
name_member, member, module):
role = self.get_role(member, cython)
short = ('|%s|'
% name_member)
medium = ('|%s.%s|'
% (name_module,
name_member))
long = (':%s:`~%s.%s`'
% (role,
module.__name__,
name_member))
self.add_substitution(short, medium, long, module)
if inspect.isclass(member):
for name_submember, submember in vars(member).items():
if self.consider_member(
name_submember, submember, module, member):
role = self.get_role(submember, cython)
short = ('|%s.%s|'
% (name_member,
name_submember))
medium = ('|%s.%s.%s|'
% (name_module,
name_member,
name_submember))
long = (':%s:`~%s.%s.%s`'
% (role,
module.__name__,
name_member,
name_submember))
self.add_substitution(short, medium, long, module) | Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
    Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer` | Below is the instruction that describes the task:
### Input:
Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
    Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
### Response:
def add_module(self, module, cython=False):
"""Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
        Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
"""
name_module = module.__name__.split('.')[-1]
short = ('|%s|'
% name_module)
long = (':mod:`~%s`'
% module.__name__)
self._short2long[short] = long
for (name_member, member) in vars(module).items():
if self.consider_member(
name_member, member, module):
role = self.get_role(member, cython)
short = ('|%s|'
% name_member)
medium = ('|%s.%s|'
% (name_module,
name_member))
long = (':%s:`~%s.%s`'
% (role,
module.__name__,
name_member))
self.add_substitution(short, medium, long, module)
if inspect.isclass(member):
for name_submember, submember in vars(member).items():
if self.consider_member(
name_submember, submember, module, member):
role = self.get_role(submember, cython)
short = ('|%s.%s|'
% (name_member,
name_submember))
medium = ('|%s.%s.%s|'
% (name_module,
name_member,
name_submember))
long = (':%s:`~%s.%s.%s`'
% (role,
module.__name__,
name_member,
name_submember))
self.add_substitution(short, medium, long, module) |
def _hashable_bytes(data):
"""
Coerce strings to hashable bytes.
"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('ascii') # Fail on anything non-ASCII.
else:
raise TypeError(data) | Coerce strings to hashable bytes. | Below is the the instruction that describes the task:
### Input:
Coerce strings to hashable bytes.
### Response:
def _hashable_bytes(data):
"""
Coerce strings to hashable bytes.
"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('ascii') # Fail on anything non-ASCII.
else:
raise TypeError(data) |
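A quick demonstration of the coercion rules implemented by _hashable_bytes above (runnable with the function in scope).

# bytes pass through, ASCII str is encoded, everything else raises TypeError.
assert _hashable_bytes(b"abc") == b"abc"
assert _hashable_bytes("abc") == b"abc"
try:
    _hashable_bytes(123)
except TypeError:
    pass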
def _set_least_batch_id(self, txn_signature):
"""Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
"""
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
# Test to see if all batches from the least_batch to
# the prior batch to the current batch have results.
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
# Find the first batch from the current batch on, that doesn't have
# all results.
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least | Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
                results being set. | Below is the instruction that describes the task:
### Input:
Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
### Response:
def _set_least_batch_id(self, txn_signature):
"""Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
"""
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
# Test to see if all batches from the least_batch to
# the prior batch to the current batch have results.
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
# Find the first batch from the current batch on, that doesn't have
# all results.
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least |
def with_reconnect(func):
"""
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
"""
from pymongo.errors import AutoReconnect
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for _ in range(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
raise
return _reconnector | Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
    completely. | Below is the instruction that describes the task:
### Input:
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
### Response:
def with_reconnect(func):
"""
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
"""
from pymongo.errors import AutoReconnect
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for _ in range(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
raise
return _reconnector |
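The decorator above implements a bounded retry-with-sleep pattern around pymongo's AutoReconnect. Below is a standalone sketch of the same pattern with a stand-in exception so it runs without pymongo; FlakyError, with_retry, and the explicit re-raise of the last error are additions for the sketch, not part of the original.

import functools
import time

class FlakyError(Exception):
    """Stand-in for a transient connection error such as AutoReconnect."""

def with_retry(func):
    @functools.wraps(func)
    def _retrier(*args, **kwargs):
        last_error = None
        for _ in range(20):
            try:
                return func(*args, **kwargs)
            except FlakyError as exc:
                last_error = exc
                time.sleep(0.250)
        raise last_error               # give up after roughly five seconds of retries
    return _retrier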
def stats(self):
"""
Gets performance statistics and server information
"""
status, _, body = self._request('GET', self.stats_path(),
{'Accept': 'application/json'})
if status == 200:
return json.loads(bytes_to_str(body))
else:
return None | Gets performance statistics and server information | Below is the the instruction that describes the task:
### Input:
Gets performance statistics and server information
### Response:
def stats(self):
"""
Gets performance statistics and server information
"""
status, _, body = self._request('GET', self.stats_path(),
{'Accept': 'application/json'})
if status == 200:
return json.loads(bytes_to_str(body))
else:
return None |
def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):
"""
Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse:
"""
r_type = None
if cysparse_type in ['COMPLEX64_t']:
r_type = 'FLOAT32_t'
elif cysparse_type in ['COMPLEX128_t']:
r_type = 'FLOAT64_t'
elif cysparse_type in ['COMPLEX256_t']:
r_type = 'FLOAT128_t'
else:
raise TypeError("Not a recognized complex type")
return r_type | Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
        cysparse: | Below is the instruction that describes the task:
### Input:
Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse:
### Response:
def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):
"""
Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse:
"""
r_type = None
if cysparse_type in ['COMPLEX64_t']:
r_type = 'FLOAT32_t'
elif cysparse_type in ['COMPLEX128_t']:
r_type = 'FLOAT64_t'
elif cysparse_type in ['COMPLEX256_t']:
r_type = 'FLOAT128_t'
else:
raise TypeError("Not a recognized complex type")
return r_type |
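The mapping implemented above, spelled out as a quick check (runnable with the function defined above in scope):

# COMPLEX64_t -> FLOAT32_t, COMPLEX128_t -> FLOAT64_t, COMPLEX256_t -> FLOAT128_t
assert cysparse_real_type_from_real_cysparse_complex_type('COMPLEX64_t') == 'FLOAT32_t'
assert cysparse_real_type_from_real_cysparse_complex_type('COMPLEX128_t') == 'FLOAT64_t'
assert cysparse_real_type_from_real_cysparse_complex_type('COMPLEX256_t') == 'FLOAT128_t'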
def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
        print('ERROR: Cannot open %s.' % cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) | Get the credentials and database name from options in config file. | Below is the the instruction that describes the task:
### Input:
Get the credentials and database name from options in config file.
### Response:
def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
        print('ERROR: Cannot open %s.' % cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) |
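A self-contained illustration of the URL slicing performed by get_mysql_credentials above, applied to a made-up connection string (user, password, host, and database name are placeholders).

import re

value = "mysql://dfa_user:secret@db.example.com/dfa_db?charset=utf8"
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
user, password = value[indices[0] + 3:indices[1]].split(':')
host = value[indices[1] + 1:indices[2]]
db_name = value[indices[2] + 1:indices[3]]
charset = value[indices[3] + 1:].split('=')[1]
print(user, password, host, db_name, charset)   # dfa_user secret db.example.com dfa_db utf8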
def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid) | Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`. | Below is the instruction that describes the task:
### Input:
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
### Response:
def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid) |
def _make_grid_of_axes(self,
bounding_rect=cfg.bounding_rect_default,
num_rows=cfg.num_rows_per_view_default,
num_cols=cfg.num_cols_grid_default,
axis_pad=cfg.axis_pad_default,
commn_annot=None,
**axis_kwargs):
"""Creates a grid of axes bounded within a given rectangle."""
axes_in_grid = list()
extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect,
num_cols=num_cols,
num_rows=num_rows, axis_pad=axis_pad)
for cell_ext in extents:
ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False,
**axis_kwargs)
if commn_annot is not None:
ax_cell.set_title(commn_annot)
ax_cell.set_axis_off()
axes_in_grid.append(ax_cell)
return axes_in_grid | Creates a grid of axes bounded within a given rectangle. | Below is the instruction that describes the task:
### Input:
Creates a grid of axes bounded within a given rectangle.
### Response:
def _make_grid_of_axes(self,
bounding_rect=cfg.bounding_rect_default,
num_rows=cfg.num_rows_per_view_default,
num_cols=cfg.num_cols_grid_default,
axis_pad=cfg.axis_pad_default,
commn_annot=None,
**axis_kwargs):
"""Creates a grid of axes bounded within a given rectangle."""
axes_in_grid = list()
extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect,
num_cols=num_cols,
num_rows=num_rows, axis_pad=axis_pad)
for cell_ext in extents:
ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False,
**axis_kwargs)
if commn_annot is not None:
ax_cell.set_title(commn_annot)
ax_cell.set_axis_off()
axes_in_grid.append(ax_cell)
return axes_in_grid |
def from_kwargs(cls, **kwargs):
"""Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
"""
arrays = []
names = []
for p,vals in kwargs.items():
if not isinstance(vals, numpy.ndarray):
if not isinstance(vals, list):
vals = [vals]
vals = numpy.array(vals)
arrays.append(vals)
names.append(p)
return cls.from_arrays(arrays, names=names) | Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.])) | Below is the instruction that describes the task:
### Input:
Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
### Response:
def from_kwargs(cls, **kwargs):
"""Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
"""
arrays = []
names = []
for p,vals in kwargs.items():
if not isinstance(vals, numpy.ndarray):
if not isinstance(vals, list):
vals = [vals]
vals = numpy.array(vals)
arrays.append(vals)
names.append(p)
return cls.from_arrays(arrays, names=names) |
def web(connection, host, port):
"""Run a combine web interface."""
from bio2bel.web.application import create_application
app = create_application(connection=connection)
app.run(host=host, port=port) | Run a combine web interface. | Below is the instruction that describes the task:
### Input:
Run a combine web interface.
### Response:
def web(connection, host, port):
"""Run a combine web interface."""
from bio2bel.web.application import create_application
app = create_application(connection=connection)
app.run(host=host, port=port) |
def reflectance_from_tbs(self, sun_zenith, tb_near_ir, tb_thermal, **kwargs):
"""
The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied.
"""
# Check for dask arrays
if hasattr(tb_near_ir, 'compute') or hasattr(tb_thermal, 'compute'):
compute = False
else:
compute = True
if hasattr(tb_near_ir, 'mask') or hasattr(tb_thermal, 'mask'):
is_masked = True
else:
is_masked = False
if np.isscalar(tb_near_ir):
tb_nir = np.array([tb_near_ir, ])
else:
tb_nir = np.asanyarray(tb_near_ir)
if np.isscalar(tb_thermal):
tb_therm = np.array([tb_thermal, ])
else:
tb_therm = np.asanyarray(tb_thermal)
if tb_therm.shape != tb_nir.shape:
errmsg = 'Dimensions do not match! {0} and {1}'.format(
str(tb_therm.shape), str(tb_nir.shape))
raise ValueError(errmsg)
tb_ir_co2 = kwargs.get('tb_ir_co2')
lut = kwargs.get('lut', self.lut)
if tb_ir_co2 is None:
co2corr = False
tbco2 = None
else:
co2corr = True
if np.isscalar(tb_ir_co2):
tbco2 = np.array([tb_ir_co2, ])
else:
tbco2 = np.asanyarray(tb_ir_co2)
if not self.rsr:
raise NotImplementedError("Reflectance calculations without "
"rsr not yet supported!")
# Assume rsr is in microns!!!
# FIXME!
self._rad3x_t11 = self.tb2radiance(tb_therm, lut=lut)['radiance']
thermal_emiss_one = self._rad3x_t11 * self.rsr_integral
l_nir = self.tb2radiance(tb_nir, lut=lut)['radiance'] * self.rsr_integral
if thermal_emiss_one.ravel().shape[0] < 10:
LOG.info('thermal_emiss_one = %s', str(thermal_emiss_one))
if l_nir.ravel().shape[0] < 10:
LOG.info('l_nir = %s', str(l_nir))
sunzmask = (sun_zenith < 0.0) | (sun_zenith > 88.0)
sunz = where(sunzmask, 88.0, sun_zenith)
mu0 = np.cos(np.deg2rad(sunz))
# mu0 = np.where(np.less(mu0, 0.1), 0.1, mu0)
self._rad3x = l_nir
self._solar_radiance = self.solar_flux * mu0 / np.pi
# CO2 correction to the 3.9 radiance, only if tbs of a co2 band around
# 13.4 micron is provided:
if co2corr:
self.derive_rad39_corr(tb_therm, tbco2)
LOG.info("CO2 correction applied...")
else:
self._rad3x_correction = 1.0
nomin = l_nir - thermal_emiss_one * self._rad3x_correction
denom = self._solar_radiance - thermal_emiss_one * self._rad3x_correction
data = nomin / denom
mask = (self._solar_radiance - thermal_emiss_one *
self._rad3x_correction) < EPSILON
logical_or(sunzmask, mask, out=mask)
logical_or(mask, np.isnan(tb_nir), out=mask)
self._r3x = where(mask, np.nan, data)
# Reflectances should be between 0 and 1, but values above 1 are
# perfectly possible and okay! (Multiply by 100 to get reflectances
# in percent)
if hasattr(self._r3x, 'compute') and compute:
res = self._r3x.compute()
else:
res = self._r3x
if is_masked:
res = np.ma.masked_array(res, mask=np.isnan(res))
return res | The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied. | Below is the instruction that describes the task:
### Input:
The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied.
### Response:
def reflectance_from_tbs(self, sun_zenith, tb_near_ir, tb_thermal, **kwargs):
"""
The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied.
"""
# Check for dask arrays
if hasattr(tb_near_ir, 'compute') or hasattr(tb_thermal, 'compute'):
compute = False
else:
compute = True
if hasattr(tb_near_ir, 'mask') or hasattr(tb_thermal, 'mask'):
is_masked = True
else:
is_masked = False
if np.isscalar(tb_near_ir):
tb_nir = np.array([tb_near_ir, ])
else:
tb_nir = np.asanyarray(tb_near_ir)
if np.isscalar(tb_thermal):
tb_therm = np.array([tb_thermal, ])
else:
tb_therm = np.asanyarray(tb_thermal)
if tb_therm.shape != tb_nir.shape:
errmsg = 'Dimensions do not match! {0} and {1}'.format(
str(tb_therm.shape), str(tb_nir.shape))
raise ValueError(errmsg)
tb_ir_co2 = kwargs.get('tb_ir_co2')
lut = kwargs.get('lut', self.lut)
if tb_ir_co2 is None:
co2corr = False
tbco2 = None
else:
co2corr = True
if np.isscalar(tb_ir_co2):
tbco2 = np.array([tb_ir_co2, ])
else:
tbco2 = np.asanyarray(tb_ir_co2)
if not self.rsr:
raise NotImplementedError("Reflectance calculations without "
"rsr not yet supported!")
# Assume rsr is in microns!!!
# FIXME!
self._rad3x_t11 = self.tb2radiance(tb_therm, lut=lut)['radiance']
thermal_emiss_one = self._rad3x_t11 * self.rsr_integral
l_nir = self.tb2radiance(tb_nir, lut=lut)['radiance'] * self.rsr_integral
if thermal_emiss_one.ravel().shape[0] < 10:
LOG.info('thermal_emiss_one = %s', str(thermal_emiss_one))
if l_nir.ravel().shape[0] < 10:
LOG.info('l_nir = %s', str(l_nir))
sunzmask = (sun_zenith < 0.0) | (sun_zenith > 88.0)
sunz = where(sunzmask, 88.0, sun_zenith)
mu0 = np.cos(np.deg2rad(sunz))
# mu0 = np.where(np.less(mu0, 0.1), 0.1, mu0)
self._rad3x = l_nir
self._solar_radiance = self.solar_flux * mu0 / np.pi
# CO2 correction to the 3.9 radiance, only if tbs of a co2 band around
# 13.4 micron is provided:
if co2corr:
self.derive_rad39_corr(tb_therm, tbco2)
LOG.info("CO2 correction applied...")
else:
self._rad3x_correction = 1.0
nomin = l_nir - thermal_emiss_one * self._rad3x_correction
denom = self._solar_radiance - thermal_emiss_one * self._rad3x_correction
data = nomin / denom
mask = (self._solar_radiance - thermal_emiss_one *
self._rad3x_correction) < EPSILON
logical_or(sunzmask, mask, out=mask)
logical_or(mask, np.isnan(tb_nir), out=mask)
self._r3x = where(mask, np.nan, data)
# Reflectances should be between 0 and 1, but values above 1 is
# perfectly possible and okay! (Multiply by 100 to get reflectances
# in percent)
if hasattr(self._r3x, 'compute') and compute:
res = self._r3x.compute()
else:
res = self._r3x
if is_masked:
res = np.ma.masked_array(res, mask=np.isnan(res))
return res |
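Stripped of the masking and dask handling, the pseudo-reflectance returned above is the ratio between the solar part of the observed 3.x micron radiance and the reflected-solar term; a minimal numpy sketch of that last step, with made-up radiance values and no CO2 correction:
import numpy as np

solar_flux = 9.7          # band-integrated solar flux; illustrative value only
sun_zenith = 45.0         # degrees
l_nir = 0.5               # observed 3.x um radiance, made up
thermal_emiss = 0.3       # thermal emission predicted from the 11 um Tb, made up
rad3x_correction = 1.0    # no CO2 correction applied

solar_radiance = solar_flux * np.cos(np.deg2rad(sun_zenith)) / np.pi
r3x = ((l_nir - thermal_emiss * rad3x_correction)
       / (solar_radiance - thermal_emiss * rad3x_correction))
print(r3x)                # unitless pseudo-reflectance, typically between 0 and 1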
def password_change(self, wallet, password):
"""
Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
resp = self.call('password_change', payload)
return resp['changed'] == '1' | Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True | Below is the instruction that describes the task:
### Input:
Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
### Response:
def password_change(self, wallet, password):
"""
Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
resp = self.call('password_change', payload)
return resp['changed'] == '1' |
def generate_classified_legend(
analysis,
exposure,
hazard,
use_rounding,
debug_mode):
"""Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
:param use_rounding: Boolean if we round number in the legend.
:type use_rounding: bool
:param debug_mode: Boolean if run in debug mode, to display the not exposed.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict
"""
# We need to read the analysis layer to get the number of features.
analysis_row = next(analysis.getFeatures())
# Let's style the hazard class in each layers.
hazard_classification = hazard.keywords['classification']
hazard_classification = definition(hazard_classification)
# Let's check if there is some thresholds:
thresholds = hazard.keywords.get('thresholds')
if thresholds:
hazard_unit = hazard.keywords.get('continuous_hazard_unit')
hazard_unit = definition(hazard_unit)['abbreviation']
else:
hazard_unit = None
exposure = exposure.keywords['exposure']
exposure_definitions = definition(exposure)
exposure_units = exposure_definitions['units']
exposure_unit = exposure_units[0]
coefficient = 1
# We check if we can use a greater unit, such as kilometre for instance.
if len(exposure_units) > 1:
# We use only two units for now.
delta = coefficient_between_units(
exposure_units[1], exposure_units[0])
all_values_are_greater = True
# We check if all values are greater than the coefficient
for i, hazard_class in enumerate(hazard_classification['classes']):
field_name = hazard_count_field['field_name'] % hazard_class['key']
try:
value = analysis_row[field_name]
except KeyError:
value = 0
if 0 < value < delta:
# 0 is fine, we can still keep the second unit.
all_values_are_greater = False
if all_values_are_greater:
# If yes, we can use this unit.
exposure_unit = exposure_units[1]
coefficient = delta
classes = OrderedDict()
for i, hazard_class in enumerate(hazard_classification['classes']):
# Get the hazard class name.
field_name = hazard_count_field['field_name'] % hazard_class['key']
# Get the number of affected feature by this hazard class.
try:
value = analysis_row[field_name]
except KeyError:
# The field might not exist if no feature impacted in this hazard
# zone.
value = 0
value = format_number(
value,
use_rounding,
exposure_definitions['use_population_rounding'],
coefficient)
minimum = None
maximum = None
# Check if we need to add thresholds.
if thresholds:
if i == 0:
minimum = thresholds[hazard_class['key']][0]
elif i == len(hazard_classification['classes']) - 1:
maximum = thresholds[hazard_class['key']][1]
else:
minimum = thresholds[hazard_class['key']][0]
maximum = thresholds[hazard_class['key']][1]
label = _format_label(
hazard_class=hazard_class['name'],
value=value,
exposure_unit=exposure_unit['abbreviation'],
minimum=minimum,
maximum=maximum,
hazard_unit=hazard_unit)
classes[hazard_class['key']] = (hazard_class['color'], label)
if exposure_definitions['display_not_exposed'] or debug_mode:
classes[not_exposed_class['key']] = _add_not_exposed(
analysis_row,
use_rounding,
exposure_definitions['use_population_rounding'],
exposure_unit['abbreviation'],
coefficient)
return classes | Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
:param use_rounding: Boolean if we round number in the legend.
:type use_rounding: bool
:param debug_mode: Boolean if run in debug mode, to display the not exposed.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict | Below is the instruction that describes the task:
### Input:
Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
:param use_rounding: Boolean if we round number in the legend.
:type use_rounding: bool
:param debug_mode: Boolean if run in debug mode, to display the not exposed.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict
### Response:
def generate_classified_legend(
analysis,
exposure,
hazard,
use_rounding,
debug_mode):
"""Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
:param use_rounding: Boolean if we round number in the legend.
:type use_rounding: bool
:param debug_mode: Boolean if run in debug mode, to display the not exposed.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict
"""
# We need to read the analysis layer to get the number of features.
analysis_row = next(analysis.getFeatures())
# Let's style the hazard class in each layers.
hazard_classification = hazard.keywords['classification']
hazard_classification = definition(hazard_classification)
# Let's check if there is some thresholds:
thresholds = hazard.keywords.get('thresholds')
if thresholds:
hazard_unit = hazard.keywords.get('continuous_hazard_unit')
hazard_unit = definition(hazard_unit)['abbreviation']
else:
hazard_unit = None
exposure = exposure.keywords['exposure']
exposure_definitions = definition(exposure)
exposure_units = exposure_definitions['units']
exposure_unit = exposure_units[0]
coefficient = 1
# We check if we can use a greater unit, such as kilometre for instance.
if len(exposure_units) > 1:
# We use only two units for now.
delta = coefficient_between_units(
exposure_units[1], exposure_units[0])
all_values_are_greater = True
# We check if all values are greater than the coefficient
for i, hazard_class in enumerate(hazard_classification['classes']):
field_name = hazard_count_field['field_name'] % hazard_class['key']
try:
value = analysis_row[field_name]
except KeyError:
value = 0
if 0 < value < delta:
# 0 is fine, we can still keep the second unit.
all_values_are_greater = False
if all_values_are_greater:
# If yes, we can use this unit.
exposure_unit = exposure_units[1]
coefficient = delta
classes = OrderedDict()
for i, hazard_class in enumerate(hazard_classification['classes']):
# Get the hazard class name.
field_name = hazard_count_field['field_name'] % hazard_class['key']
# Get the number of affected feature by this hazard class.
try:
value = analysis_row[field_name]
except KeyError:
# The field might not exist if no feature impacted in this hazard
# zone.
value = 0
value = format_number(
value,
use_rounding,
exposure_definitions['use_population_rounding'],
coefficient)
minimum = None
maximum = None
# Check if we need to add thresholds.
if thresholds:
if i == 0:
minimum = thresholds[hazard_class['key']][0]
elif i == len(hazard_classification['classes']) - 1:
maximum = thresholds[hazard_class['key']][1]
else:
minimum = thresholds[hazard_class['key']][0]
maximum = thresholds[hazard_class['key']][1]
label = _format_label(
hazard_class=hazard_class['name'],
value=value,
exposure_unit=exposure_unit['abbreviation'],
minimum=minimum,
maximum=maximum,
hazard_unit=hazard_unit)
classes[hazard_class['key']] = (hazard_class['color'], label)
if exposure_definitions['display_not_exposed'] or debug_mode:
classes[not_exposed_class['key']] = _add_not_exposed(
analysis_row,
use_rounding,
exposure_definitions['use_population_rounding'],
exposure_unit['abbreviation'],
coefficient)
return classes |
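The caller receives nothing more than an ordered mapping from hazard class key to a (colour, label) pair; a purely illustrative example of the shape of that return value (keys, colours and counts are made up and depend on the classification actually used):
from collections import OrderedDict

classes = OrderedDict([
    ('high', ('#ff0000', 'High (1,200 people)')),
    ('medium', ('#ffa500', 'Medium (3,400 people)')),
    ('low', ('#ffff00', 'Low (10,000 people)')),
])
for key, (colour, label) in classes.items():
    print(key, colour, label)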
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu(R,z,ac=self._ac,Delta=self._Delta)
return -1./(nu.sqrt(l) + nu.sqrt(n)) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA) | Below is the instruction that describes the task:
### Input:
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA)
### Response:
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu(R,z,ac=self._ac,Delta=self._Delta)
return -1./(nu.sqrt(l) + nu.sqrt(n)) |
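Written out, the potential evaluated above is simply Phi(lambda, nu) = -1 / (sqrt(lambda) + sqrt(nu)) in the (lambda, nu) coordinates returned by Rz_to_lambdanu; a quick standalone check with arbitrary coordinate values:
import numpy as np

def staeckel_phi(lam, nu_coord):
    # Phi(lambda, nu) = -1 / (sqrt(lambda) + sqrt(nu))
    return -1.0 / (np.sqrt(lam) + np.sqrt(nu_coord))

print(staeckel_phi(4.0, 1.0))  # -1/3, i.e. about -0.333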
def plotActivation(self, position=None, time=None, velocity=None):
"""
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
"""
self.ax1.clear()
y = self.activations["n"] + self.activations["s"] + self.activations["e"] + \
self.activations["w"]
self.ax1.matshow(y.reshape(self.dimensions))
self.ax2.clear()
self.ax2.matshow(self.activationsI.reshape(self.dimensions))
self.ax3.clear()
self.ax3.matshow(self.activationHistoryI.reshape(self.dimensions))
titleString = ""
if time is not None:
titleString += "Time = {}".format(str(time))
if velocity is not None:
titleString += " Velocity = {}".format(str(velocity)[:4])
if position is not None:
titleString += " Position = {}".format(str(position)[:4])
plt.suptitle(titleString)
self.ax1.set_xlabel("Excitatory activity")
self.ax2.set_xlabel("Inhibitory activity")
self.ax3.set_xlabel("Boosting activity")
plt.tight_layout()
self.fig.canvas.draw() | Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal | Below is the instruction that describes the task:
### Input:
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
### Response:
def plotActivation(self, position=None, time=None, velocity=None):
"""
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
"""
self.ax1.clear()
y = self.activations["n"] + self.activations["s"] + self.activations["e"] + \
self.activations["w"]
self.ax1.matshow(y.reshape(self.dimensions))
self.ax2.clear()
self.ax2.matshow(self.activationsI.reshape(self.dimensions))
self.ax3.clear()
self.ax3.matshow(self.activationHistoryI.reshape(self.dimensions))
titleString = ""
if time is not None:
titleString += "Time = {}".format(str(time))
if velocity is not None:
titleString += " Velocity = {}".format(str(velocity)[:4])
if position is not None:
titleString += " Position = {}".format(str(position)[:4])
plt.suptitle(titleString)
self.ax1.set_xlabel("Excitatory activity")
self.ax2.set_xlabel("Inhibitory activity")
self.ax3.set_xlabel("Boosting activity")
plt.tight_layout()
self.fig.canvas.draw() |
def get_dataset(self, key, info):
"""Read data from file and return the corresponding projectables."""
datadict = {
1000: ['EV_250_Aggr1km_RefSB',
'EV_500_Aggr1km_RefSB',
'EV_1KM_RefSB',
'EV_1KM_Emissive'],
500: ['EV_250_Aggr500_RefSB',
'EV_500_RefSB'],
250: ['EV_250_RefSB']}
platform_name = self.metadata['INVENTORYMETADATA']['ASSOCIATEDPLATFORMINSTRUMENTSENSOR'][
'ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER']['ASSOCIATEDPLATFORMSHORTNAME']['VALUE']
info.update({'platform_name': 'EOS-' + platform_name})
info.update({'sensor': 'modis'})
if self.resolution != key.resolution:
return
datasets = datadict[self.resolution]
for dataset in datasets:
subdata = self.sd.select(dataset)
var_attrs = subdata.attributes()
band_names = var_attrs["band_names"].split(",")
# get the relative indices of the desired channel
try:
index = band_names.index(key.name)
except ValueError:
continue
uncertainty = self.sd.select(dataset + "_Uncert_Indexes")
array = xr.DataArray(from_sds(subdata, chunks=CHUNK_SIZE)[index, :, :],
dims=['y', 'x']).astype(np.float32)
valid_range = var_attrs['valid_range']
# Fill values:
# Data Value Meaning
# 65535 Fill Value (includes reflective band data at night mode
# and completely missing L1A scans)
# 65534 L1A DN is missing within a scan
# 65533 Detector is saturated
# 65532 Cannot compute zero point DN, e.g., SV is saturated
# 65531 Detector is dead (see comments below)
# 65530 RSB dn** below the minimum of the scaling range
# 65529 TEB radiance or RSB dn** exceeds the maximum of the
# scaling range
# 65528 Aggregation algorithm failure
# 65527 Rotation of Earth view Sector from nominal science
# collection position
# 65526 Calibration coefficient b1 could not be computed
# 65525 Subframe is dead
# 65524 Both sides of the PCLW electronics on simultaneously
# 65501 - 65523 (reserved for future use)
# 65500 NAD closed upper limit
array = array.where(array >= np.float32(valid_range[0]))
array = array.where(array <= np.float32(valid_range[1]))
array = array.where(from_sds(uncertainty, chunks=CHUNK_SIZE)[index, :, :] < 15)
if key.calibration == 'brightness_temperature':
projectable = calibrate_bt(array, var_attrs, index, key.name)
info.setdefault('units', 'K')
info.setdefault('standard_name', 'toa_brightness_temperature')
elif key.calibration == 'reflectance':
projectable = calibrate_refl(array, var_attrs, index)
info.setdefault('units', '%')
info.setdefault('standard_name',
'toa_bidirectional_reflectance')
elif key.calibration == 'radiance':
projectable = calibrate_radiance(array, var_attrs, index)
info.setdefault('units', var_attrs.get('radiance_units'))
info.setdefault('standard_name',
'toa_outgoing_radiance_per_unit_wavelength')
elif key.calibration == 'counts':
projectable = calibrate_counts(array, var_attrs, index)
info.setdefault('units', 'counts')
info.setdefault('standard_name', 'counts') # made up
else:
raise ValueError("Unknown calibration for "
"key: {}".format(key))
projectable.attrs = info
# if ((platform_name == 'Aqua' and key.name in ["6", "27", "36"]) or
# (platform_name == 'Terra' and key.name in ["29"])):
# height, width = projectable.shape
# row_indices = projectable.mask.sum(1) == width
# if row_indices.sum() != height:
# projectable.mask[row_indices, :] = True
# Get the orbit number
# if not satscene.orbit:
# mda = self.data.attributes()["CoreMetadata.0"]
# orbit_idx = mda.index("ORBITNUMBER")
# satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]
# Trimming out dead sensor lines (detectors) on terra:
# (in addition channels 27, 30, 34, 35, and 36 are noisy)
# if satscene.satname == "terra":
# for band in ["29"]:
# if not satscene[band].is_loaded() or satscene[band].data.mask.all():
# continue
# width = satscene[band].data.shape[1]
# height = satscene[band].data.shape[0]
# indices = satscene[band].data.mask.sum(1) < width
# if indices.sum() == height:
# continue
# satscene[band] = satscene[band].data[indices, :]
# satscene[band].area = geometry.SwathDefinition(
# lons=satscene[band].area.lons[indices, :],
# lats=satscene[band].area.lats[indices, :])
return projectable | Read data from file and return the corresponding projectables. | Below is the instruction that describes the task:
### Input:
Read data from file and return the corresponding projectables.
### Response:
def get_dataset(self, key, info):
"""Read data from file and return the corresponding projectables."""
datadict = {
1000: ['EV_250_Aggr1km_RefSB',
'EV_500_Aggr1km_RefSB',
'EV_1KM_RefSB',
'EV_1KM_Emissive'],
500: ['EV_250_Aggr500_RefSB',
'EV_500_RefSB'],
250: ['EV_250_RefSB']}
platform_name = self.metadata['INVENTORYMETADATA']['ASSOCIATEDPLATFORMINSTRUMENTSENSOR'][
'ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER']['ASSOCIATEDPLATFORMSHORTNAME']['VALUE']
info.update({'platform_name': 'EOS-' + platform_name})
info.update({'sensor': 'modis'})
if self.resolution != key.resolution:
return
datasets = datadict[self.resolution]
for dataset in datasets:
subdata = self.sd.select(dataset)
var_attrs = subdata.attributes()
band_names = var_attrs["band_names"].split(",")
# get the relative indices of the desired channel
try:
index = band_names.index(key.name)
except ValueError:
continue
uncertainty = self.sd.select(dataset + "_Uncert_Indexes")
array = xr.DataArray(from_sds(subdata, chunks=CHUNK_SIZE)[index, :, :],
dims=['y', 'x']).astype(np.float32)
valid_range = var_attrs['valid_range']
# Fill values:
# Data Value Meaning
# 65535 Fill Value (includes reflective band data at night mode
# and completely missing L1A scans)
# 65534 L1A DN is missing within a scan
# 65533 Detector is saturated
# 65532 Cannot compute zero point DN, e.g., SV is saturated
# 65531 Detector is dead (see comments below)
# 65530 RSB dn** below the minimum of the scaling range
# 65529 TEB radiance or RSB dn** exceeds the maximum of the
# scaling range
# 65528 Aggregation algorithm failure
# 65527 Rotation of Earth view Sector from nominal science
# collection position
# 65526 Calibration coefficient b1 could not be computed
# 65525 Subframe is dead
# 65524 Both sides of the PCLW electronics on simultaneously
# 65501 - 65523 (reserved for future use)
# 65500 NAD closed upper limit
array = array.where(array >= np.float32(valid_range[0]))
array = array.where(array <= np.float32(valid_range[1]))
array = array.where(from_sds(uncertainty, chunks=CHUNK_SIZE)[index, :, :] < 15)
if key.calibration == 'brightness_temperature':
projectable = calibrate_bt(array, var_attrs, index, key.name)
info.setdefault('units', 'K')
info.setdefault('standard_name', 'toa_brightness_temperature')
elif key.calibration == 'reflectance':
projectable = calibrate_refl(array, var_attrs, index)
info.setdefault('units', '%')
info.setdefault('standard_name',
'toa_bidirectional_reflectance')
elif key.calibration == 'radiance':
projectable = calibrate_radiance(array, var_attrs, index)
info.setdefault('units', var_attrs.get('radiance_units'))
info.setdefault('standard_name',
'toa_outgoing_radiance_per_unit_wavelength')
elif key.calibration == 'counts':
projectable = calibrate_counts(array, var_attrs, index)
info.setdefault('units', 'counts')
info.setdefault('standard_name', 'counts') # made up
else:
raise ValueError("Unknown calibration for "
"key: {}".format(key))
projectable.attrs = info
# if ((platform_name == 'Aqua' and key.name in ["6", "27", "36"]) or
# (platform_name == 'Terra' and key.name in ["29"])):
# height, width = projectable.shape
# row_indices = projectable.mask.sum(1) == width
# if row_indices.sum() != height:
# projectable.mask[row_indices, :] = True
# Get the orbit number
# if not satscene.orbit:
# mda = self.data.attributes()["CoreMetadata.0"]
# orbit_idx = mda.index("ORBITNUMBER")
# satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]
# Trimming out dead sensor lines (detectors) on terra:
# (in addition channels 27, 30, 34, 35, and 36 are noisy)
# if satscene.satname == "terra":
# for band in ["29"]:
# if not satscene[band].is_loaded() or satscene[band].data.mask.all():
# continue
# width = satscene[band].data.shape[1]
# height = satscene[band].data.shape[0]
# indices = satscene[band].data.mask.sum(1) < width
# if indices.sum() == height:
# continue
# satscene[band] = satscene[band].data[indices, :]
# satscene[band].area = geometry.SwathDefinition(
# lons=satscene[band].area.lons[indices, :],
# lats=satscene[band].area.lats[indices, :])
return projectable |
def _read_by_weight(self, F, att_weights, value):
"""Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
"""
output = F.batch_dot(att_weights, value)
return output | Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim) | Below is the instruction that describes the task:
### Input:
Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
### Response:
def _read_by_weight(self, F, att_weights, value):
"""Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
"""
output = F.batch_dot(att_weights, value)
return output |
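batch_dot here is just a batched matrix product, so the shape bookkeeping can be checked with plain numpy (batch and length sizes below are arbitrary):
import numpy as np

batch_size, query_length, memory_length, value_dim = 2, 3, 5, 4
att_weights = np.random.rand(batch_size, query_length, memory_length)
value = np.random.rand(batch_size, memory_length, value_dim)
context_vec = np.matmul(att_weights, value)   # batched matrix product
print(context_vec.shape)                      # (2, 3, 4): (batch, query_length, value_dim)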
def for_meters(cls, meter_x, meter_y, zoom):
"""Creates a tile from X Y meters in Spherical Mercator EPSG:900913"""
point = Point.from_meters(meter_x=meter_x, meter_y=meter_y)
pixel_x, pixel_y = point.pixels(zoom=zoom)
return cls.for_pixels(pixel_x=pixel_x, pixel_y=pixel_y, zoom=zoom) | Creates a tile from X Y meters in Spherical Mercator EPSG:900913 | Below is the instruction that describes the task:
### Input:
Creates a tile from X Y meters in Spherical Mercator EPSG:900913
### Response:
def for_meters(cls, meter_x, meter_y, zoom):
"""Creates a tile from X Y meters in Spherical Mercator EPSG:900913"""
point = Point.from_meters(meter_x=meter_x, meter_y=meter_y)
pixel_x, pixel_y = point.pixels(zoom=zoom)
return cls.for_pixels(pixel_x=pixel_x, pixel_y=pixel_y, zoom=zoom) |
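A hedged usage sketch; the import path assumes this classmethod lives on the Tile class of the pygeotile package, and the coordinates are arbitrary Spherical Mercator meters:
from pygeotile.tile import Tile  # assumed package/module layout

tile = Tile.for_meters(meter_x=-9757148.0, meter_y=5138517.0, zoom=11)
print(tile)  # the tile containing that point at zoom level 11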
def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp | Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered | Below is the instruction that describes the task:
### Input:
Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
### Response:
def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp |
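Under the hood the helper runs a single MAFFT command with stdout redirected to <input>.msa; a roughly equivalent standalone call, assuming MAFFT is installed and seqs.fasta is a hypothetical input file:
import subprocess

seqs_fp = 'seqs.fasta'  # hypothetical FASTA file
with open(seqs_fp + '.msa', 'w') as out:
    subprocess.check_call(
        ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
         '--thread', '4', seqs_fp],
        stdout=out)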
def contains(self, string):
"""Summary
Returns:
TYPE: Description
"""
# Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call contains on non string type") | Summary
Returns:
TYPE: Description | Below is the instruction that describes the task:
### Input:
Summary
Returns:
TYPE: Description
### Response:
def contains(self, string):
"""Summary
Returns:
TYPE: Description
"""
# Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call contains on non string type")
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg) | Flask's flash if available, logging call if not | Below is the instruction that describes the task:
### Input:
Flask's flash if available, logging call if not
### Response:
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg) |
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section | Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found. | Below is the instruction that describes the task:
### Input:
Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
### Response:
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section |
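The search order implemented above is: current section, that section's 'DEFAULT', then the parent section, repeating until the top level (which is its own parent). A small dictionary-based sketch of the same walk, independent of ConfigObj (the nested layout is only an illustration):
def fetch(section, key):
    current = section
    while True:
        if key in current and not isinstance(current[key], dict):
            return current[key]                 # found in the section itself
        default = current.get('DEFAULT', {})
        if key in default:
            return default[key]                 # found in the section's DEFAULT
        if current.get('parent') is current:
            raise KeyError(key)                 # reached top level, give up
        current = current['parent']             # move up and try again

root = {'DEFAULT': {'home': '/tmp'}, 'parent': None}
root['parent'] = root                           # top level is its own parent
child = {'parent': root}
print(fetch(child, 'home'))                     # /tmp, found via the parent's DEFAULT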
def url(self):
"""
Returns the rendered URL of the chart
"""
self.render()
return self._apiurl + '&'.join(self._parts()).replace(' ','+') | Returns the rendered URL of the chart | Below is the instruction that describes the task:
### Input:
Returns the rendered URL of the chart
### Response:
def url(self):
"""
Returns the rendered URL of the chart
"""
self.render()
return self._apiurl + '&'.join(self._parts()).replace(' ','+') |
def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id | Below is the instruction that describes the task:
### Input:
Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
### Response:
def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) |
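A hedged usage sketch; the import path assumes the delocate package where this helper is defined, and the library path and install id below are hypothetical:
from delocate.tools import set_install_id  # assumed module path

lib = 'build/lib/libexample.dylib'      # hypothetical dylib
new_id = '@rpath/libexample.dylib'      # hypothetical install id
set_install_id(lib, new_id)
# Equivalent to: install_name_tool -id @rpath/libexample.dylib build/lib/libexample.dylib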
def train(self):
"""Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
"""
self.stamp_start = time.time()
for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
desc='train', total=self.max_iter,
ncols=80):
self.epoch = self.iter_train.epoch
self.iteration = iteration
############
# validate #
############
if self.interval_validate and \
self.iteration % self.interval_validate == 0:
self.validate()
#########
# train #
#########
batch = map(datasets.transform_lsvrc2012_vgg16, batch)
in_vars = utils.batch_to_vars(batch, device=self.device)
self.model.zerograds()
loss = self.model(*in_vars)
if loss is not None:
loss.backward()
self.optimizer.update()
lbl_true = zip(*batch)[1]
lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
acc = utils.label_accuracy_score(
lbl_true, lbl_pred, self.model.n_class)
self._write_log(**{
'epoch': self.epoch,
'iteration': self.iteration,
'elapsed_time': time.time() - self.stamp_start,
'train/loss': float(loss.data),
'train/acc': acc[0],
'train/acc_cls': acc[1],
'train/mean_iu': acc[2],
'train/fwavacc': acc[3],
})
if iteration >= self.max_iter:
self._save_model()
break | Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
### Response:
def train(self):
"""Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
"""
self.stamp_start = time.time()
for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
desc='train', total=self.max_iter,
ncols=80):
self.epoch = self.iter_train.epoch
self.iteration = iteration
############
# validate #
############
if self.interval_validate and \
self.iteration % self.interval_validate == 0:
self.validate()
#########
# train #
#########
batch = map(datasets.transform_lsvrc2012_vgg16, batch)
in_vars = utils.batch_to_vars(batch, device=self.device)
self.model.zerograds()
loss = self.model(*in_vars)
if loss is not None:
loss.backward()
self.optimizer.update()
lbl_true = zip(*batch)[1]
lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
acc = utils.label_accuracy_score(
lbl_true, lbl_pred, self.model.n_class)
self._write_log(**{
'epoch': self.epoch,
'iteration': self.iteration,
'elapsed_time': time.time() - self.stamp_start,
'train/loss': float(loss.data),
'train/acc': acc[0],
'train/acc_cls': acc[1],
'train/mean_iu': acc[2],
'train/fwavacc': acc[3],
})
if iteration >= self.max_iter:
self._save_model()
break |
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
"""
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
"""
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile(r"__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters | ?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax. | Below is the instruction that describes the task:
### Input:
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
### Response:
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
"""
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
"""
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile(r"__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters |
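As a quick illustration of the naming grammar described in the docstring, the same variable_matcher regex can be exercised standalone on a few made-up BASIL-style names (illustrative inputs, not taken from any real query):

import re

variable_matcher = re.compile(
    "(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")

for v in ["_country", "__label_en", "_year_integer", "_weight_unit_kilogram"]:
    m = variable_matcher.match(v)
    # required is '_' (mandatory) or '__' (optional); type/userdefined may be None
    print(v, m.group("required"), m.group("name"), m.group("type"), m.group("userdefined"))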
def create_blueprint(endpoints):
"""Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint.
"""
blueprint = Blueprint(
'invenio_records_ui',
__name__,
url_prefix='',
template_folder='templates',
static_folder='static',
)
@blueprint.errorhandler(PIDDeletedError)
def tombstone_errorhandler(error):
return render_template(
current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'],
pid=error.pid,
record=error.record or {},
), 410
@blueprint.context_processor
def inject_export_formats():
return dict(
export_formats=(
current_app.extensions['invenio-records-ui'].export_formats)
)
for endpoint, options in (endpoints or {}).items():
blueprint.add_url_rule(**create_url_rule(endpoint, **options))
return blueprint | Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint. | Below is the instruction that describes the task:
### Input:
Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint.
### Response:
def create_blueprint(endpoints):
"""Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint.
"""
blueprint = Blueprint(
'invenio_records_ui',
__name__,
url_prefix='',
template_folder='templates',
static_folder='static',
)
@blueprint.errorhandler(PIDDeletedError)
def tombstone_errorhandler(error):
return render_template(
current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'],
pid=error.pid,
record=error.record or {},
), 410
@blueprint.context_processor
def inject_export_formats():
return dict(
export_formats=(
current_app.extensions['invenio-records-ui'].export_formats)
)
for endpoint, options in (endpoints or {}).items():
blueprint.add_url_rule(**create_url_rule(endpoint, **options))
return blueprint |
def stop(self):
""" Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop.
"""
if not self._providers_registered:
self.queue_consumer.unregister_provider(self)
self._unregistered_from_queue_consumer.send(True) | Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop. | Below is the instruction that describes the task:
### Input:
Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop.
### Response:
def stop(self):
""" Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop.
"""
if not self._providers_registered:
self.queue_consumer.unregister_provider(self)
self._unregistered_from_queue_consumer.send(True) |
def get_object_or_child_by_type(self, *types):
""" Get object if child already been read or get child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: all children of the specified types.
"""
objects = self.get_objects_or_children_by_type(*types)
return objects[0] if any(objects) else None | Get object if child has already been read, or get child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: all children of the specified types. | Below is the instruction that describes the task:
### Input:
Get object if child has already been read, or get child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: all children of the specified types.
### Response:
def get_object_or_child_by_type(self, *types):
""" Get object if child already been read or get child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: all children of the specified types.
"""
objects = self.get_objects_or_children_by_type(*types)
return objects[0] if any(objects) else None |
def answer(part, module='mlai2014.json'):
"""Returns the answers to the lab classes."""
marks = json.load(open(os.path.join(data_directory, module), 'rb'))
return marks['Lab ' + str(part+1)] | Returns the answers to the lab classes. | Below is the instruction that describes the task:
### Input:
Returns the answers to the lab classes.
### Response:
def answer(part, module='mlai2014.json'):
"""Returns the answers to the lab classes."""
marks = json.load(open(os.path.join(data_directory, module), 'rb'))
return marks['Lab ' + str(part+1)] |
def import_medusa_data(mat_filename, config_file):
"""Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
"""
df_emd, df_md = _read_mat_mnu0(mat_filename)
# 'configs' can be a numpy array or a filename
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
# construct four-point measurements via superposition
print('constructing four-point measurements')
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
# print('constructing', Ar, Br, M, N)
# the order of A and B doesn't concern us
A = np.min((Ar, Br))
B = np.max((Ar, Br))
# first choice: correct ordering
query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, M
))
query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, N
))
if query_M.size == 0 or query_N.size == 0:
continue
index += 1
# keep these columns as they are (no subtracting)
keep_cols = [
'datetime',
'frequency',
'a', 'b',
'Zg1', 'Zg2', 'Zg3',
'Is',
'Il',
'Zg',
'Iab',
]
df4 = pd.DataFrame()
diff_cols = ['Zt', ]
df4[keep_cols] = query_M[keep_cols]
for col in diff_cols:
df4[col] = query_M[col].values - query_N[col].values
df4['m'] = query_M['p'].values
df4['n'] = query_N['p'].values
quadpole_list.append(df4)
if quadpole_list:
dfn = pd.concat(quadpole_list)
Rsign = np.sign(dfn['Zt'].real)
dfn['r'] = Rsign * np.abs(dfn['Zt'])
dfn['Vmn'] = dfn['r'] * dfn['Iab']
dfn['rpha'] = np.arctan2(
np.imag(dfn['Zt'].values),
np.real(dfn['Zt'].values)
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md | Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
------- | Below is the instruction that describes the task:
### Input:
Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
### Response:
def import_medusa_data(mat_filename, config_file):
"""Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
"""
df_emd, df_md = _read_mat_mnu0(mat_filename)
# 'configs' can be a numpy array or a filename
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
# construct four-point measurements via superposition
print('constructing four-point measurements')
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
# print('constructing', Ar, Br, M, N)
# the order of A and B doesn't concern us
A = np.min((Ar, Br))
B = np.max((Ar, Br))
# first choice: correct ordering
query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, M
))
query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, N
))
if query_M.size == 0 or query_N.size == 0:
continue
index += 1
# keep these columns as they are (no subtracting)
keep_cols = [
'datetime',
'frequency',
'a', 'b',
'Zg1', 'Zg2', 'Zg3',
'Is',
'Il',
'Zg',
'Iab',
]
df4 = pd.DataFrame()
diff_cols = ['Zt', ]
df4[keep_cols] = query_M[keep_cols]
for col in diff_cols:
df4[col] = query_M[col].values - query_N[col].values
df4['m'] = query_M['p'].values
df4['n'] = query_N['p'].values
quadpole_list.append(df4)
if quadpole_list:
dfn = pd.concat(quadpole_list)
Rsign = np.sign(dfn['Zt'].real)
dfn['r'] = Rsign * np.abs(dfn['Zt'])
dfn['Vmn'] = dfn['r'] * dfn['Iab']
dfn['rpha'] = np.arctan2(
np.imag(dfn['Zt'].values),
np.real(dfn['Zt'].values)
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md |
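The core of the superposition step above is a pair of df.query lookups followed by a column-wise subtraction; a toy version with made-up values (not real EIT data) looks like this:

import pandas as pd

df = pd.DataFrame({
    "a": [1, 1], "b": [4, 4], "p": [2, 3],
    "Zt": [10.0 + 1.0j, 4.0 + 0.5j],
})
query_M = df.query("a == 1 and b == 4 and p == 2")
query_N = df.query("a == 1 and b == 4 and p == 3")
# Zt_M - Zt_N gives the four-point transfer impedance for this configuration
print(query_M["Zt"].values - query_N["Zt"].values)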
def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2*quote)
return '{0}{1}{2}'.format(quote, identifier, quote) | Escape identifiers.
ANSI uses double quotes, but many databases use back quotes. | Below is the instruction that describes the task:
### Input:
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
### Response:
def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2*quote)
return '{0}{1}{2}'.format(quote, identifier, quote) |
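A minimal usage sketch for the escape() helper above; it assumes escape is in scope and uses an illustrative should_quote rule:

def should_quote(identifier):
    # illustrative rule: quote anything that is not a plain lowercase identifier
    return not identifier.isidentifier() or identifier != identifier.lower()

print(escape("orders", ansi_quotes=True, should_quote=should_quote))      # orders
print(escape('odd"name', ansi_quotes=True, should_quote=should_quote))    # "odd""name"
print(escape("odd`name", ansi_quotes=False, should_quote=should_quote))   # `odd``name`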
def is_valid_ipv4 (ip):
"""
Return True if given ip is a valid IPv4 address.
"""
if not _ipv4_re.match(ip):
return False
a, b, c, d = [int(i) for i in ip.split(".")]
return a <= 255 and b <= 255 and c <= 255 and d <= 255 | Return True if given ip is a valid IPv4 address. | Below is the instruction that describes the task:
### Input:
Return True if given ip is a valid IPv4 address.
### Response:
def is_valid_ipv4 (ip):
"""
Return True if given ip is a valid IPv4 address.
"""
if not _ipv4_re.match(ip):
return False
a, b, c, d = [int(i) for i in ip.split(".")]
return a <= 255 and b <= 255 and c <= 255 and d <= 255 |
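The helper relies on a module-level _ipv4_re that is not shown above; with a plausible stand-in pattern (an assumption, not the library's actual regex) the behaviour looks like this:

import re

_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")  # assumed pattern, not from the source

print(is_valid_ipv4("192.168.0.1"))  # True
print(is_valid_ipv4("256.1.1.1"))    # False: the regex matches but an octet exceeds 255
print(is_valid_ipv4("not.an.ip"))    # False: the regex does not match at all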
def get_api_id(self, lambda_name):
"""
Given a lambda_name, return the API id.
"""
try:
response = self.cf_client.describe_stack_resource(StackName=lambda_name,
LogicalResourceId='Api')
return response['StackResourceDetail'].get('PhysicalResourceId', None)
except: # pragma: no cover
try:
# Try the old method (project was probably made on an older, non CF version)
response = self.apigateway_client.get_rest_apis(limit=500)
for item in response['items']:
if item['name'] == lambda_name:
return item['id']
logger.exception('Could not get API ID.')
return None
except: # pragma: no cover
# We don't even have an API deployed. That's okay!
return None | Given a lambda_name, return the API id. | Below is the instruction that describes the task:
### Input:
Given a lambda_name, return the API id.
### Response:
def get_api_id(self, lambda_name):
"""
Given a lambda_name, return the API id.
"""
try:
response = self.cf_client.describe_stack_resource(StackName=lambda_name,
LogicalResourceId='Api')
return response['StackResourceDetail'].get('PhysicalResourceId', None)
except: # pragma: no cover
try:
# Try the old method (project was probably made on an older, non CF version)
response = self.apigateway_client.get_rest_apis(limit=500)
for item in response['items']:
if item['name'] == lambda_name:
return item['id']
logger.exception('Could not get API ID.')
return None
except: # pragma: no cover
# We don't even have an API deployed. That's okay!
return None |
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
"""
index = dict['index']
seed = hb_decode(dict['seed'])
n = dict['n']
root = hb_decode(dict['root'])
hmac = hb_decode(dict['hmac'])
timestamp = dict['timestamp']
self = State(index, seed, n, root, hmac, timestamp)
return self | Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert | Below is the instruction that describes the task:
### Input:
Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
### Response:
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
"""
index = dict['index']
seed = hb_decode(dict['seed'])
n = dict['n']
root = hb_decode(dict['root'])
hmac = hb_decode(dict['hmac'])
timestamp = dict['timestamp']
self = State(index, seed, n, root, hmac, timestamp)
return self |
def group_add(self, name, restrict, repos, lces=[], assets=[], queries=[],
policies=[], dashboards=[], credentials=[], description=''):
'''group_add name, restrict, repos
'''
return self.raw_query('group', 'add', data={
'lces': [{'id': i} for i in lces],
'assets': [{'id': i} for i in assets],
'queries': [{'id': i} for i in queries],
'policies': [{'id': i} for i in policies],
'dashboardTabs': [{'id': i} for i in dashboards],
'credentials': [{'id': i} for i in credentials],
'repositories': [{'id': i} for i in repos],
'definingAssets': [{'id': i} for i in restrict],
'name': name,
'description': description,
'users': [],
'context': ''
}) | group_add name, restrict, repos | Below is the instruction that describes the task:
### Input:
group_add name, restrict, repos
### Response:
def group_add(self, name, restrict, repos, lces=[], assets=[], queries=[],
policies=[], dashboards=[], credentials=[], description=''):
'''group_add name, restrict, repos
'''
return self.raw_query('group', 'add', data={
'lces': [{'id': i} for i in lces],
'assets': [{'id': i} for i in assets],
'queries': [{'id': i} for i in queries],
'policies': [{'id': i} for i in policies],
'dashboardTabs': [{'id': i} for i in dashboards],
'credentials': [{'id': i} for i in credentials],
'repositories': [{'id': i} for i in repos],
'definingAssets': [{'id': i} for i in restrict],
'name': name,
'description': description,
'users': [],
'context': ''
}) |
def verify_connectivity(config):
"""
Verify connectivity to satellite server
"""
logger.debug("Verifying Connectivity")
ic = InsightsConnection(config)
try:
branch_info = ic.get_branch_info()
except requests.ConnectionError as e:
logger.debug(e)
logger.debug("Failed to connect to satellite")
return False
except LookupError as e:
logger.debug(e)
logger.debug("Failed to parse response from satellite")
return False
try:
remote_leaf = branch_info['remote_leaf']
return remote_leaf
except LookupError as e:
logger.debug(e)
logger.debug("Failed to find accurate branch_info")
return False | Verify connectivity to satellite server | Below is the instruction that describes the task:
### Input:
Verify connectivity to satellite server
### Response:
def verify_connectivity(config):
"""
Verify connectivity to satellite server
"""
logger.debug("Verifying Connectivity")
ic = InsightsConnection(config)
try:
branch_info = ic.get_branch_info()
except requests.ConnectionError as e:
logger.debug(e)
logger.debug("Failed to connect to satellite")
return False
except LookupError as e:
logger.debug(e)
logger.debug("Failed to parse response from satellite")
return False
try:
remote_leaf = branch_info['remote_leaf']
return remote_leaf
except LookupError as e:
logger.debug(e)
logger.debug("Failed to find accurate branch_info")
return False |
def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
if hasattr(resp,'text'):
# modern requests will do this for us
text = resp.text # this is unicode(py2)/str(py3)
else:
encoding = requests.utils.get_encoding_from_headers(resp.headers)
if encoding is None:
encoding='utf-8' # FIXME: what to do here?
if sys.version_info[0]==2:
text = unicode(resp.content, encoding, errors='replace')
else:
assert sys.version_info[0]==3
text = str(resp.content, encoding, errors='replace')
p.feed(text)
p.close()
return u.close() | Parse the xmlrpc response. | Below is the instruction that describes the task:
### Input:
Parse the xmlrpc response.
### Response:
def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
if hasattr(resp,'text'):
# modern requests will do this for us
text = resp.text # this is unicode(py2)/str(py3)
else:
encoding = requests.utils.get_encoding_from_headers(resp.headers)
if encoding is None:
encoding='utf-8' # FIXME: what to do here?
if sys.version_info[0]==2:
text = unicode(resp.content, encoding, errors='replace')
else:
assert sys.version_info[0]==3
text = str(resp.content, encoding, errors='replace')
p.feed(text)
p.close()
return u.close() |
def validate_path(xj_path):
"""Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
"""
if not isinstance(xj_path, str):
raise XJPathError('XJPath must be a string')
for path in split(xj_path, '.'):
if path == '*':
continue
if path.startswith('@'):
if path == '@first' or path == '@last':
continue
try:
int(path[1:])
except ValueError:
raise XJPathError('Array index must be either integer or '
'@first or @last') | Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails. | Below is the instruction that describes the task:
### Input:
Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
### Response:
def validate_path(xj_path):
"""Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
"""
if not isinstance(xj_path, str):
raise XJPathError('XJPath must be a string')
for path in split(xj_path, '.'):
if path == '*':
continue
if path.startswith('@'):
if path == '@first' or path == '@last':
continue
try:
int(path[1:])
except ValueError:
raise XJPathError('Array index must be either integer or '
'@first or @last') |
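Assuming validate_path and XJPathError are importable from the module above (it also depends on a custom split helper that is not shown), the grammar it accepts can be sketched as:

validate_path("users.*.name")     # wildcard segments are allowed
validate_path("items.@first.id")  # @first / @last select the ends of an array
validate_path("items.@3.id")      # @<integer> selects by index
try:
    validate_path("items.@foo")   # neither an integer nor @first/@last
except XJPathError as exc:
    print(exc)                    # 'Array index must be either integer or @first or @last'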
def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry)) | Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file. | Below is the instruction that describes the task:
### Input:
Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
### Response:
def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry)) |
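The reader builds on zipfile.ZipFile.infolist() and per-entry reads; the same calls can be tried standalone on an in-memory archive:

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("a.txt", "hello")
    zf.writestr("b.txt", "world")

zf = zipfile.ZipFile(buf)
entries = zf.infolist()
print([entry.filename for entry in entries])  # ['a.txt', 'b.txt']
print(zf.read(entries[0]).decode())           # hello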
def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None):
"""
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
"""
if self.current_char == left_ch:
return 0
if start_pos is None:
start_pos = 0
else:
start_pos = max(0, start_pos)
stack = 1
# Look backward.
for i in range(self.cursor_position - 1, start_pos - 1, -1):
c = self.text[i]
if c == right_ch:
stack += 1
elif c == left_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position | Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position. | Below is the the instruction that describes the task:
### Input:
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
### Response:
def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None):
"""
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
"""
if self.current_char == left_ch:
return 0
if start_pos is None:
start_pos = 0
else:
start_pos = max(0, start_pos)
stack = 1
# Look backward.
for i in range(self.cursor_position - 1, start_pos - 1, -1):
c = self.text[i]
if c == right_ch:
stack += 1
elif c == left_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position |
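The same stack-based backward scan, rewritten as a standalone function for illustration (the method above works on self.text and self.cursor_position instead):

def enclosing_left(text, cursor, left_ch, right_ch):
    if cursor < len(text) and text[cursor] == left_ch:
        return 0
    stack = 1
    for i in range(cursor - 1, -1, -1):
        if text[i] == right_ch:
            stack += 1
        elif text[i] == left_ch:
            stack -= 1
            if stack == 0:
                return i - cursor  # relative position, always <= 0
    return None

print(enclosing_left("f(a, (b))", 7, "(", ")"))  # -2: the inner '(' is two characters left of the cursor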
def cancel(batch_fn, cancel_fn, ops):
"""Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# Canceling many operations one-by-one can be slow.
# The Pipelines API doesn't directly support a list of operations to cancel,
# but the requests can be performed in batch.
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages | Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages. | Below is the instruction that describes the task:
### Input:
Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
### Response:
def cancel(batch_fn, cancel_fn, ops):
"""Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# Canceling many operations one-by-one can be slow.
# The Pipelines API doesn't directly support a list of operations to cancel,
# but the requests can be performed in batch.
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages |
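The batching logic above is just a fixed-size chunking loop; isolated from the API calls it looks like this:

def chunks(items, size=256):
    for start in range(0, len(items), size):
        yield items[start:start + size]

ops = list(range(1000))
print([len(chunk) for chunk in chunks(ops)])  # [256, 256, 256, 232]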
def get_or_create(name=None, group=None, config=None, extra=0, verbose=0, backend_opts=None):
"""
Creates a virtual machine instance.
"""
require('vm_type', 'vm_group')
backend_opts = backend_opts or {}
verbose = int(verbose)
extra = int(extra)
if config:
config_fn = common.find_template(config)
config = yaml.load(open(config_fn))
env.update(config)
env.vm_type = (env.vm_type or '').lower()
assert env.vm_type, 'No VM type specified.'
group = group or env.vm_group
assert group, 'No VM group specified.'
ret = exists(name=name, group=group)
if not extra and ret:
if verbose:
print('VM %s:%s exists.' % (name, group))
return ret
today = datetime.date.today()
release = int('%i%02i%02i' % (today.year, today.month, today.day))
if not name:
existing_instances = list_instances(
group=group,
release=release,
verbose=verbose)
name = env.vm_name_template.format(index=len(existing_instances)+1)
if env.vm_type == EC2:
return get_or_create_ec2_instance(
name=name,
group=group,
release=release,
verbose=verbose,
backend_opts=backend_opts)
else:
raise NotImplementedError | Creates a virtual machine instance. | Below is the instruction that describes the task:
### Input:
Creates a virtual machine instance.
### Response:
def get_or_create(name=None, group=None, config=None, extra=0, verbose=0, backend_opts=None):
"""
Creates a virtual machine instance.
"""
require('vm_type', 'vm_group')
backend_opts = backend_opts or {}
verbose = int(verbose)
extra = int(extra)
if config:
config_fn = common.find_template(config)
config = yaml.load(open(config_fn))
env.update(config)
env.vm_type = (env.vm_type or '').lower()
assert env.vm_type, 'No VM type specified.'
group = group or env.vm_group
assert group, 'No VM group specified.'
ret = exists(name=name, group=group)
if not extra and ret:
if verbose:
print('VM %s:%s exists.' % (name, group))
return ret
today = datetime.date.today()
release = int('%i%02i%02i' % (today.year, today.month, today.day))
if not name:
existing_instances = list_instances(
group=group,
release=release,
verbose=verbose)
name = env.vm_name_template.format(index=len(existing_instances)+1)
if env.vm_type == EC2:
return get_or_create_ec2_instance(
name=name,
group=group,
release=release,
verbose=verbose,
backend_opts=backend_opts)
else:
raise NotImplementedError |
def SG(self):
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
'''
phase = self.phase
if phase == 'l':
return self.SGl
elif phase == 's':
return self.SGs
elif phase == 'g':
return self.SGg
rho = self.rho
if rho is not None:
return SG(rho)
return None | r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596 | Below is the instruction that describes the task:
### Input:
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
### Response:
def SG(self):
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
'''
phase = self.phase
if phase == 'l':
return self.SGl
elif phase == 's':
return self.SGs
elif phase == 'g':
return self.SGg
rho = self.rho
if rho is not None:
return SG(rho)
return None |
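For the liquid/solid convention described in the docstring, specific gravity is simply the density ratio against water at 4 °C and 1 atm; a rough sketch with an assumed reference density (the library's own SG() helper may use a slightly different constant):

RHO_WATER_4C = 999.97  # kg/m^3, assumed reference value

def specific_gravity(rho):
    return rho / RHO_WATER_4C

print(specific_gravity(742.6))  # roughly 0.74, of the same order as the MTBE example above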
def xy(self):
"""CIE xy color space coordinates as array [x, y] of real values (0..1)."""
if self._xy != (None, None):
self._x, self._y = self._xy
if self._x is not None and self._y is not None:
x = self._x
if self._x > 1:
x = self._x / 65555
y = self._y
if self._y > 1:
y = self._y / 65555
return (x, y)
return None | CIE xy color space coordinates as array [x, y] of real values (0..1). | Below is the instruction that describes the task:
### Input:
CIE xy color space coordinates as array [x, y] of real values (0..1).
### Response:
def xy(self):
"""CIE xy color space coordinates as array [x, y] of real values (0..1)."""
if self._xy != (None, None):
self._x, self._y = self._xy
if self._x is not None and self._y is not None:
x = self._x
if self._x > 1:
x = self._x / 65555
y = self._y
if self._y > 1:
y = self._y / 65555
return (x, y)
return None |
def _client_run(self):
"""MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
# If no messages are coming through, back off a little to keep CPU use low.
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True | MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool | Below is the instruction that describes the task:
### Input:
MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
### Response:
def _client_run(self):
"""MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
# If no messages are coming through, back off a little to keep CPU use low.
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True |
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
# This is useful when debugging in an active interpreter (otherwise,
# the debugger will stop before reaching the target file)
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value) | This function is called when a return trap is set here. | Below is the instruction that describes the task:
### Input:
This function is called when a return trap is set here.
### Response:
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
# This is useful when debugging in an active interpreter (otherwise,
# the debugger will stop before reaching the target file)
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value) |
def get_data_path(cls):
"""
Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath
"""
marvin_path = os.environ.get(cls._key)
if not marvin_path:
raise InvalidConfigException('Data path not set!')
is_path_created = check_path(marvin_path, create=True)
if not is_path_created:
raise InvalidConfigException('Data path does not exist!')
return marvin_path | Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath | Below is the instruction that describes the task:
### Input:
Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath
### Response:
def get_data_path(cls):
"""
Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath
"""
marvin_path = os.environ.get(cls._key)
if not marvin_path:
raise InvalidConfigException('Data path not set!')
is_path_created = check_path(marvin_path, create=True)
if not is_path_created:
raise InvalidConfigException('Data path does not exist!')
return marvin_path |
def _parse_request_reply(self):
"waiting for a reply to our request"
# we need at least 6 bytes of data: 4 for the "header", such
# as it is, and 2 more if it's DOMAINNAME (for the size) or 4
# or 16 more if it's an IPv4/6 address reply. plus there's 2
# bytes on the end for the bound port.
if len(self._data) < 8:
return
msg = self._data[:4]
# not changing self._data yet, in case we've not got
# enough bytes so far.
(version, reply, _, typ) = struct.unpack('BBBB', msg)
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method() | waiting for a reply to our request | Below is the instruction that describes the task:
### Input:
waiting for a reply to our request
### Response:
def _parse_request_reply(self):
"waiting for a reply to our request"
# we need at least 6 bytes of data: 4 for the "header", such
# as it is, and 2 more if it's DOMAINNAME (for the size) or 4
# or 16 more if it's an IPv4/6 address reply. plus there's 2
# bytes on the end for the bound port.
if len(self._data) < 8:
return
msg = self._data[:4]
# not changing self._data yet, in case we've not got
# enough bytes so far.
(version, reply, _, typ) = struct.unpack('BBBB', msg)
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method() |
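The four header bytes the parser unpacks follow the SOCKS5 reply layout (version, reply code, reserved, address type); a standalone round-trip with struct shows the framing:

import struct

header = struct.pack("BBBB", 5, 0, 0, 1)  # version 5, SUCCEEDED (0), reserved, IPv4 address type
version, reply, _, typ = struct.unpack("BBBB", header)
print(version, reply, typ)  # 5 0 1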
def bold(*content, sep=' '):
"""
Make bold text (Markdown)
:param content:
:param sep:
:return:
"""
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[0]) | Make bold text (Markdown)
:param content:
:param sep:
:return: | Below is the instruction that describes the task:
### Input:
Make bold text (Markdown)
:param content:
:param sep:
:return:
### Response:
def bold(*content, sep=' '):
"""
Make bold text (Markdown)
:param content:
:param sep:
:return:
"""
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[0]) |
def matplotlib_to_ginga_cmap(cm, name=None):
"""Convert matplotlib colormap to Ginga's."""
if name is None:
name = cm.name
arr = cm(np.arange(0, min_cmap_len) / np.float(min_cmap_len - 1))
clst = arr[:, 0:3]
return ColorMap(name, clst) | Convert matplotlib colormap to Ginga's. | Below is the instruction that describes the task:
### Input:
Convert matplotlib colormap to Ginga's.
### Response:
def matplotlib_to_ginga_cmap(cm, name=None):
"""Convert matplotlib colormap to Ginga's."""
if name is None:
name = cm.name
arr = cm(np.arange(0, min_cmap_len) / np.float(min_cmap_len - 1))
clst = arr[:, 0:3]
return ColorMap(name, clst) |
def gather_data(registry):
"""Gathers the metrics"""
# Get the host name of the machine
host = socket.gethostname()
# Create our collectors
ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
{'host': host})
cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
{'host': host})
# register the metric collectors
registry.register(ram_metric)
registry.register(cpu_metric)
# Start gathering metrics every second
while True:
time.sleep(1)
# Add ram metrics
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
ram_metric.set({'type': "virtual", }, ram.used)
ram_metric.set({'type': "virtual", 'status': "cached"}, ram.cached)
ram_metric.set({'type': "swap"}, swap.used)
# Add cpu metrics
for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
cpu_metric.set({'core': c}, p) | Gathers the metrics | Below is the instruction that describes the task:
### Input:
Gathers the metrics
### Response:
def gather_data(registry):
"""Gathers the metrics"""
# Get the host name of the machine
host = socket.gethostname()
# Create our collectors
ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
{'host': host})
cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
{'host': host})
# register the metric collectors
registry.register(ram_metric)
registry.register(cpu_metric)
# Start gathering metrics every second
while True:
time.sleep(1)
# Add ram metrics
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
ram_metric.set({'type': "virtual", }, ram.used)
ram_metric.set({'type': "virtual", 'status': "cached"}, ram.cached)
ram_metric.set({'type': "swap"}, swap.used)
# Add cpu metrics
for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
cpu_metric.set({'core': c}, p) |
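The loop leans entirely on psutil; the individual calls can be tried standalone (values depend on the machine, and virtual_memory().cached is not available on every platform):

import psutil

vm = psutil.virtual_memory()
print(vm.used)                                      # bytes of RAM in use
print(getattr(vm, "cached", "n/a"))                 # page-cache bytes where supported, else n/a
print(psutil.swap_memory().used)                    # bytes of swap in use
print(psutil.cpu_percent(interval=1, percpu=True))  # one percentage per core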
def status(self, code=None):
"""Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
"""
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self | Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}") | Below is the instruction that describes the task:
### Input:
Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
### Response:
def status(self, code=None):
"""Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
"""
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self |
def get_ip_interface_output_interface_ip_address_ipv4(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ip_address = ET.SubElement(interface, "ip-address")
ipv4 = ET.SubElement(ip_address, "ipv4")
ipv4.text = kwargs.pop('ipv4')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_ip_interface_output_interface_ip_address_ipv4(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ip_address = ET.SubElement(interface, "ip-address")
ipv4 = ET.SubElement(ip_address, "ipv4")
ipv4.text = kwargs.pop('ipv4')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
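Stripped of the device callback, the generated method just assembles a small ElementTree payload; the equivalent standalone build (with made-up interface values and namespaces omitted) is:

import xml.etree.ElementTree as ET

get_ip_interface = ET.Element("get_ip_interface")
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
ET.SubElement(interface, "interface-type").text = "tengigabitethernet"  # assumed value
ET.SubElement(interface, "interface-name").text = "1/0/1"               # assumed value
ip_address = ET.SubElement(interface, "ip-address")
ET.SubElement(ip_address, "ipv4").text = "10.0.0.1/24"                  # assumed value
print(ET.tostring(get_ip_interface).decode())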
def rename(args):
"""Supply two names: Existing instance name or ID, and new name to assign to the instance."""
old_name, new_name = args.names
add_tags(resources.ec2.Instance(resolve_instance_id(old_name)), Name=new_name, dry_run=args.dry_run) | Supply two names: Existing instance name or ID, and new name to assign to the instance. | Below is the instruction that describes the task:
### Input:
Supply two names: Existing instance name or ID, and new name to assign to the instance.
### Response:
def rename(args):
"""Supply two names: Existing instance name or ID, and new name to assign to the instance."""
old_name, new_name = args.names
add_tags(resources.ec2.Instance(resolve_instance_id(old_name)), Name=new_name, dry_run=args.dry_run) |
def _check_fact_ref_eval(cls, cpel_dom):
"""
Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
"""
CHECK_SYSTEM = "check-system"
CHECK_LOCATION = "check-location"
CHECK_ID = "check-id"
checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM)
if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"):
# Perform an OVAL check.
# First attribute is the URI of an OVAL definitions file.
# Second attribute is an OVAL definition ID.
return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
if (checksystemID == "http://scap.nist.gov/schema/ocil/2"):
# Perform an OCIL check.
# First attribute is the URI of an OCIL questionnaire file.
# Second attribute is OCIL questionnaire ID.
return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
# Can add additional check systems here, with each returning a
# True, False, or Error value
return False | Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error | Below is the instruction that describes the task:
### Input:
Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
### Response:
def _check_fact_ref_eval(cls, cpel_dom):
"""
Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
"""
CHECK_SYSTEM = "check-system"
CHECK_LOCATION = "check-location"
CHECK_ID = "check-id"
checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM)
if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"):
# Perform an OVAL check.
# First attribute is the URI of an OVAL definitions file.
# Second attribute is an OVAL definition ID.
return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
if (checksystemID == "http://scap.nist.gov/schema/ocil/2"):
# Perform an OCIL check.
# First attribute is the URI of an OCIL questionnaire file.
# Second attribute is OCIL questionnaire ID.
return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
# Can add additional check systems here, with each returning a
# True, False, or Error value
return False |
def deserialize(self, data, fields=None):
"""
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
"""
# TODO: it can be that we returned data by `get_lines`, that is already deserialized
if not isinstance(data, (bytes, bytearray)):
return data
return msgpack.unpackb(data, encoding='utf-8', object_pairs_hook=decode_to_sorted) | Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict | Below is the instruction that describes the task:
### Input:
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
### Response:
def deserialize(self, data, fields=None):
"""
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
"""
# TODO: it can be that we returned data by `get_lines`, that is already deserialized
if not isinstance(data, (bytes, bytearray)):
return data
return msgpack.unpackb(data, encoding='utf-8', object_pairs_hook=decode_to_sorted) |
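A round-trip sketch of the unpack call above; decode_to_sorted is not shown in the snippet, so a simple stand-in that sorts key/value pairs into an OrderedDict is assumed, and newer msgpack releases take raw=False in place of the encoding argument used above:

from collections import OrderedDict
import msgpack

def decode_to_sorted(pairs):
    # assumed stand-in for the project's decode_to_sorted hook
    return OrderedDict(sorted(pairs))

packed = msgpack.packb({"b": 2, "a": 1}, use_bin_type=True)
print(msgpack.unpackb(packed, raw=False, object_pairs_hook=decode_to_sorted))
# keys come back in sorted order: a, b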