The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code: DatasetGenerationError Exception: CastError Message: Couldn't cast code: string language: string original_docstring: string AST_depth: int64 alphanumeric_fraction: double max_line_length: int64 avg_line_length: double num_lines: int64 source: string original_comment: string -- schema metadata -- huggingface: '{"info": {"features": {"code": {"dtype": "string", "_type":' + 515 to {'code': Value(dtype='string', id=None), 'language': Value(dtype='string', id=None), 'original_docstring': Value(dtype='string', id=None), 'AST_depth': Value(dtype='int64', id=None), 'alphanumeric_fraction': Value(dtype='float64', id=None), 'max_line_length': Value(dtype='int64', id=None), 'avg_line_length': Value(dtype='float64', id=None), 'num_lines': Value(dtype='int64', id=None), 'source': Value(dtype='string', id=None)} because column names don't match Traceback: Traceback (most recent call last): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1854, in _prepare_split_single for _, table in generator: File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 686, in wrapped for item in generator(*args, **kwargs): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/arrow/arrow.py", line 76, in _generate_tables yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/arrow/arrow.py", line 59, in _cast_table pa_table = table_cast(pa_table, self.info.features.arrow_schema) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2292, in table_cast return cast_table_to_schema(table, schema) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2240, in cast_table_to_schema raise CastError( datasets.table.CastError: Couldn't cast code: string language: string original_docstring: string AST_depth: int64 alphanumeric_fraction: double 
max_line_length: int64 avg_line_length: double num_lines: int64 source: string original_comment: string -- schema metadata -- huggingface: '{"info": {"features": {"code": {"dtype": "string", "_type":' + 515 to {'code': Value(dtype='string', id=None), 'language': Value(dtype='string', id=None), 'original_docstring': Value(dtype='string', id=None), 'AST_depth': Value(dtype='int64', id=None), 'alphanumeric_fraction': Value(dtype='float64', id=None), 'max_line_length': Value(dtype='int64', id=None), 'avg_line_length': Value(dtype='float64', id=None), 'num_lines': Value(dtype='int64', id=None), 'source': Value(dtype='string', id=None)} because column names don't match The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1412, in compute_config_parquet_and_info_response parquet_operations, partial, estimated_dataset_info = stream_convert_to_parquet( File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 988, in stream_convert_to_parquet builder._prepare_split( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the dataset") from e datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
code (string) | language (string) | original_docstring (string) | AST_depth (int64) | alphanumeric_fraction (float64) | max_line_length (int64) | avg_line_length (float64) | num_lines (int64) | source (string)
---|---|---|---|---|---|---|---|---
class WeightedGraph:
    """
    A weighted graph used to find clusters of vertices with the
    Markov Clustering Algorithm (MCL).

    Construct it from a dictionary mapping vertex pairs (tuples) to the
    corresponding edge weight.  Both an adjacency list and an adjacency
    matrix are kept internally; the expected number of vertices is
    small, so the double bookkeeping is cheap.
    """
    def __init__(self, pair_weights):
        self.adjacency_list = self._construct_adjacency_list(pair_weights)
        self.vertices = list(self.adjacency_list.keys())
        self.num_vertices = len(self.vertices)
        self.adjacency_matrix = self._construct_adjacency_matrix()

    def get_clusters(self, granularity):
        """
        Cluster the vertices using the Markov Clustering Algorithm.

        Args:
            granularity: The granularity with which to inflate columns
        Return:
            A list of clusters, each cluster being a list of vertices
        """
        # The expansion exponent is fixed at 2, mirroring the original
        # implementation; it could be exposed as an option later.
        expansion = 2
        mcl_matrix = normalize_columns(transform_matrix(self.adjacency_matrix))
        delta = np.linalg.norm(mcl_matrix)
        while delta > 10E-6:
            # Keep the previous iterate to measure convergence.
            previous = mcl_matrix
            mcl_matrix = np.linalg.matrix_power(mcl_matrix, expansion)
            mcl_matrix = inflate_columns(mcl_matrix, granularity)
            delta = np.linalg.norm(mcl_matrix - previous)
        return self._get_clusters(mcl_matrix)

    def _get_clusters(self, matrix):
        """
        Read the clusters off the converged MCL matrix.
        """
        # A set of frozensets keeps each cluster of the partition unique.
        unique_clusters = set()
        for row, _ in enumerate(self.vertices):
            if np.sum(matrix[row, :]) < 10E-6:
                # Row is numerically zero: vertex already belongs to a cluster.
                continue
            members = [
                vertex
                for col, vertex in enumerate(self.vertices)
                if matrix[row, col] > 10E-6
            ]
            unique_clusters.add(frozenset(members))
        return [list(cluster) for cluster in unique_clusters]

    def _construct_adjacency_list(self, pair_weights):
        """
        Build the adjacency-list form of the graph: a dictionary mapping
        each vertex to a list of (v, w) tuples, where v is an adjacent
        vertex and w the weight of the connecting edge.

        Args:
            pair_weights: A dictionary mapping pairs of vertices to weights
        Returns:
            An adjacency list
        """
        adjacency = {}
        for (first, second), weight in pair_weights.items():
            # Record the edge in both directions (undirected graph).
            adjacency.setdefault(first, []).append((second, weight))
            adjacency.setdefault(second, []).append((first, weight))
        return adjacency

    def _construct_adjacency_matrix(self):
        """
        Build the adjacency matrix from the stored adjacency list, with
        M_ij holding the weight of the edge from vertex i to vertex j.

        Returns:
            The numpy array of weights
        """
        matrix = np.identity(self.num_vertices)
        for row, source in enumerate(self.vertices):
            neighbours = self.adjacency_list[source]
            for col, target in enumerate(self.vertices):
                # First matching entry wins; entries without an edge get 0.
                # Note every cell is overwritten, including the identity
                # diagonal (which ends up 0 unless a self-edge exists).
                matrix[row][col] = next(
                    (w for vertex, w in neighbours if vertex == target), 0
                )
        return matrix
This class represents a weighted graph for the purposes
of determining clusters via the Markov Clustering Algorithm.
To initialize an object of this class, pass in a dictionary
which maps pairs (tuples) of vertices to the corresponding weight.
Stores internally both an adjacency list and an adjacency matrix
This is fine as the number of expected vertices is small.
| 17 | 0.59216 | 91 | 37.392523 | 107 | class |
class Zone:
    """
    Abstract base for a zone with well-defined boundaries.

    Concrete subclasses must implement the three accessors below, each
    returning an (int, int) coordinate pair.
    """
    def topLeft(self):
        """Return the top-left corner. :rtype: (int, int)"""
        raise NotImplementedError()

    def bottomRight(self):
        """Return the bottom-right corner. :rtype: (int, int)"""
        raise NotImplementedError()

    def center(self):
        """Return the center point. :rtype: (int, int)"""
        raise NotImplementedError()
Zone with defined boundaries
| 8 | 0.464646 | 35 | 17.045455 | 22 | class |
class GameObject:
    """
    The base class for all other classes.
    """
    # Custom pygame user-event ids, one per engine.
    MENU_EVENT = pg.USEREVENT + 1
    SCENE_EVENT = pg.USEREVENT + 2
    CUTSCENE_EVENT = pg.USEREVENT + 3

    # Valid message categories accepted by each engine.
    CATEGORIES_MENU = ["screen", "transition", "complete", "health", "max_health"]
    CATEGORIES_SCENE = [
        "screen", "transition", "complete", "pause", "unpause", "no_mode",
        "start_game", "switch", "door", "death", "revive",
    ]
    CATEGORIES_CUTSCENE = ["screen", "transition"]

    def handleEvent(self, event):
        """
        Handles the given event.
        :param event: pygame.Event, allowing event-driven programming.
        """
        raise NotImplementedError

    def update(self):
        """
        Updates the logic of the game object every game tick.
        """
        raise NotImplementedError

    def draw(self, camera=None):
        """
        Renders the game object to the screen every game tick.
        """
        raise NotImplementedError

    def messageMenu(self, category, data=None):
        """
        Creates an event that is posted for the menu engine.
        :param category: String, the category of the message.
        :param data: N-Tuple, containing the data for the relevant category.
        """
        self._messageEngine(GameObject.CATEGORIES_MENU, GameObject.MENU_EVENT,
                            str(self), category, data)

    def messageScene(self, category, data=None):
        """
        Creates an event that is posted for the scene engine.
        :param category: String, the category of the message.
        :param data: N-Tuple, containing the data for the relevant category.
        """
        self._messageEngine(GameObject.CATEGORIES_SCENE, GameObject.SCENE_EVENT,
                            str(self), category, data)

    def messageCutScene(self, category, data=None):
        """
        Creates an event that is posted for the cutscene engine.
        :param category: String, the category of the message.
        :param data: N-Tuple, containing the data for the relevant category.
        """
        self._messageEngine(GameObject.CATEGORIES_CUTSCENE, GameObject.CUTSCENE_EVENT,
                            str(self), category, data)

    def _messageEngine(self, CATEGORIES, EVENT, sender, category, data=None):
        """
        Creates an event that is posted to an engine.
        :param CATEGORIES: List, containing strings of valid categories.
        :param EVENT: pygame.event, the event that the engine handles.
        :param sender: String, the sender of the message.
        :param category: String, the category of the message.
        :param data: N-Tuple, containing the data for the relevant category.
        """
        if category not in CATEGORIES:
            raise KeyError("'{}' is an invalid category! The categories allowed "
                           "are {}!".format(category, CATEGORIES))
        message = pg.event.Event(EVENT, {"sender": sender,
                                         "category": category,
                                         "data": data})
        pg.event.post(message)
The base class for all other classes.
| 14 | 0.53008 | 81 | 29.837607 | 117 | class |
class Stream:
    """Represents a single HTTP/2 Stream.
    Stream is a bidirectional flow of bytes within an established connection,
    which may carry one or more messages. Handles the transfer of HTTP Headers
    and Data frames.
    Role of this class is to
    1. Combine all the data frames
    """
    def __init__(
        self,
        stream_id: int,
        request: Request,
        protocol: "H2ClientProtocol",
        download_maxsize: int = 0,
        download_warnsize: int = 0,
    ) -> None:
        """
        Arguments:
            stream_id -- Unique identifier for the stream within a single HTTP/2 connection
            request -- The HTTP request associated to the stream
            protocol -- Parent H2ClientProtocol instance
            download_maxsize -- Fallback maximum response size in bytes (0 disables
                the limit); overridden by request.meta['download_maxsize'] if present
            download_warnsize -- Fallback warning threshold in bytes (0 disables
                the warning); overridden by request.meta['download_warnsize'] if present
        """
        self.stream_id: int = stream_id
        self._request: Request = request
        self._protocol: "H2ClientProtocol" = protocol
        self._download_maxsize = self._request.meta.get('download_maxsize', download_maxsize)
        self._download_warnsize = self._request.meta.get('download_warnsize', download_warnsize)
        # Metadata of an HTTP/2 connection stream
        # initialized when stream is instantiated
        self.metadata: Dict = {
            'request_content_length': 0 if self._request.body is None else len(self._request.body),
            # Flag to keep track whether the stream has initiated the request
            'request_sent': False,
            # Flag to track whether we have logged about exceeding download warnsize
            'reached_warnsize': False,
            # Each time we send a data frame, we will decrease value by the amount send.
            'remaining_content_length': 0 if self._request.body is None else len(self._request.body),
            # Flag to keep track whether client (self) have closed this stream
            'stream_closed_local': False,
            # Flag to keep track whether the server has closed the stream
            'stream_closed_server': False,
        }
        # Private variable used to build the response
        # this response is then converted to appropriate Response class
        # passed to the response deferred callback
        self._response: Dict = {
            # Data received frame by frame from the server is appended
            # and passed to the response Deferred when completely received.
            'body': BytesIO(),
            # The amount of data received that counts against the
            # flow control window
            'flow_controlled_size': 0,
            # Headers received after sending the request
            'headers': Headers({}),
        }
        def _cancel(_) -> None:
            # Close this stream as gracefully as possible
            # If the associated request is initiated we reset this stream
            # else we directly call close() method
            if self.metadata['request_sent']:
                self.reset_stream(StreamCloseReason.CANCELLED)
            else:
                self.close(StreamCloseReason.CANCELLED)
        self._deferred_response = Deferred(_cancel)
    def __repr__(self):
        return f'Stream(id={self.stream_id!r})'
    @property
    def _log_warnsize(self) -> bool:
        """Checks if we have received data which exceeds the download warnsize
        and whether we have not already logged about it.
        Returns:
            True if both the above conditions hold true
            False if any of the conditions is false
        """
        # -1 sentinel when the Content-Length header is absent.
        content_length_header = int(self._response['headers'].get(b'Content-Length', -1))
        return (
            self._download_warnsize
            and (
                self._response['flow_controlled_size'] > self._download_warnsize
                or content_length_header > self._download_warnsize
            )
            and not self.metadata['reached_warnsize']
        )
    def get_response(self) -> Deferred:
        """Simply return a Deferred which fires when response
        from the asynchronous request is available
        """
        return self._deferred_response
    def check_request_url(self) -> bool:
        # Make sure that we are sending the request to the correct URL
        # (host, netloc, or ip:port of the established connection).
        url = urlparse(self._request.url)
        return (
            url.netloc == str(self._protocol.metadata['uri'].host, 'utf-8')
            or url.netloc == str(self._protocol.metadata['uri'].netloc, 'utf-8')
            or url.netloc == f'{self._protocol.metadata["ip_address"]}:{self._protocol.metadata["uri"].port}'
        )
    def _get_request_headers(self) -> List[Tuple[str, str]]:
        """Build the (pseudo-)header list for the request, deduplicating
        Content-Length against the actual body length."""
        url = urlparse(self._request.url)
        path = url.path
        if url.query:
            path += '?' + url.query
        # This pseudo-header field MUST NOT be empty for "http" or "https"
        # URIs; "http" or "https" URIs that do not contain a path component
        # MUST include a value of '/'. The exception to this rule is an
        # OPTIONS request for an "http" or "https" URI that does not include
        # a path component; these MUST include a ":path" pseudo-header field
        # with a value of '*' (refer RFC 7540 - Section 8.1.2.3)
        if not path:
            path = '*' if self._request.method == 'OPTIONS' else '/'
        # Make sure pseudo-headers comes before all the other headers
        headers = [
            (':method', self._request.method),
            (':authority', url.netloc),
        ]
        # The ":scheme" and ":path" pseudo-header fields MUST
        # be omitted for CONNECT method (refer RFC 7540 - Section 8.3)
        if self._request.method != 'CONNECT':
            headers += [
                (':scheme', self._protocol.metadata['uri'].scheme),
                (':path', path),
            ]
        # NOTE(review): unlike __init__, no None-guard on body here —
        # presumably Request defaults body to b''; confirm against caller.
        content_length = str(len(self._request.body))
        headers.append(('Content-Length', content_length))
        content_length_name = self._request.headers.normkey(b'Content-Length')
        for name, values in self._request.headers.items():
            for value in values:
                value = str(value, 'utf-8')
                if name == content_length_name:
                    if value != content_length:
                        logger.warning(
                            'Ignoring bad Content-Length header %r of request %r, '
                            'sending %r instead',
                            value,
                            self._request,
                            content_length,
                        )
                    continue
                headers.append((str(name, 'utf-8'), value))
        return headers
    def initiate_request(self) -> None:
        """Send the HEADERS frame (and any body data) if the URL matches the
        connection; otherwise close the stream with INVALID_HOSTNAME."""
        if self.check_request_url():
            headers = self._get_request_headers()
            self._protocol.conn.send_headers(self.stream_id, headers, end_stream=False)
            self.metadata['request_sent'] = True
            self.send_data()
        else:
            # Close this stream calling the response errback
            # Note that we have not sent any headers
            self.close(StreamCloseReason.INVALID_HOSTNAME)
    def send_data(self) -> None:
        """Called immediately after the headers are sent. Here we send all the
        data as part of the request.
        If the content length is 0 initially then we end the stream immediately and
        wait for response data.
        Warning: Only call this method when stream not closed from client side
        and has initiated request already by sending HEADER frame. If not then
        stream will raise ProtocolError (raise by h2 state machine).
        """
        if self.metadata['stream_closed_local']:
            raise StreamClosedError(self.stream_id)
        # Firstly, check what the flow control window is for current stream.
        window_size = self._protocol.conn.local_flow_control_window(stream_id=self.stream_id)
        # Next, check what the maximum frame size is.
        max_frame_size = self._protocol.conn.max_outbound_frame_size
        # We will send no more than the window size or the remaining file size
        # of data in this call, whichever is smaller.
        bytes_to_send_size = min(window_size, self.metadata['remaining_content_length'])
        # We now need to send a number of data frames.
        while bytes_to_send_size > 0:
            chunk_size = min(bytes_to_send_size, max_frame_size)
            data_chunk_start_id = self.metadata['request_content_length'] - self.metadata['remaining_content_length']
            data_chunk = self._request.body[data_chunk_start_id:data_chunk_start_id + chunk_size]
            self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream=False)
            bytes_to_send_size = bytes_to_send_size - chunk_size
            self.metadata['remaining_content_length'] = self.metadata['remaining_content_length'] - chunk_size
        self.metadata['remaining_content_length'] = max(0, self.metadata['remaining_content_length'])
        # End the stream if no more data needs to be send
        if self.metadata['remaining_content_length'] == 0:
            self._protocol.conn.end_stream(self.stream_id)
        # Q. What about the rest of the data?
        # Ans: Remaining Data frames will be sent when we get a WindowUpdate frame
    def receive_window_update(self) -> None:
        """Flow control window size was changed.
        Send data that earlier could not be sent as we were
        blocked behind the flow control.
        """
        if (
            self.metadata['remaining_content_length']
            and not self.metadata['stream_closed_server']
            and self.metadata['request_sent']
        ):
            self.send_data()
    def receive_data(self, data: bytes, flow_controlled_length: int) -> None:
        """Buffer a received DATA frame, enforce maxsize/warnsize limits,
        and acknowledge the data so the server's flow window reopens."""
        self._response['body'].write(data)
        self._response['flow_controlled_size'] += flow_controlled_length
        # We check maxsize here in case the Content-Length header was not received
        if self._download_maxsize and self._response['flow_controlled_size'] > self._download_maxsize:
            self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
            return
        if self._log_warnsize:
            self.metadata['reached_warnsize'] = True
            warning_msg = (
                f'Received more ({self._response["flow_controlled_size"]}) bytes than download '
                f'warn size ({self._download_warnsize}) in request {self._request}'
            )
            logger.warning(warning_msg)
        # Acknowledge the data received
        # NOTE(review): this passes the *cumulative* flow_controlled_size on
        # every DATA frame rather than this frame's flow_controlled_length —
        # confirm against h2's acknowledge_received_data contract.
        self._protocol.conn.acknowledge_received_data(
            self._response['flow_controlled_size'],
            self.stream_id
        )
    def receive_headers(self, headers: List[HeaderTuple]) -> None:
        """Store response headers and pre-check the declared Content-Length
        against the maxsize/warnsize limits."""
        for name, value in headers:
            self._response['headers'][name] = value
        # Check if we exceed the allowed max data size which can be received
        expected_size = int(self._response['headers'].get(b'Content-Length', -1))
        if self._download_maxsize and expected_size > self._download_maxsize:
            self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
            return
        if self._log_warnsize:
            self.metadata['reached_warnsize'] = True
            warning_msg = (
                f'Expected response size ({expected_size}) larger than '
                f'download warn size ({self._download_warnsize}) in request {self._request}'
            )
            logger.warning(warning_msg)
    def reset_stream(self, reason: StreamCloseReason = StreamCloseReason.RESET) -> None:
        """Close this stream by sending a RST_FRAME to the remote peer"""
        if self.metadata['stream_closed_local']:
            raise StreamClosedError(self.stream_id)
        # Clear buffer earlier to avoid keeping data in memory for a long time
        self._response['body'].truncate(0)
        self.metadata['stream_closed_local'] = True
        # NOTE(review): always reports REFUSED_STREAM to the peer regardless of
        # `reason` (e.g. CANCELLED / MAXSIZE_EXCEEDED) — confirm intended.
        self._protocol.conn.reset_stream(self.stream_id, ErrorCodes.REFUSED_STREAM)
        self.close(reason)
    def close(
        self,
        reason: StreamCloseReason,
        errors: Optional[List[BaseException]] = None,
        from_protocol: bool = False,
    ) -> None:
        """Based on the reason sent we will handle each case.
        Fires the response Deferred (callback or errback) exactly once.
        """
        if self.metadata['stream_closed_server']:
            raise StreamClosedError(self.stream_id)
        if not isinstance(reason, StreamCloseReason):
            raise TypeError(f'Expected StreamCloseReason, received {reason.__class__.__qualname__}')
        # Have default value of errors as an empty list as
        # some cases can add a list of exceptions
        errors = errors or []
        if not from_protocol:
            self._protocol.pop_stream(self.stream_id)
        self.metadata['stream_closed_server'] = True
        # We do not check for Content-Length or Transfer-Encoding in response headers
        # and add `partial` flag as in HTTP/1.1 as 'A request or response that includes
        # a payload body can include a content-length header field' (RFC 7540 - Section 8.1.2.6)
        # NOTE: Order of handling the events is important here
        # As we immediately cancel the request when maxsize is exceeded while
        # receiving DATA_FRAME's when we have received the headers (not
        # having Content-Length)
        if reason is StreamCloseReason.MAXSIZE_EXCEEDED:
            expected_size = int(self._response['headers'].get(
                b'Content-Length',
                self._response['flow_controlled_size'])
            )
            error_msg = (
                f'Cancelling download of {self._request.url}: received response '
                f'size ({expected_size}) larger than download max size ({self._download_maxsize})'
            )
            logger.error(error_msg)
            self._deferred_response.errback(CancelledError(error_msg))
        elif reason is StreamCloseReason.ENDED:
            self._fire_response_deferred()
        # Stream was abruptly ended here
        elif reason is StreamCloseReason.CANCELLED:
            # Client has cancelled the request. Remove all the data
            # received and fire the response deferred with no flags set
            # NOTE: The data is already flushed in Stream.reset_stream() called
            # immediately when the stream needs to be cancelled
            # There maybe no :status in headers, we make
            # HTTP Status Code: 499 - Client Closed Request
            self._response['headers'][':status'] = '499'
            self._fire_response_deferred()
        elif reason is StreamCloseReason.RESET:
            self._deferred_response.errback(ResponseFailed([
                Failure(
                    f'Remote peer {self._protocol.metadata["ip_address"]} sent RST_STREAM',
                    ProtocolError
                )
            ]))
        elif reason is StreamCloseReason.CONNECTION_LOST:
            self._deferred_response.errback(ResponseFailed(errors))
        elif reason is StreamCloseReason.INACTIVE:
            errors.insert(0, InactiveStreamClosed(self._request))
            self._deferred_response.errback(ResponseFailed(errors))
        else:
            assert reason is StreamCloseReason.INVALID_HOSTNAME
            self._deferred_response.errback(InvalidHostname(
                self._request,
                str(self._protocol.metadata['uri'].host, 'utf-8'),
                f'{self._protocol.metadata["ip_address"]}:{self._protocol.metadata["uri"].port}'
            ))
    def _fire_response_deferred(self) -> None:
        """Builds response from the self._response dict
        and fires the response deferred callback with the
        generated response instance"""
        body = self._response['body'].getvalue()
        response_cls = responsetypes.from_args(
            headers=self._response['headers'],
            url=self._request.url,
            body=body,
        )
        response = response_cls(
            url=self._request.url,
            status=int(self._response['headers'][':status']),
            headers=self._response['headers'],
            body=body,
            request=self._request,
            certificate=self._protocol.metadata['certificate'],
            ip_address=self._protocol.metadata['ip_address'],
            protocol='h2',
        )
        self._deferred_response.callback(response)
Stream is a bidirectional flow of bytes within an established connection,
which may carry one or more messages. Handles the transfer of HTTP Headers
and Data frames.
Role of this class is to
1. Combine all the data frames
| 21 | 0.602322 | 117 | 40.643038 | 395 | class |
class Meta:
    """
    Meta options: bind the serializer to the Chat model and expose
    only the base_image field.
    """
    model = Chat
    fields = ('base_image',)
Meta class. Getting fields.
| 7 | 0.436508 | 35 | 20.166667 | 6 | class |
class Meta:
    """
    Meta options: bind the serializer to the MessageImages model and
    expose only the image field.
    """
    model = MessageImages
    fields = ('image',)
Meta class. Getting fields.
| 7 | 0.465116 | 35 | 20.666667 | 6 | class |
class ScopeFilterValidator:
    """
    The scope filter validator validates whether prefixes, ASNs or RPSL
    objects fall within the configured scope filter.
    """
    def __init__(self):
        self.load_filters()

    def load_filters(self):
        """
        (Re)load the local cache of the configured filters.
        Also called by __init__
        """
        self.filtered_prefixes = [
            IP(prefix) for prefix in get_setting('scopefilter.prefixes', [])
        ]
        self.filtered_asns = set()
        self.filtered_asn_ranges = set()
        for asn_filter in get_setting('scopefilter.asns', []):
            # A dash means an inclusive ASN range, e.g. "64512-65534".
            if '-' in str(asn_filter):
                start, end = asn_filter.split('-')
                self.filtered_asn_ranges.add((int(start), int(end)))
            else:
                self.filtered_asns.add(int(asn_filter))

    def validate(self, source: str, prefix: Optional[IP]=None, asn: Optional[int]=None) -> ScopeFilterStatus:
        """
        Validate a prefix and/or ASN, for a particular source.
        Returns a ScopeFilterStatus.
        """
        if not prefix and asn is None:
            raise ValueError('Scope Filter validator must be provided asn or prefix')
        # Sources can be excluded from scope filtering entirely.
        if get_setting(f'sources.{source}.scopefilter_excluded'):
            return ScopeFilterStatus.in_scope
        if prefix:
            for filtered in self.filtered_prefixes:
                if prefix.version() == filtered.version() and filtered.overlaps(prefix):
                    return ScopeFilterStatus.out_scope_prefix
        if asn is not None:
            if asn in self.filtered_asns:
                return ScopeFilterStatus.out_scope_as
            if any(low <= asn <= high for low, high in self.filtered_asn_ranges):
                return ScopeFilterStatus.out_scope_as
        return ScopeFilterStatus.in_scope

    def _validate_rpsl_data(self, source: str, object_class: str, prefix: Optional[IP],
                            asn_first: Optional[int]) -> Tuple[ScopeFilterStatus, str]:
        """
        Validate whether a particular set of RPSL data is in scope.
        Only route/route6 objects are checked; everything else is in scope.
        Returns a tuple of a ScopeFilterStatus and an explanation string.
        """
        if object_class not in ['route', 'route6']:
            return ScopeFilterStatus.in_scope, ''
        out_of_scope = [ScopeFilterStatus.out_scope_prefix, ScopeFilterStatus.out_scope_as]
        if prefix:
            status = self.validate(source, prefix)
            if status in out_of_scope:
                return status, f'prefix {prefix} is out of scope'
        if asn_first is not None:
            status = self.validate(source, asn=asn_first)
            if status in out_of_scope:
                return status, f'ASN {asn_first} is out of scope'
        return ScopeFilterStatus.in_scope, ''

    def validate_rpsl_object(self, rpsl_object: RPSLObject) -> Tuple[ScopeFilterStatus, str]:
        """
        Validate whether an RPSLObject is in scope.
        Returns a tuple of a ScopeFilterStatus and an explanation string.
        """
        return self._validate_rpsl_data(
            rpsl_object.source(),
            rpsl_object.rpsl_object_class,
            rpsl_object.prefix,
            rpsl_object.asn_first,
        )

    def validate_all_rpsl_objects(self, database_handler: DatabaseHandler) -> \
            Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
        """
        Apply the scope filter to all relevant objects.
        Retrieves all routes from the DB, and aggregates the validation results.
        Returns a tuple of three sets:
        - one with routes that should be set to status in_scope, but are not now
        - one with routes that should be set to status out_scope_as, but are not now
        - one with routes that should be set to status out_scope_prefix, but are not now
        Each object is recorded as a dict, which has the fields shown
        in "columns" below.
        Objects where their current status in the DB matches the new
        validation result, are not included in the return value.
        """
        columns = ['rpsl_pk', 'ip_first', 'prefix_length', 'asn_first', 'source', 'object_class',
                   'object_text', 'scopefilter_status']
        objs_changed: Dict[ScopeFilterStatus, List[Dict[str, str]]] = defaultdict(list)
        query = RPSLDatabaseQuery(column_names=columns, enable_ordering=False)
        query = query.object_classes(['route', 'route6'])
        for row in database_handler.execute_query(query):
            current_status = row['scopefilter_status']
            row['old_status'] = current_status
            prefix = None
            if row['ip_first']:
                prefix = IP(row['ip_first'] + '/' + str(row['prefix_length']))
            new_status, _ = self._validate_rpsl_data(
                row['source'],
                row['object_class'],
                prefix,
                row['asn_first'],
            )
            # Only report objects whose stored status needs to change.
            if new_status != current_status:
                row['scopefilter_status'] = new_status
                objs_changed[new_status].append(row)
        return (objs_changed[ScopeFilterStatus.in_scope],
                objs_changed[ScopeFilterStatus.out_scope_as],
                objs_changed[ScopeFilterStatus.out_scope_prefix])
The scope filter validator validates whether prefixes, ASNs or RPSL
objects fall within the configured scope filter.
| 18 | 0.60043 | 109 | 42.232558 | 129 | class |
class Client:
    """ Represents a client entry """
    def __init__(self, cid, cname, public_key, last_seen):
        self.ID = bytes.fromhex(cid)    # Unique client ID, 16 bytes.
        self.Name = cname               # Client's name, null terminated ascii string, 255 bytes.
        self.PublicKey = public_key     # Client's public key, 160 bytes.
        self.LastSeen = last_seen       # The Date & time of client's last request.

    def validate(self):
        """ Validate Client attributes according to the requirements """
        # Each check short-circuits, so a missing attribute never hits len().
        id_ok = self.ID and len(self.ID) == protocol.CLIENT_ID_SIZE
        name_ok = self.Name and len(self.Name) < protocol.NAME_SIZE
        key_ok = self.PublicKey and len(self.PublicKey) == protocol.PUBLIC_KEY_SIZE
        return bool(id_ok and name_ok and key_ok and self.LastSeen)
class Message:
    """ Represents a message entry """
    def __init__(self, to_client, from_client, mtype, content):
        self.ID = 0                     # Message ID, 4 bytes.
        self.ToClient = to_client       # Receiver's unique ID, 16 bytes.
        self.FromClient = from_client   # Sender's unique ID, 16 bytes.
        self.Type = mtype               # Message type, 1 byte.
        self.Content = content          # Message's content, Blob.

    def validate(self):
        """ Validate Message attributes according to the requirements """
        # Both endpoint ids must be present and exactly CLIENT_ID_SIZE long.
        if not self.ToClient or len(self.ToClient) != protocol.CLIENT_ID_SIZE:
            return False
        if not self.FromClient or len(self.FromClient) != protocol.CLIENT_ID_SIZE:
            return False
        # Type must be non-zero and within the allowed range.
        return bool(self.Type) and self.Type <= protocol.MSG_TYPE_MAX
class HER:
    """HER (final strategy).

    Attributes:
        desired_states (np.ndarray): desired states
        reward_func (Callable): returns reward from state, action, next_state
    """
    def __init__(self, demo_path: str, reward_func: Callable = default_reward_func):
        """Initialization.

        Args:
            demo_path (str): path of demonstration including desired states
            reward_func (Callable): returns reward from state, action, next_state
        """
        states, goal_indices = fetch_desired_states_from_demo(demo_path)
        self.desired_states = states
        self.demo_goal_indices = goal_indices
        self.reward_func = reward_func

    def sample_desired_state(self) -> np.ndarray:
        """Sample one of the desired states."""
        return np.random.choice(self.desired_states, 1)[0]

    def generate_demo_transitions(self, demo: list) -> list:
        """Return generated demo transitions for HER."""
        augmented: list = list()
        # Split the demo into segments, each ending at a goal index,
        # and relabel every segment against its own final state.
        segment_start = 0
        for goal_idx in self.demo_goal_indices:
            goal_state = demo[goal_idx][0]
            segment = [demo[i] for i in range(segment_start, goal_idx + 1)]
            segment_start = goal_idx + 1
            augmented.extend(
                self.generate_transitions(segment, goal_state, demo=True)
            )
        return augmented

    def generate_transitions(
        self, transitions: list, desired_state: np.ndarray, demo: bool = False
    ) -> list:
        """Generate new transitions concatenated with desired states."""
        final_state = transitions[-1][0]
        generated = list()
        for transition in transitions:
            # Relabel against the initial goal state...
            generated.append(self.__get_transition(transition, desired_state))
            # ...and, outside demo mode, also against the final state reached.
            if not demo:
                generated.append(self.__get_transition(transition, final_state))
        return generated

    def __get_transition(self, transition: tuple, goal_state: np.ndarray):
        """Get a single transition concatenated with a goal state."""
        state, action, _, next_state, done = transition
        # `done` is recomputed: the episode ends iff the goal was reached.
        reached_goal = np.array_equal(state, goal_state)
        reward = self.reward_func(state, action, goal_state)
        return (
            np.concatenate((state, goal_state), axis=-1),
            action,
            reward,
            np.concatenate((next_state, goal_state), axis=-1),
            reached_goal,
        )
Attributes:
desired_states (np.ndarray): desired states
reward_func (Callable): returns reward from state, action, next_state
| 14 | 0.611266 | 86 | 35.289855 | 69 | class |
class HostRemoval:
    '''
    PCA-decompose a saturated host star PSF and remove it
    '''

    def __init__(self,
                 n_PCA,
                 outdir,
                 abs_PCA_name,
                 config_data = config):
        '''
        INPUTS:
        n_PCA: number of principal components to use
        outdir: directory to deposit the host-subtracted images in (this has to be
            defined at the function call because the images may or may not
            contain fake planet PSFs, and I want to keep them separate)
        abs_PCA_name: absolute file name of the PCA cube to reconstruct the host star
            for making a fake planet (i.e., without saturation effects)
        config_data: configuration data, as usual
        '''
        self.n_PCA = n_PCA
        self.outdir = outdir
        self.abs_PCA_name = abs_PCA_name
        self.config_data = config_data
        # read in the PCA vector cube for this series of frames
        # (note the PCA needs to correspond to saturated PSFs, since I am subtracting
        # saturated PSFs away)
        self.pca_basis_cube_sat, self.header_pca_basis_cube_sat = fits.getdata(self.abs_PCA_name, 0, header=True)

    ##########

    def __call__(self,
                 abs_sci_name):
        '''
        Reconstruct and inject, for a single frame so as to parallelize the job

        INPUTS:
        abs_sci_name: the absolute path of the science frame into which we want to inject a planet
        '''
        print(abs_sci_name)
        # read in the cutout science frame
        # (there should be no masking of this frame downstream)
        sci, header_sci = fits.getdata(abs_sci_name, 0, header=True)
        # define the mask of this science frame
        ## ## fine-tune this step later!
        mask_weird = np.ones(np.shape(sci))
        no_mask = np.copy(mask_weird) # a non-mask for reconstructing saturated PSFs
        #mask_weird[sci > 1e8] = np.nan # mask saturating region
        ###########################################
        # PCA-decompose the host star PSF
        # (note no de-rotation of the image here)
        # do the PCA fit of masked host star
        # returns dict: 'pca_vector': the PCA best-fit vector; and 'recon_2d': the 2D reconstructed PSF
        # N.b. PCA reconstruction will be to get an UN-sat PSF; note PCA basis cube involves unsat PSFs
        # BUGFIX: use the configured number of components instead of the
        # previously hard-coded n_PCA=100, which silently ignored self.n_PCA.
        fit_unsat = fit_pca_star(self.pca_basis_cube_sat, sci, no_mask, n_PCA=self.n_PCA)
        # subtract the PCA-reconstructed host star
        image_host_removed = np.subtract(sci, fit_unsat["recon_2d"])
        # pickle the PCA vector
        pickle_stuff = {"pca_cube_file_name": self.abs_PCA_name,
                        "pca_vector": fit_unsat["pca_vector"],
                        "recons_2d_psf_unsat": fit_unsat["recon_2d"],
                        "sci_image_name": abs_sci_name}
        print(pickle_stuff)
        pca_fit_pickle_write_name = str(self.config_data["data_dirs"]["DIR_PICKLE"]) \
            + "pickle_pca_sat_psf_info_" + str(os.path.basename(abs_sci_name).split(".")[0]) + ".pkl"
        print(pca_fit_pickle_write_name)
        with open(pca_fit_pickle_write_name, "wb") as f:
            pickle.dump(pickle_stuff, f)
        # add info to the header indicating last reduction step, and PCA info
        header_sci["RED_STEP"] = "host_removed"
        # write FITS file out, with fake planet params in file name
        ## ## do I actually want to write out a separate FITS file for each fake planet?
        abs_image_host_removed_name = str(self.outdir + os.path.basename(abs_sci_name))
        fits.writeto(filename = abs_image_host_removed_name,
                     data = image_host_removed,
                     header = header_sci,
                     overwrite = True)
        print("Writing out host_removed frame " + os.path.basename(abs_sci_name))
class MeshLoader:
    """
    Class to load the meshes for the objects in a scene.
    """

    def __init__(self):
        """Module initializer"""
        self.base_dir = CONSTANTS.MESH_BASE_DIR
        self.text_dir = CONSTANTS.TEXT_BASE_DIR
        self.reset()

    def reset(self):
        """Drop every previously loaded mesh group."""
        self.loaded_meshes = []

    def get_meshes(self):
        """Return loaded mesh groups, unwrapping single-element groups."""
        return [item[0] if len(item) == 1 else item for item in self.loaded_meshes]

    def load_meshes(self, obj_info: "List[object_info.ObjectInfo]", **kwargs):
        """
        Loads the meshes whose information is given in parameter 'obj_info.
        Each call of this method APPENDS a list to the loaded_meshes attribute.
        :param obj_info: The object information of the meshes to be loaded.
        :param kwargs: additional mesh modifiers such as scale, specified with a leading 'mod_'
        """
        paths = []
        for obj in obj_info:
            # floor/wall meshes live in the texture directory, everything else in the mesh dir
            path = self.text_dir if obj.name.endswith("_floor") or obj.name.endswith("_wall") else self.base_dir
            paths.append((path / obj.mesh_fp).resolve())
        scales = [obj.scale for obj in obj_info]
        class_ids = [obj.class_id for obj in obj_info]
        # optional per-object scale modifiers multiply the base scales
        mod_scales = kwargs.get("mod_scale", [1.0] * len(scales))
        scales = [s * ms for (s, ms) in zip(scales, mod_scales)]
        flags = [mesh_flags(obj) for obj in obj_info]
        meshes = sl.Mesh.load_threaded(filenames=paths, flags=flags)
        # Setup class IDs and bake the scale into each mesh's pretransform
        for _, (mesh, scale, class_id) in enumerate(zip(meshes, scales, class_ids)):
            pt = torch.eye(4)
            pt[:3, :3] *= scale
            mesh.pretransform = pt
            mesh.class_index = class_id
        info_mesh_tuples = list(zip(obj_info, meshes))
        self.loaded_meshes.append(info_mesh_tuples)
class ObjectLoader:
    """
    Class to load the objects in a scene
    """

    def __init__(self):
        """Module initializer"""
        self.reset()

    def reset(self):
        """Clear all loaded objects and restart instance numbering."""
        self.instance_idx = 0
        self.loaded_objects = dict()

    @property
    def static_objects(self):
        """Objects that do not move during the simulation."""
        return [obj for obj in self.loaded_objects.values() if obj.static]

    @property
    def dynamic_objects(self):
        """Objects that participate in the scene dynamics."""
        return [obj for obj in self.loaded_objects.values() if not obj.static]

    def create_object(self, object_info: "object_info.ObjectInfo", mesh: "sl.Mesh", is_static: bool, **obj_mod):
        """
        Proper object setup
        :param mesh:
        :param object_info:
        :param is_static:
        :param obj_mod: Optional object modifiers, specified with a leading 'mod_'.
                        IMPORTANT: scaling is done during mesh loading!!!
        :return: the created and registered sl.Object
        """
        ins_idx = self.instance_idx + 1
        self.instance_idx += 1
        obj = sl.Object(mesh)
        # weight scales with the cube of the linear scale unless overridden
        mod_weight = obj_mod.get("mod_weight", obj_mod.get("mod_scale", 1.0) ** 3)
        obj.mass = object_info.weight * mod_weight
        obj.metallic = object_info.metallic
        obj.roughness = object_info.roughness
        obj.restitution = object_info.restitution
        obj.static_friction = object_info.static_friction
        obj.dynamic_friction = object_info.dynamic_friction
        pose = obj_mod.get("mod_pose", torch.eye(4))
        mod_R = obj_mod.get("mod_R", torch.eye(3))
        pose[:3, :3] = torch.mm(mod_R, pose[:3, :3])
        # translation can be given as a vector (mod_t) or per-axis (mod_x/y/z)
        mod_t = obj_mod.get("mod_t", torch.tensor([obj_mod.get("mod_x", 0.0),
                                                   obj_mod.get("mod_y", 0.0),
                                                   obj_mod.get("mod_z", 0.0)]))
        pose[:3, 3] += mod_t
        obj.set_pose(pose)
        obj.linear_velocity = obj_mod.get("mod_v_linear", torch.tensor([0.0, 0.0, 0.0]))
        obj.angular_velocity = obj_mod.get("mod_v_angular", torch.tensor([0.0, 0.0, 0.0]))
        obj.static = is_static
        obj.instance_index = ins_idx
        self.loaded_objects[ins_idx] = obj
        return obj

    def remove_object(self, instance_id, decrement_ins_idx=True):
        """Remove an object by instance id; returns it, or None if absent.

        NOTE(review): decrementing instance_idx can cause instance-id reuse
        when the removed object is not the most recently created one —
        confirm callers rely on this behavior.
        """
        obj = self.loaded_objects.pop(instance_id, None)
        if decrement_ins_idx and obj is not None:
            self.instance_idx -= 1
        return obj
class DecoratorLoader:
    """
    Class to add random decorative objects to the scene, which do not participate of the scene dynamics.
    It is based on creating an occupancy matrix of the scene, finding empty locations and placing stuff there
    """

    def __init__(self, scene):
        """ Object initializer """
        self.config = SCENARIO_DEFAULTS["decorator"]
        decorations = self.config["decorations"]
        bounds = self.config["bounds"]
        self.bounds = bounds
        self.pi = torch.acos(torch.zeros(1))
        self.scene = scene
        self.mesh_loader = MeshLoader()
        # BUGFIX: removed a stray trailing comma that turned this call into a
        # discarded one-element tuple expression.
        self.mesh_loader.load_meshes(decorations)
        self.meshes = self.mesh_loader.get_meshes()[0]
        # discretized x/y grid covering the configured placement bounds
        self.x_vect = torch.arange(bounds["min_x"], bounds["max_x"] + bounds["res"], bounds["res"])
        self.y_vect = torch.arange(bounds["min_y"], bounds["max_y"] + bounds["res"], bounds["res"])
        return

    def add_object(self, object_loader, object_id):
        """ Loading an object and adding to the loader """
        obj_info, obj_mesh = self.meshes[object_id]
        pose = torch.eye(4)
        obj_mod = {"mod_pose": pose}
        obj = object_loader.create_object(obj_info, obj_mesh, True, **obj_mod)
        self.scene.add_object(obj)
        # shifting object to a free position and adjusting z-coord to be aligned with the table
        position = self.occ_matrix.find_free_spot(obj=obj)
        pose[:2, -1] = position if position is not None else torch.ones(2)
        pose[2, -1] += obj.mesh.bbox.max[-1]
        # Rotating object in yaw direction by a random multiple of 90 degrees
        yaw_angle = random.choice([torch.tensor([i * CONSTANTS.PI / 2]) for i in range(4)])
        angles = torch.cat([yaw_angle, torch.zeros(2)])
        rot_matrix = utils.get_rot_matrix(angles=angles)
        pose[:3, :3] = pose[:3, :3] @ rot_matrix
        obj.set_pose(pose)
        self.occ_matrix.update_occupancy_matrix(obj)
        self.occ_matrix.add_object_margings()
        return

    def decorate_scene(self, object_loader):
        """ Randomly adding some decoration to a scene """
        # initializing occupancy matrix
        self.occ_matrix = OccupancyMatrix(bounds=self.bounds, objects=self.scene.objects)
        # iteratively placing objects while avoiding collision
        N = torch.randint(low=self.config["min_objs"], high=self.config["max_objs"], size=(1,))
        for i in range(N):
            id = torch.randint(low=0, high=len(self.meshes), size=(1,))
            self.add_object(object_loader, object_id=id)
        return
class Source:
    '''
    Source class to define source objects
    '''

    def __init__(self, id, name, category):
        # NOTE: parameter `id` shadows the builtin; kept for caller compatibility.
        self.id = id              # unique source identifier
        self.name = name          # human-readable source name
        self.category = category  # source category label
class Article:
    '''
    Article class to define article objects
    '''

    def __init__(self, name, author, title, description, link, image, publishDate):
        self.name = name                # source name
        self.author = author            # article author
        self.title = title              # headline
        self.description = description  # short summary
        self.link = link                # URL to the full article
        self.image = image              # URL of the cover image
        self.publishDate = publishDate  # publication date
class Top:
    '''
    Top headlines class to define headlines objects
    '''

    def __init__(self, source, author, title, description, link, image):
        self.source = source            # originating news source
        self.author = author            # headline author
        self.title = title              # headline title
        self.description = description  # short summary
        self.link = link                # URL to the full story
        self.image = image              # URL of the cover image
class ArgoWorflow:
    """The ArgoWorflow provide a way to start an argo WF based on an existing template.

    NOTE(review): the class name's spelling is kept as-is — renaming would
    break existing callers.
    """

    def __init__(self):
        """Initialize the ArgoWorflow from the ARGO_CONFIG yaml file.

        Raises:
            Exception: when the config file cannot be read or a mandatory
                key (server/namespace/serviceaccount/template) is missing.
        """
        logger.info("Reading configuration files")
        logger.info(f"Argo config file > {ARGO_CONFIG}")
        try:
            with open(ARGO_CONFIG, 'r') as configfile:
                argoconfig = yaml.load(configfile, Loader=yaml.SafeLoader)
            # read mandatory parameters
            self.server = argoconfig['argoserver']['server']
            self.ns = argoconfig['argoserver']['namespace']
            self.sa = argoconfig['argoserver']['serviceaccount']
            self.template = argoconfig['argoserver']['template']
        except OSError as err:
            raise Exception(f'Could not read argo configuration: {err}')
        except KeyError as err:
            raise Exception(f'Missing mandatory configuration key: {err}')
        except Exception as err:
            raise Exception(f'Unknown error when reading settings: {err}')
        # read non-mandatory parameters
        self.proto = argoconfig['argoserver'].get('protocol', 'http')
        self.param_name = argoconfig['argoserver'].get('event_param_name', 'event')
        self.base64_encode = argoconfig['argoserver'].get('base64_encode', False)
        self.raw_labels = argoconfig['argoserver'].get('labels', [])
        # set a from:veba label
        self.labels = ["from=veba"]
        # add configured labels
        for label in self.raw_labels:
            self.labels.append(f"{label}={self.raw_labels[label]}")

    def submit(self, event: dict):
        """Submit the workflow.

        Args:
            event (dict): event data

        Returns:
            tuple[str, int] on success/most failures; a bare str when the
            server answers with a non-2xx status (kept for compatibility).
        """
        logger.debug("Preparing request data")
        uri = f"{self.proto}://{self.server}/api/v1/workflows/{self.ns}/submit"
        # BUGFIX: build a per-call label list instead of appending to
        # self.labels, which leaked event labels into every later submit.
        labels = self.labels + [
            f"event_id={event.get('id')}",
            f"event_subject={event.get('subject')}",
        ]
        # base64 convertion
        if self.base64_encode:
            event_data = base64.b64encode(
                json.dumps(event).encode('utf-8')
            ).decode()
        else:
            event_data = json.dumps(event)
        # prepare the workflow data
        data = {
            "resourceKind": "WorkflowTemplate",
            "resourceName": self.template,
            "submitOptions": {
                "serviceaccount": self.sa,
                "parameters": [
                    f"{self.param_name}={event_data}"
                ],
                "labels": ','.join(labels)
            }
        }
        logger.debug(json.dumps(data, indent=4, sort_keys=True))
        headers = { "Content-Type": "application/json" }
        logger.info("Submiting workflow")
        try:
            r = requests.post(uri, json=data, headers=headers)
            logger.debug(r.text)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            # NOTE(review): returns a bare string (no status tuple) — kept
            # as-is for caller compatibility, but worth unifying.
            return f"Invalid status code returned: {r.status_code}"
        except Exception as err:
            return f"Unable to make request to argo server {self.server}: {err}", 500
        return "Argo workflow was successfully submited", 200
class TimeseriesPlot:
    """
    Object describes a 1D timeseries.

    Attributes:
        x (np.ndarray) - independent variable
        y (np.ndarray) - dependent variable
        ax (matplotlib.axes.AxesSubplot)
    """

    def __init__(self, x, y, ax=None):
        """
        Instantiate a 1D timeseries.

        Args:
            x (np.ndarray) - independent variable
            y (np.ndarray) - dependent variable
            ax (matplotlib.axes.AxesSubplot) - created automatically if None
        """
        self.x = x
        self.y = y
        # set axis
        if ax is None:
            ax = self.create_figure()
        self.ax = ax

    def create_figure(self):
        """ Instantiate figure. """
        fig, ax = plt.subplots(ncols=1, figsize=(3, 2))
        ax.set_xlim(self.x.min(), self.x.max())
        ax.set_ylim(0, 1.1*self.y.max())
        # BUGFIX: removed a stray trailing comma that made this line a
        # discarded one-element tuple expression.
        ax.set_xlabel('Time (h)')
        ax.set_ylabel('Expression (a.u.)')
        return ax

    def scatter(self,
                color='k',
                alpha=1,
                s=1,
                rasterized=False,
                **additional):
        """
        Scatterplot markers for x and y data.

        Args:
            color (str) - marker color
            alpha (float) - marker alpha
            s (float) - marker size
            rasterized (bool) - if True, rasterize markers
        """
        marker_kw = dict(color=color, s=s, alpha=alpha, lw=0, rasterized=rasterized)
        _ = self.ax.scatter(self.x, self.y, **marker_kw, **additional)

    def average(self,
                ma_type='savgol',
                window_size=100,
                resolution=1,
                smooth=True,
                color='k',
                alpha=1,
                lw=1,
                linestyle=None,
                **additional
                ):
        """
        Plot moving average of x and y data.

        Args:
            ma_type (str) - type of average, 'savgol', 'sliding', or 'binned'
            window_size (int) - size of sliding window or bin (num of cells)
            resolution (int) - sampling resolution for confidence interval
            smooth (bool) - if True, apply secondary savgol filter
            color, alpha, lw, linestyle - formatting parameters
        """
        ma_kw = dict(ma_type=ma_type, window_size=window_size, resolution=resolution, smooth=smooth)
        line_kw = dict(line_color=color, line_alpha=alpha, line_width=lw, linestyle=linestyle)
        # only plot when there are enough samples to fill one window
        if len(self.y) > window_size:
            _ = plot_mean(self.x, self.y, ax=self.ax, **ma_kw, **line_kw, **additional)

    def interval(self,
                 ma_type='sliding',
                 window_size=100,
                 resolution=25,
                 nbootstraps=1000,
                 confidence=95,
                 color='k',
                 alpha=0.5,
                 **additional):
        """
        Plot confidence interval for moving average of x and y data.

        Args:
            ma_type (str) - type of moving average, 'sliding' or 'binned'
            window_size (int) - size of sliding window or bin (num of cells)
            resolution (int) - sampling resolution for confidence interval
            nbootstraps (int) - number of bootstraps
            confidence (float) - confidence interval, between 0 and 100
            color, alpha - formatting parameters
        """
        # define moving average keyword arguments
        ma_kw = dict(ma_type=ma_type,
                     window_size=window_size,
                     resolution=resolution,
                     nbootstraps=nbootstraps,
                     confidence=confidence)
        # define interval shading keyword arguments
        shade_kw = dict(color=color, alpha=alpha)
        # plot confidence interval
        if len(self.y) > window_size:
            plot_mean_interval(self.x,
                               self.y,
                               ax=self.ax,
                               **ma_kw,
                               **shade_kw)

    def plot(self,
             scatter=False,
             average=True,
             interval=False,
             marker_kw=None,
             line_kw=None,
             interval_kw=None,
             ma_kw=None):
        """
        Plot timeseries data.

        Args:
            scatter (bool) - if True, add datapoints
            average (bool) - if True, add moving average
            interval (bool) - if True, add moving average interval
            marker_kw (dict) - keyword arguments for marker formatting
            line_kw (dict) - keyword arguments for line formatting
            interval_kw (dict) - keyword arguments for interval formatting
            ma_kw (dict) - keyword arguments for moving average
        """
        # BUGFIX: replaced mutable-dict default arguments with None sentinels
        marker_kw = {} if marker_kw is None else marker_kw
        line_kw = {} if line_kw is None else line_kw
        interval_kw = {} if interval_kw is None else interval_kw
        ma_kw = {} if ma_kw is None else ma_kw
        # add scattered data
        if scatter:
            self.scatter(**marker_kw)
        # add moving average
        if average:
            self.average(**ma_kw, **line_kw)
        # add confidence interval for moving average
        if interval:
            self.interval(**ma_kw, **interval_kw)
class Sparkfun_QwiicJoystick:
"""CircuitPython class for the Sparkfun QwiicJoystick
Usage:
# import the CircuitPython board and busio libraries
import board
import busio
# Create bus object using the board's I2C port
i2c = busio.I2C(board.SCL, board.SDA)
joystick = QwiicJoystick(i2c) # default address is 0x20
# use QwiicJoystick(i2c, address) for a different address
# joystick = QwiicJoystick(i2c, 0x21)"""
def __init__(self, i2c, address=QWIIC_JOYSTICK_ADDR, debug=False):
"""Initialize Qwiic Joystick for i2c communication."""
self._device = I2CDevice(i2c, address)
# save handle to i2c bus in case address is changed
self._i2c = i2c
self._debug = debug
# public properites
@property
def connected(self):
"""True if the Joystick is connected and a valid id is successful read."""
try:
# Attempt to read the id and see if we get an error
self._read_register(_JOYSTICK_ID)
except ValueError:
return False
return True
@property
def version(self):
"""Firmware version string for joystick."""
major = self._read_register(_JOYSTICK_VERSION1)
minor = self._read_register(_JOYSTICK_VERSION2)
return "v" + str(major) + "." + str(minor)
@property
def horizontal(self):
"""X value from 0 - 1023 of the joystick postion."""
# Read MSB for horizontal joystick position
x_msb = self._read_register(_JOYSTICK_X_MSB)
# Read LSB for horizontal joystick position
x_lsb = self._read_register(_JOYSTICK_X_LSB)
# mask off bytes and combine into 10-bit integer
x = ((x_msb & 0xFF) << 8 | (x_lsb & 0xFF)) >> 6
return x
@property
def vertical(self):
"""Y value from 0 to 1023 of the joystick postion."""
# Read MSB for veritical joystick position
y_msb = self._read_register(_JOYSTICK_Y_MSB)
# Read LSB for vertical joystick position
y_lsb = self._read_register(_JOYSTICK_Y_LSB)
# mask off bytes and combine into 10-bit integer
y = ((y_msb & 0xFF) << 8 | (y_lsb & 0xFF)) >> 6
return y
@property
def button(self):
"""0 if button is down, 1 if button is up."""
button = self._read_register(_JOYSTICK_BUTTON)
return button
# Issue: register 0x08 always contains 1 for some reason, even when cleared
@property
def button_status(self):
"""1 if button pressed between reads, cleared after read."""
# read button status (since last check)
status = self._read_register(_JOYSTICK_STATUS)
# clear button status
self._write_register(_JOYSTICK_STATUS, 0x00)
return status & 0xFF
# public functions
def set_i2c_address(self, new_address):
"""Change the i2c address of Joystick snd return True if successful."""
# check range of new address
if new_address < 8 or new_address > 119:
print("ERROR: Address outside 8-119 range")
return False
# write magic number 0x13 to lock register, to unlock address for update
self._write_register(_JOYSTICK_I2C_LOCK, 0x13)
# write new address
self._write_register(_JOYSTICK_CHANGE_ADDRESS, new_address)
# wait a second for joystick to settle after change
sleep(1)
# try to re-create new i2c device at new address
try:
self._device = I2CDevice(self._i2c, new_address)
except ValueError as err:
print("Address Change Failure")
print(err)
return False
# if we made it here, everything went fine
return True
# No i2c begin function is needed since I2Cdevice class takes care of that
# private functions
def _read_register(self, addr):
# Read and return a byte from the specified 8-bit register address.
with self._device as device:
device.write(bytes([addr & 0xFF]))
result = bytearray(1)
device.readinto(result)
# For some reason, write_then_readinto returns invalid data
# device.write_then_readinto(bytes([addr & 0xFF]), result)
if self._debug:
print("$%02X => %s" % (addr, [hex(i) for i in result]))
return result[0]
def _write_register(self, addr, value):
# Write a byte to the specified 8-bit register address
with self._device as device:
device.write(bytes([addr & 0xFF, value & 0xFF]))
if self._debug:
print("$%02X <= 0x%02X" % (addr, value)) | python | CircuitPython class for the Sparkfun QwiicJoystick
Usage:
# import the CircuitPython board and busio libraries
import board
import busio
# Create bus object using the board's I2C port
i2c = busio.I2C(board.SCL, board.SDA)
joystick = QwiicJoystick(i2c) # default address is 0x20
# use QwiicJoystick(i2c, address) for a different address
# joystick = QwiicJoystick(i2c, 0x21) | 17 | 0.607455 | 82 | 34.308271 | 133 | class |
class Item:
    """
    Class representing store hub files.
    """

    def __init__(self, id: str, base_url: str):
        # NOTE: parameter `id` shadows the builtin; kept for caller compatibility.
        self.id = id
        self.base_url = base_url

    @cached_property
    def public_url(self):
        """ Get public url from item in workspace.

        Cached after the first request since the public link is stable.
        """
        url = f'{self.base_url}/workspace/items/{self.id}/publiclink?gcube-token={self.token}'
        x = requests.get(url)
        # for some reason, the response returns an url with surrounding quote marks
        return x.text[1:-1]

    @property
    def token(self):
        # gcube auth token taken from the ambient context
        return context.token
class VMFCache:
    """ An expandable-size cache for VMFs. This lets us skip the load process
    for VMFs that we've already loaded before, which is helpful for VMFs that
    take a long time to parse.

    Thread-safe: every public method holds the internal re-entrant lock.
    """

    def __init__(self):
        self.maxSize = 1                  # current capacity
        self.data = {}                    # path -> vmf
        self.unusedPaths = set()          # cached paths eligible for eviction
        self.pendingUnusedPaths = set()   # paths marked unused before being cached
        self._mutex = RLock()

    def increase_max_size(self, maxSize):
        ''' Increases the max size of the cache to the given number.
        If the requested max size is less than the current size, this does
        nothing.
        '''
        with self._mutex:
            if maxSize > self.maxSize:
                self.set_max_size(maxSize)

    def set_max_size(self, maxSize):
        """Set capacity, evicting unused entries; raises if entries in use exceed it."""
        with self._mutex:
            if maxSize < self.get_vmf_count():
                raise ValueError("Can't clear enough unused entries!")
            self.evict_unused()
            self.maxSize = maxSize
            assert len(self.data) <= self.maxSize

    def add_vmf(self, vmf):
        """Cache a vmf by its .path, evicting one unused entry if full."""
        vmfPath = vmf.path
        with self._mutex:
            assert len(self.data) <= self.maxSize
            if vmfPath in self.pendingUnusedPaths:
                # This VMF has been preemptively marked as unused.
                # Don't bother caching it.
                self.pendingUnusedPaths.remove(vmfPath)
                return
            if len(self.data) >= self.maxSize:
                if len(self.unusedPaths) > 0:
                    self.evict_unused(limit=1)
                else:
                    raise ValueError("VMF cache limit reached!")
            self.data[vmfPath] = vmf
            assert len(self.data) <= self.maxSize

    def mark_used(self, *vmfPaths):
        """Make the given cached paths ineligible for eviction again."""
        with self._mutex:
            for vmfPath in vmfPaths:
                if vmfPath in self.unusedPaths:
                    self.unusedPaths.remove(vmfPath)

    def mark_unused(self, *vmfPaths):
        """Flag paths as evictable; unknown paths are remembered as pending."""
        with self._mutex:
            for vmfPath in vmfPaths:
                if vmfPath in self.data:
                    self.unusedPaths.add(vmfPath)
                else:
                    self.pendingUnusedPaths.add(vmfPath)

    def evict_unused(self, limit=float('inf')):
        """Evict up to `limit` unused entries (arbitrary order)."""
        with self._mutex:
            for i, unusedPath in enumerate(set(self.unusedPaths)):
                if i >= limit:
                    break
                del self.data[unusedPath]
                self.unusedPaths.remove(unusedPath)
                print("Evicted", unusedPath)
            assert len(self.data) <= self.maxSize

    def has_vmf_path(self, path):
        with self._mutex:
            return path in self.data

    def get_vmfs(self):
        """Return the cached vmfs that are still in use."""
        with self._mutex:
            return [
                vmf for vmf in self.data.values()
                if vmf.path not in self.unusedPaths
            ]

    def get_vmf_count(self):
        """Number of cached entries that are still in use."""
        with self._mutex:
            return len(self.data) - len(self.unusedPaths)
class StateHandler:
    """utilities commonly used when working with states"""

    @staticmethod
    def getDateString(date):
        """Return the date formatted as 'YYYY-M-D'.

        NOTE(review): month/day are not zero-padded, so this is not a true
        ISO date string; kept as-is because stored due_date/deadline values
        appear to use the same format — confirm before changing.
        """
        return str(f"{date.year}-{date.month}-{date.day}")

    @staticmethod
    def getAppropriateState(title):
        """returns appropriate state depending of due_date and deadline"""
        today = StateHandler.getDateString(datetime.now())
        if ItemHandler.getProperty(title, "due_date") == today:
            return "active"
        elif ItemHandler.getProperty(title, "due_date") is None:
            return "upcoming"
        elif ItemHandler.getProperty(title, "deadline") is None:
            return "upcoming"
        elif ItemHandler.getProperty(title, "deadline") == today:
            return "urgent"
        # NOTE(review): falls through to an implicit None when no rule
        # matches — confirm callers handle a None state.
class DBConfig:
    """
    Holds the DB parameters for the web scraping.
    """
    # connection settings
    HOST = "localhost"
    USER = "root"
    # SECURITY(review): credentials are hard-coded in source; move to an
    # environment variable or secrets store.
    PASSWORD = "password"  # not real password, change after pulling this file
    DATABASE = "brbeky1hybvf32t4ufxz"
    # insert statements
    INSERT_CITY_QUERY = "INSERT IGNORE INTO cities(city_name) values (%s)"
    INSERT_LISTINGS_QUERY = "INSERT IGNORE INTO listings(listing_type) values (%s)"
    INSERT_PROPERTY_TYPES_QUERY = "INSERT IGNORE INTO property_types(property_type) values (%s)"
    # foreign-key helper columns / indices
    FK_IDS_LIST = ['listing_id', 'property_type_id', 'city_id']
    PRICE_COLUMN_IDX = 3
    LATITUDE_COLUMN_IDX = -5
    # id lookup statements
    GET_LISTING_TYPE_ID_QUERY = "SELECT id FROM listings WHERE listing_type = %s"
    GET_PROPERTY_TYPE_ID_QUERY = "SELECT id FROM property_types WHERE property_type = %s"
    GET_CITY_ID_QUERY = "SELECT id FROM cities WHERE city_name = %s"
    # tuple/row parsing indices
    TUPLE_FIRST_ELEMENT_IDX = 0
    LISTING_TYPE_IDX = 0
    PROPERTY_TYPE_IDX = 1
    CITY_IDX = 2
    SEPARATOR = ","
    TABLE_FEEDER_COLUMN_IDX = 3
class Configuration:
    """
    Holds the user parameters for the web scraping.
    """
    # class attr: populated by define_parser() with the parsed CLI namespace
    args = None
    # PARAMETERS KWARGS KEYS
    VERBOSE_KEY = 'verbose'
    LIMIT_KEY = 'limit'
    PRINT_KEY = 'to_print'
    SAVE_KEY = 'save'
    DB_KEY = 'to_database'
    FETCH_KEY = 'fetch_info'
    LISTING_TYPE_KEY = 'listing_type'
    # CONSTANTS FOR SCRAPING
    PRINTABLE = set(string.printable)
    SILENCE_DRIVER_LOG = '0'
    BROWSER_WIDTH = 1919
    BROWSER_HEIGHT = 1079
    PROPERTY_LISTING_TYPE = ('buy', 'rent', 'commercial', 'new_homes', 'all')
    LISTING_MAP = {
        'buy': ['buy'],
        'rent': ['rent'],
        'commercial': ['commercial'],
        'new_homes': ['new homes'],
        'all': ['buy', 'rent', 'commercial', 'new homes']
    }
    MAIN_URL = 'https://www.onmap.co.il/en'
    URLS = {'buy': MAIN_URL + '/homes/buy',
            'rent': MAIN_URL + '/homes/rent',
            'commercial': MAIN_URL + '/commercial/rent',
            'new homes': MAIN_URL + '/projects'}
    COLUMNS_NOT_SELENIUM = ['Date', 'City_name', 'Street_name', 'House_number', 'Bathrooms', 'Rooms', 'Floor',
                            'Area[m^2]',
                            'Parking_spots_aboveground', 'Parking_spots_underground', 'Price[NIS]', 'Property_type']
    SCROLL_PAUSE_TIME = 1
    BETWEEN_URL_PAUSE = 3
    SINGLE_ATR_ITEM = 1
    TRIVIAL_NUMBER = 0
    INVALID_FLOOR_TEXT_SIZE = 1
    NOT_SELENIUM_PRINTING_HASH_CONSTANT = 20
    NONE = 'none'
    DICT_PROPERTY_ID = {'id': 'propertiesList'}
    # INDICES FOR PARSING
    NOT_SELENIUM_PARSING_FILE_IDX = 0
    ELEM_TO_SCROLL_IDX = -1
    PRICE_IDX = -1
    CITY_IDX = -1
    ADDRESS_IDX = -2
    PROPERTY_TYPE_IDX = 1
    NUM_OF_ROOMS_IDX = 0
    FLOOR_IDX = 1
    SIZE_IDX = 2
    PARKING_SPACES_IDX = 3
    FILENAME_IDX = -1
    SIZE_TEXT_IDX = 0
    NOT_SELENIUM_REGION_IDX = -1
    URL_SPLIT_SEPARATOR = '/'
    NOT_SELENIUM_SEPARATOR = '.'
    SEPARATOR = ", "
    PROPERTIES_LIST_IDX = 1
    LEN_PROPER = 2
    EMPTY = ""
    DUMMY_REPLACER = 0
    # XPATHS AND SELENIUM COMMANDS
    SCROLL_COMMAND = "arguments[0].scrollIntoView();"
    PROPERTIES_XPATH = "//div[@style='position: relative;']"
    BOTTOM_PAGE_XPATH = "//div[@class='G3BoaHW05R4rguvqgn-Oo']"
    # Handling strings
    ENCODING = "ISO-8859-8"
    COMMERCIAL_FILENAME = "commercial.csv"
    NEW_HOMES_FILENAME = "new_homes.csv"
    PROJECT = 'project'
    COMMERCIAL = 'commercial'
    # DF columns names
    PRICE_COL = 'Price'
    ROOM_COL = 'Rooms'
    FLOOR_COL = 'Floor'
    AREA_COL = 'Area'
    CITY_COL = 'City'
    PARKING_COL = 'Parking_spots'
    PROP_TYPE_COL = 'Property_type'
    LIST_TYPE_COL = 'listing_type'

    @classmethod
    def define_parser(cls):
        """
        Creates the command line arguments and stores the parsed result in cls.args.
        """
        arg_parser = argparse.ArgumentParser(
            description="Scraping OnMap website | Checkout https://www.onmap.co.il/en/")
        arg_parser.add_argument(
            "property_listing_type",
            choices=Configuration.PROPERTY_LISTING_TYPE,
            help="choose which type of properties you would like to scrape",
            type=str)
        arg_parser.add_argument('--limit', '-l',
                                help="limit to n number of scrolls per page", metavar="n",
                                type=int,
                                required=False)
        arg_parser.add_argument("--print", '-p', help="print the results to the screen", action="store_true")
        arg_parser.add_argument("--save", '-s',
                                help="save the scraped information into a csv file in the same directory",
                                action="store_true")
        arg_parser.add_argument("--database", '-d',
                                help="inserts new information found into the on_map database",
                                action="store_true")
        arg_parser.add_argument("--fetch", '-f',
                                help="fetches more information for each property using Nominatim API",
                                action="store_true")
        arg_parser.add_argument("--verbose", '-v', help="prints messages during the scraper execution",
                                action="store_true")
        cls.args = arg_parser.parse_args()
class Logger:
    """
    This class handles logging for the entire web scraping process.

    It exposes a shared ``logger`` object, pre-built message constants
    (defined at module level) and static helpers that build parameterised
    log messages for the scraper's pipeline functions.
    """
    logger = None
    # Pre-built message constants -- defined elsewhere in this module.
    scroll_finished = FINISHED_SCROLLING
    scroll_finished_new_home = HOMES_FINISHED_SCROLLING
    end_scroll_function = SCROLL_FINISHED
    end_scroll_new_home = NEW_HOMES_FINISHED
    fetch_more_init = MORE_ATTRIBUTES_STARTING
    geofetcher_init = GEOFETCHER_INITIALIZED
    end_fetch_more_att = FINISHED_SUCCESSFULLY_FETCH
    main_cli = PARSER_WAS_SUCCESSFUL
    main_no_url = NO_URLS_FOUND_TO_SCRAPE
    main_scrape_obj = SCRAPER_OBJECT
    main_closing_driver = CLOSING_DRIVER
    main_quit_drive = QUITTING_DRIVER
    error_connect_server = ERROR_CONNECTION
    connection_successful = DB_SUCCESSFUL
    commit_successful = COMMIT_TO_DB_SUCCESSFUL

    @classmethod
    def start_logging(cls):
        """
        Initialise the shared logger: a DEBUG-level file handler appending to
        'web_scraper.log' plus a CRITICAL-level stdout handler.
        """
        cls.logger = logging.getLogger('on_map_scraper')
        cls.logger.setLevel(logging.DEBUG)
        # NOTE(review): the single quotes are part of the format string and
        # appear verbatim around every record -- confirm this is intentional.
        formatter = logging.Formatter("'%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'")
        # create a file handler and add it to logger
        file_handler = logging.FileHandler('web_scraper.log', mode='a')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        cls.logger.addHandler(file_handler)
        # stdout only receives CRITICAL records
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(logging.CRITICAL)
        stream_handler.setFormatter(formatter)
        cls.logger.addHandler(stream_handler)

    @staticmethod
    def scroll_error(ele_to_scroll):
        """
        Message for the logger in case of error when scrolling.
        ----
        :param ele_to_scroll: html element to look for when scrolling
        :type ele_to_scroll: str
        :return: error message
        :rtype: str
        """
        return f"_scroll: ele_to_scroll should have a content but it is {ele_to_scroll}"

    @staticmethod
    def scroll_new_homes(prev_len):
        """
        Message for the logger when scrolling new homes.
        ----
        :param prev_len: number of elements found when scrolling
        :type prev_len: int
        :return: message
        :rtype: str
        """
        return f"_scroll_new_homes:prev_len {prev_len}"

    @staticmethod
    def end_save_csv(url):
        """
        Message for the logger when finished saving an url content to a csv.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"_save_to_csv: finished {url}"

    @staticmethod
    def init_print_save_df(url, to_print, save, to_database, verbose, listing_type):
        """
        Message for the logger at beginning of print_save_df function.
        ----
        :param url: url address
        :type url: str
        :param listing_type: type of listing: buy, rent, commercial, new_home
        :type listing_type: str
        :param to_print: if true, it prints the dataframe to the screen
        :type to_print: bool
        :param save: if true, it saves the dataframe into a csv file
        :type save: bool
        :param to_database: if true, it saves the new information from the dataframe to the database
        :type to_database: bool
        :param verbose: if true, it prints relevant information to the user
        :type verbose: bool
        :return: message
        :rtype: str
        """
        return f"_print_save_df: Checking if print {url}, to_print={to_print}, save={save}, to_database={to_database}, " \
               f"verbose={verbose}, listing_type={listing_type}"

    @staticmethod
    def saving_print_save_df(url, to_print, save, to_database, verbose, listing_type):
        """
        Message for the logger before saving to a csv in print_save_df function.
        ----
        :param url: url address
        :type url: str
        :param listing_type: type of listing: buy, rent, commercial, new_home
        :type listing_type: str
        :param to_print: if true, it prints the dataframe to the screen
        :type to_print: bool
        :param save: if true, it saves the dataframe into a csv file
        :type save: bool
        :param to_database: if true, it saves the new information from the dataframe to the database
        :type to_database: bool
        :param verbose: if true, it prints relevant information to the user
        :type verbose: bool
        :return: message
        :rtype: str
        """
        return f"_print_save_df: Saving into csv {url}, to_print={to_print}, save={save}, to_database={to_database}, " \
               f"verbose={verbose}, listing_type={listing_type}"

    @staticmethod
    def db_print_save_df(url, to_print, save, to_database, verbose, listing_type):
        """
        Message for the logger before saving into the db in print_save_df function.
        ----
        :param url: url address
        :type url: str
        :param listing_type: type of listing: buy, rent, commercial, new_home
        :type listing_type: str
        :param to_print: if true, it prints the dataframe to the screen
        :type to_print: bool
        :param save: if true, it saves the dataframe into a csv file
        :type save: bool
        :param to_database: if true, it saves the new information from the dataframe to the database
        :type to_database: bool
        :param verbose: if true, it prints relevant information to the user
        :type verbose: bool
        :return: message
        :rtype: str
        """
        return f"_print_save_df: Saving into db {url}, to_print={to_print}, save={save}, to_database={to_database}, " \
               f"verbose={verbose}, listing_type={listing_type}"

    @staticmethod
    def end_print_save(url):
        """
        Message for the logger when finished running the function _print_save_df.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"_print_to_save: finished {url}"

    @staticmethod
    def pulling_row_info(row_number):
        """
        Message for the logger when pulling row information in fetch_more_attributes function.
        ----
        :param row_number: row number in the dataframe
        :type row_number: int
        :return: message
        :rtype: str
        """
        return f"fetch_more_attributes: Pulling info for row {row_number}"

    @staticmethod
    def exception_fetch_more_attributes(row_number, exception):
        """
        Message for the logger when an exception occurred when pulling row information
        in fetch_more_attributes function.
        ----
        :param row_number: row number in the dataframe
        :type row_number: int
        :param exception: error message
        :type exception: Exception
        :return: message
        :rtype: str
        """
        return f"fetch_more_attributes: row {row_number}, {exception}"

    @staticmethod
    def not_fetched(fetch_info):
        """
        Message for the logger when additional information was not fetched.
        ----
        :param fetch_info: whether fetching was requested
        :type fetch_info: bool
        :return: message
        :rtype: str
        """
        return f"fetch_more_attributes: fetch info == {fetch_info}"

    @staticmethod
    def creating_df(url):
        """
        Message for the logger when _create_df is called.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"create_df: Creating dataframe from {url}"

    @staticmethod
    def created_df(url):
        """
        Message for the logger when _create_df is finished.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"create_df: Created dataframe from {url} successfully"

    @staticmethod
    def scraping(url):
        """
        Message for the logger when scrap_url is called.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"scrap_url: Scrolling {url}"

    @staticmethod
    def before_scroll(url):
        """
        Message for the logger before _scroll is called.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"scrap_url: Scrolling {url} - not new_homes"

    @staticmethod
    def before_scroll_new_home(url):
        """
        Message for the logger before _scroll_new_homes is called.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"scrap_url: Scrolling {url} - new_homes"

    @staticmethod
    def before_scraping(url):
        """
        Message for the logger before starting to actually scrape in scrap_url.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"scrap_url: Scraping {url}"

    @staticmethod
    def finished_scraping(url):
        """
        Message for the logger at the end of scrap_url.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"scrap_url: finished {url}"

    @staticmethod
    def main_scraping(url):
        """
        Message for the logger before calling scrap_url.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"main: Scraping {url}"

    @staticmethod
    def main_scraped_success(url):
        """
        Message for the logger after all scraping operations are done for the particular url.
        ----
        :param url: url address
        :type url: str
        :return: message
        :rtype: str
        """
        return f"main: Scrapped {url} successfully"

    @staticmethod
    def connect_to_server(listing, verbose):
        """
        Message for the logger before connecting to db server.
        ----
        :param listing: listing type of the dataframe
        :type listing: str
        :param verbose: whether or not to print relevant info to the user
        :type verbose: bool
        :return: message
        :rtype: str
        """
        return f"_save_to_data_base: Connecting to the db listing_type={listing}, verbose={verbose}"

    @staticmethod
    def insert_city_error(city):
        """
        Message for the logger on inserting a city that already exists in the cities table.
        ----
        :param city: city already in table
        :type city: str
        :return: message
        :rtype: str
        """
        return f"_save_to_data_base: {city} is already in cities."

    @staticmethod
    def insert_listing_error(listing):
        """
        Message for the logger on inserting a listing type that already exists in the
        listings table. (Was a duplicate ``insert_city_error`` definition that shadowed
        the city variant; renamed so both are reachable.)
        ----
        :param listing: listing already in table
        :type listing: str
        :return: message
        :rtype: str
        """
        return f"_save_to_data_base: {listing} is already in listings."

    @staticmethod
    def insert_property_error(prop):
        """
        Message for the logger on inserting a property that already exists in the
        properties table. (Was a third duplicate ``insert_city_error`` definition;
        renamed so all three variants are reachable, and the parameter no longer
        shadows the ``property`` builtin.)
        ----
        :param prop: property already in table
        :type prop: str
        :return: message
        :rtype: str
        """
        return f"_save_to_data_base: {prop} is already in properties."

    @staticmethod
    def insert_row_error(row):
        """
        Message for the logger when a whole row is already present in the properties table.
        ----
        :param row: row already in table
        :type row: pd.Series
        :return: message
        :rtype: str
        """
        return f"_save_to_data_base: {row} is already in properties. "
This class handles logging for the entire web scraping process
| 12 | 0.594302 | 122 | 32.137255 | 357 | class |
class ExperimentConfig:
    """
    Configuration Parameters for experiments.
    """
    # Each well is imaged at 20 fields of view -- a DAPI/FITC image pair per field.
    FIELDS_PER_WELL = 20
    # Small smoothing constant to avoid division by zero.
    EPS = 0.00000001
    # Model weight paths
    # -------------------------------------------------------------------------
    NUCLEI_MASK_RCNN_WEIGHTS_PATH = "deepretina_final.h5"
    NEURITE_SEGMENTATION_MODEL_PATH = "neurite_unet_weights.h5"
    # Boolean mask with a circular search area used to look for cells
    # near neurite endpoints
    # ----------------------------------------------------------------------------
    # search radius (pixels) around each neurite endpoint
    RADIUS = 15
    # edge length of the square mask that contains the disk
    square_edge_length = (RADIUS + 1) * 2 + 1
    y, x = np.ogrid[: square_edge_length, : square_edge_length]
    # True inside a disk of radius RADIUS centred in the square
    DISK_MASK = (x - (RADIUS + 1)) ** 2 + (y - (RADIUS + 1)) ** 2 <= RADIUS ** 2
    # Outlier Removal
    # ----------------------------------------------------------------------------
    # a well needs at least this many valid fields for its results to count
    MIN_VALID_FIELDS = 5
    # Per-field validity thresholds:
    # fewest cells a valid field may contain
    MIN_CELL_NUM = 50
    # most cells a valid field may contain
    MAX_CELL_NUM = 1000
    # largest allowed fraction of un-viable cells in a field
    MAX_APOP_RATIO = 0.25
    # largest allowed fraction of extremely clustered cells
    MAX_HIGH_DENSITY_RATIO = 0.45
    # Cell-density parameters: a cell sits in a high-density region when at
    # least MIN_SAMPLES neighbours fall within a D_EPS radius around it
    D_EPS = 100
    MIN_SAMPLES = 10
    # Unsupervised outlier removal:
    # a line is fitted with the Random Sample Consensus (RANSAC) algorithm,
    # drawing RANSAC_MIN_SAMPLES random samples per iteration
    RANSAC_MIN_SAMPLES = 5
    assert RANSAC_MIN_SAMPLES <= MIN_VALID_FIELDS, "The minimal number of valid fields has to be equal or larger" \
                                                   " than the number of minimal ransac samples or else" \
                                                   " the algorithm might not work"
    # fields far from the RANSAC line get a low probability of fitting it;
    # anything below this threshold is marked un-valid
    PROBABILITY_THRESHOLD = 0.05
    # Connection Probability
    # ----------------------
    # connection distance bands
    SHORT_DISTANCE = 100
    INTERMEDIATE_DISTANCE = 300
    LONG_DISTANCE = 400
    # connection_pdf constants: connection probability is evaluated between
    # MIN_DISTANCE and MAX_DISTANCE
    MIN_DISTANCE = 0
    MAX_DISTANCE = 1000
    # width of each pdf bin -- probabilities are computed over the ranges
    # (MIN_DISTANCE : BIN_SIZE), (MIN_DISTANCE + BIN_SIZE : MIN_DISTANCE + 2*BIN_SIZE),
    # ..., (MAX_DISTANCE - BIN_SIZE : MAX_DISTANCE)
    BIN_SIZE = 25

    def display(self):
        """Display Configuration values."""
        print("\nConfigurations:")
        for name in dir(self):
            if name.isupper():
                print(f"{name:30} {getattr(self, name)}")
        print("\n")
Configuration Parameters for experiments
| 16 | 0.60173 | 115 | 35.948454 | 97 | class |
class LaneGeneratorTSMixin:  # (LaneGeneratorCU):
    """ Generates a time series of lanes.

    Mixin: relies on the composing class to provide ``_file_iterator``,
    ``_generate_one_Xy``, ``nb_time_steps`` and ``batch_size_ts``.
    """
    def __iter__(self):
        # The object is its own iterator; state lives in the file iterator.
        return self

    def __next__(self):
        """ Iterator over the frames of the movie.

        Assembles consecutive frames into batches of ``batch_size_ts`` frames,
        producing ``nb_time_steps`` completed batches per call.
        Returns a pair of ndarrays (inputs, labels).
        """
        curr_idx_batch = 0   # frames collected into the batch under construction
        curr_time_step = 0   # number of completed (flushed) batches
        X_list = []          # completed input batches
        y_list = []          # completed label batches
        X_list_ts = []       # inputs of the batch under construction
        y_list_ts = []       # labels of the batch under construction
        while curr_time_step < self.nb_time_steps:
            curr_filename = next(self._file_iterator())
            if curr_idx_batch < self.batch_size_ts:
                # Still filling the current batch: generate one (X, y) pair.
                X, y = self._generate_one_Xy(curr_filename)
                X_list_ts.append(X)
                y_list_ts.append(y)
                curr_idx_batch += 1
            else:
                # Batch full: flush it and start a new one.
                # NOTE(review): the filename fetched just above is discarded on
                # this branch, and the last partially-built batch is never
                # flushed after the loop -- confirm both are intentional.
                X_list.append(np.array(X_list_ts))
                y_list.append(np.array(y_list_ts))
                curr_idx_batch = 0
                curr_time_step += 1
                X_list_ts = []
                y_list_ts = []
        return np.array(X_list), np.array(y_list)

    def show_movie_with_lanes(self, wait_between_frames : int = 100 ):
        """ Shows the movie from images.

        :param wait_between_frames: delay in milliseconds passed to cv2.waitKey
            between displayed frames
        """
        for X, y in self:
            # X, y of shape (batch_size, nb_time_steps, image_x, image_y, nb_channels)
            for batch_X, batch_y in zip(X, y):
                for X_time_step, y_time_step in zip(batch_X, batch_y):
                    # Overlay the lane labels (0.8) on the frame (0.6) and display.
                    cv2.imshow('TS Video', cv2.addWeighted(X_time_step, 0.6, y_time_step, 0.8, 0))
                    cv2.waitKey(wait_between_frames)
| 16 | 0.489446 | 98 | 31.276596 | 47 | class |
class Point:
    '''
    A Point in a bidimensional plane with coordinates (x, y) and an index to identify it.
    '''
    def __init__(self, index: int, x: int, y: int):
        self.__index = index
        self.__x = x
        self.__y = y

    @property
    def index(self) -> int:
        '''
        Index that works as an identification.
        '''
        return self.__index

    @property
    def x(self) -> int:
        '''
        Coordinate X.
        '''
        return self.__x

    @property
    def y(self) -> int:
        '''
        Coordinate Y.
        '''
        return self.__y

    def distance(self, point: 'Point') -> float:
        '''
        Calculates the Euclidean distance to another Point.
        '''
        # hypot takes the magnitudes, so the signs of the deltas are irrelevant.
        return math.hypot(self.x - point.x, self.y - point.y)

    def __str__(self) -> str:
        '''
        Returns the string representation of a Point:
        <index> <x> <y>
        '''
        return f'{self.index} {self.x} {self.y}'

    def __eq__(self, point: 'Point') -> bool:
        # Guard-clause form of the original and-chain; short-circuits identically.
        if self.index != point.index:
            return False
        return self.x == point.x and self.y == point.y

    def __hash__(self):
        # Consistent with __eq__: equal points hash equally.
        return hash((self.index, self.x, self.y))

    def __repr__(self) -> str:
        # Compact repr: just the identifying index.
        return str(self.index)
A Point in a bidimensional plane with coordinates (x, y) and an index to identify it.
| 13 | 0.493863 | 89 | 23.75 | 56 | class |
class FinanceHMM:
    """
    Class to compute multivariate mixture distributions from n_assets based on a given HMM.
    Computes posteriors, state sequences as well as expected and forecasted returns and standard deviations.
    Transforms lognormal multivariate distributions into normal distributions and combines them into mixtures.

    Parameters
    ----------
    X : ndarray of shape (n_samples,)
        Times series data used to train the HMM.
    df : DataFrame of shape (n_samples, n_assets)
        Times series data used when estimating expected returns and covariances.
    model : hidden markov model
        Hidden Markov Model object.

    Attributes
    ----------
    preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
        mean predictions for each asset h time steps into the future at each time t.
    cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
        predicted covariance matrix h time steps into the future at each time t.
    """
    def __init__(self, model):
        self.model = model
        self.n_states = model.n_states
        self.n_assets = None

    def get_cond_asset_dist(self, df, state_sequence):
        """
        Compute conditional multivariate normal distribution of all assets in each state.

        Assumes returns follow a multivariate log-normal distribution. Proceeds by first
        getting the conditional log of means and covariances and then transforming them
        back into normal variables.

        Parameters
        ----------
        df : DataFrame of shape (n_samples, n_assets)
            log-returns for assets
        state_sequence : ndarray of shape (n_samples,)
            Decoded state sequence

        Returns
        -------
        mu : ndarray of shape (n_states, n_assets)
            Conditional mean value of each assets
        cov : ndarray of shape (n_states, n_assets, n_assets)
            Conditional covariance matrix
        """
        self.n_assets = df.shape[1]
        # Copy the slice before adding the helper column: assigning into an
        # iloc slice would otherwise risk mutating the caller's DataFrame and
        # raise pandas' SettingWithCopyWarning.
        df = df.iloc[-len(state_sequence):].copy()
        df['state_sequence'] = state_sequence
        groupby_state = df.groupby('state_sequence')
        log_mu, log_cov = groupby_state.mean(), groupby_state.cov()
        state_count = groupby_state.count().max(axis=1)  # Num obs in each state
        mu = np.zeros(shape=(self.n_states, self.n_assets))
        cov = np.zeros(shape=(self.n_states, self.n_assets, self.n_assets))
        # Loop through n_states present in current sample
        for s in log_mu.index:
            if state_count[s] > 1:  # If state_count not >1, covariance will return NaN
                mu[s], cov[s] = self.logcov_to_cov(log_mu.loc[s], log_cov.loc[s])
        return mu, cov

    def get_uncond_asset_dist(self, posteriors, cond_mu, cond_cov):
        """
        Compute unconditional multivariate normal distribution of all assets.

        Parameters
        ----------
        posteriors: ndarray of shape (n_preds, n_states)
            predicted posterior probability of being in state i at time t+h.
        cond_mu : ndarray of shape (n_states, n_assets)
            Conditional mean value of each assets
        cond_cov : ndarray of shape (n_states, n_assets, n_assets)
            Conditional covariance matrix

        Returns
        -------
        pred_mu : ndarray of shape (n_preds, n_assets)
            Conditional mean value of each assets
        pred_cov : ndarray of shape (n_preds, n_assets, n_assets)
            Conditional covariance matrix
        """
        pred_mu = np.inner(cond_mu.T, posteriors).T  # shape (n_preds, n_assets)
        cov_x1 = np.inner(posteriors, cond_cov.T)  # shape (n_preds, n_assets, n_assets)
        cov_x2 = pred_mu - cond_mu[:, np.newaxis]  # shape (n_states, n_preds)
        cov_x3 = np.einsum('ijk,ijk->ij', cov_x2, cov_x2)  # Equal to np.sum(X**2, axis=-1)
        cov_x4 = np.einsum('ij,ij->i', cov_x3.T, posteriors)  # Equal to np.sum(X3*posteriors, axis=1)
        pred_cov = cov_x1 + cov_x4[:, np.newaxis, np.newaxis]  # shape (n_preds, n_assets, n_assets)
        return pred_mu, pred_cov

    @staticmethod
    def logcov_to_cov(log_mu, log_cov):
        """
        Transforms log returns' means and covariances back into regular formats.

        Parameters
        ----------
        log_mu : DataFrame of shape (n_assets,)
        log_cov : DataFrame of shape (n_assets, n_assets)

        Returns
        -------
        mu : ndarray of shape (n_assets)
            Mean value of each assets
        cov : ndarray of shape (n_assets, n_assets)
            Covariance matrix
        """
        diag = np.diag(log_cov)
        mu = np.exp(log_mu + np.diag(log_cov) / 2) - 1
        x1 = np.outer(mu, mu)  # Multiply all combinations of the vector mu -> 2-D array
        x2 = np.outer(diag, diag) / 2
        cov = np.exp(x1 + x2) * (np.exp(log_cov) - 1)
        return mu, cov

    def stein_shrinkage(self, cond_cov, shrinkage_factor=(0.2, 0.4)):
        """Stein-type shrinkage of conditional covariance matrices.

        Shrinks each state's covariance matrix towards a scaled identity,
        using one shrinkage factor per state.
        """
        shrinkage_factor = np.array(shrinkage_factor)
        # Turn it into 3D to make it broadcastable with cond_cov
        shrink_3d = shrinkage_factor[:, np.newaxis, np.newaxis]
        term1 = (1-shrink_3d) * cond_cov
        # Turn term2 into 3D to make it broadcastable with term3
        term2 = (shrinkage_factor * np.trace(cond_cov.T) * 1/self.n_assets)  # Shape (n_states,)
        term3 = np.broadcast_to(np.identity(self.n_assets)[..., np.newaxis],
                                (self.n_assets, self.n_assets, self.n_states)).T  # Shape (n_states, n_assets, n_assets)
        term4 = term2[:, np.newaxis, np.newaxis] * term3
        cond_cov = term1 + term4
        return cond_cov

    def fit_model_get_uncond_dist(self, X, df, n_preds=15, shrinkage_factor=(0.2, 0.4), verbose=False):
        """
        From data, fit hmm model, predict posteriors probabilities and return unconditional distribution.

        Wraps model.fit_predict, get_cond_asset_dist and get_uncond_asset_dist methods into one.

        Parameters
        ----------
        X : ndarray of shape (n_samples,)
            Time series of data
        df : DataFrame of shape (n_samples, n_assets)
            Historical returns for each asset i.
        n_preds : int, default=15
            Number of h predictions
        shrinkage_factor : tuple, default=(0.2, 0.4)
            Per-state Stein shrinkage intensities applied to the conditional covariances.
        verbose : boolean, default=False
            Get verbose output

        Returns
        -------
        pred_mu : ndarray of shape (n_preds, n_assets)
            Conditional mean value of each assets
        pred_cov : ndarray of shape (n_preds, n_assets, n_assets)
            Conditional covariance matrix
        """
        self.n_assets = df.shape[1]
        # fit model, return decoded historical state sequnce and n predictions
        # state_sequence is 1D-array with same length as X_rolling
        # posteriors is 2D-array with shape (n_preds, n_states)
        state_sequence, posteriors = self.model.fit_predict(X, n_preds=n_preds, verbose=verbose)
        # Compute conditional mixture distributions in rolling period
        cond_mu, cond_cov = \
            self.get_cond_asset_dist(df, state_sequence)  # shapes (n_states, n_assets), (n_states, n_assets, n_assets)
        cond_cov = self.stein_shrinkage(cond_cov, shrinkage_factor=shrinkage_factor)
        # Transform into unconditional moments at time t
        # Combine with posteriors to also predict moments h steps into future
        # shapes (n_preds, n_assets), (n_preds, n_assets, n_assets)
        pred_mu, pred_cov = self.get_uncond_asset_dist(posteriors, cond_mu, cond_cov)
        return pred_mu, pred_cov, posteriors, state_sequence
Class to compute multivariate mixture distributions from n_assets based on a given HMM.
Computes posteriors, state sequences as well as expected and forecasted returns and standard deviations.
Transforms lognormal multivariate distributions into normal distributions and combines them into mixtures.
Parameters
----------
X : ndarray of shape (n_samples,)
Times series data used to train the HMM.
df : DataFrame of shape (n_samples, n_assets)
Times series data used when estimating expected returns and covariances.
model : hidden markov model
Hidden Markov Model object.
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
| 15 | 0.615365 | 119 | 41.910615 | 179 | class |
class Backtester:
"""
Backtester for Hidden Markov Models.
Parameters
----------
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
"""
def __init__(self, window_len=1700):
    """
    :param window_len: number of observations in each rolling training window
    :type window_len: int
    """
    # Filled by rolling_preds_cov_from_hmm:
    self.preds = None       # shape (n_samples - window_len, n_preds, n_assets)
    self.cov = None         # shape (n_samples - window_len, n_preds, n_assets, n_assets)
    self.n_states = None    # set from the model passed to rolling_preds_cov_from_hmm
    self.n_assets = None    # set from the returns DataFrame
    self.window_len = window_len
def rolling_preds_cov_from_hmm(self, X, df_logret, model, n_preds=15, window_len=None, shrinkage_factor=(0.3, 0.3), verbose=False):
    """
    Backtest based on rolling windows.

    Fits a Hidden Markov model within each rolling window and computes the unconditional
    multivariate normal mixture distributions for each asset in the defined universe.

    Parameters
    ----------
    X : ndarray of shape (n_samples,)
        Log-returns. Times series data used to train the HMM.
    df_logret : DataFrame of shape (n_samples, n_assets)
        Log-returns. Times series data used when estimating expected returns and covariances.
    model : hidden markov model
        Hidden Markov Model object
    n_preds : int, default=15
        Number of h predictions
    window_len : int, optional
        Rolling window length; falls back to ``self.window_len`` when omitted.
    shrinkage_factor : tuple, default=(0.3, 0.3)
        Per-state Stein shrinkage intensities forwarded to FinanceHMM.
    verbose : boolean, default=False
        Make output verbose

    Returns
    -------
    preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
        Unconditional mean values for each asset
    cov : ndarray of shape (n_samples-window_len, n_preds, n_assets, n_assets)
        Unconditional covariance matrix at each time step t, h steps into future
    """
    self.n_states = model.n_states
    self.n_assets = df_logret.shape[1]
    # Identity test (`is None`), not `== None` -- PEP 8, and robust should a
    # non-scalar ever be passed.
    if window_len is None:  # Ensure class and function window_lens match
        window_len = self.window_len
    else:
        self.window_len = window_len
    finance_hmm = FinanceHMM(model)  # class for computing asset distributions and predictions.
    # Create 3- and 4-D array to store predictions and covariances
    self.preds = np.empty(shape=(len(df_logret) - window_len, n_preds, self.n_assets))  # 3-D array
    self.cov = np.empty(shape=(len(df_logret) - window_len, n_preds, self.n_assets, self.n_assets))  # 4-D array
    self.timestamp = np.empty(shape=len(df_logret) - window_len, dtype=object)
    for t in tqdm.trange(window_len, len(df_logret)):
        # Slice data into rolling sequences
        df_rolling = df_logret.iloc[t-window_len: t]
        X_rolling = X.iloc[t-window_len: t]
        # fit rolling data with model, return predicted means and covariances, posteriors and state sequence
        pred_mu, pred_cov, posteriors, state_sequence = \
            finance_hmm.fit_model_get_uncond_dist(
                X_rolling, df_rolling, shrinkage_factor=shrinkage_factor, n_preds=n_preds, verbose=verbose)
        self.timestamp[t - window_len] = df_rolling.index[-1]
        self.preds[t - window_len] = pred_mu
        self.cov[t - window_len] = pred_cov
    return self.preds, self.cov
def backtest_mpc(self, df_rets, preds, covariances, n_preds=15, port_val=1000,
                 start_weights=None, max_drawdown=0.4, max_holding_rf=1.,
                 max_leverage=2.0, gamma_0=5, kappa1=0.008,
                 rho2=0.0005, rho_rf=0.0001, max_holding=0.4, short_cons="LLO",
                 rf_included=True, eps=1e-6):
    """
    Wrapper for backtesting MPC models on given data and predictions.

    Parameters
    ----------
    df_rets : DataFrame of shape (n_samples, n_assets)
        Historical returns for each asset i. Cash must be at the last column position.
    preds : ndarray of shape (n_samples, n_preds, n_assets)
        list of return predictions for each asset h time steps into the future. Each element in list contains,
        from time t, predictions h time steps into the future.
    covariances : ndarray of shape (n_samples, n_preds, n_assets, n_assets)
        list of covariance matrix of returns for each time step t.
    port_val : float, default=1000
        Starting portfolio value.
    start_weights : ndarray of shape (n_assets,)
        Current (known) portfolio weights at the start of backtest. Default is 100% allocation to cash.
        Cash must be the last column in df_rets.

    Returns
    -------
    (annual_ret, annual_std, annual_turnover) : tuple of floats
        Annualised portfolio return, standard deviation and turnover.
    """
    self.port_val = np.array([0, port_val])
    self.port_ret = np.array([1, 1])
    self.n_assets = df_rets.shape[1]
    self.n_preds = n_preds
    df_rets = df_rets.iloc[-len(preds):]  # Slice returns to match preds
    # `is None`, not `== None`: an ndarray compared with == yields an
    # elementwise array, which would make this `if` raise ValueError.
    if start_weights is None:  # Standard init with 100% allocated to cash
        start_weights = np.zeros(self.n_assets)
        start_weights[-1] = 1.
    self.weights = np.zeros(shape=(len(preds) + 1, self.n_assets))  # len(preds) + 1 to include start weights
    self.weights[0] = start_weights
    gamma = np.array([])  # empty array
    trade_cost, turnover = [], []
    # Instantiate MPC object
    mpc_solver = MPC(rets=preds[0], covariances=covariances[0], prev_port_vals=self.port_val,
                     start_weights=self.weights[0], max_drawdown=max_drawdown, gamma_0=gamma_0,
                     kappa1=kappa1, rho2=rho2, rho_rf=rho_rf, max_holding=max_holding, max_holding_rf=max_holding_rf,
                     max_leverage=max_leverage, short_cons=short_cons, rf_included=rf_included, eps=eps)
    for t in tqdm.trange(preds.shape[0]):
        # Update MPC object
        mpc_solver.rets = np.array(preds[t])
        mpc_solver.cov = np.array(covariances[t])
        mpc_solver.start_weights = self.weights[t]
        mpc_solver.prev_port_vals = self.port_val
        # Solve MPC problem at time t and save weights
        weights_mpc = mpc_solver.cvxpy_solver(verbose=False)  # ndarray of shape (n_preds, n_assets)
        self.weights[t + 1] = weights_mpc[0]  # Only use first forecasted weights
        gamma = np.append(gamma, mpc_solver.gamma)
        # NOTE(review): this takes weights[t] - weights[t-1] (wrapping to
        # weights[-1] at t=0); the trade just executed is weights[t+1] -
        # weights[t] -- confirm which difference the costs should be based on.
        delta_weights = self.weights[t] - self.weights[t-1]
        # self.weights and df_rets are one shifted to each other. Time periods should match.
        gross_ret = (self.weights[t + 1] @ (1 + df_rets.iloc[t]))
        shorting_cost = self.short_costs(self.weights[t + 1], rf_return=df_rets.iloc[t, -1])
        trans_cost = self.transaction_costs(delta_weights, trans_cost=0.001)
        port_ret = (gross_ret-shorting_cost) * (1-trans_cost)
        new_port_val = port_ret * self.port_val[-1]
        self.port_ret = np.append(self.port_ret, port_ret)
        self.port_val = np.append(self.port_val, new_port_val)
        trade_cost.append(trans_cost)
        turnover.append(np.linalg.norm(delta_weights, ord=1) / 2)  # Half L1 norm
    self.port_val = self.port_val[1:]  # Throw away first observation since it is artificially set to zero
    self.port_ret = self.port_ret[2:]
    self.gamma = gamma
    # Annualized average trading cost
    self.trans_cost = np.array(trade_cost)
    self.annual_trans_cost = 252 / len(self.trans_cost) * self.trans_cost.sum()
    # Compute average annualized portfolio turnover
    self.daily_turnover = np.array(turnover)
    self.annual_turnover = 252 / len(self.daily_turnover) * self.daily_turnover.sum()
    # Compute return & std.
    n_years = len(self.port_val) / 252
    annual_ret = self.port_ret.prod()**(1/n_years) - 1
    annual_std = self.port_ret.std(ddof=1) * np.sqrt(252)
    return annual_ret, annual_std, self.annual_turnover
def gridsearch_mpc(self, grid, df_rets, preds, covariances, n_preds=15, port_val=1000,
                   start_weights=None, max_drawdown=1000, max_leverage=2.0, gamma_0=5, kappa1=0.008,
                   rho2=0.0005, max_holding=0.4, short_cons="LO", rf_included=True, eps=1e-6):
    """
    Grid-search MPC hyperparameters by running backtest_mpc on every combination.

    Parameters
    ----------
    grid : dict
        Keys 'max_holding', 'trans_costs', 'holding_costs', 'holding_costs_rf',
        each mapping to an iterable of candidate values.
    df_rets, preds, covariances
        Forwarded unchanged to backtest_mpc.

    Returns
    -------
    results : pd.DataFrame
        One row per grid point with the parameters and the resulting annualised
        return, standard deviation and turnover. Also stored as self.gridsearch_df.
    """
    results = pd.DataFrame()
    for max_holding in grid['max_holding']:
        for trans_costs in grid['trans_costs']:
            for holding_costs in grid['holding_costs']:
                for holding_costs_rf in grid['holding_costs_rf']:
                    print(f"""Computing grid -- max_holding {max_holding} -- trans_costs {trans_costs} holding_costs {holding_costs} holding_costs_rf {holding_costs_rf}""")
                    annual_ret, annual_std, annual_turnover = self.backtest_mpc(
                        df_rets, preds, covariances, n_preds=n_preds, port_val=port_val,
                        start_weights=start_weights, max_drawdown=max_drawdown, max_leverage=max_leverage,
                        gamma_0=gamma_0, kappa1=trans_costs, rho2=holding_costs, rho_rf=holding_costs_rf, max_holding=max_holding,
                        short_cons=short_cons, rf_included=rf_included, eps=eps
                    )
                    results_dict = {'max_holding': max_holding,
                                    'trans_costs': trans_costs,
                                    'holding_costs': holding_costs,
                                    'holding_costs_rf': holding_costs_rf,
                                    'return': annual_ret,
                                    'std': annual_std,
                                    'turnover': annual_turnover}
                    # DataFrame.append was removed in pandas 2.0; build a
                    # one-row frame and concatenate instead (same result).
                    results = pd.concat([results, pd.DataFrame([results_dict])], ignore_index=True)
                    print(results.tail(1))
    self.gridsearch_df = results
    return results
def mpc_gammas_shortcons(self, gammas, constraints,
                         data, preds, covariances, n_preds=15, port_val=1000,
                         start_weights=None, max_holding_rf=1.,
                         max_leverage=2.0, trans_costs=0.001,
                         holding_costs=0.0000, max_holding=0.2, eps=1e-6):
    """
    Backtest the MPC strategy for each risk-aversion gamma under each
    (short-constraint, max-drawdown) pair.

    Parameters
    ----------
    gammas : iterable
        Risk-aversion parameters to sweep (passed as gamma_0).
    constraints : iterable
        Pairs (short_cons, max_drawdown).
    data : object
        Exposes ``rets`` (returns DataFrame) and ``prices`` (with a
        'T-bills rf' column).

    Returns
    -------
    df : pd.DataFrame
        Portfolio-value curves, one column per gamma, stacked per constraint
        pair. Also stored as self.port_val_df.
    """
    df = pd.DataFrame()
    for constr in constraints:
        print(f'Backtesting for params {constr}')
        results = {f'gamma_{i}': [] for i in gammas}
        short_con = constr[0]
        max_drawdown = constr[1]
        for gamma in gammas:
            self.backtest_mpc(data.rets, preds, covariances, n_preds=n_preds, port_val=port_val,
                              start_weights=start_weights, max_drawdown=max_drawdown, max_leverage=max_leverage,
                              gamma_0=gamma, kappa1=trans_costs, rho2=holding_costs, max_holding=max_holding,
                              short_cons=short_con, eps=eps)
            results[f'gamma_{gamma}'] = self.port_val
        df_temp = pd.DataFrame(results)
        df_temp['short_cons'] = short_con
        df_temp['D_max'] = max_drawdown
        df_temp['timestamp'] = data.rets.index[-len(df_temp):]
        df_temp['T-bills rf'] = data.prices['T-bills rf'].iloc[-len(df_temp):].values
        # DataFrame.append was removed in pandas 2.0; concat keeps the same
        # (non-ignored) index behaviour as the old append.
        df = pd.concat([df, df_temp])
    self.port_val_df = df
    return df
def mpc_shortcons(self, constraints,
                  data, preds, covariances, n_preds=15, port_val=1000,
                  start_weights=None, max_holding_rf=1.,
                  max_leverage=2.0, trans_costs=0.001,
                  holding_costs=0.0000, max_holding=0.2, eps=1e-6):
    """
    Backtest the MPC strategy once per (short-constraint, max-drawdown) pair.

    Returns a DataFrame with one portfolio-value column per pair, plus the
    matching timestamps and T-bill prices; also stored as self.port_val_df.
    """
    curves = {}
    for constr in constraints:
        print(f'Backtesting for params {constr}')
        short_con, max_drawdown = constr[0], constr[1]
        # Run the full MPC backtest; it leaves the value curve on self.port_val.
        self.backtest_mpc(data.rets, preds, covariances, n_preds=n_preds, port_val=port_val,
                          start_weights=start_weights, max_drawdown=max_drawdown, max_leverage=max_leverage,
                          gamma_0=5, kappa1=trans_costs, rho2=holding_costs, max_holding=max_holding,
                          short_cons=short_con, eps=eps)
        curves[f'{constr[0]}_{constr[1]}'] = self.port_val
    df = pd.DataFrame(curves)
    # Align timestamps and the risk-free price series with the curve length.
    df['timestamp'] = data.rets.index[-len(df):]
    df['T-bills rf'] = data.prices['T-bills rf'].iloc[-len(df):].values
    self.port_val_df = df
    return df
def backtest_equal_weighted(self, df_rets, rebal_freq='M', port_val=1000, use_weights=None, start_weights=None):
"""
Backtest an equally weighted portfolio, with specified rebalancing frequency.
Parameters
----------
df_rets : DataFrame of shape (n_samples, n_assets)
Historical returns for each asset i. Cash must be at the last column position.
rebal_freq : int, default=20
Rebalance frequency. Default is 20, i.e monthly.
port_val : float, default=1000
Starting portfolio value.
start_weights : ndarray of shape (n_assets,)
Current (known) portfolio weights at the start of backtest. Default is 100% allocation to cash.
Cash must be the last column in df_rets.
"""
self.port_val = np.array([0, port_val])
self.n_assets = df_rets.shape[1]
if np.any(use_weights) == None:
use_weights = np.array([1 / self.n_assets] * self.n_assets) # Vector of shape (n_assets,)
if start_weights == None: # Standard init with 100% allocated to cash
start_weights = np.zeros(self.n_assets)
start_weights[-1] = 1.
else:
start_weights = start_weights
weights = start_weights
trade_cost, turnover = [], []
# Group data into months - average sample size is 20
# Then for each month loop over the daily returns and update weights
# The problem is recursive and thus requires looping done this way
for month_dt, df_group in tqdm.tqdm(df_rets.groupby(pd.Grouper(freq=rebal_freq))):
# Compute transaction costs for each month. Subtracted from gross ret the first of the month
delta_weights = use_weights - weights
trans_cost = self.transaction_costs(delta_weights)
weights = use_weights # Reset weights
for day in range(len(df_group)):
# Calculate gross returns for portfolio and append it
if day == 0:
gross_ret = (1 + df_group.iloc[day]) * (1-trans_cost)
else:
gross_ret = 1 + df_group.iloc[day]
new_port_val = weights @ gross_ret * self.port_val[-1]
self.port_val = np.append(self.port_val, new_port_val)
new_w = gross_ret * weights
new_w /= new_w.sum() # Weights sum to 1
weights = new_w # Update weights each iteration
trade_cost.append(trans_cost)
turnover.append(np.linalg.norm(delta_weights, ord=1) / 2) # Half L1 norm
self.port_val = self.port_val[1:] # Throw away first observation since it is artificially set to zero
# Annualized average trading ost
self.trans_cost = np.array(trade_cost)
self.annual_trans_cost = 12 / len(self.trans_cost) * self.trans_cost.sum()
# Compute average annualized portfolio turnover
self.monthly_turnover = np.array(turnover)
self.annual_turnover = 12 / len(self.monthly_turnover) * self.monthly_turnover.sum()
def short_costs(self, weights, rf_return):
"""
Compute shorting costs, assuming a fee equal to the risk-free asset is paid.
"""
weights_no_rf = weights[:-1] # Remove risk-free asset from array
short_weights = weights_no_rf[weights_no_rf < 0.0].sum() # Sum of all port weights below 0.0
return -short_weights * rf_return
def transaction_costs(self, delta_weights, trans_cost=0.001):
"""
Compute transaction costs. Assumes no costs in risk-free asset and equal cost to
buying and selling assets.
"""
delta_weights = delta_weights[:-1] # Remove risk-free asset as it doesn't have trading costs
delta_weights = np.abs(delta_weights).sum() # abs since same price for buying/selling
return delta_weights * trans_cost
def asset_metrics(self, df_prices):
"""Compute performance metrics for a given portfolio/asset"""
df_ret = df_prices.pct_change().dropna()
n_years = len(df_ret) / 252
# Get regular cagr and std
ret = df_ret.drop('T-bills rf', axis=1)
cagr = ((1 + ret).prod(axis=0)) ** (1 / n_years) - 1
std = ret.std(axis=0, ddof=1) * np.sqrt(252)
# Compute metrics in excess of the risk-free asset
excess_ret = df_ret.subtract(df_ret['T-bills rf'], axis=0).drop('T-bills rf', axis=1)
excess_cagr = ((1 + excess_ret).prod(axis=0)) ** (1 / n_years) - 1
excess_std = excess_ret.std(axis=0 ,ddof=1) * np.sqrt(252)
sharpe = excess_cagr / excess_std
df_prices = df_prices.drop('T-bills rf', axis=1)
peaks = df_prices.cummax(axis=0)
drawdown = -(df_prices - peaks) / peaks
max_drawdown = drawdown.max(axis=0)
calmar = excess_cagr / max_drawdown
metrics = {'return': cagr,
'std': std,
'excess_return': excess_cagr,
'excess_std': excess_std,
'sharpe': sharpe,
'max_drawdown': max_drawdown,
'calmar_ratio': calmar}
metrics = pd.DataFrame(metrics)
return metrics
def single_port_metric(self, df_prices, port_val, compare_assets=False):
"""Compute performance metrics for a given portfolio/asset"""
# Merge port_val with data
df_prices = df_prices.iloc[-len(port_val):]
df_prices['port_val'] = port_val
df_prices.dropna(inplace=True)
df_ret = df_prices.pct_change().dropna()
# Annual returns, std
n_years = len(port_val) / 252
excess_ret = df_ret['port_val'] - df_ret['T-bills rf']
excess_cagr = ((1+excess_ret).prod())**(1/n_years) - 1
excess_std = excess_ret.std(ddof=1) * np.sqrt(252)
sharpe = excess_cagr / excess_std
# Drawdown
peaks = np.maximum.accumulate(port_val)
drawdown = -(port_val-peaks) / peaks
max_drawdown = np.max(drawdown)
max_drawdown_end = np.argmax(drawdown)
max_drawdown_beg = np.argmax(port_val[:max_drawdown_end])
drawdown_dur = max_drawdown_end - max_drawdown_beg # TODO not showing correct values
calmar = excess_cagr / max_drawdown
metrics = {'excess_return': excess_cagr,
'excess_std': excess_std,
'sharpe': sharpe,
'max_drawdown': max_drawdown,
'max_drawdown_dur': drawdown_dur,
'calmar_ratio': calmar}
return metrics
def mulitple_port_metrics(self, df_port_val):
"""Compute performance metrics for a given portfolio/asset"""
df = pd.DataFrame()
for type, df_groupby in df_port_val.groupby(['short_cons', 'D_max']):
df_prices = df_groupby.drop(columns=['short_cons', 'D_max', 'timestamp'])
df_rets = df_prices.pct_change().dropna()
# Annual returns, std
n_years = len(df_rets) / 252
ret = df_rets.drop('T-bills rf', axis=1)
cagr = ((1 + ret).prod(axis=0)) ** (1 / n_years) - 1
std = ret.std(axis=0, ddof=1) * np.sqrt(252)
excess_ret = df_rets.subtract(df_rets['T-bills rf'], axis=0).drop('T-bills rf', axis=1)
excess_cagr = ((1 + excess_ret).prod(axis=0)) ** (1 / n_years) - 1
excess_std = excess_ret.std(axis=0 ,ddof=1) * np.sqrt(252)
sharpe = excess_cagr / excess_std
df_prices = df_prices.drop('T-bills rf', axis=1)
peaks = df_prices.cummax(axis=0)
drawdown = -(df_prices - peaks) / peaks
max_drawdown = drawdown.max(axis=0)
"""
max_drawdown_end = np.argmax(drawdown, axis=0)
max_drawdown_beg = np.argmax(drawdown[:max_drawdown_end], axis=0)
drawdown_dur = max_drawdown_end - max_drawdown_beg # TODO not showing correct values
"""
calmar = excess_cagr / max_drawdown
metrics = {'return': cagr,
'std': std,
'excess_return': excess_cagr,
'excess_std': excess_std,
'sharpe': sharpe,
'max_drawdown': max_drawdown,
'calmar_ratio': calmar}
df_temp = pd.DataFrame(metrics)
df_temp['short_cons'] = type[0]
df_temp['D_max'] = type[1]
df = df.append(df_temp)
return df
    def plot_port_val(self, data, mpc_val, equal_w_val, start=None, savefig=None):
        """Plot MPC vs equally-weighted (1/n) portfolio values, rebased to 100.

        Parameters
        ----------
        data : DataFrame
            Price data whose index supplies the x-axis; trimmed to the length
            of ``mpc_val``. NOTE(review): mutated in place via ``dropna``.
        mpc_val, equal_w_val : array-like
            Portfolio value paths for the MPC and 1/n strategies.
        start : index label, optional
            If given, plotting starts at this label.
        savefig : str, optional
            File name; when set the figure is saved under ./images/.
        """
        # Prepare data
        equal_w_val = equal_w_val[-len(mpc_val):]
        data.dropna(inplace=True)
        data = data.iloc[-len(mpc_val):]
        data['MPC'] = mpc_val
        data['1/n'] = equal_w_val
        data = data[['MPC', '1/n']]  # Drop all other cols
        if not start == None:  # NOTE(review): prefer `start is not None`
            data = data.loc[start:]
        # Rebase both series to 100 at the first plotted observation.
        data = data / data.iloc[0] * 100
        # Plotting
        plt.rcParams.update({'font.size': 15})
        fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, figsize=(15,10))
        ax.plot(data.index, data)
        # ax[0].set_yscale('log')
        ax.set_ylabel('$P_t$')
        plt.tight_layout()
        if not savefig == None:  # NOTE(review): prefer `savefig is not None`
            plt.savefig('./images/' + savefig)
plt.show() | python |
Backtester for Hidden Markov Models.
Parameters
----------
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
| 18 | 0.569317 | 176 | 44.842857 | 490 | class |
class DriveOpen:
    """ Context manager for generically opening drive filepaths
    """
    def __init__(self, filepath, mode='wb'):
        # Drive paths are kept as-is; plain paths are opened eagerly here,
        # so a bad local path fails at construction time rather than __enter__.
        self.is_drive = (type(filepath) is GoogleDrivePath)
        self.drive_path = filepath if self.is_drive else open(filepath, mode=mode)
        self.mode = mode
    def __enter__(self):
        # For drive reads, expose the downloaded buffer instead of the path
        # object so callers can treat both backends uniformly.
        if self.mode == 'rb' and self.is_drive:
            self.read_buffer = self.drive_path.read()
            return self.read_buffer
        return self.drive_path
    def __exit__(self, exc_type, exc_value, traceback):
        # Close local files; for drive reads close the download buffer.
        # NOTE(review): drive writes ('wb') get no cleanup here — confirm the
        # GoogleDrivePath object needs none in that mode.
        if not self.is_drive:
            self.drive_path.close()
        elif self.mode == 'rb':
            self.read_buffer.close() | python | Context manager for generically opening drive filepaths
| 12 | 0.592163 | 82 | 33.5 | 20 | class |
class NeuralNetwork:
    """
    Defines a neural network with one hidden layer to do binary classification
    """
    def __init__(self, nx, nodes):
        """
        Constructor method
        ------------------
        nx: it's the number of input features to the neuron
        nodes: it's the number of nodes found in the hidden layer
        W1: The weights vector for the hidden layer. Upon instantiation, it
        should be initialized using a random normal distribution.
        b1: The bias for the hidden layer. Upon instantiation, it should be
        initialized with 0’s.
        A1: The activated output for the hidden layer. Upon instantiation, it
        should be initialized to 0.
        W2: The weights vector for the output neuron. Upon instantiation, it
        should be initialized using a random normal distribution.
        b2: The bias for the output neuron. Upon instantiation, it should be
        initialized to 0.
        A2: The activated output for the output neuron (prediction). Upon
        instantiation, it should be initialized to 0.
        """
        # Validate both hyper-parameters before allocating any arrays.
        if type(nx) is not int:
            raise TypeError('nx must be an integer')
        if nx < 1:
            raise ValueError('nx must be a positive integer')
        if type(nodes) is not int:
            raise TypeError('nodes must be an integer')
        if nodes < 1:
            raise ValueError('nodes must be a positive integer')
        # Hidden layer: weights drawn from a standard normal, zero biases.
        self.W1 = np.random.randn(nodes, nx)
        self.b1 = np.zeros((nodes, 1))
        self.A1 = 0
        # Output neuron: standard-normal weights, scalar zero bias.
        self.W2 = np.random.randn(1, nodes)
        self.b2 = 0
        self.A2 = 0 | python |
Defines a neural network with one hidden layer to do binary classification
| 11 | 0.605583 | 78 | 41.282051 | 39 | class |
class Range:
    """Immutable representation of PostgreSQL `range` type."""
    __slots__ = '_lower', '_upper', '_lower_inc', '_upper_inc', '_empty'
    def __init__(self, lower=None, upper=None, *,
                 lower_inc=True, upper_inc=False,
                 empty=False):
        # An empty range has no bounds at all, regardless of the other args.
        self._empty = empty
        if empty:
            self._lower = self._upper = None
            self._lower_inc = self._upper_inc = False
        else:
            self._lower = lower
            self._upper = upper
            # An infinite bound (None) can never be inclusive.
            self._lower_inc = lower is not None and lower_inc
            self._upper_inc = upper is not None and upper_inc
    @property
    def lower(self):
        # Lower bound value; None when unbounded or empty.
        return self._lower
    @property
    def lower_inc(self):
        # True when the lower bound is included in the range.
        return self._lower_inc
    @property
    def lower_inf(self):
        # True when the range extends to negative infinity (not for empty).
        return self._lower is None and not self._empty
    @property
    def upper(self):
        # Upper bound value; None when unbounded or empty.
        return self._upper
    @property
    def upper_inc(self):
        # True when the upper bound is included in the range.
        return self._upper_inc
    @property
    def upper_inf(self):
        # True when the range extends to positive infinity (not for empty).
        return self._upper is None and not self._empty
    @property
    def isempty(self):
        return self._empty
    def _issubset_lower(self, other):
        # True when self's lower bound is at or above other's, honoring
        # inclusivity when the bound values coincide.
        if other._lower is None:
            return True
        if self._lower is None:
            return False
        return self._lower > other._lower or (
            self._lower == other._lower
            and (other._lower_inc or not self._lower_inc)
        )
    def _issubset_upper(self, other):
        # Mirror of _issubset_lower for the upper bound.
        if other._upper is None:
            return True
        if self._upper is None:
            return False
        return self._upper < other._upper or (
            self._upper == other._upper
            and (other._upper_inc or not self._upper_inc)
        )
    def issubset(self, other):
        # The empty range is a subset of everything; nothing non-empty is a
        # subset of the empty range.
        if self._empty:
            return True
        if other._empty:
            return False
        return self._issubset_lower(other) and self._issubset_upper(other)
    def issuperset(self, other):
        return other.issubset(self)
    def __bool__(self):
        # A range is truthy unless it is empty.
        return not self._empty
    def __eq__(self, other):
        if not isinstance(other, Range):
            return NotImplemented
        return (
            self._lower,
            self._upper,
            self._lower_inc,
            self._upper_inc,
            self._empty
        ) == (
            other._lower,
            other._upper,
            other._lower_inc,
            other._upper_inc,
            other._empty
        )
    def __hash__(self):
        # Hash over the same tuple used by __eq__, keeping the two consistent.
        return hash((
            self._lower,
            self._upper,
            self._lower_inc,
            self._upper_inc,
            self._empty
        ))
    def __repr__(self):
        # Renders PostgreSQL-style bounds: '[' / ']' inclusive, '(' / ')'
        # exclusive or infinite; the empty range renders as 'empty'.
        if self._empty:
            desc = 'empty'
        else:
            if self._lower is None or not self._lower_inc:
                lb = '('
            else:
                lb = '['
            if self._lower is not None:
                lb += repr(self._lower)
            if self._upper is not None:
                ub = repr(self._upper)
            else:
                ub = ''
            if self._upper is None or not self._upper_inc:
                ub += ')'
            else:
                ub += ']'
            desc = '{}, {}'.format(lb, ub)
        return '<Range {}>'.format(desc)
    __str__ = __repr__ | python | Immutable representation of PostgreSQL `range` type.
class Barcode:
    """
    A class used to represent a barcode.
    Attributes
    ----------
    info : str
        decoded barcode value
    type : int
        a type of barcode (e.g. EAN-13)
    points : numpy.array
        vertices of barcode rectangle
    Methods
    -------
    Draw(image)
        Draws barcode's rectangle and its value to the given image.
    """
    def __init__(self, binfo, btype, points):
        self.info = binfo
        self.type = btype
        self.points = points
    def __str__(self):
        return str(self.info) + " " + str(self.type)
    def Draw(self, image):
        # Opposite corners of the bounding quadrilateral (points[0]/points[2]);
        # drawn in blue (BGR) with the decoded value as a label.
        p1 = np.array(self.points[0], dtype=int)
        p2 = np.array(self.points[2], dtype=int)
        cv2.rectangle(image, p1, p2, (255, 0, 0))
        cv2.putText(image, "{}".format(self.info), p2, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1, cv2.LINE_AA) | python |
A class used to represent a barcode.
Attributes
----------
info : str
decoded barcode value
type : int
a type of barcode (e.g. EAN-13)
points : numpy.array
vertices of barcode rectangle
Methods
-------
Draw(image)
Draws barcode's rectangle and its value to the given image.
| 11 | 0.557471 | 114 | 26.21875 | 32 | class |
class Colour:
    """
    The colour class - used to unify all representations of colour as needed
    by third-party modules.
    This class also switches the colour around to fit the theme of the code jam.
    Parameters
    ----------
    colour: int or str
        The colour inputted (given by the text box Entry)
    All examples are with the Colour initialised with Colour("15715755")
    Attributes
    ----------
    fake_colour: str
        The colour in hex before reformatting
        e.g. "efcdab"
    r: str
        The amount of red in hex format.
        e.g. "ab"
    g: str
        The amount of green in hex format.
        e.g. "cd"
    b: str
        The amount of blue in hex format.
        e.g. "ef"
    colour: str
        The colour in hex after the format is switched.
        e.g. "abcdef"
    as_hex: str
        The colour prefixed with #
        This is the most common way to represent a colour, and the main one
        used by TK/TCL.
        e.g. "#abcdef"
    as_int: int
        The colour in an integer with the hex converted into denary.
        e.g. 11259375
    as_rgb: tuple[int]
        The colour in an (r, g, b) tuple.
        e.g. (171, 205, 239)
    Methods
    -------
    from_rgb: classmethod
        Creates class from an (r, g, b) tuple.
    """
    def __init__(self, colour: typing.Union[str, int]):
        # Reject non-numeric input as TypeError, out-of-range as ValueError.
        try:
            int(colour)
        except ValueError:
            raise TypeError
        if int(colour) not in range(16_777_216):  # 0x000000 .. 0xFFFFFF
            raise ValueError
        # Zero-pad to six hex digits, then swap byte order (BGR -> RGB).
        self.fake_colour = hex(int(colour))[2:]
        self.fake_colour = "0" * (6 - len(self.fake_colour)) + self.fake_colour
        self.b = self.fake_colour[0:2]
        self.g = self.fake_colour[2:4]
        self.r = self.fake_colour[4:6]
        self.colour = self.r + self.g + self.b
        self.as_hex = "#" + self.colour
        self.as_int = int(self.colour, 16)
    @property
    def as_rgb(self):
        return (int(self.r, 16), int(self.g, 16), int(self.b, 16))
    @classmethod
    def from_rgb(cls, colour: typing.Tuple[int, int, int]):
        # NOTE(review): hex(x)[2:] drops leading zeros, so components < 0x10
        # produce a misaligned string (e.g. (1, 2, 3) -> "321"); the round
        # trip through __init__ then splits the digits at the wrong places.
        # Verify with callers before relying on this for small components.
        r, g, b = map(lambda x: hex(x)[2:], colour)
        fake = b + g + r
        fake_int = int(fake, 16)
        return cls(fake_int) | python |
The colour class - used to unify all representations of colour as needed
by third-party modules.
This class also switches the colour around to fit the theme of the code jam.
Parameters
----------
colour: int or str
The colour inputted (given by the text box Entry)
All examples are with the Colour initialised with Colour("15715755")
Attributes
----------
fake_colour: str
The colour in hex before reformatting
e.g. "efcdab"
r: str
The amount of red in hex format.
e.g. "ab"
g: str
The amount of green in hex format.
e.g. "cd"
b: str
The amount of blue in hex format.
e.g. "ef"
colour: str
The colour in hex after the format is switched.
e.g. "abcdef"
as_hex: str
The colour prefixed with #
This is the most common way to represent a colour, and the main one
used by TK/TCL.
e.g. "#abcdef"
as_int: int
The colour in an integer with the hex converted into denary.
e.g. 11259375
as_rgb: tuple[int]
The colour in an (r, g, b) tuple.
e.g. (171, 205, 239)
Methods
-------
from_rgb: classmethod
Creates class from an (r, g, b) tuple.
| 14 | 0.559695 | 80 | 27.576923 | 78 | class |
class WhatsappSession:
    """Wrapper around the Whatsapp class to remember state and do background scraping"""
    _LOG = logging.getLogger(__name__) if False else None  # NOTE(review): placeholder removed
    def __init__(self, n_chats=2):
        self.started_time = time.time()
        self.w = Whatsapp(screenshot_folder="/tmp")
        self._last_qr: str = None       # last QR payload shown to the user
        self.links = None               # None until scraping starts, then a list
        self.lock = threading.Lock()    # guards links/status/_progress/_message
        self._thread: Thread = None
        self.status: str = "NOTSTARTED"
        self._progress: int = None      # 0-100 once scraping starts
        self._message: str = None
        self.n_chats: int = n_chats     # how many chats to scrape
    def get_qr(self) -> str:
        """Go to whatsapp web and get the QR code"""
        self._last_qr = self.w.get_qr()
        return self._last_qr
    def get_qr_status(self) -> dict:
        """Check if the user logged in and/or if a new QR code is displayed"""
        if self.w.is_qr_scanned():
            return {"status": "READY"}
        try:
            qr = self.w.get_qr()
        except TimeoutException:
            # Check if the app was loading the ready screen and is ready now
            if self.w.is_qr_scanned():
                return {"status": "READY"}
            raise
        # Unchanged QR means the client should keep waiting; a new QR must be
        # pushed to the client so the user can rescan.
        if qr == self._last_qr:
            return {"status": "WAITING"}
        else:
            self._last_qr = qr
            return {"status": "REFRESH", "qr": qr}
    def do_scrape(self):
        """Run the scrape, updating status/progress; quits the browser at the end."""
        logging.info("Starting scraper")
        with self.lock:
            # Guard against double-starting the scrape.
            if self.links is not None:
                raise ValueError("Scraping already in progress")
            self.links = []
            self.status = "STARTED"
            self._progress = 0
        try:
            self._do_scrape()
        except Exception as e:
            logging.exception("Error in scraper thread")
            with self.lock:
                self.status = "ERROR"
                self._message = str(e)
                self._progress = 0
        else:
            logging.info("Done!")
            with self.lock:
                self.status = "DONE"
                self._message = f"Done, found {len(self.links)} in total"
                self._progress = 100
        finally:
            # The browser is single-use: always quit and drop the handle.
            self.w.quit_browser()
            self.w = None
    def _do_scrape(self):
        # Small settle delay before enumerating chats.
        time.sleep(3)
        for i, chat in enumerate(self.w.get_all_chats()):
            if i >= self.n_chats:
                break
            msg = f"Scraping contact {i + 1}/{self.n_chats}: {chat.text} [{len(self.links)} links found so far]"
            logging.info(msg)
            with self.lock:
                self._progress = round(i * 100 / self.n_chats)
                self._message = msg
            # Collect outside the lock (slow I/O), then append under it.
            links = list(self.w.get_links_per_chat(chat))
            with self.lock:
                self.links += links
    def get_progress(self):
        """Thread-safe snapshot of status, progress and message."""
        with self.lock:
            return dict(status=self.status, progress=self._progress, message=self._message)
    def start_scraping(self):
        """Kick off do_scrape on a background thread."""
        self._thread = threading.Thread(target=self.do_scrape)
        logging.info("Starting thread")
        self._thread.start() | python | Wrapper around the Whatsapp class to remember state and do background scraping
class TODModeSet:
    """
    The TODModeSet combines three pieces of information:
    - det_uid, a (n_det,) array.
    - weights, an (n_det,n_modes) array.
    - modes, an (n_modes,n_samp) array.
    """
    def __init__(self, det_uid, shape=None, dtype=None):
        # shape, when given, is (n_modes, n_samp); weights are then allocated
        # as (n_det, n_modes) zeros to match.
        self.det_uid = det_uid
        if shape is not None:
            if len(shape) != 2:
                raise ValueError('Expected shape=(n_modes, n_samp)')
            self.modes = np.zeros(shape, dtype)
            self.weights = np.zeros((len(self.det_uid), self.modes.shape[0]))
    @classmethod
    def from_fits_file(cls, filename):
        """Load a mode set from a FITS file written by to_fits_file."""
        def extract_table(sdb, keyfmt, dtype=None):
            # Gather numbered columns (keyfmt % 0, % 1, ...) into a 2-D array.
            count = 0
            while True:
                if (keyfmt % count) not in sdb.dtype.names:
                    break
                count += 1
            if dtype is None:
                dtype = sdb[keyfmt % 0].dtype
            output = np.zeros((count, len(sdb)), dtype)
            for i in range(count):
                output[i,:] = sdb[keyfmt%i]
            return output
        # HDU 1 carries det_uid + per-detector weights; HDU 2 the modes.
        data1 = moby2.util.StructDB.from_fits_table(filename, index=1)
        data2 = moby2.util.StructDB.from_fits_table(filename, index=2)
        self = cls(det_uid=data1['det_uid'])
        self.weights = extract_table(data1, 'weight%i').transpose()
        self.modes = extract_table(data2, 'mode%i')
        return self
    def to_fits_file(self, filename=None):
        """Write the mode set as FITS; returns the HDUList either way.

        NOTE(review): `clobber=` was removed from astropy in favour of
        `overwrite=` — confirm the astropy version pinned by this project.
        """
        prihdr = fits.Header()
        n_modes, n_samp = self.modes.shape
        prihdr['n_modes'] = n_modes
        prihdu = fits.PrimaryHDU(header=prihdr)
        tb0 = moby2.util.StructDB.from_data(
            [('det_uid', self.det_uid)] + [
                ('weight%i'%i, self.weights[:,i]) for i in range(n_modes)]
            ).to_fits_table()
        tb1 = moby2.util.StructDB.from_data(
            [('mode%i'%i, self.modes[i]) for i in range(n_modes)]
            ).to_fits_table()
        hdulist = fits.HDUList([prihdu, tb0, tb1])
        if filename is not None:
            hdulist.writeto(filename, clobber=True)
        return hdulist
    @classmethod
    def from_hdf(cls, target):
        """Load a mode set from an HDF5 group written by to_hdf."""
        cls.check_class(target, 'tod_modeset', 1)
        self = cls(det_uid=target['det_uid'])
        self.weights = np.array(target['weights'])
        self.modes = np.array(target['modes'])
        return self
    def to_hdf(self, target):
        """Write det_uid/weights/modes into an HDF5 group (gzip-compressed)."""
        kw = {'compression': 'gzip'}
        target.create_dataset('det_uid', data=self.det_uid.astype('uint32'), **kw)
        target.create_dataset('weights', data=self.weights.astype('float32'), **kw)
        target.create_dataset('modes', data=self.modes.astype('float32'), **kw)
        # NOTE(review): `cls` is not defined in this instance method — this
        # line raises NameError when reached; likely meant `self.set_class`.
        cls.set_class(target, 'tod_modeset', 1)
    def get_tod(self, dets=None, dtype=None, mode_idx=None):
        """
        Return weights dot modes for the desired dets.
        """
        if dets is None:
            dets = list(range(0, self.weights.shape[0]))
        if mode_idx is None:
            mode_idx = list(range(0, len(self.modes)))
        # Scalar det index: return a single (n_samp,) vector.
        if np.asarray(dets).ndim == 0:
            return np.dot(self.weights[dets,mode_idx], self.modes[mode_idx])
        output = np.empty((len(dets), len(self.modes[0])), dtype=dtype)
        for j,i in enumerate(dets):
            output[j,:] = np.dot(self.weights[i,mode_idx], self.modes[mode_idx])
        return output
    def remove_modes(self, target, dets=None):
        """Subtract the modeled modes from `target` in place via libactpol."""
        if dets is None:
            dets = range(0, self.weights.shape[0])
        # libactpol expects amplitudes as a C-contiguous (n_modes, n_det) array.
        amps = np.array(np.transpose(self.weights), order='C')
        if self.modes.dtype == np.float64:
            moby2.libactpol.remove_modes64(
                target, np.array(dets).astype('int32'), self.modes, amps)
        elif self.modes.dtype == np.float32:
            moby2.libactpol.remove_modes(
                target, np.array(dets).astype('int32'), self.modes, amps)
        else:
            raise ValueError('Fast mode removal only supported for '
                             'self.modes.dtype float32 and float64.') | python |
The TODModeSet combines three pieces of information:
- det_uid, a (n_det,) array.
- weights, an (n_det,n_modes) array.
- modes, an (n_modes,n_samp) array.
| 16 | 0.561311 | 83 | 40.635417 | 96 | class |
class AnnotatedSpan:
    """
    An HTML-like annotation applied to a span of offsets.
    The label is the primary label to be applied to the region.
    Additionally, key-value metadata (attributes) can be applied.
    When rendered as HTML, the primary label will become the tag and the metadata will
    becomes attributes.
    """
    # attrs-style declarations: label/span are validated, attributes is an
    # immutable mapping (empty by default).
    label: str = attrib(validator=instance_of(str))
    span: Span = attrib(validator=instance_of(Span))
    attributes: Mapping[str, str] = attrib(
        default=immutabledict(), converter=immutabledict
    )
    @staticmethod
    def create_div_of_class(span: Span, clazz: str) -> "AnnotatedSpan":
        # Convenience constructor: <div class="clazz"> over the span.
        return AnnotatedSpan(DIV, span, {"class": clazz})
    @staticmethod
    def create_span_of_class(span: Span, clazz: str) -> "AnnotatedSpan":
        # Convenience constructor: <span class="clazz"> over the span.
        return AnnotatedSpan(SPAN, span, {"class": clazz}) | python |
An HTML-like annotation applied to a span of offsets.
The label is the primary label to be applied to the region.
Additionally, key-value metadata (attributes) can be applied.
When rendered as HTML, the primary label will become the tag and the metadata will
becomes attributes.
| 12 | 0.682353 | 86 | 34.458333 | 24 | class |
class ProgressBar:
    """Implement a console progress bar into a processing loop.
    Args:
        total_values (int, optional): Total number of iterations.
            Defaults to 25.
        bar_len (int, optional): Complete length of the progress bar, in chars.
            Defaults to 25
        symbol (str, optional): The symbol which is used to track progress.
            Defaults to ``'.'``.
        color (str, optional): Colour of the progress bar; where only the first
            letter of the colour is required.
            Options are: red, green, yellow, blue, magenta, cyan, white.
            Defaults to 'w' (white).
    :Design:
        This is a simple console progress bar which should be called
        **inside** a processing loop.
        On instantiation, you can pass in the bar colour, length and
        symbol parameters if you want to configure the appearance a
        little bit.
    :Colour Options:
        red, green, yellow, blue, magenta, cyan, white
    :Example:
        You might implement the progress bar in a loop like this::
            >>> import time
            >>> from utils4.progressbar import ProgressBar
            >>> pb = ProgressBar(total_values=25,
                                 bar_len=25,
                                 symbol='#',
                                 color='red')
            >>> for i range(26):
            >>>     # < some processing >
            >>>     pb.update_progress(current=i)
            >>>     # Optional pause to see updates.
            >>>     time.sleep(.1)
            Processing 25 of 25 [ ......................... ] 100% Complete
    """
    def __init__(self, total_values: int=25, bar_len: int=25, symbol: str='.', color: str='w'):
        """Progress bar class initialiser."""
        self._total = total_values
        self._bar_len = bar_len
        self._symbol = symbol
        self._color = color
        self._len = len(str(self._total))  # zero-pad width for the counter
        self._rst = '\x1b[0m'              # ANSI reset sequence
        self._clr = self._getcolor()
    def update_progress(self, current: int):  # pragma: nocover
        """Incrementally update the progress bar.
        Args:
            current (int): Index value for the current iteration.
                This value is compared against the initialised ``total_values``
                parameter to determine the current position in the overall
                progress.
        :Example:
            Refer to the :class:`~ProgressBar` class docstring.
        """
        # Calculate percent complete.
        percent = float(current) / self._total
        # Number of ticks.
        ticks = self._symbol * int(round(percent * self._bar_len))
        # Number of space placeholders.
        spaces = ' ' * (self._bar_len - len(ticks))
        # \r rewrites the same console line on every call.
        msg = (f'{self._clr}'
               f'\rProcessing {str(current).zfill(self._len)} of {self._total} [ {ticks+spaces} ] '
               f'{percent*100:.0f}% Complete{self._rst}')
        sys.stdout.write(msg)
        sys.stdout.flush()
    def _getcolor(self) -> str:
        """Create ANSI colour escape sequence to user's colour.
        Returns:
            str: ANSI escape sequence string for the user's colour.
        """
        # First letter of the colour selects the ANSI foreground code;
        # ';40m' fixes the background to black.
        clrs = {'r': 31, 'g': 32, 'y': 33, 'b': 34, 'm': 35, 'c': 36, 'w': 37}
        seq = f'\033[{clrs.get(self._color[0])};40m'
        return seq | python | Implement a console progress bar into a processing loop.
Args:
total_values (int, optional): Total number of iterations.
Defaults to 25.
bar_len (int, optional): Complete length of the progress bar, in chars.
Defaults to 25
symbol (str, optional): The symbol which is used to track progress.
Defaults to ``'.'``.
color (str, optional): Colour of the progress bar; where only the first
letter of the colour is required.
Options are: red, green, yellow, blue, magenta, cyan, white.
Defaults to 'w' (white).
:Design:
This is a simple console progress bar which should be called
**inside** a processing loop.
On instantiation, you can pass in the bar colour, length and
symbol parameters if you want to configure the appearance a
little bit.
:Colour Options:
red, green, yellow, blue, magenta, cyan, white
:Example:
You might implement the progress bar in a loop like this::
>>> import time
>>> from utils4.progressbar import ProgressBar
>>> pb = ProgressBar(total_values=25,
bar_len=25,
symbol='#',
color='red')
>>> for i range(26):
>>> # < some processing >
>>> pb.update_progress(current=i)
>>> # Optional pause to see updates.
>>> time.sleep(.1)
Processing 25 of 25 [ ......................... ] 100% Complete
| 15 | 0.541047 | 99 | 35.16129 | 93 | class |
class CommandStack:
    """
    Stack of command tokens that can be navigated forward and backward with undo/redo
    """
    # Class-level (shared) state: the class is used as a singleton/static
    # container, never instantiated.
    stack = list()
    nextIndex = 0   # index where the next command will be placed
    maxIndex = 0    # highest index reachable via redo
    @staticmethod
    def setTaskTree(taskTree):
        """
        Set the database on which commands will act
        """
        CommandStack.taskTree = taskTree
    @staticmethod
    def push(token, inredo):
        """
        Add a new command token to the top of the stack
        """
        CommandStack.nextIndex += 1
        # A fresh command (not a redo) truncates the redo tail.
        if inredo == False:
            CommandStack.stack.insert(CommandStack.nextIndex - 1, token)
            CommandStack.maxIndex = CommandStack.nextIndex
    @staticmethod
    def pop():
        """
        Remove a command token from the top of the stack and return it
        """
        # The token stays in the list so redo can replay it; only the
        # cursor (nextIndex) moves back.
        token = CommandStack.stack[CommandStack.nextIndex - 1]
        CommandStack.nextIndex -= 1
        return token
    @staticmethod
    def undo():
        """
        Roll back the previous command if possible. Return 'True' if possible.
        """
        if CommandStack.nextIndex == 0:
            return False
        else:
            CommandStack.pop().undo()
            return True
    @staticmethod
    def redo():
        """
        Go forward from a previously undone command if possible. Return 'True' if possible.
        """
        if CommandStack.nextIndex == CommandStack.maxIndex:
            return False
        else:
            # Re-execute with inredo=True so push() only moves the cursor.
            CommandStack.stack[CommandStack.nextIndex].execute(True)
            return True | python |
Stack of command tokens that can be navigated forward and backward with undo/redo
| 14 | 0.574823 | 91 | 23.730159 | 63 | class |
class TodoCommand:
    """
    Class for 'todo' commands in todoshell
    """
    def __init__(self, task):
        self.task = task
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # Remember the assigned label so undo can delete the same task.
        self.label = CommandStack.taskTree.insertTask(self.task)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        CommandStack.taskTree.deleteTask(self.label) | python |
Class for 'todo' commands in todoshell
| 10 | 0.559211 | 64 | 18.869565 | 23 | class |
class TodosubCommand:
    """
    Class for 'todosub' commands in todoshell
    """
    def __init__(self, task, parentLabel):
        self.task = task
        self.parentLabel = parentLabel
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # Insert as a subtask of parentLabel; keep the label for undo.
        self.label = CommandStack.taskTree.insertTask(self.task, self.parentLabel)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        CommandStack.taskTree.deleteTask(self.label) | python |
Class for 'todosub' commands in todoshell
| 10 | 0.588346 | 82 | 21.208333 | 24 | class |
class DoneCommand:
    """
    Class for 'done' commands in todoshell
    """
    def __init__(self, label):
        self.label = label
    def execute(self, inredo=False):
        """
        Execute this command
        """
        CommandStack.taskTree.setDone(self.label)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        # Inverse operation: mark the same task not-done again.
        CommandStack.taskTree.setUndone(self.label) | python |
Class for 'done' commands in todoshell
| 9 | 0.555305 | 51 | 18.304348 | 23 | class |
class RemoveCommand:
    """
    Class for 'remove' commands in todoshell
    """
    def __init__(self, label):
        self.label = label
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # deleteTask returns a trace of what was removed so undo can restore it.
        self.trace = CommandStack.taskTree.deleteTask(self.label)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        CommandStack.taskTree.insertTrace(self.trace) | python |
Class for 'remove' commands in todoshell
| 10 | 0.567742 | 65 | 19.26087 | 23 | class |
class MoveUpCommand:
    """
    Class for 'move up' commands in todoshell
    """
    def __init__(self, label):
        self.label = label
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # The move may relabel the task; keep the new label for undo.
        self.newLabel = CommandStack.taskTree.moveTaskUp(self.label)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        CommandStack.taskTree.moveTaskDown(self.newLabel) | python |
Class for 'move up' commands in todoshell
| 10 | 0.572939 | 68 | 19.608696 | 23 | class |
class MoveDownCommand:
    """
    Class for 'move down' commands in todoshell
    """
    def __init__(self, label):
        self.label = label
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # The move may relabel the task; keep the new label for undo.
        self.newLabel = CommandStack.taskTree.moveTaskDown(self.label)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        CommandStack.taskTree.moveTaskUp(self.newLabel) | python |
Class for 'move down' commands in todoshell
| 10 | 0.57652 | 70 | 19.782609 | 23 | class |
class MoveTopCommand:
    """
    Class for 'move top' commands in todoshell
    """
    def __init__(self, label):
        self.label = label
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # moveTask to position 1 returns the new label plus the old position,
        # both needed to restore the original ordering on undo.
        (self.newLabel, self.oldPosition) = CommandStack.taskTree.moveTask(self.label, 1)
        CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
        CommandStack.taskTree.moveTask(self.newLabel, self.oldPosition) | python |
Class for 'move top' commands in todoshell
| 10 | 0.584314 | 89 | 21.217391 | 23 | class |
class MoveBottomCommand:
    """
    Class for 'move bottom' commands in todoshell
    """
    def __init__(self, label):
        self.label = label
    def execute(self, inredo=False):
        """
        Execute this command
        """
        # NOTE(review): the implementation is commented out, so execute() is a
        # no-op and never pushes onto the CommandStack — 'move bottom' is
        # effectively unimplemented and excluded from undo/redo history.
        # self.newLabel = CommandStack.taskTree.moveTaskBottom(self.label)
        # CommandStack.push(self, inredo)
    def undo(self):
        """
        Undo this command
        """
Class for 'move bottom' commands in todoshell
| 8 | 0.555814 | 74 | 19.52381 | 21 | class |
class Resolver:
    """Resolves system delta, validates system configuration.

    Given a target system specification, computes the delta against the
    currently deployed system (as reported by the per-resource-type
    resolvers) after validating naming conventions, schemas and
    dependencies.
    """

    _LOG = logging.getLogger(__name__)

    def __init__(self, *, resolvers_map: Dict[str, component.Resolver]):
        # Maps resource type identifier to the resolver handling that type.
        self._resolvers_map = resolvers_map
        self._validators = [
            validate.NameConventionValidator(),
            validate.NameUniquenessValidator()]

    def load_checked_delta(self, target: model.Spec) -> model.Delta:
        """Validate ``target`` and return the ordered delta against the current system."""
        target_descriptions = self._get_descriptions(target)
        assert len(target_descriptions) == len(target.specs)
        self._validate_target(target_descriptions)
        current = self.load_current()
        delta = self._build_delta(current, target)
        self._check_dependencies(
            current=current, target_descriptions=target_descriptions)
        self._order_delta(delta)
        return delta

    def load_current(self) -> model.Spec:
        """Return the spec of everything currently deployed, across all resolvers."""
        spec = model.Spec(specs=[])
        for resolver in self._resolvers_map.values():
            for name in resolver.system_list():
                spec.specs.append(resolver.system_get(name))
        return spec

    def _get_descriptions(self, target: model.Spec) -> List[model.Description]:
        """Describe every target spec item via its registered resolver.

        Raises:
            ValueError: if an item has no registered resolver or describing it fails.
        """
        descriptions = list()
        for spec in target.specs:
            try:
                assert spec.resource_type in self._resolvers_map, \
                    f"Resource type [{spec.resource_type}] does not have a corresponding registered resolver"
                description = self._resolvers_map[spec.resource_type].describe(spec)
                descriptions.append(description)
            except Exception as e:
                raise ValueError(f"Could not describe resource [{spec.full_name()}]: {str(e)}")
        return descriptions

    def _validate_target(self, target_descriptions: List[model.Description]) -> None:
        """Run all configured validators, then the schema checks, on the target."""
        for validator in self._validators:
            validator.validate_target(descriptions=target_descriptions)
        self._check_schema(target_descriptions)

    def _check_schema(self, descriptions: List[model.Description]) -> None:
        """Verify that every schema reference resolves and matches its definition."""
        schemas = self._load_schemas(descriptions)
        for description in descriptions:
            if description.spec.schema_name:
                expected = schemas.get(description.spec.schema_name)
                if not expected:
                    # BUG FIX: full_name is a method (called with parens in
                    # _get_descriptions); without the call the f-string
                    # rendered a bound-method repr instead of the name.
                    raise ValueError(f"Resource [{description.spec.full_name()}] "
                                     f"requires schema [{description.spec.schema_name}] which is not defined.")
                # TODO: Should compare different order of elements.
                if description.schema != expected:
                    raise ValueError(f"Resource [{description.spec.full_name()}] schema mismatch. "
                                     f"Expected [{expected}], actual [{description.schema}].")

    def _load_schemas(self, descriptions: List[model.Description]) -> Dict[str, model.SchemaParams]:
        """Collect schema definitions by name from the schema-type descriptions."""
        schemas: Dict[str, model.SchemaParams] = {}
        for desc in descriptions:
            if desc.spec.resource_type == model.RESOURCE_SCHEMA:
                assert desc.spec.name not in schemas, f"Duplicated schema name [{desc.spec.name}]"
                schemas[desc.spec.name] = desc.schema
        return schemas

    def _check_dependencies(self, *,
                            current: model.Spec,
                            target_descriptions: List[model.Description]) -> None:
        """Ensure every dependency declared by the target exists in the current system."""
        # TODO Consider resources added or removed with delta
        # TODO Stream and Table checks for UDFs
        for desc in target_descriptions:
            for dep in desc.depends:
                found = False
                for curr in current.specs:
                    if curr.resource_type == dep.resource_type \
                            and curr.name.lower() == dep.name.lower():
                        found = True
                        break
                if not found:
                    # BUG FIX: added the missing space before "which" -- the
                    # concatenated f-strings previously produced "[name]which
                    # was not found in the system".
                    raise ValueError(f"Resource {desc.spec.resource_type.capitalize()} [{desc.spec.name}] "
                                     f"depends on {dep.resource_type.capitalize()} [{dep.name}] "
                                     f"which was not found in the system")

    def _order_delta(self, delta: model.Delta) -> None:
        """Sort delta items into safe apply order (topics first, sinks last)."""
        orders = {
            model.RESOURCE_TOPIC: 1,
            model.RESOURCE_SCHEMA: 2,
            model.RESOURCE_SOURCE: 3,
            model.RESOURCE_TABLE: 4,
            model.RESOURCE_STREAM: 5,
            model.RESOURCE_SINK: 6}
        tuples = list()
        for item in delta.items:
            pos = orders.get(item.resource_type)
            assert pos, f"Order position not defined for {item.resource_type}"
            tuples.append((pos, item))
        tuples = sorted(tuples, key=lambda x: x[0])
        delta.items = [item[1] for item in tuples]

    def _build_delta(self, current: model.Spec, target: model.Spec) -> model.Delta:
        """Compute items in ``target`` that are new or changed relative to ``current``."""
        # System can have multiple items with the same name but different types.
        current_map: Dict[str, List[model.SpecItem]] = {}
        for spec in current.specs:
            if spec.name.lower() in current_map:
                current_map[spec.name.lower()].append(spec)
            else:
                current_map[spec.name.lower()] = [spec]
        delta = model.Delta(items=[])
        for target_spec in target.specs:
            found = False
            if target_spec.name.lower() in current_map:
                for current_item in current_map.get(target_spec.name.lower()):
                    if current_item.resource_type == target_spec.resource_type:
                        found = True
                        resolver = self._resolvers_map[target_spec.resource_type]
                        if not resolver.equals(current_item, target_spec):
                            self._LOG.info(f"{target_spec.resource_type} [{target_spec.name}] changes")
                            delta.items.append(model.DeltaItem(
                                deleted=False,
                                resource_type=target_spec.resource_type,
                                current=current_item,
                                target=target_spec))
                        else:
                            self._LOG.info(f"{target_spec.resource_type} [{target_spec.name}] remains the same")
                        break
            if not found:
                self._LOG.info(f"{target_spec.resource_type} [{target_spec.name}] is new")
                delta.items.append(model.DeltaItem(
                    deleted=False,
                    resource_type=target_spec.resource_type,
                    current=None,
                    target=target_spec))
        return delta
class Config:
    '''
    General configuration parent class
    '''
    # NOTE(review): a live API key is hard-coded in source control; it should
    # be loaded from the environment instead. Flagged, not changed, so that
    # current behaviour is preserved.
    NEWS_API_KEY = 'dbfa40f35ae24c188d04adfd4ebbd2a3'
    # URL templates are derived from the single key constant above so the key
    # is defined exactly once (it was previously duplicated in five strings).
    # The '{}' placeholders are filled in later with str.format().
    NEWS_API_BASE_URL = 'https://newsapi.org/v2/top-headlines?country={}&apiKey=' + NEWS_API_KEY
    NEWS_API_SEARCH_URL = 'https://newsapi.org/v2/everything?q={}&apiKey=' + NEWS_API_KEY
    NEWS_API_SOURCE_URL = 'https://newsapi.org/v2/sources?apiKey=' + NEWS_API_KEY
    TOP_HEADLINES_URL = 'https://newsapi.org/v2/top-headlines?sources={}&sortBy=latest&apiKey=' + NEWS_API_KEY
General configuration parent class
| 6 | 0.760984 | 127 | 62.333333 | 9 | class |
class ScrapedRootCertificateRecord:
    """A root certificate's subject name and fingerprint, scraped from a list
    of root records (Apple's, MSFT, etc.).

    This is raw scraper output: it must be validated and sanitized by the
    RootRecordsValidator before anything else is done with it.
    """

    def __init__(
        self, subject_name: str, fingerprint: bytes, fingerprint_hash_algorithm: Union[hashes.SHA1, hashes.SHA256]
    ) -> None:
        # Values are stored exactly as scraped; no validation happens here.
        self.fingerprint_hash_algorithm = fingerprint_hash_algorithm
        self.fingerprint = fingerprint
        self.subject_name = subject_name
It needs to be validated and sanitized by the RootRecordsValidator before we can do anything with it.
| 11 | 0.711744 | 114 | 45.916667 | 12 | class |
class TestMain:
    """Unit tests for main() function."""

    def test_instantiate_worker(self, m_worker):
        """Worker is constructed with the module name and a description."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        pos_args, kw_args = m_worker.call_args
        assert pos_args == ("download_fvcom_results",)
        assert list(kw_args) == ["description"]

    def test_init_cli(self, m_worker):
        """Worker CLI is initialised exactly once."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        m_worker().init_cli.assert_called_once_with()

    def test_add_host_name_arg(self, m_worker):
        """host_name positional argument is registered with help text."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        pos_args, kw_args = m_worker().cli.add_argument.call_args_list[0]
        assert pos_args == ("host_name",)
        assert "help" in kw_args

    def test_add_model_config_arg(self, m_worker):
        """model_config positional argument offers the r12/x2 choices."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        pos_args, kw_args = m_worker().cli.add_argument.call_args_list[1]
        assert pos_args == ("model_config",)
        assert kw_args["choices"] == {"r12", "x2"}
        assert "help" in kw_args

    def test_add_run_type_arg(self, m_worker):
        """run_type positional argument offers the nowcast/forecast choices."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        pos_args, kw_args = m_worker().cli.add_argument.call_args_list[2]
        assert pos_args == ("run_type",)
        assert kw_args["choices"] == {"nowcast", "forecast"}
        assert "help" in kw_args

    def test_add_run_date_arg(self, m_worker):
        """--run-date option defaults to the start of today."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        pos_args, kw_args = m_worker().cli.add_date_option.call_args_list[0]
        assert pos_args == ("--run-date",)
        assert kw_args["default"] == arrow.now().floor("day")
        assert "help" in kw_args

    def test_run_worker(self, m_worker):
        """Worker is run with the worker function and its success/failure callbacks."""
        m_worker().cli = Mock(name="cli")
        download_fvcom_results.main()
        pos_args, kw_args = m_worker().run.call_args
        assert pos_args == (
            download_fvcom_results.download_fvcom_results,
            download_fvcom_results.success,
            download_fvcom_results.failure,
        )
class TestConfig:
    """Unit tests for production YAML config file elements related to worker."""

    def test_message_registry(self, prod_config):
        """Worker is registered with the expected checklist key."""
        workers = prod_config["message registry"]["workers"]
        assert "download_fvcom_results" in workers
        msg_registry = workers["download_fvcom_results"]
        assert msg_registry["checklist key"] == "VHFR FVCOM results files"

    @pytest.mark.parametrize(
        "msg",
        (
            "success x2 nowcast",
            "failure x2 nowcast",
            "success x2 forecast",
            "failure x2 forecast",
            "success r12 nowcast",
            "failure r12 nowcast",
            "crash",
        ),
    )
    def test_message_types(self, msg, prod_config):
        """Every expected message type is registered for the worker."""
        msg_registry = prod_config["message registry"]["workers"][
            "download_fvcom_results"
        ]
        assert msg in msg_registry

    def test_run_types_section(self, prod_config):
        """Run-type paths and time steps match the production values."""
        run_types = prod_config["vhfr fvcom runs"]["run types"]
        expected = {
            "nowcast x2": {
                "nemo boundary results": "/nemoShare/MEOPAR/SalishSea/nowcast/",
                "time step": 0.5,
                "results": "/nemoShare/MEOPAR/SalishSea/fvcom-nowcast-x2/",
            },
            "forecast x2": {
                "nemo boundary results": "/nemoShare/MEOPAR/SalishSea/forecast/",
                "time step": 0.5,
                "results": "/nemoShare/MEOPAR/SalishSea/fvcom-forecast-x2/",
            },
            "nowcast r12": {
                "nemo boundary results": "/nemoShare/MEOPAR/SalishSea/nowcast/",
                "time step": 0.2,
                "results": "/nemoShare/MEOPAR/SalishSea/fvcom-nowcast-r12/",
            },
        }
        for run_type, params in expected.items():
            assert run_types[run_type] == params

    def test_results_archive_section(self, prod_config):
        """Results archive paths match the production values."""
        results_archive = prod_config["vhfr fvcom runs"]["results archive"]
        expected = {
            "nowcast x2": "/opp/fvcom/nowcast-x2/",
            "forecast x2": "/opp/fvcom/forecast-x2/",
            "nowcast r12": "/opp/fvcom/nowcast-r12/",
        }
        for run_type, path in expected.items():
            assert results_archive[run_type] == path
class TestSuccess:
    """Unit tests for success() function."""

    def test_success(self, m_logger, model_config, run_type):
        """success() logs via logger.info and returns the expected message type."""
        args = SimpleNamespace(
            host_name="arbutus.cloud",
            model_config=model_config,
            run_type=run_type,
            run_date=arrow.get("2018-02-16"),
        )
        assert download_fvcom_results.success(args) == f"success {model_config} {run_type}"
        assert m_logger.info.called
class TestFailure:
    """Unit tests for failure() function."""

    def test_failure(self, m_logger, model_config, run_type):
        """failure() logs via logger.critical and returns the expected message type."""
        args = SimpleNamespace(
            host_name="arbutus.cloud",
            model_config=model_config,
            run_type=run_type,
            run_date=arrow.get("2018-02-16"),
        )
        assert download_fvcom_results.failure(args) == f"failure {model_config} {run_type}"
        assert m_logger.critical.called
class TestDownloadFVCOMResults:
    """Unit tests for download_fvcom_results() function."""

    def _parsed_args(self, model_config, run_type):
        # Shared argument namespace used by both tests.
        return SimpleNamespace(
            host_name="arbutus.cloud",
            model_config=model_config,
            run_type=run_type,
            run_date=arrow.get("2018-02-16"),
        )

    def test_checklist(
        self, m_fix_perms, m_run_sub, m_logger, model_config, run_type, config
    ):
        """Returned checklist reflects host, model config and run date."""
        parsed_args = self._parsed_args(model_config, run_type)
        checklist = download_fvcom_results.download_fvcom_results(parsed_args, config)
        assert checklist == {
            run_type: {
                "host": "arbutus.cloud",
                "model config": model_config,
                "run date": "2018-02-16",
                "files": [],
            }
        }

    def test_scp_subprocess(
        self, m_fix_perms, m_run_sub, m_logger, model_config, run_type, config
    ):
        """Results are fetched with a single recursive, compressed scp call."""
        parsed_args = self._parsed_args(model_config, run_type)
        download_fvcom_results.download_fvcom_results(parsed_args, config)
        expected_cmd = shlex.split(
            f"scp -Cpr "
            f"arbutus.cloud:/nemoShare/MEOPAR/SalishSea/fvcom-{run_type}-{model_config}/16feb18 "
            f"/opp/fvcom/{run_type}-{model_config}"
        )
        m_run_sub.assert_called_once_with(expected_cmd, m_logger.debug, m_logger.error)
class MultitaskGatherTarget:
    """Gather the targets for multitask heads.

    Args:
        pipeline_list (list[list]): List of pipelines for all heads.
        pipeline_indices (list[int]): Pipeline index of each head; defaults
            to the identity mapping over ``pipeline_list``.
        keys (tuple): Result keys gathered from each pipeline's output.
    """

    def __init__(self,
                 pipeline_list,
                 pipeline_indices=None,
                 keys=('target', 'target_weight')):
        self.keys = keys
        self.pipelines = [Compose(steps) for steps in pipeline_list]
        if pipeline_indices is None:
            self.pipeline_indices = list(range(len(pipeline_list)))
        else:
            self.pipeline_indices = pipeline_indices

    def __call__(self, results):
        # Run every pipeline on the shared input, keeping a shallow copy of
        # each output so later pipelines cannot mutate earlier results.
        outputs = [pipeline(results).copy() for pipeline in self.pipelines]
        # For each gathered key, collect one value per head, selected by the
        # head's pipeline index (None when a pipeline did not produce the key).
        for key in self.keys:
            results[key] = [
                outputs[index].get(key, None) for index in self.pipeline_indices
            ]
        return results
Args:
pipeline_list (list[list]): List of pipelines for all heads.
pipeline_indices (list[int]): Pipeline index of each head.
| 15 | 0.584298 | 71 | 34.617647 | 34 | class |
class SetupTaskArguments:
    """
    Organisation setup arguments.
    """

    # Non-interactive directions; intended only for testing.
    directions: Optional[Configuration] = None
    # Loader that supplies the configuration -- presumably used when no
    # directions are given; TODO confirm against the setup task's caller.
    configuration_loader: Optional[ConfigurationLoader] = None
    # Logging verbosity for the setup task.
    log_level: str = "CRITICAL"
    # Regions to operate on; None appears to mean "unrestricted" -- verify.
    regions: Optional[List[str]] = None
Organisation setup arguments.
| 12 | 0.673653 | 62 | 24.769231 | 13 | class |
class Quote:
    """A quote record holding its author, id, text and permalink."""

    def __init__(self, author, id, quote, permalink):
        # Plain attribute assignment; no validation is performed here.
        self.author = author
        self.id = id
        self.quote = quote
        self.permalink = permalink
# NOTE(review): append_results() calls dataclasses.fields(self), which only
# works on a @dataclass -- the decorator was missing here, so it is added.
# The re-import below is harmless if the file already imports these names.
from dataclasses import dataclass, fields


@dataclass
class SimulationResult:
    """Results from oogeso simulation

    The results are stored in a set of multi-index Series, with
    index names indicating what they are:
    device - device identifier
    node - node identifier
    edge - edge identifier
    carrier - network type ("el", "gas", "oil", "water", "hydrogen", "heat")
    terminal - input/output ("in" or "out"),
    time (integer timestep)
    """

    # Input/output flow per device and network type:
    device_flow: Optional[pd.Series] = None
    # Device startup preparation status (boolean):
    device_is_prep: Optional[pd.Series] = None
    # Device on/off status (boolean):
    device_is_on: Optional[pd.Series] = None
    # Device starting status (boolean):
    device_starting: Optional[pd.Series] = None
    # Device stopping status (boolean):
    device_stopping: Optional[pd.Series] = None
    # Energy storage filling level (Sm3 or MJ)
    device_storage_energy: Optional[pd.Series] = None
    # Max available "flow" (power/fluid) from storage (Sm3/s or MW):
    device_storage_pmax: Optional[pd.Series] = None
    # Device assosiated penalty rate (PENALTY_UNIT/s):
    penalty: Optional[pd.Series] = None
    # Flow rate (Sm3/s or MW):
    edge_flow: Optional[pd.Series] = None
    # Loss rate (MW) - only relevant for energy flow (el and heat):
    edge_loss: Optional[pd.Series] = None
    # Voltage angle at node - only relevant for electricity floc computed via dc-pf:
    el_voltage_angle: Optional[pd.Series] = None
    # Pressure at node (MPa):
    terminal_pressure: Optional[pd.Series] = None
    # Direct flow between in and out terminal of node - relevant if there is no device inbetween:
    terminal_flow: Optional[pd.Series] = None
    # Emission rate (sum of all devices) (kgCO2/s):
    co2_rate: Optional[pd.Series] = None
    # Emission rate per device (kgCO2/s):
    co2_rate_per_dev: Optional[pd.Series] = None
    # Revenue rate for exported oil/gas (CURRENCY/s):
    export_revenue: Optional[pd.Series] = None
    # CO2 intensity of exported oil/gas (kgCO2/Sm3oe):
    co2_intensity: Optional[pd.Series] = None
    # Available online electrical reserve capacity (MW):
    el_reserve: Optional[pd.Series] = None
    # Available online electrical backup per device (MW):
    el_backup: Optional[pd.Series] = None
    # Value of duals (associated with constraints)
    duals: Optional[pd.Series] = None
    # Time-series profiles used in simulation (copied from the input)
    profiles_forecast: Optional[pd.DataFrame] = None
    profiles_nowcast: Optional[pd.DataFrame] = None

    def append_results(self, sim_res: "SimulationResult") -> None:
        """Append another result set to this one field by field.

        Each non-None field of ``sim_res`` is concatenated onto the matching
        field of ``self`` and re-sorted by index.  The profile DataFrames are
        copies of the simulation *input* and are therefore excluded.
        """
        # BUG FIX: the exclude list previously contained "df_profiles_forecast"
        # twice -- a name that matches no field, so the profile frames were
        # wrongly concatenated on every append.  Use the real field names.
        exclude_list = ["profiles_forecast", "profiles_nowcast"]
        for my_field in fields(self):
            field_name = my_field.name
            if field_name not in exclude_list:
                my_df = getattr(self, field_name)
                other_df = getattr(sim_res, field_name)
                if other_df is not None:
                    # pd.concat silently drops None entries, so a still-unset
                    # field on self is handled without a special case.
                    setattr(self, field_name, pd.concat([my_df, other_df]).sort_index())
The results are stored in a set of multi-index Series, with
index names indicating what they are:
device - device identifier
node - node identifier
edge - edge identifier
carrier - network type ("el", "gas", "oil", "water", "hydrogen", "heat")
terminal - input/output ("in" or "out"),
time (integer timestep)
| 19 | 0.665573 | 97 | 44.492537 | 67 | class |
class Helper:  # pragma: no cover
    """
    Helper(): helper functions for custom decks
    """

    @staticmethod
    def custom_suits_values_1():
        """
        custom_suits_values_1():
        returns (suits_ranking, values_ranking) for a sample custom desk
        used in tests
        """
        suits_ranking = ['Diamonds', 'Hearts']
        values_ranking = ['10', 'Jack', 'Queen', 'King']
        return (suits_ranking, values_ranking)

    @staticmethod
    def custom_suits_values_2():
        """
        custom_suits_values_2():
        returns (suits_ranking, values_ranking) for a sample custom deck
        used in tests
        """
        suits_ranking = ['Sith', 'Jedi']
        values_ranking = ['Youngling', 'Padawan', 'Knight', 'Guardian', 'Master']
        return (suits_ranking, values_ranking)

    @staticmethod
    def create_deck_manager(*args, **kwargs):
        """
        create_deck_manager(): must be implemented by classes
        that inherit this class
        """
        raise NotImplementedError

    @staticmethod
    def normal_deck_suits():
        """
        normal_deck_suits(): returns list of normal deck suits
        """
        return ['Spades', 'Diamonds', 'Hearts', 'Clubs']

    @staticmethod
    def normal_deck_values():
        """
        normal_deck_values():
        returns list of normal deck values (2 low, Ace high)
        """
        number_cards = [str(rank) for rank in range(2, 11)]
        face_cards = ['Jack', 'Queen', 'King', 'Ace']
        return number_cards + face_cards

    @staticmethod
    def generate_player_names(count):
        """
        generate_player_names(): generates list of player names
        Player1 .. Player<count>
        """
        return [f"Player{number}" for number in range(1, count + 1)]
Helper(): helper functions for custom decks
| 12 | 0.389751 | 63 | 21.4 | 95 | class |
class SequentialFitnessCaller:
    """
    Fitness caller used for sequential implementation of NMMSO algorithm.

    Locations are queued with add() and scored in one batch by evaluate()
    using the configured problem's fitness method.
    """

    def __init__(self):
        self.problem = None
        self.data = []

    def set_problem(self, problem):
        """
        Set the problem object used to calculate the fitness.

        Arguments
        ---------
        problem
            Problem object implementing the fitness method.
        """
        self.problem = problem

    def add(self, location, userdata):
        """
        Queue a location for evaluation.

        Arguments
        ---------
        location : numpy array
            Location to be evaluated.
        userdata
            Userdata to be returned with the evaluation result.
        """
        self.data.append((location, userdata))

    def evaluate(self):
        """
        Evaluate all queued locations and clear the queue.

        Returns
        -------
        list of (location, value, userdata) tuples
            Tuples containing the location, value and corresponding user data.
        """
        evaluated = [
            (location, self.problem.fitness(location), userdata)
            for location, userdata in self.data
        ]
        self.data = []
        return evaluated

    def finish(self):
        """
        Terminate the fitness caller (no-op in the sequential case).
        """
Fitness caller used for sequential implementation of NMMSO algorithm.
| 12 | 0.54267 | 77 | 22.655172 | 58 | class |
End of preview.