repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
qubole/qds-sdk-py | qds_sdk/cluster.py | Cluster.restore_point | def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True):
"""
Restoring cluster from a given hbase snapshot id
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {}
parameters['s3_location'] = s3_location
parameters['backup_id'] = backup_id
parameters['table_names'] = table_names
parameters['overwrite'] = overwrite
parameters['automatic'] = automatic
return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters) | python | def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True):
"""
Restoring cluster from a given hbase snapshot id
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {}
parameters['s3_location'] = s3_location
parameters['backup_id'] = backup_id
parameters['table_names'] = table_names
parameters['overwrite'] = overwrite
parameters['automatic'] = automatic
return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters) | [
"def",
"restore_point",
"(",
"cls",
",",
"cluster_id_label",
",",
"s3_location",
",",
"backup_id",
",",
"table_names",
",",
"overwrite",
"=",
"True",
",",
"automatic",
"=",
"True",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"Cluster",
".",
"api_version",
")",
"parameters",
"=",
"{",
"}",
"parameters",
"[",
"'s3_location'",
"]",
"=",
"s3_location",
"parameters",
"[",
"'backup_id'",
"]",
"=",
"backup_id",
"parameters",
"[",
"'table_names'",
"]",
"=",
"table_names",
"parameters",
"[",
"'overwrite'",
"]",
"=",
"overwrite",
"parameters",
"[",
"'automatic'",
"]",
"=",
"automatic",
"return",
"conn",
".",
"post",
"(",
"cls",
".",
"element_path",
"(",
"cluster_id_label",
")",
"+",
"\"/restore_point\"",
",",
"data",
"=",
"parameters",
")"
] | Restoring cluster from a given hbase snapshot id | [
"Restoring",
"cluster",
"from",
"a",
"given",
"hbase",
"snapshot",
"id"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L682-L693 | train |
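A minimal usage sketch for the `restore_point` row above, assuming `Qubole.configure` has already been called with a valid token; the label, S3 path, backup id, and table list are placeholders:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.cluster import Cluster

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

# POSTs the restore parameters to the cluster's /restore_point endpoint;
# overwrite and automatic default to True in the signature above.
result = Cluster.restore_point(
    cluster_id_label="hbase-cluster",            # placeholder label
    s3_location="s3://example-bucket/backups/",  # placeholder path
    backup_id="20190101T000000",                 # placeholder snapshot id
    table_names="table1,table2",
)
```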
qubole/qds-sdk-py | qds_sdk/cluster.py | Cluster.update_snapshot_schedule | def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None, frequency_num=None, status=None):
"""
Update for snapshot schedule
"""
conn = Qubole.agent(version=Cluster.api_version)
data = {}
if s3_location is not None:
data["s3_location"] = s3_location
if frequency_unit is not None:
data["frequency_unit"] = frequency_unit
if frequency_num is not None:
data["frequency_num"] = frequency_num
if status is not None:
data["status"] = status
return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data) | python | def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None, frequency_num=None, status=None):
"""
Update for snapshot schedule
"""
conn = Qubole.agent(version=Cluster.api_version)
data = {}
if s3_location is not None:
data["s3_location"] = s3_location
if frequency_unit is not None:
data["frequency_unit"] = frequency_unit
if frequency_num is not None:
data["frequency_num"] = frequency_num
if status is not None:
data["status"] = status
return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data) | [
"def",
"update_snapshot_schedule",
"(",
"cls",
",",
"cluster_id_label",
",",
"s3_location",
"=",
"None",
",",
"frequency_unit",
"=",
"None",
",",
"frequency_num",
"=",
"None",
",",
"status",
"=",
"None",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"Cluster",
".",
"api_version",
")",
"data",
"=",
"{",
"}",
"if",
"s3_location",
"is",
"not",
"None",
":",
"data",
"[",
"\"s3_location\"",
"]",
"=",
"s3_location",
"if",
"frequency_unit",
"is",
"not",
"None",
":",
"data",
"[",
"\"frequency_unit\"",
"]",
"=",
"frequency_unit",
"if",
"frequency_num",
"is",
"not",
"None",
":",
"data",
"[",
"\"frequency_num\"",
"]",
"=",
"frequency_num",
"if",
"status",
"is",
"not",
"None",
":",
"data",
"[",
"\"status\"",
"]",
"=",
"status",
"return",
"conn",
".",
"put",
"(",
"cls",
".",
"element_path",
"(",
"cluster_id_label",
")",
"+",
"\"/snapshot_schedule\"",
",",
"data",
")"
] | Update for snapshot schedule | [
"Update",
"for",
"snapshot",
"schedule"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L704-L719 | train |
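A sketch for `update_snapshot_schedule`: only keyword arguments that are not `None` end up in the PUT body, so omitted fields are left unchanged. Label and values are placeholders:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.cluster import Cluster

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

Cluster.update_snapshot_schedule(
    "hbase-cluster",         # placeholder label
    frequency_unit="hours",  # placeholder values
    frequency_num=6,
)
```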
qubole/qds-sdk-py | qds_sdk/cluster.py | ClusterInfo.set_spot_instance_settings | def set_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None):
"""
Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
"""
self.hadoop_settings['spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'maximum_spot_instance_percentage': maximum_spot_instance_percentage} | python | def set_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None):
"""
Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
"""
self.hadoop_settings['spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'maximum_spot_instance_percentage': maximum_spot_instance_percentage} | [
"def",
"set_spot_instance_settings",
"(",
"self",
",",
"maximum_bid_price_percentage",
"=",
"None",
",",
"timeout_for_request",
"=",
"None",
",",
"maximum_spot_instance_percentage",
"=",
"None",
")",
":",
"self",
".",
"hadoop_settings",
"[",
"'spot_instance_settings'",
"]",
"=",
"{",
"'maximum_bid_price_percentage'",
":",
"maximum_bid_price_percentage",
",",
"'timeout_for_request'",
":",
"timeout_for_request",
",",
"'maximum_spot_instance_percentage'",
":",
"maximum_spot_instance_percentage",
"}"
] | Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid". | [
"Purchase",
"options",
"for",
"spot",
"instances",
".",
"Valid",
"only",
"when",
"slave_request_type",
"is",
"hybrid",
"or",
"spot",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L883-L904 | train |
qubole/qds-sdk-py | qds_sdk/cluster.py | ClusterInfo.set_stable_spot_instance_settings | def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback} | python | def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback} | [
"def",
"set_stable_spot_instance_settings",
"(",
"self",
",",
"maximum_bid_price_percentage",
"=",
"None",
",",
"timeout_for_request",
"=",
"None",
",",
"allow_fallback",
"=",
"True",
")",
":",
"self",
".",
"hadoop_settings",
"[",
"'stable_spot_instance_settings'",
"]",
"=",
"{",
"'maximum_bid_price_percentage'",
":",
"maximum_bid_price_percentage",
",",
"'timeout_for_request'",
":",
"timeout_for_request",
",",
"'allow_fallback'",
":",
"allow_fallback",
"}"
] | Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available | [
"Purchase",
"options",
"for",
"stable",
"spot",
"instances",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L907-L926 | train |
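The two rows above stash purchase options into `hadoop_settings`. A sketch of both calls; the `ClusterInfo` constructor arguments are an assumption here (they vary across SDK versions), so treat them as placeholders:

```python
from qds_sdk.cluster import ClusterInfo

# Constructor arguments are assumed placeholders.
info = ClusterInfo("demo-cluster", "AWS_ACCESS_KEY", "AWS_SECRET_KEY")

# Valid only when slave_request_type is "hybrid" or "spot".
info.set_spot_instance_settings(
    maximum_bid_price_percentage=80,      # bid at most 80% of on-demand price
    timeout_for_request=10,               # minutes
    maximum_spot_instance_percentage=50,  # "hybrid" only
)

# Stable-spot settings apply to both master and slave nodes.
info.set_stable_spot_instance_settings(
    maximum_bid_price_percentage=90,
    timeout_for_request=5,
    allow_fallback=True,  # fall back to on-demand if spot is unavailable
)
print(info.hadoop_settings)
```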
qubole/qds-sdk-py | qds_sdk/cluster.py | ClusterInfoV13.minimal_payload | def minimal_payload(self):
"""
This method can be used to create the payload which is sent while
creating or updating a cluster.
"""
payload_dict = self.__dict__
payload_dict.pop("api_version", None)
return util._make_minimal(payload_dict) | python | def minimal_payload(self):
"""
This method can be used to create the payload which is sent while
creating or updating a cluster.
"""
payload_dict = self.__dict__
payload_dict.pop("api_version", None)
return util._make_minimal(payload_dict) | [
"def",
"minimal_payload",
"(",
"self",
")",
":",
"payload_dict",
"=",
"self",
".",
"__dict__",
"payload_dict",
".",
"pop",
"(",
"\"api_version\"",
",",
"None",
")",
"return",
"util",
".",
"_make_minimal",
"(",
"payload_dict",
")"
] | This method can be used to create the payload which is sent while
creating or updating a cluster. | [
"This",
"method",
"can",
"be",
"used",
"to",
"create",
"the",
"payload",
"which",
"is",
"sent",
"while",
"creating",
"or",
"updating",
"a",
"cluster",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L1307-L1314 | train |
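`minimal_payload` strips `api_version` and hands the rest to `util._make_minimal`. A small sketch of that pruning; the helper's behavior (dropping `None`-valued and empty entries) is inferred from how it is used in this file:

```python
from qds_sdk import util

payload = util._make_minimal({
    "label": ["my-cluster"],
    "presto_settings": None,  # expected to be dropped
})
print(payload)  # expected: {'label': ['my-cluster']}
```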
qubole/qds-sdk-py | qds_sdk/connection.py | Connection._handle_error | def _handle_error(response):
"""Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
"""
code = response.status_code
if 200 <= code < 400:
return
if code == 400:
sys.stderr.write(response.text + "\n")
raise BadRequest(response)
elif code == 401:
sys.stderr.write(response.text + "\n")
raise UnauthorizedAccess(response)
elif code == 403:
sys.stderr.write(response.text + "\n")
raise ForbiddenAccess(response)
elif code == 404:
sys.stderr.write(response.text + "\n")
raise ResourceNotFound(response)
elif code == 405:
sys.stderr.write(response.text + "\n")
raise MethodNotAllowed(response)
elif code == 409:
sys.stderr.write(response.text + "\n")
raise ResourceConflict(response)
elif code == 422:
sys.stderr.write(response.text + "\n")
raise ResourceInvalid(response)
elif code in (449, 502, 503, 504):
sys.stderr.write(response.text + "\n")
raise RetryWithDelay(response)
elif 401 <= code < 500:
sys.stderr.write(response.text + "\n")
raise ClientError(response)
elif 500 <= code < 600:
sys.stderr.write(response.text + "\n")
raise ServerError(response)
else:
raise ConnectionError(response) | python | def _handle_error(response):
"""Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
"""
code = response.status_code
if 200 <= code < 400:
return
if code == 400:
sys.stderr.write(response.text + "\n")
raise BadRequest(response)
elif code == 401:
sys.stderr.write(response.text + "\n")
raise UnauthorizedAccess(response)
elif code == 403:
sys.stderr.write(response.text + "\n")
raise ForbiddenAccess(response)
elif code == 404:
sys.stderr.write(response.text + "\n")
raise ResourceNotFound(response)
elif code == 405:
sys.stderr.write(response.text + "\n")
raise MethodNotAllowed(response)
elif code == 409:
sys.stderr.write(response.text + "\n")
raise ResourceConflict(response)
elif code == 422:
sys.stderr.write(response.text + "\n")
raise ResourceInvalid(response)
elif code in (449, 502, 503, 504):
sys.stderr.write(response.text + "\n")
raise RetryWithDelay(response)
elif 401 <= code < 500:
sys.stderr.write(response.text + "\n")
raise ClientError(response)
elif 500 <= code < 600:
sys.stderr.write(response.text + "\n")
raise ServerError(response)
else:
raise ConnectionError(response) | [
"def",
"_handle_error",
"(",
"response",
")",
":",
"code",
"=",
"response",
".",
"status_code",
"if",
"200",
"<=",
"code",
"<",
"400",
":",
"return",
"if",
"code",
"==",
"400",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"BadRequest",
"(",
"response",
")",
"elif",
"code",
"==",
"401",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"UnauthorizedAccess",
"(",
"response",
")",
"elif",
"code",
"==",
"403",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ForbiddenAccess",
"(",
"response",
")",
"elif",
"code",
"==",
"404",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ResourceNotFound",
"(",
"response",
")",
"elif",
"code",
"==",
"405",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"MethodNotAllowed",
"(",
"response",
")",
"elif",
"code",
"==",
"409",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ResourceConflict",
"(",
"response",
")",
"elif",
"code",
"==",
"422",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ResourceInvalid",
"(",
"response",
")",
"elif",
"code",
"in",
"(",
"449",
",",
"502",
",",
"503",
",",
"504",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"RetryWithDelay",
"(",
"response",
")",
"elif",
"401",
"<=",
"code",
"<",
"500",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ClientError",
"(",
"response",
")",
"elif",
"500",
"<=",
"code",
"<",
"600",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ServerError",
"(",
"response",
")",
"else",
":",
"raise",
"ConnectionError",
"(",
"response",
")"
] | Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned. | [
"Raise",
"exceptions",
"in",
"response",
"to",
"any",
"http",
"errors"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/connection.py#L111-L165 | train |
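A usage sketch for the error mapping above, which also marks 449/502/503/504 as retryable. It assumes the exception classes are importable from `qds_sdk.exception` (the import site is not shown in this row) and uses a placeholder command id:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.exception import ResourceNotFound, RetryWithDelay, ServerError  # assumed path

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token
conn = Qubole.agent()

try:
    conn.get("commands/123456")  # hypothetical command id
except ResourceNotFound:
    print("no such command")
except (RetryWithDelay, ServerError):
    print("transient failure; retry with backoff")
```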
qubole/qds-sdk-py | qds_sdk/template.py | Template.createTemplate | def createTemplate(data):
"""
Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID.
"""
conn = Qubole.agent()
return conn.post(Template.rest_entity_path, data) | python | def createTemplate(data):
"""
Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID.
"""
conn = Qubole.agent()
return conn.post(Template.rest_entity_path, data) | [
"def",
"createTemplate",
"(",
"data",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"post",
"(",
"Template",
".",
"rest_entity_path",
",",
"data",
")"
] | Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID. | [
"Create",
"a",
"new",
"template",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L162-L172 | train |
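A hedged sketch for `createTemplate`; the payload schema below is hypothetical (it is defined by the QDS command-templates API, not by anything shown in this row):

```python
from qds_sdk.qubole import Qubole
from qds_sdk.template import Template

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

# Hypothetical payload shape -- consult the QDS API docs for real fields.
data = {
    "name": "daily-count",
    "command_type": "HiveCommand",
    "command": {"query": "SELECT count(*) FROM $table$"},
    "input_vars": [{"name": "$table$"}],
}
template = Template.createTemplate(data)
print(template["id"])  # server-assigned template id
```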
qubole/qds-sdk-py | qds_sdk/template.py | Template.editTemplate | def editTemplate(id, data):
"""
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
"""
conn = Qubole.agent()
return conn.put(Template.element_path(id), data) | python | def editTemplate(id, data):
"""
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
"""
conn = Qubole.agent()
return conn.put(Template.element_path(id), data) | [
"def",
"editTemplate",
"(",
"id",
",",
"data",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"put",
"(",
"Template",
".",
"element_path",
"(",
"id",
")",
",",
"data",
")"
] | Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template. | [
"Edit",
"an",
"existing",
"template",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L175-L186 | train |
qubole/qds-sdk-py | qds_sdk/template.py | Template.viewTemplate | def viewTemplate(id):
"""
View an existing Template details.
Args:
`id`: ID of the template to fetch
Returns:
Dictionary containing the details of the template.
"""
conn = Qubole.agent()
return conn.get(Template.element_path(id)) | python | def viewTemplate(id):
"""
View an existing Template details.
Args:
`id`: ID of the template to fetch
Returns:
Dictionary containing the details of the template.
"""
conn = Qubole.agent()
return conn.get(Template.element_path(id)) | [
"def",
"viewTemplate",
"(",
"id",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"get",
"(",
"Template",
".",
"element_path",
"(",
"id",
")",
")"
] | View an existing Template details.
Args:
`id`: ID of the template to fetch
Returns:
Dictionary containing the details of the template. | [
"View",
"an",
"existing",
"Template",
"details",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L204-L215 | train |
qubole/qds-sdk-py | qds_sdk/template.py | Template.submitTemplate | def submitTemplate(id, data={}):
"""
Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details.
"""
conn = Qubole.agent()
path = str(id) + "/run"
return conn.post(Template.element_path(path), data) | python | def submitTemplate(id, data={}):
"""
Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details.
"""
conn = Qubole.agent()
path = str(id) + "/run"
return conn.post(Template.element_path(path), data) | [
"def",
"submitTemplate",
"(",
"id",
",",
"data",
"=",
"{",
"}",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"path",
"=",
"str",
"(",
"id",
")",
"+",
"\"/run\"",
"return",
"conn",
".",
"post",
"(",
"Template",
".",
"element_path",
"(",
"path",
")",
",",
"data",
")"
] | Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details. | [
"Submit",
"an",
"existing",
"Template",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L218-L230 | train |
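`submitTemplate` is non-blocking: it POSTs to the template's `/run` path and returns the command record immediately. A sketch with a placeholder id and a hypothetical `input_vars` payload, assuming prior `Qubole.configure`:

```python
from qds_sdk.template import Template

res = Template.submitTemplate(42, {"input_vars": [{"$table$": "default.logs"}]})
print(res["id"], res["command_type"])  # the fields runTemplate relies on below
```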
qubole/qds-sdk-py | qds_sdk/template.py | Template.runTemplate | def runTemplate(id, data={}):
"""
Run an existing Template and wait for the Result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure)
"""
conn = Qubole.agent()
path = str(id) + "/run"
res = conn.post(Template.element_path(path), data)
cmdType = res['command_type']
cmdId = res['id']
cmdClass = eval(cmdType)
cmd = cmdClass.find(cmdId)
while not Command.is_done(cmd.status):
time.sleep(Qubole.poll_interval)
cmd = cmdClass.find(cmd.id)
return Template.getResult(cmdClass, cmd) | python | def runTemplate(id, data={}):
"""
Run an existing Template and wait for the Result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure)
"""
conn = Qubole.agent()
path = str(id) + "/run"
res = conn.post(Template.element_path(path), data)
cmdType = res['command_type']
cmdId = res['id']
cmdClass = eval(cmdType)
cmd = cmdClass.find(cmdId)
while not Command.is_done(cmd.status):
time.sleep(Qubole.poll_interval)
cmd = cmdClass.find(cmd.id)
return Template.getResult(cmdClass, cmd) | [
"def",
"runTemplate",
"(",
"id",
",",
"data",
"=",
"{",
"}",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"path",
"=",
"str",
"(",
"id",
")",
"+",
"\"/run\"",
"res",
"=",
"conn",
".",
"post",
"(",
"Template",
".",
"element_path",
"(",
"path",
")",
",",
"data",
")",
"cmdType",
"=",
"res",
"[",
"'command_type'",
"]",
"cmdId",
"=",
"res",
"[",
"'id'",
"]",
"cmdClass",
"=",
"eval",
"(",
"cmdType",
")",
"cmd",
"=",
"cmdClass",
".",
"find",
"(",
"cmdId",
")",
"while",
"not",
"Command",
".",
"is_done",
"(",
"cmd",
".",
"status",
")",
":",
"time",
".",
"sleep",
"(",
"Qubole",
".",
"poll_interval",
")",
"cmd",
"=",
"cmdClass",
".",
"find",
"(",
"cmd",
".",
"id",
")",
"return",
"Template",
".",
"getResult",
"(",
"cmdClass",
",",
"cmd",
")"
] | Run an existing Template and wait for the Result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure) | [
"Run",
"an",
"existing",
"Template",
"and",
"waits",
"for",
"the",
"Result",
".",
"Prints",
"result",
"to",
"stdout",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L233-L255 | train |
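`runTemplate` hits the same `/run` endpoint but then blocks, re-fetching the command every `Qubole.poll_interval` seconds until `Command.is_done` reports completion, and returns 0 on success or 1 on failure. A sketch with the same placeholder id and payload as above:

```python
from qds_sdk.template import Template

status = Template.runTemplate(42, {"input_vars": [{"$table$": "default.logs"}]})
# Result rows are printed to stdout by getResult; status is 0 or 1.
```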
qubole/qds-sdk-py | qds_sdk/template.py | Template.listTemplates | def listTemplates(data={}):
"""
Fetch existing Templates details.
Args:
`data`: dictionary containing the value of page number and per-page value
Returns:
Dictionary containing paging_info and command_templates details
"""
conn = Qubole.agent()
url_path = Template.rest_entity_path
page_attr = []
if "page" in data and data["page"] is not None:
page_attr.append("page=%s" % data["page"])
if "per_page" in data and data["per_page"] is not None:
page_attr.append("per_page=%s" % data["per_page"])
if page_attr:
url_path = "%s?%s" % (url_path, "&".join(page_attr))
return conn.get(url_path) | python | def listTemplates(data={}):
"""
Fetch existing Templates details.
Args:
`data`: dictionary containing the value of page number and per-page value
Returns:
Dictionary containing paging_info and command_templates details
"""
conn = Qubole.agent()
url_path = Template.rest_entity_path
page_attr = []
if "page" in data and data["page"] is not None:
page_attr.append("page=%s" % data["page"])
if "per_page" in data and data["per_page"] is not None:
page_attr.append("per_page=%s" % data["per_page"])
if page_attr:
url_path = "%s?%s" % (url_path, "&".join(page_attr))
return conn.get(url_path) | [
"def",
"listTemplates",
"(",
"data",
"=",
"{",
"}",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"url_path",
"=",
"Template",
".",
"rest_entity_path",
"page_attr",
"=",
"[",
"]",
"if",
"\"page\"",
"in",
"data",
"and",
"data",
"[",
"\"page\"",
"]",
"is",
"not",
"None",
":",
"page_attr",
".",
"append",
"(",
"\"page=%s\"",
"%",
"data",
"[",
"\"page\"",
"]",
")",
"if",
"\"per_page\"",
"in",
"data",
"and",
"data",
"[",
"\"per_page\"",
"]",
"is",
"not",
"None",
":",
"page_attr",
".",
"append",
"(",
"\"per_page=%s\"",
"%",
"data",
"[",
"\"per_page\"",
"]",
")",
"if",
"page_attr",
":",
"url_path",
"=",
"\"%s?%s\"",
"%",
"(",
"url_path",
",",
"\"&\"",
".",
"join",
"(",
"page_attr",
")",
")",
"return",
"conn",
".",
"get",
"(",
"url_path",
")"
] | Fetch existing Templates details.
Args:
`data`: dictionary containing the value of page number and per-page value
Returns:
Dictionary containing paging_info and command_templates details | [
"Fetch",
"existing",
"Templates",
"details",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L269-L288 | train |
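A pagination sketch for `listTemplates`; both keys are optional, and only present, non-`None` values reach the query string (e.g. `...?page=2&per_page=10`):

```python
from qds_sdk.template import Template

page = Template.listTemplates({"page": 2, "per_page": 10})
for tmpl in page["command_templates"]:   # response key named in the docstring above
    print(tmpl["id"], tmpl.get("name"))  # "name" is an assumed field
```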
qubole/qds-sdk-py | qds_sdk/dbtaps.py | DbTapCmdLine.edit | def edit(args):
tap = DbTap.find(args.id)
""" Carefully setup a dict """
options = {}
if args.name is not None:
options["db_name"] = args.name
if args.host is not None:
options["db_host"] = args.host
if args.user is not None:
options["db_user"] = args.user
if args.password is not None:
options["db_passwd"] = args.password
if args.type is not None:
options["db_type"] = args.type
if args.location is not None:
options["db_location"] = args.location
if args.port is not None:
options["port"] = args.port
tap = tap.edit(**options)
return json.dumps(tap.attributes, sort_keys=True, indent=4) | python | def edit(args):
tap = DbTap.find(args.id)
""" Carefully setup a dict """
options = {}
if args.name is not None:
options["db_name"] = args.name
if args.host is not None:
options["db_host"] = args.host
if args.user is not None:
options["db_user"] = args.user
if args.password is not None:
options["db_passwd"] = args.password
if args.type is not None:
options["db_type"] = args.type
if args.location is not None:
options["db_location"] = args.location
if args.port is not None:
options["port"] = args.port
tap = tap.edit(**options)
return json.dumps(tap.attributes, sort_keys=True, indent=4) | [
"def",
"edit",
"(",
"args",
")",
":",
"tap",
"=",
"DbTap",
".",
"find",
"(",
"args",
".",
"id",
")",
"options",
"=",
"{",
"}",
"if",
"not",
"args",
".",
"name",
"is",
"None",
":",
"options",
"[",
"\"db_name\"",
"]",
"=",
"args",
".",
"name",
"if",
"args",
".",
"host",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_host\"",
"]",
"=",
"args",
".",
"host",
"if",
"args",
".",
"user",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_user\"",
"]",
"=",
"args",
".",
"user",
"if",
"args",
".",
"password",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_passwd\"",
"]",
"=",
"args",
".",
"password",
"if",
"args",
".",
"type",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_type\"",
"]",
"=",
"args",
".",
"type",
"if",
"args",
".",
"location",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_location\"",
"]",
"=",
"args",
".",
"location",
"if",
"args",
".",
"port",
"is",
"not",
"None",
":",
"options",
"[",
"\"port\"",
"]",
"=",
"args",
".",
"port",
"tap",
"=",
"tap",
".",
"edit",
"(",
"*",
"*",
"options",
")",
"return",
"json",
".",
"dumps",
"(",
"tap",
".",
"attributes",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")"
] | Carefully setup a dict | [
"Carefully",
"setup",
"a",
"dict"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/dbtaps.py#L135-L154 | train |
qubole/qds-sdk-py | qds_sdk/app.py | App.create | def create(cls, name, config=None, kind="spark"):
"""
Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark)
"""
conn = Qubole.agent()
return conn.post(cls.rest_entity_path,
data={'name': name, 'config': config, 'kind': kind}) | python | def create(cls, name, config=None, kind="spark"):
"""
Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark)
"""
conn = Qubole.agent()
return conn.post(cls.rest_entity_path,
data={'name': name, 'config': config, 'kind': kind}) | [
"def",
"create",
"(",
"cls",
",",
"name",
",",
"config",
"=",
"None",
",",
"kind",
"=",
"\"spark\"",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"post",
"(",
"cls",
".",
"rest_entity_path",
",",
"data",
"=",
"{",
"'name'",
":",
"name",
",",
"'config'",
":",
"config",
",",
"'kind'",
":",
"kind",
"}",
")"
] | Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark) | [
"Create",
"a",
"new",
"app",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/app.py#L127-L140 | train |
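A sketch for `App.create`; the `config` keys are engine-specific, so the one below is illustrative only:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.app import App

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

app = App.create(
    name="my-spark-app",
    config={"spark.executor.memory": "2g"},  # illustrative config key
    kind="spark",                            # the default shown above
)
```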
qubole/qds-sdk-py | qds_sdk/qubole.py | Qubole.configure | def configure(cls, api_token,
api_url="https://api.qubole.com/api/", version="v1.2",
poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
"""
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
"""
cls._auth = QuboleAuth(api_token)
cls.api_token = api_token
cls.version = version
cls.baseurl = api_url
if poll_interval < Qubole.MIN_POLL_INTERVAL:
log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
cls.poll_interval = Qubole.MIN_POLL_INTERVAL
else:
cls.poll_interval = poll_interval
cls.skip_ssl_cert_check = skip_ssl_cert_check
cls.cloud_name = cloud_name.lower()
cls.cached_agent = None | python | def configure(cls, api_token,
api_url="https://api.qubole.com/api/", version="v1.2",
poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
"""
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
"""
cls._auth = QuboleAuth(api_token)
cls.api_token = api_token
cls.version = version
cls.baseurl = api_url
if poll_interval < Qubole.MIN_POLL_INTERVAL:
log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
cls.poll_interval = Qubole.MIN_POLL_INTERVAL
else:
cls.poll_interval = poll_interval
cls.skip_ssl_cert_check = skip_ssl_cert_check
cls.cloud_name = cloud_name.lower()
cls.cached_agent = None | [
"def",
"configure",
"(",
"cls",
",",
"api_token",
",",
"api_url",
"=",
"\"https://api.qubole.com/api/\"",
",",
"version",
"=",
"\"v1.2\"",
",",
"poll_interval",
"=",
"5",
",",
"skip_ssl_cert_check",
"=",
"False",
",",
"cloud_name",
"=",
"\"AWS\"",
")",
":",
"cls",
".",
"_auth",
"=",
"QuboleAuth",
"(",
"api_token",
")",
"cls",
".",
"api_token",
"=",
"api_token",
"cls",
".",
"version",
"=",
"version",
"cls",
".",
"baseurl",
"=",
"api_url",
"if",
"poll_interval",
"<",
"Qubole",
".",
"MIN_POLL_INTERVAL",
":",
"log",
".",
"warn",
"(",
"\"Poll interval cannot be less than %s seconds. Setting it to %s seconds.\\n\"",
"%",
"(",
"Qubole",
".",
"MIN_POLL_INTERVAL",
",",
"Qubole",
".",
"MIN_POLL_INTERVAL",
")",
")",
"cls",
".",
"poll_interval",
"=",
"Qubole",
".",
"MIN_POLL_INTERVAL",
"else",
":",
"cls",
".",
"poll_interval",
"=",
"poll_interval",
"cls",
".",
"skip_ssl_cert_check",
"=",
"skip_ssl_cert_check",
"cls",
".",
"cloud_name",
"=",
"cloud_name",
".",
"lower",
"(",
")",
"cls",
".",
"cached_agent",
"=",
"None"
] | Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events | [
"Set",
"parameters",
"governing",
"interaction",
"with",
"QDS"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/qubole.py#L36-L63 | train |
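`configure` is the one-time global setup that every other call in this SDK depends on. A minimal sketch:

```python
from qds_sdk.qubole import Qubole

Qubole.configure(
    api_token="YOUR_API_TOKEN",             # placeholder; the only required argument
    api_url="https://api.qubole.com/api/",  # default, shown for clarity
    poll_interval=10,                       # values below Qubole.MIN_POLL_INTERVAL are clamped up
)
```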
qubole/qds-sdk-py | qds_sdk/clusterv2.py | ClusterCmdLine.get_cluster_request_parameters | def get_cluster_request_parameters(cluster_info, cloud_config, engine_config):
'''
Use this to return final minimal request from cluster_info, cloud_config or engine_config objects
Alternatively call util._make_minimal if only one object needs to be implemented
'''
cluster_request = {}
cloud_config = util._make_minimal(cloud_config.__dict__)
if bool(cloud_config): cluster_request['cloud_config'] = cloud_config
engine_config = util._make_minimal(engine_config.__dict__)
if bool(engine_config): cluster_request['engine_config'] = engine_config
cluster_request.update(util._make_minimal(cluster_info.__dict__))
return cluster_request | python | def get_cluster_request_parameters(cluster_info, cloud_config, engine_config):
'''
Use this to return final minimal request from cluster_info, cloud_config or engine_config objects
Alternatively call util._make_minimal if only one object needs to be implemented
'''
cluster_request = {}
cloud_config = util._make_minimal(cloud_config.__dict__)
if bool(cloud_config): cluster_request['cloud_config'] = cloud_config
engine_config = util._make_minimal(engine_config.__dict__)
if bool(engine_config): cluster_request['engine_config'] = engine_config
cluster_request.update(util._make_minimal(cluster_info.__dict__))
return cluster_request | [
"def",
"get_cluster_request_parameters",
"(",
"cluster_info",
",",
"cloud_config",
",",
"engine_config",
")",
":",
"cluster_request",
"=",
"{",
"}",
"cloud_config",
"=",
"util",
".",
"_make_minimal",
"(",
"cloud_config",
".",
"__dict__",
")",
"if",
"bool",
"(",
"cloud_config",
")",
":",
"cluster_request",
"[",
"'cloud_config'",
"]",
"=",
"cloud_config",
"engine_config",
"=",
"util",
".",
"_make_minimal",
"(",
"engine_config",
".",
"__dict__",
")",
"if",
"bool",
"(",
"engine_config",
")",
":",
"cluster_request",
"[",
"'engine_config'",
"]",
"=",
"engine_config",
"cluster_request",
".",
"update",
"(",
"util",
".",
"_make_minimal",
"(",
"cluster_info",
".",
"__dict__",
")",
")",
"return",
"cluster_request"
] | Use this to return final minimal request from cluster_info, cloud_config or engine_config objects
Alternatively call util._make_minimal if only one object needs to be implemented | [
"Use",
"this",
"to",
"return",
"final",
"minimal",
"request",
"from",
"cluster_info",
"cloud_config",
"or",
"engine_config",
"objects",
"Alternatively",
"call",
"util",
".",
"_make_minimal",
"if",
"only",
"one",
"object",
"needs",
"to",
"be",
"implemented"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/clusterv2.py#L131-L145 | train |
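A sketch of the merge behavior using `types.SimpleNamespace` stand-ins for the real config objects (the function only reads their `__dict__`), and assuming it is exposed as a static method on `ClusterCmdLine`, as the missing `self` suggests:

```python
import types
from qds_sdk.clusterv2 import ClusterCmdLine

cluster_info = types.SimpleNamespace(cluster_info={"label": ["demo"]})
cloud_config = types.SimpleNamespace(compute_config=None)  # minimal -> empty -> omitted
engine_config = types.SimpleNamespace(flavour="spark")

req = ClusterCmdLine.get_cluster_request_parameters(
    cluster_info, cloud_config, engine_config)
print(req)  # e.g. {'engine_config': {'flavour': 'spark'}, 'cluster_info': {'label': ['demo']}}
```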
qubole/qds-sdk-py | qds_sdk/clusterv2.py | ClusterV2.create | def create(cls, cluster_info):
"""
Create a new cluster using information provided in `cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.post(cls.rest_entity_path, data=cluster_info) | python | def create(cls, cluster_info):
"""
Create a new cluster using information provided in `cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.post(cls.rest_entity_path, data=cluster_info) | [
"def",
"create",
"(",
"cls",
",",
"cluster_info",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"\"v2\"",
")",
"return",
"conn",
".",
"post",
"(",
"cls",
".",
"rest_entity_path",
",",
"data",
"=",
"cluster_info",
")"
] | Create a new cluster using information provided in `cluster_info`. | [
"Create",
"a",
"new",
"cluster",
"using",
"information",
"provided",
"in",
"cluster_info",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/clusterv2.py#L713-L718 | train |
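`ClusterV2.create` simply POSTs the merged request against the v2 API. A hedged sketch; the request body here is hypothetical, and in practice it would come from `get_cluster_request_parameters` above:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.clusterv2 import ClusterV2

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

request = {"cluster_info": {"label": ["demo-cluster"]}}  # hypothetical minimal body
new_cluster = ClusterV2.create(request)
```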
qubole/qds-sdk-py | qds_sdk/commands.py | _download_to_local | def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):
'''
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
'''
#Progress bar to display download progress
def _callback(downloaded, total):
'''
Call function for download.
`downloaded`: File size already downloaded (int)
`total`: Total file size to be downloaded (int)
'''
if (total == 0) or (downloaded == total):
return
progress = downloaded*100//total
sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
sys.stderr.flush()
m = _URI_RE.match(s3_path)
bucket_name = m.group(1)
bucket = boto_conn.get_bucket(bucket_name)
retries = 6
if s3_path.endswith('/') is False:
#It is a file
key_name = m.group(2)
key_instance = bucket.get_key(key_name)
while key_instance is None and retries > 0:
retries = retries - 1
log.info("Results file is not available on s3. Retry: " + str(6-retries))
time.sleep(10)
key_instance = bucket.get_key(key_name)
if key_instance is None:
raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
log.info("Downloading file from %s" % s3_path)
if delim is None:
try:
key_instance.get_contents_to_file(fp) # cb=_callback
except boto.exception.S3ResponseError as e:
if (e.status == 403):
# SDK-191, boto gives an error while fetching the objects using versions which happens by default
# in the get_contents_to_file() api. So attempt one without specifying version.
log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....")
key_instance.open()
fp.write(key_instance.read())
key_instance.close()
else:
raise
else:
# Get contents as string. Replace parameters and write to file.
_read_iteratively(key_instance, fp, delim=delim)
else:
#It is a folder
key_prefix = m.group(2)
bucket_paths = bucket.list(key_prefix)
for one_path in bucket_paths:
name = one_path.name
# Eliminate _tmp_ files which ends with $folder$
if name.endswith('$folder$'):
continue
log.info("Downloading file from %s" % name)
if delim is None:
one_path.get_contents_to_file(fp) # cb=_callback
else:
_read_iteratively(one_path, fp, delim=delim) | python | def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):
'''
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
'''
#Progress bar to display download progress
def _callback(downloaded, total):
'''
Call function for download.
`downloaded`: File size already downloaded (int)
`total`: Total file size to be downloaded (int)
'''
if (total == 0) or (downloaded == total):
return
progress = downloaded*100//total
sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
sys.stderr.flush()
m = _URI_RE.match(s3_path)
bucket_name = m.group(1)
bucket = boto_conn.get_bucket(bucket_name)
retries = 6
if s3_path.endswith('/') is False:
#It is a file
key_name = m.group(2)
key_instance = bucket.get_key(key_name)
while key_instance is None and retries > 0:
retries = retries - 1
log.info("Results file is not available on s3. Retry: " + str(6-retries))
time.sleep(10)
key_instance = bucket.get_key(key_name)
if key_instance is None:
raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
log.info("Downloading file from %s" % s3_path)
if delim is None:
try:
key_instance.get_contents_to_file(fp) # cb=_callback
except boto.exception.S3ResponseError as e:
if (e.status == 403):
# SDK-191, boto gives an error while fetching the objects using versions which happens by default
# in the get_contents_to_file() api. So attempt one without specifying version.
log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....")
key_instance.open()
fp.write(key_instance.read())
key_instance.close()
else:
raise
else:
# Get contents as string. Replace parameters and write to file.
_read_iteratively(key_instance, fp, delim=delim)
else:
#It is a folder
key_prefix = m.group(2)
bucket_paths = bucket.list(key_prefix)
for one_path in bucket_paths:
name = one_path.name
# Eliminate _tmp_ files which ends with $folder$
if name.endswith('$folder$'):
continue
log.info("Downloading file from %s" % name)
if delim is None:
one_path.get_contents_to_file(fp) # cb=_callback
else:
_read_iteratively(one_path, fp, delim=delim) | [
"def",
"_download_to_local",
"(",
"boto_conn",
",",
"s3_path",
",",
"fp",
",",
"num_result_dir",
",",
"delim",
"=",
"None",
")",
":",
"#Progress bar to display download progress",
"def",
"_callback",
"(",
"downloaded",
",",
"total",
")",
":",
"'''\n Call function for upload.\n\n `downloaded`: File size already downloaded (int)\n\n `total`: Total file size to be downloaded (int)\n '''",
"if",
"(",
"total",
"is",
"0",
")",
"or",
"(",
"downloaded",
"==",
"total",
")",
":",
"return",
"progress",
"=",
"downloaded",
"*",
"100",
"/",
"total",
"sys",
".",
"stderr",
".",
"write",
"(",
"'\\r[{0}] {1}%'",
".",
"format",
"(",
"'#'",
"*",
"progress",
",",
"progress",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"m",
"=",
"_URI_RE",
".",
"match",
"(",
"s3_path",
")",
"bucket_name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"bucket",
"=",
"boto_conn",
".",
"get_bucket",
"(",
"bucket_name",
")",
"retries",
"=",
"6",
"if",
"s3_path",
".",
"endswith",
"(",
"'/'",
")",
"is",
"False",
":",
"#It is a file",
"key_name",
"=",
"m",
".",
"group",
"(",
"2",
")",
"key_instance",
"=",
"bucket",
".",
"get_key",
"(",
"key_name",
")",
"while",
"key_instance",
"is",
"None",
"and",
"retries",
">",
"0",
":",
"retries",
"=",
"retries",
"-",
"1",
"log",
".",
"info",
"(",
"\"Results file is not available on s3. Retry: \"",
"+",
"str",
"(",
"6",
"-",
"retries",
")",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"key_instance",
"=",
"bucket",
".",
"get_key",
"(",
"key_name",
")",
"if",
"key_instance",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Results file not available on s3 yet. This can be because of s3 eventual consistency issues.\"",
")",
"log",
".",
"info",
"(",
"\"Downloading file from %s\"",
"%",
"s3_path",
")",
"if",
"delim",
"is",
"None",
":",
"try",
":",
"key_instance",
".",
"get_contents_to_file",
"(",
"fp",
")",
"# cb=_callback",
"except",
"boto",
".",
"exception",
".",
"S3ResponseError",
"as",
"e",
":",
"if",
"(",
"e",
".",
"status",
"==",
"403",
")",
":",
"# SDK-191, boto gives an error while fetching the objects using versions which happens by default",
"# in the get_contents_to_file() api. So attempt one without specifying version.",
"log",
".",
"warn",
"(",
"\"Access denied while fetching the s3 object. Retrying without specifying the version....\"",
")",
"key_instance",
".",
"open",
"(",
")",
"fp",
".",
"write",
"(",
"key_instance",
".",
"read",
"(",
")",
")",
"key_instance",
".",
"close",
"(",
")",
"else",
":",
"raise",
"else",
":",
"# Get contents as string. Replace parameters and write to file.",
"_read_iteratively",
"(",
"key_instance",
",",
"fp",
",",
"delim",
"=",
"delim",
")",
"else",
":",
"#It is a folder",
"key_prefix",
"=",
"m",
".",
"group",
"(",
"2",
")",
"bucket_paths",
"=",
"bucket",
".",
"list",
"(",
"key_prefix",
")",
"for",
"one_path",
"in",
"bucket_paths",
":",
"name",
"=",
"one_path",
".",
"name",
"# Eliminate _tmp_ files which ends with $folder$",
"if",
"name",
".",
"endswith",
"(",
"'$folder$'",
")",
":",
"continue",
"log",
".",
"info",
"(",
"\"Downloading file from %s\"",
"%",
"name",
")",
"if",
"delim",
"is",
"None",
":",
"one_path",
".",
"get_contents_to_file",
"(",
"fp",
")",
"# cb=_callback",
"else",
":",
"_read_iteratively",
"(",
"one_path",
",",
"fp",
",",
"delim",
"=",
"delim",
")"
] | Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded | [
"Downloads",
"the",
"contents",
"of",
"all",
"objects",
"in",
"s3_path",
"into",
"fp"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L1415-L1489 | train |
qubole/qds-sdk-py | qds_sdk/commands.py | Command.cancel_id | def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data) | python | def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data) | [
"def",
"cancel_id",
"(",
"cls",
",",
"id",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"data",
"=",
"{",
"\"status\"",
":",
"\"kill\"",
"}",
"return",
"conn",
".",
"put",
"(",
"cls",
".",
"element_path",
"(",
"id",
")",
",",
"data",
")"
] | Cancels command denoted by this id
Args:
`id`: command id | [
"Cancels",
"command",
"denoted",
"by",
"this",
"id"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L181-L190 | train |
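A sketch for `cancel_id`, which issues a PUT with `{"status": "kill"}` as shown above; the id is a placeholder and any `Command` subclass works the same way:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.commands import HiveCommand

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

HiveCommand.cancel_id(123456)  # placeholder command id
```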
qubole/qds-sdk-py | qds_sdk/commands.py | Command.get_log_id | def get_log_id(cls, id):
"""
Fetches log for the command represented by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
r = conn.get_raw(cls.element_path(id) + "/logs")
return r.text | python | def get_log_id(cls, id):
"""
Fetches log for the command represented by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
r = conn.get_raw(cls.element_path(id) + "/logs")
return r.text | [
"def",
"get_log_id",
"(",
"cls",
",",
"id",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"r",
"=",
"conn",
".",
"get_raw",
"(",
"cls",
".",
"element_path",
"(",
"id",
")",
"+",
"\"/logs\"",
")",
"return",
"r",
".",
"text"
] | Fetches log for the command represented by this id
Args:
`id`: command id | [
"Fetches",
"log",
"for",
"the",
"command",
"represented",
"by",
"this",
"id"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L199-L208 | train |
qubole/qds-sdk-py | qds_sdk/commands.py | Command.get_log | def get_log(self):
"""
Fetches log for the command represented by this object
Returns:
The log as a string
"""
log_path = self.meta_data['logs_resource']
conn = Qubole.agent()
r = conn.get_raw(log_path)
return r.text | python | def get_log(self):
"""
Fetches log for the command represented by this object
Returns:
The log as a string
"""
log_path = self.meta_data['logs_resource']
conn = Qubole.agent()
r = conn.get_raw(log_path)
return r.text | [
"def",
"get_log",
"(",
"self",
")",
":",
"log_path",
"=",
"self",
".",
"meta_data",
"[",
"'logs_resource'",
"]",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"r",
"=",
"conn",
".",
"get_raw",
"(",
"log_path",
")",
"return",
"r",
".",
"text"
] | Fetches log for the command represented by this object
Returns:
The log as a string | [
"Fetches",
"log",
"for",
"the",
"command",
"represented",
"by",
"this",
"object"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L210-L220 | train |
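`get_log_id` and `get_log` return the same raw log text; the instance method resolves the URL from the command's `logs_resource` metadata. A sketch with a placeholder id, assuming prior `Qubole.configure`:

```python
from qds_sdk.commands import Command

cmd = Command.find(123456)  # placeholder command id
print(cmd.get_log())        # same text as Command.get_log_id(123456)
```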
qubole/qds-sdk-py | qds_sdk/commands.py | Command.get_results | def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = "false"
if len(arguments) == 1:
include_header = arguments.pop(0)
if include_header not in ('true', 'false'):
raise ParseError("incude_header can be either true or false")
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if sys.version_info < (3, 0, 0):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(encoded_results)
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if storage_credentials['region_endpoint'] is not None:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token = storage_credentials['session_token'],
host = storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token=storage_credentials['session_token'])
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
# If column/header names are not able to fetch then use include header as true
if include_header.lower() == "true" and qlog is not None:
write_headers(qlog, fp)
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location'])) | python | def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = "false"
if len(arguments) == 1:
include_header = arguments.pop(0)
if include_header not in ('true', 'false'):
raise ParseError("incude_header can be either true or false")
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if sys.version_info < (3, 0, 0):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(encoded_results)
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if storage_credentials['region_endpoint'] is not None:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token = storage_credentials['session_token'],
host = storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token=storage_credentials['session_token'])
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
# If column/header names are not able to fetch then use include header as true
if include_header.lower() == "true" and qlog is not None:
write_headers(qlog, fp)
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location'])) | [
"def",
"get_results",
"(",
"self",
",",
"fp",
"=",
"sys",
".",
"stdout",
",",
"inline",
"=",
"True",
",",
"delim",
"=",
"None",
",",
"fetch",
"=",
"True",
",",
"qlog",
"=",
"None",
",",
"arguments",
"=",
"[",
"]",
")",
":",
"result_path",
"=",
"self",
".",
"meta_data",
"[",
"'results_resource'",
"]",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"include_header",
"=",
"\"false\"",
"if",
"len",
"(",
"arguments",
")",
"==",
"1",
":",
"include_header",
"=",
"arguments",
".",
"pop",
"(",
"0",
")",
"if",
"include_header",
"not",
"in",
"(",
"'true'",
",",
"'false'",
")",
":",
"raise",
"ParseError",
"(",
"\"incude_header can be either true or false\"",
")",
"r",
"=",
"conn",
".",
"get",
"(",
"result_path",
",",
"{",
"'inline'",
":",
"inline",
",",
"'include_headers'",
":",
"include_header",
"}",
")",
"if",
"r",
".",
"get",
"(",
"'inline'",
")",
":",
"raw_results",
"=",
"r",
"[",
"'results'",
"]",
"encoded_results",
"=",
"raw_results",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
",",
"0",
")",
":",
"fp",
".",
"write",
"(",
"encoded_results",
")",
"else",
":",
"import",
"io",
"if",
"isinstance",
"(",
"fp",
",",
"io",
".",
"TextIOBase",
")",
":",
"if",
"hasattr",
"(",
"fp",
",",
"'buffer'",
")",
":",
"fp",
".",
"buffer",
".",
"write",
"(",
"encoded_results",
")",
"else",
":",
"fp",
".",
"write",
"(",
"raw_results",
")",
"elif",
"isinstance",
"(",
"fp",
",",
"io",
".",
"BufferedIOBase",
")",
"or",
"isinstance",
"(",
"fp",
",",
"io",
".",
"RawIOBase",
")",
":",
"fp",
".",
"write",
"(",
"encoded_results",
")",
"else",
":",
"# Can this happen? Don't know what's the right thing to do in this case.",
"pass",
"else",
":",
"if",
"fetch",
":",
"storage_credentials",
"=",
"conn",
".",
"get",
"(",
"Account",
".",
"credentials_rest_entity_path",
")",
"if",
"storage_credentials",
"[",
"'region_endpoint'",
"]",
"is",
"not",
"None",
":",
"boto_conn",
"=",
"boto",
".",
"connect_s3",
"(",
"aws_access_key_id",
"=",
"storage_credentials",
"[",
"'storage_access_key'",
"]",
",",
"aws_secret_access_key",
"=",
"storage_credentials",
"[",
"'storage_secret_key'",
"]",
",",
"security_token",
"=",
"storage_credentials",
"[",
"'session_token'",
"]",
",",
"host",
"=",
"storage_credentials",
"[",
"'region_endpoint'",
"]",
")",
"else",
":",
"boto_conn",
"=",
"boto",
".",
"connect_s3",
"(",
"aws_access_key_id",
"=",
"storage_credentials",
"[",
"'storage_access_key'",
"]",
",",
"aws_secret_access_key",
"=",
"storage_credentials",
"[",
"'storage_secret_key'",
"]",
",",
"security_token",
"=",
"storage_credentials",
"[",
"'session_token'",
"]",
")",
"log",
".",
"info",
"(",
"\"Starting download from result locations: [%s]\"",
"%",
"\",\"",
".",
"join",
"(",
"r",
"[",
"'result_location'",
"]",
")",
")",
"#fetch latest value of num_result_dir",
"num_result_dir",
"=",
"Command",
".",
"find",
"(",
"self",
".",
"id",
")",
".",
"num_result_dir",
"# If column/header names are not able to fetch then use include header as true",
"if",
"include_header",
".",
"lower",
"(",
")",
"==",
"\"true\"",
"and",
"qlog",
"is",
"not",
"None",
":",
"write_headers",
"(",
"qlog",
",",
"fp",
")",
"for",
"s3_path",
"in",
"r",
"[",
"'result_location'",
"]",
":",
"# In Python 3,",
"# If the delim is None, fp should be in binary mode because",
"# boto expects it to be.",
"# If the delim is not None, then both text and binary modes",
"# work.",
"_download_to_local",
"(",
"boto_conn",
",",
"s3_path",
",",
"fp",
",",
"num_result_dir",
",",
"delim",
"=",
"delim",
")",
"else",
":",
"fp",
".",
"write",
"(",
"\",\"",
".",
"join",
"(",
"r",
"[",
"'result_location'",
"]",
")",
")"
] | Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument.
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3 | [
"Fetches",
"the",
"result",
"for",
"the",
"command",
"represented",
"by",
"this",
"object"
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L257-L333 | train |
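A minimal usage sketch for `Command.get_results` above, assuming a configured QDS account; the API token, command id, and output file name are placeholders:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

cmd = Command.find(12345)                # assumed id of a finished command
with open("results.tsv", "wb") as fp:    # binary mode, since delim is None
    cmd.get_results(fp=fp, inline=True, delim=None, fetch=True)
```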
qubole/qds-sdk-py | qds_sdk/util.py | pluralize | def pluralize(singular):
"""Convert singular word to its plural form.
Args:
singular: A word in its singular form.
Returns:
The word in its plural form.
"""
if singular in UNCOUNTABLES:
return singular
for i in IRREGULAR:
if i[0] == singular:
return i[1]
for i in PLURALIZE_PATTERNS:
if re.search(i[0], singular):
return re.sub(i[0], i[1], singular) | python | def pluralize(singular):
"""Convert singular word to its plural form.
Args:
singular: A word in its singular form.
Returns:
The word in its plural form.
"""
if singular in UNCOUNTABLES:
return singular
for i in IRREGULAR:
if i[0] == singular:
return i[1]
for i in PLURALIZE_PATTERNS:
if re.search(i[0], singular):
return re.sub(i[0], i[1], singular) | [
"def",
"pluralize",
"(",
"singular",
")",
":",
"if",
"singular",
"in",
"UNCOUNTABLES",
":",
"return",
"singular",
"for",
"i",
"in",
"IRREGULAR",
":",
"if",
"i",
"[",
"0",
"]",
"==",
"singular",
":",
"return",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"PLURALIZE_PATTERNS",
":",
"if",
"re",
".",
"search",
"(",
"i",
"[",
"0",
"]",
",",
"singular",
")",
":",
"return",
"re",
".",
"sub",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
",",
"singular",
")"
] | Convert singular word to its plural form.
Args:
singular: A word in its singular form.
Returns:
The word in its plural form. | [
"Convert",
"singular",
"word",
"to",
"its",
"plural",
"form",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L85-L101 | train |
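A quick sketch of `pluralize` in use; the exact outputs for irregular and uncountable words depend on the module's `IRREGULAR` and `UNCOUNTABLES` tables:

```python
from qds_sdk.util import pluralize

print(pluralize("cluster"))  # "clusters", via the regular suffix patterns
print(pluralize("person"))   # "people", assuming it is listed in IRREGULAR
print(pluralize("sheep"))    # returned unchanged if listed in UNCOUNTABLES
```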
qubole/qds-sdk-py | qds_sdk/util.py | singularize | def singularize(plural):
"""Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form.
"""
if plural in UNCOUNTABLES:
return plural
for i in IRREGULAR:
if i[1] == plural:
return i[0]
for i in SINGULARIZE_PATTERNS:
if re.search(i[0], plural):
return re.sub(i[0], i[1], plural)
return plural | python | def singularize(plural):
"""Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form.
"""
if plural in UNCOUNTABLES:
return plural
for i in IRREGULAR:
if i[1] == plural:
return i[0]
for i in SINGULARIZE_PATTERNS:
if re.search(i[0], plural):
return re.sub(i[0], i[1], plural)
return plural | [
"def",
"singularize",
"(",
"plural",
")",
":",
"if",
"plural",
"in",
"UNCOUNTABLES",
":",
"return",
"plural",
"for",
"i",
"in",
"IRREGULAR",
":",
"if",
"i",
"[",
"1",
"]",
"==",
"plural",
":",
"return",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"SINGULARIZE_PATTERNS",
":",
"if",
"re",
".",
"search",
"(",
"i",
"[",
"0",
"]",
",",
"plural",
")",
":",
"return",
"re",
".",
"sub",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
",",
"plural",
")",
"return",
"plural"
] | Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form. | [
"Convert",
"plural",
"word",
"to",
"its",
"singular",
"form",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L104-L120 | train |
qubole/qds-sdk-py | qds_sdk/util.py | camelize | def camelize(word):
"""Convert a word from lower_with_underscores to CamelCase.
Args:
word: The string to convert.
Returns:
The modified string.
"""
return ''.join(w[0].upper() + w[1:]
for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' ')) | python | def camelize(word):
"""Convert a word from lower_with_underscores to CamelCase.
Args:
word: The string to convert.
Returns:
The modified string.
"""
return ''.join(w[0].upper() + w[1:]
for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' ')) | [
"def",
"camelize",
"(",
"word",
")",
":",
"return",
"''",
".",
"join",
"(",
"w",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"w",
"[",
"1",
":",
"]",
"for",
"w",
"in",
"re",
".",
"sub",
"(",
"'[^A-Z^a-z^0-9^:]+'",
",",
"' '",
",",
"word",
")",
".",
"split",
"(",
"' '",
")",
")"
] | Convert a word from lower_with_underscores to CamelCase.
Args:
word: The string to convert.
Returns:
The modified string. | [
"Convert",
"a",
"word",
"from",
"lower_with_underscores",
"to",
"CamelCase",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L123-L132 | train |
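`singularize` and `camelize` are the companion helpers; a short sketch of both:

```python
from qds_sdk.util import singularize, camelize

print(singularize("clusters"))   # "cluster", via SINGULARIZE_PATTERNS
print(camelize("hive_command"))  # "HiveCommand": non-alphanumerics split
                                 # words, and each word is capitalized
```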
qubole/qds-sdk-py | qds_sdk/util.py | _make_minimal | def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict | python | def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict | [
"def",
"_make_minimal",
"(",
"dictionary",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"dictionary",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"new_value",
"=",
"_make_minimal",
"(",
"value",
")",
"if",
"new_value",
":",
"new_dict",
"[",
"key",
"]",
"=",
"new_value",
"else",
":",
"new_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"new_dict"
] | This function removes all the keys whose value is either None or an empty
dictionary. | [
"This",
"function",
"removes",
"all",
"the",
"keys",
"whose",
"value",
"is",
"either",
"None",
"or",
"an",
"empty",
"dictionary",
"."
] | 77210fb64e5a7d567aedeea3b742a1d872fd0e5e | https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L146-L160 | train |
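A worked example of `_make_minimal` pruning a made-up request payload; note that sub-dicts which become empty are dropped too:

```python
payload = {
    "label": "spark",
    "ec2_settings": {"region": None, "placement": {}},  # prunes to {} -> dropped
    "node_count": None,                                  # dropped
}
print(_make_minimal(payload))  # -> {"label": "spark"}
```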
iopipe/iopipe-python | iopipe/contrib/profiler/request.py | upload_profiler_report | def upload_profiler_report(url, filename, config):
"""
Uploads a profiler report to IOpipe
:param url: The signed URL
:param filename: The profiler report file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading profiler report to IOpipe")
with open(filename, "rb") as data:
response = requests.put(url, data=data, timeout=config["network_timeout"])
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading profiler report: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
logger.debug("Profiler report uploaded successfully")
finally:
if os.path.isfile(filename):
os.remove(filename) | python | def upload_profiler_report(url, filename, config):
"""
Uploads a profiler report to IOpipe
:param url: The signed URL
:param filename: The profiler report file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading profiler report to IOpipe")
with open(filename, "rb") as data:
response = requests.put(url, data=data, timeout=config["network_timeout"])
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading profiler report: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
logger.debug("Profiler report uploaded successfully")
finally:
if os.path.isfile(filename):
os.remove(filename) | [
"def",
"upload_profiler_report",
"(",
"url",
",",
"filename",
",",
"config",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Uploading profiler report to IOpipe\"",
")",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"data",
":",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error while uploading profiler report: %s\"",
",",
"e",
")",
"if",
"hasattr",
"(",
"e",
",",
"\"response\"",
")",
":",
"logger",
".",
"debug",
"(",
"e",
".",
"response",
".",
"content",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Profiler report uploaded successfully\"",
")",
"finally",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"os",
".",
"remove",
"(",
"filename",
")"
] | Uploads a profiler report to IOpipe
:param url: The signed URL
:param filename: The profiler report file
:param config: The IOpipe config | [
"Uploads",
"a",
"profiler",
"report",
"to",
"IOpipe"
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/profiler/request.py#L12-L33 | train |
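A hedged sketch of calling `upload_profiler_report` directly; the signed URL and report path are placeholders, and only the config key the function actually reads is supplied:

```python
config = {"network_timeout": 5}  # seconds; the only key this function reads

upload_profiler_report(
    "https://example-bucket.s3.amazonaws.com/signed-put-url",  # placeholder
    "/tmp/profile.cprofile",                                   # placeholder
    config,
)
# The local report file is removed afterwards whether or not the PUT succeeded.
```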
iopipe/iopipe-python | iopipe/mock_system.py | read_pid_stat | def read_pid_stat(pid):
"""
Mocks read_pid_stat as this is a Linux-specific operation.
"""
return {
"utime": random.randint(0, 999999999),
"stime": random.randint(0, 999999999),
"cutime": random.randint(0, 999999999),
"cstime": random.randint(0, 999999999),
} | python | def read_pid_stat(pid):
"""
Mocks read_pid_stat as this is a Linux-specific operation.
"""
return {
"utime": random.randint(0, 999999999),
"stime": random.randint(0, 999999999),
"cutime": random.randint(0, 999999999),
"cstime": random.randint(0, 999999999),
} | [
"def",
"read_pid_stat",
"(",
"pid",
")",
":",
"return",
"{",
"\"utime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"stime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"cutime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"cstime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"}"
] | Mocks read_pid_stat as this is a Linux-specific operation. | [
"Mocks",
"read_pid_stat",
"as",
"this",
"is",
"a",
"Linux",
"-",
"specific",
"operation",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/mock_system.py#L40-L49 | train |
iopipe/iopipe-python | iopipe/mock_system.py | read_stat | def read_stat():
"""
Mocks read_stat as this is a Linux-specific operation.
"""
return [
{
"times": {
"user": random.randint(0, 999999999),
"nice": random.randint(0, 999999999),
"sys": random.randint(0, 999999999),
"idle": random.randint(0, 999999999),
"irq": random.randint(0, 999999999),
}
}
] | python | def read_stat():
"""
Mocks read_stat as this is a Linux-specific operation.
"""
return [
{
"times": {
"user": random.randint(0, 999999999),
"nice": random.randint(0, 999999999),
"sys": random.randint(0, 999999999),
"idle": random.randint(0, 999999999),
"irq": random.randint(0, 999999999),
}
}
] | [
"def",
"read_stat",
"(",
")",
":",
"return",
"[",
"{",
"\"times\"",
":",
"{",
"\"user\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"nice\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"sys\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"idle\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"irq\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"}",
"}",
"]"
] | Mocks read_stat as this is a Linux-specific operation. | [
"Mocks",
"read_stat",
"as",
"this",
"is",
"a",
"Linux",
"-",
"specific",
"operation",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/mock_system.py#L63-L77 | train |
iopipe/iopipe-python | iopipe/agent.py | IOpipeCore.load_plugins | def load_plugins(self, plugins):
"""
Loads plugins that match the `Plugin` interface and are instantiated.
:param plugins: A list of plugin instances.
"""
def instantiate(plugin):
return plugin() if inspect.isclass(plugin) else plugin
loaded_plugins = []
plugins_seen = []
# Iterate over plugins in reverse to permit users to override default
# plugin config
for plugin in reversed(plugins):
if not is_plugin(plugin) or plugin.name in plugins_seen:
continue
# Build the plugins list in reverse to restore original order
loaded_plugins.insert(0, instantiate(plugin))
plugins_seen.append(plugin.name)
return loaded_plugins | python | def load_plugins(self, plugins):
"""
Loads plugins that match the `Plugin` interface and are instantiated.
:param plugins: A list of plugin instances.
"""
def instantiate(plugin):
return plugin() if inspect.isclass(plugin) else plugin
loaded_plugins = []
plugins_seen = []
# Iterate over plugins in reverse to permit users to override default
# plugin config
for plugin in reversed(plugins):
if not is_plugin(plugin) or plugin.name in plugins_seen:
continue
# Build the plugins list in reverse to restore original order
loaded_plugins.insert(0, instantiate(plugin))
plugins_seen.append(plugin.name)
return loaded_plugins | [
"def",
"load_plugins",
"(",
"self",
",",
"plugins",
")",
":",
"def",
"instantiate",
"(",
"plugin",
")",
":",
"return",
"plugin",
"(",
")",
"if",
"inspect",
".",
"isclass",
"(",
"plugin",
")",
"else",
"plugin",
"loaded_plugins",
"=",
"[",
"]",
"plugins_seen",
"=",
"[",
"]",
"# Iterate over plugins in reverse to permit users to override default",
"# plugin config",
"for",
"plugin",
"in",
"reversed",
"(",
"plugins",
")",
":",
"if",
"not",
"is_plugin",
"(",
"plugin",
")",
"or",
"plugin",
".",
"name",
"in",
"plugins_seen",
":",
"continue",
"# Build the plugins list in reverse to restore original order",
"loaded_plugins",
".",
"insert",
"(",
"0",
",",
"instantiate",
"(",
"plugin",
")",
")",
"plugins_seen",
".",
"append",
"(",
"plugin",
".",
"name",
")",
"return",
"loaded_plugins"
] | Loads plugins that match the `Plugin` interface and are instantiated.
:param plugins: A list of plugin instances. | [
"Loads",
"plugins",
"that",
"match",
"the",
"Plugin",
"interface",
"and",
"are",
"instantiated",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L198-L220 | train |
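A sketch of `load_plugins`'s override behavior, assuming the `TracePlugin` and `LoggerPlugin` shipped in this repo's contrib packages and an `agent` instance:

```python
from iopipe.contrib.trace import TracePlugin    # assumed import paths
from iopipe.contrib.logger import LoggerPlugin

defaults = [TracePlugin(), LoggerPlugin()]
overrides = [TracePlugin(auto_http=False)]      # same plugin name as a default

plugins = agent.load_plugins(defaults + overrides)
# -> [LoggerPlugin, TracePlugin(auto_http=False)]: one plugin per name
#    survives, the later entry wins, and each keeps its last position.
```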
iopipe/iopipe-python | iopipe/agent.py | IOpipeCore.run_hooks | def run_hooks(self, name, event=None, context=None):
"""
Runs plugin hooks for each registered plugin.
"""
hooks = {
"pre:setup": lambda p: p.pre_setup(self),
"post:setup": lambda p: p.post_setup(self),
"pre:invoke": lambda p: p.pre_invoke(event, context),
"post:invoke": lambda p: p.post_invoke(event, context),
"pre:report": lambda p: p.pre_report(self.report),
"post:report": lambda p: p.post_report(self.report),
}
if name in hooks:
for p in self.plugins:
if p.enabled:
try:
hooks[name](p)
except Exception as e:
logger.error(
"IOpipe plugin %s hook raised error" % (name, str(e))
)
logger.exception(e) | python | def run_hooks(self, name, event=None, context=None):
"""
Runs plugin hooks for each registered plugin.
"""
hooks = {
"pre:setup": lambda p: p.pre_setup(self),
"post:setup": lambda p: p.post_setup(self),
"pre:invoke": lambda p: p.pre_invoke(event, context),
"post:invoke": lambda p: p.post_invoke(event, context),
"pre:report": lambda p: p.pre_report(self.report),
"post:report": lambda p: p.post_report(self.report),
}
if name in hooks:
for p in self.plugins:
if p.enabled:
try:
hooks[name](p)
except Exception as e:
logger.error(
"IOpipe plugin %s hook raised error" % (name, str(e))
)
logger.exception(e) | [
"def",
"run_hooks",
"(",
"self",
",",
"name",
",",
"event",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"hooks",
"=",
"{",
"\"pre:setup\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_setup",
"(",
"self",
")",
",",
"\"post:setup\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_setup",
"(",
"self",
")",
",",
"\"pre:invoke\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_invoke",
"(",
"event",
",",
"context",
")",
",",
"\"post:invoke\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_invoke",
"(",
"event",
",",
"context",
")",
",",
"\"pre:report\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_report",
"(",
"self",
".",
"report",
")",
",",
"\"post:report\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_report",
"(",
"self",
".",
"report",
")",
",",
"}",
"if",
"name",
"in",
"hooks",
":",
"for",
"p",
"in",
"self",
".",
"plugins",
":",
"if",
"p",
".",
"enabled",
":",
"try",
":",
"hooks",
"[",
"name",
"]",
"(",
"p",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"IOpipe plugin %s hook raised error\"",
"%",
"(",
"name",
",",
"str",
"(",
"e",
")",
")",
")",
"logger",
".",
"exception",
"(",
"e",
")"
] | Runs plugin hooks for each registered plugin. | [
"Runs",
"plugin",
"hooks",
"for",
"each",
"registered",
"plugin",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L222-L244 | train |
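A minimal sketch of how the agent dispatches hooks; `agent`, `event`, and `context` are assumed names:

```python
# Fire the pre-invoke hooks on every enabled plugin; a plugin that raises
# is logged and skipped rather than breaking the invocation.
agent.run_hooks("pre:invoke", event={"Records": []}, context=context)
agent.run_hooks("post:report")  # the report hooks read agent.report instead
```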
iopipe/iopipe-python | iopipe/agent.py | IOpipeCore.wait_for_futures | def wait_for_futures(self):
"""
Wait for all futures to complete. This should be done at the end of
an invocation.
"""
[future for future in futures.as_completed(self.futures)]
self.futures = [] | python | def wait_for_futures(self):
"""
Wait for all futures to complete. This should be done at the end of
an invocation.
"""
[future for future in futures.as_completed(self.futures)]
self.futures = [] | [
"def",
"wait_for_futures",
"(",
"self",
")",
":",
"[",
"future",
"for",
"future",
"in",
"futures",
".",
"as_completed",
"(",
"self",
".",
"futures",
")",
"]",
"self",
".",
"futures",
"=",
"[",
"]"
] | Wait for all futures to complete. This should be done at the end of
an invocation. | [
"Wait",
"for",
"all",
"futures",
"to",
"complete",
".",
"This",
"should",
"be",
"done",
"at",
"the",
"end",
"of",
"an",
"an",
"invocation",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L260-L266 | train |
iopipe/iopipe-python | iopipe/agent.py | IOpipeCore.validate_context | def validate_context(self, context):
"""
Checks to see if we're working with a valid lambda context object.
:returns: True if valid, False if not
:rtype: bool
"""
return all(
[
hasattr(context, attr)
for attr in [
"aws_request_id",
"function_name",
"function_version",
"get_remaining_time_in_millis",
"invoked_function_arn",
"log_group_name",
"log_stream_name",
"memory_limit_in_mb",
]
]
) and callable(context.get_remaining_time_in_millis) | python | def validate_context(self, context):
"""
Checks to see if we're working with a valid lambda context object.
:returns: True if valid, False if not
:rtype: bool
"""
return all(
[
hasattr(context, attr)
for attr in [
"aws_request_id",
"function_name",
"function_version",
"get_remaining_time_in_millis",
"invoked_function_arn",
"log_group_name",
"log_stream_name",
"memory_limit_in_mb",
]
]
) and callable(context.get_remaining_time_in_millis) | [
"def",
"validate_context",
"(",
"self",
",",
"context",
")",
":",
"return",
"all",
"(",
"[",
"hasattr",
"(",
"context",
",",
"attr",
")",
"for",
"attr",
"in",
"[",
"\"aws_request_id\"",
",",
"\"function_name\"",
",",
"\"function_version\"",
",",
"\"get_remaining_time_in_millis\"",
",",
"\"invoked_function_arn\"",
",",
"\"log_group_name\"",
",",
"\"log_stream_name\"",
",",
"\"memory_limit_in_mb\"",
",",
"]",
"]",
")",
"and",
"callable",
"(",
"context",
".",
"get_remaining_time_in_millis",
")"
] | Checks to see if we're working with a valid lambda context object.
:returns: True if valid, False if not
:rtype: bool | [
"Checks",
"to",
"see",
"if",
"we",
"re",
"working",
"with",
"a",
"valid",
"lambda",
"context",
"object",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L268-L289 | train |
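A sketch with a stand-in context object showing exactly what `validate_context` requires:

```python
class FakeContext(object):
    """Stand-in for the context object AWS Lambda passes to a handler."""
    aws_request_id = "req-123"
    function_name = "my-function"
    function_version = "$LATEST"
    invoked_function_arn = "arn:aws:lambda:us-east-1:0:function:my-function"
    log_group_name = "/aws/lambda/my-function"
    log_stream_name = "2019/01/01/[$LATEST]abc"
    memory_limit_in_mb = 128

    def get_remaining_time_in_millis(self):
        return 30000

agent.validate_context(FakeContext())  # -> True; removing any attribute -> False
```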
iopipe/iopipe-python | iopipe/contrib/trace/auto_http.py | patch_session_send | def patch_session_send(context, http_filter):
"""
Monkey patches requests' Session class, if available. Overloads the
send method to add tracing and metrics collection.
"""
if Session is None:
return
def send(self, *args, **kwargs):
id = ensure_utf8(str(uuid.uuid4()))
with context.iopipe.mark(id):
response = original_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
Session.send = send | python | def patch_session_send(context, http_filter):
"""
Monkey patches requests' Session class, if available. Overloads the
send method to add tracing and metrics collection.
"""
if Session is None:
return
def send(self, *args, **kwargs):
id = ensure_utf8(str(uuid.uuid4()))
with context.iopipe.mark(id):
response = original_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
Session.send = send | [
"def",
"patch_session_send",
"(",
"context",
",",
"http_filter",
")",
":",
"if",
"Session",
"is",
"None",
":",
"return",
"def",
"send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"ensure_utf8",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"with",
"context",
".",
"iopipe",
".",
"mark",
"(",
"id",
")",
":",
"response",
"=",
"original_session_send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"trace",
"=",
"context",
".",
"iopipe",
".",
"mark",
".",
"measure",
"(",
"id",
")",
"context",
".",
"iopipe",
".",
"mark",
".",
"delete",
"(",
"id",
")",
"collect_metrics_for_response",
"(",
"response",
",",
"context",
",",
"trace",
",",
"http_filter",
")",
"return",
"response",
"Session",
".",
"send",
"=",
"send"
] | Monkey patches requests' Session class, if available. Overloads the
send method to add tracing and metrics collection. | [
"Monkey",
"patches",
"requests",
"Session",
"class",
"if",
"available",
".",
"Overloads",
"the",
"send",
"method",
"to",
"add",
"tracing",
"and",
"metrics",
"collection",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/trace/auto_http.py#L63-L80 | train |
iopipe/iopipe-python | iopipe/contrib/trace/auto_http.py | patch_botocore_session_send | def patch_botocore_session_send(context, http_filter):
"""
Monkey patches botocore's vendored requests, if available. Overloads the
Session class' send method to add tracing and metric collection.
"""
if BotocoreSession is None:
return
def send(self, *args, **kwargs):
id = str(uuid.uuid4())
with context.iopipe.mark(id):
response = original_botocore_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
BotocoreSession.send = send | python | def patch_botocore_session_send(context, http_filter):
"""
Monkey patches botocore's vendored requests, if available. Overloads the
Session class' send method to add tracing and metric collection.
"""
if BotocoreSession is None:
return
def send(self, *args, **kwargs):
id = str(uuid.uuid4())
with context.iopipe.mark(id):
response = original_botocore_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
BotocoreSession.send = send | [
"def",
"patch_botocore_session_send",
"(",
"context",
",",
"http_filter",
")",
":",
"if",
"BotocoreSession",
"is",
"None",
":",
"return",
"def",
"send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"with",
"context",
".",
"iopipe",
".",
"mark",
"(",
"id",
")",
":",
"response",
"=",
"original_botocore_session_send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"trace",
"=",
"context",
".",
"iopipe",
".",
"mark",
".",
"measure",
"(",
"id",
")",
"context",
".",
"iopipe",
".",
"mark",
".",
"delete",
"(",
"id",
")",
"collect_metrics_for_response",
"(",
"response",
",",
"context",
",",
"trace",
",",
"http_filter",
")",
"return",
"response",
"BotocoreSession",
".",
"send",
"=",
"send"
] | Monkey patches botocore's vendored requests, if available. Overloads the
Session class' send method to add tracing and metric collection. | [
"Monkey",
"patches",
"botocore",
"s",
"vendored",
"requests",
"if",
"available",
".",
"Overloads",
"the",
"Session",
"class",
"send",
"method",
"to",
"add",
"tracing",
"and",
"metric",
"collection",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/trace/auto_http.py#L83-L100 | train |
iopipe/iopipe-python | iopipe/contrib/trace/auto_http.py | collect_metrics_for_response | def collect_metrics_for_response(http_response, context, trace, http_filter):
"""
Collects relevant metrics from a requests Response object and adds them to
the IOpipe context.
"""
http_response = copy.deepcopy(http_response)
if http_filter is not None and callable(http_filter):
http_response = http_filter(http_response)
if http_response is False:
return
request = None
if hasattr(http_response, "request"):
parsed_url = None
if hasattr(http_response.request, "url"):
parsed_url = urlparse(http_response.request.url)
request_headers = []
if hasattr(http_response.request, "headers"):
request_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.request.headers.items()
if k.lower() in INCLUDE_HEADERS
]
request = Request(
hash=ensure_utf8(getattr(parsed_url, "fragment", None)),
headers=request_headers,
hostname=ensure_utf8(getattr(parsed_url, "hostname", None)),
method=ensure_utf8(getattr(http_response.request, "method", None)),
path=ensure_utf8(getattr(parsed_url, "path", None)),
# TODO: Determine if this is redundant
pathname=ensure_utf8(getattr(parsed_url, "path", None)),
port=ensure_utf8(getattr(parsed_url, "port", None)),
protocol=ensure_utf8(getattr(parsed_url, "scheme", None)),
query=ensure_utf8(getattr(parsed_url, "query", None)),
url=ensure_utf8(getattr(http_response.request, "url", None)),
)
response_headers = []
if hasattr(http_response, "headers"):
response_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.headers.items()
if k.lower() in INCLUDE_HEADERS
]
response = Response(
headers=response_headers,
statusCode=ensure_utf8(getattr(http_response, "status_code", None)),
statusMessage=None,
)
context.iopipe.mark.http_trace(trace, request, response) | python | def collect_metrics_for_response(http_response, context, trace, http_filter):
"""
Collects relevant metrics from a requests Response object and adds them to
the IOpipe context.
"""
http_response = copy.deepcopy(http_response)
if http_filter is not None and callable(http_filter):
http_response = http_filter(http_response)
if http_response is False:
return
request = None
if hasattr(http_response, "request"):
parsed_url = None
if hasattr(http_response.request, "url"):
parsed_url = urlparse(http_response.request.url)
request_headers = []
if hasattr(http_response.request, "headers"):
request_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.request.headers.items()
if k.lower() in INCLUDE_HEADERS
]
request = Request(
hash=ensure_utf8(getattr(parsed_url, "fragment", None)),
headers=request_headers,
hostname=ensure_utf8(getattr(parsed_url, "hostname", None)),
method=ensure_utf8(getattr(http_response.request, "method", None)),
path=ensure_utf8(getattr(parsed_url, "path", None)),
# TODO: Determine if this is redundant
pathname=ensure_utf8(getattr(parsed_url, "path", None)),
port=ensure_utf8(getattr(parsed_url, "port", None)),
protocol=ensure_utf8(getattr(parsed_url, "scheme", None)),
query=ensure_utf8(getattr(parsed_url, "query", None)),
url=ensure_utf8(getattr(http_response.request, "url", None)),
)
response_headers = []
if hasattr(http_response, "headers"):
response_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.headers.items()
if k.lower() in INCLUDE_HEADERS
]
response = Response(
headers=response_headers,
statusCode=ensure_utf8(getattr(http_response, "status_code", None)),
statusMessage=None,
)
context.iopipe.mark.http_trace(trace, request, response) | [
"def",
"collect_metrics_for_response",
"(",
"http_response",
",",
"context",
",",
"trace",
",",
"http_filter",
")",
":",
"http_response",
"=",
"copy",
".",
"deepcopy",
"(",
"http_response",
")",
"if",
"http_filter",
"is",
"not",
"None",
"and",
"callable",
"(",
"http_filter",
")",
":",
"http_response",
"=",
"http_filter",
"(",
"http_response",
")",
"if",
"http_response",
"is",
"False",
":",
"return",
"request",
"=",
"None",
"if",
"hasattr",
"(",
"http_response",
",",
"\"request\"",
")",
":",
"parsed_url",
"=",
"None",
"if",
"hasattr",
"(",
"http_response",
".",
"request",
",",
"\"url\"",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"http_response",
".",
"request",
".",
"url",
")",
"request_headers",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"http_response",
".",
"request",
",",
"\"headers\"",
")",
":",
"request_headers",
"=",
"[",
"{",
"\"key\"",
":",
"ensure_utf8",
"(",
"k",
")",
",",
"\"string\"",
":",
"ensure_utf8",
"(",
"v",
")",
"}",
"for",
"k",
",",
"v",
"in",
"http_response",
".",
"request",
".",
"headers",
".",
"items",
"(",
")",
"if",
"k",
".",
"lower",
"(",
")",
"in",
"INCLUDE_HEADERS",
"]",
"request",
"=",
"Request",
"(",
"hash",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"fragment\"",
",",
"None",
")",
")",
",",
"headers",
"=",
"request_headers",
",",
"hostname",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"hostname\"",
",",
"None",
")",
")",
",",
"method",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"http_response",
".",
"request",
",",
"\"method\"",
",",
"None",
")",
")",
",",
"path",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"path\"",
",",
"None",
")",
")",
",",
"# TODO: Determine if this is redundant",
"pathname",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"path\"",
",",
"None",
")",
")",
",",
"port",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"port\"",
",",
"None",
")",
")",
",",
"protocol",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"scheme\"",
",",
"None",
")",
")",
",",
"query",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"query\"",
",",
"None",
")",
")",
",",
"url",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"http_response",
".",
"request",
",",
"\"url\"",
",",
"None",
")",
")",
",",
")",
"response_headers",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"http_response",
",",
"\"headers\"",
")",
":",
"response_headers",
"=",
"[",
"{",
"\"key\"",
":",
"ensure_utf8",
"(",
"k",
")",
",",
"\"string\"",
":",
"ensure_utf8",
"(",
"v",
")",
"}",
"for",
"k",
",",
"v",
"in",
"http_response",
".",
"headers",
".",
"items",
"(",
")",
"if",
"k",
".",
"lower",
"(",
")",
"in",
"INCLUDE_HEADERS",
"]",
"response",
"=",
"Response",
"(",
"headers",
"=",
"response_headers",
",",
"statusCode",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"http_response",
",",
"\"status_code\"",
",",
"None",
")",
")",
",",
"statusMessage",
"=",
"None",
",",
")",
"context",
".",
"iopipe",
".",
"mark",
".",
"http_trace",
"(",
"trace",
",",
"request",
",",
"response",
")"
] | Collects relevant metrics from a requests Response object and adds them to
the IOpipe context. | [
"Collects",
"relevant",
"metrics",
"from",
"a",
"requests",
"Response",
"object",
"and",
"adds",
"them",
"to",
"the",
"IOpipe",
"context",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/trace/auto_http.py#L125-L178 | train |
iopipe/iopipe-python | iopipe/plugins.py | get_plugin_meta | def get_plugin_meta(plugins):
"""
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
"""
return [
{
"name": p.name,
"version": p.version,
"homepage": p.homepage,
"enabled": p.enabled,
}
for p in plugins
if is_plugin(p)
] | python | def get_plugin_meta(plugins):
"""
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
"""
return [
{
"name": p.name,
"version": p.version,
"homepage": p.homepage,
"enabled": p.enabled,
}
for p in plugins
if is_plugin(p)
] | [
"def",
"get_plugin_meta",
"(",
"plugins",
")",
":",
"return",
"[",
"{",
"\"name\"",
":",
"p",
".",
"name",
",",
"\"version\"",
":",
"p",
".",
"version",
",",
"\"homepage\"",
":",
"p",
".",
"homepage",
",",
"\"enabled\"",
":",
"p",
".",
"enabled",
",",
"}",
"for",
"p",
"in",
"plugins",
"if",
"is_plugin",
"(",
"p",
")",
"]"
] | Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list | [
"Returns",
"meta",
"data",
"about",
"plugins",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/plugins.py#L4-L22 | train |
iopipe/iopipe-python | iopipe/plugins.py | is_plugin | def is_plugin(plugin):
"""
Returns true if the plugin implements the `Plugin` interface.
:param plugin: The plugin to check.
:returns: True if plugin, False otherwise.
:rtype: bool
"""
try:
return isinstance(plugin, Plugin) or issubclass(plugin, Plugin)
except TypeError:
return False | python | def is_plugin(plugin):
"""
Returns true if the plugin implements the `Plugin` interface.
:param plugin: The plugin to check.
:returns: True if plugin, False otherwise.
:rtype: bool
"""
try:
return isinstance(plugin, Plugin) or issubclass(plugin, Plugin)
except TypeError:
return False | [
"def",
"is_plugin",
"(",
"plugin",
")",
":",
"try",
":",
"return",
"isinstance",
"(",
"plugin",
",",
"Plugin",
")",
"or",
"issubclass",
"(",
"plugin",
",",
"Plugin",
")",
"except",
"TypeError",
":",
"return",
"False"
] | Returns true if the plugin implements the `Plugin` interface.
:param plugin: The plugin to check.
:returns: True if plugin, False otherwise.
:rtype: bool | [
"Returns",
"true",
"if",
"the",
"plugin",
"implements",
"the",
"Plugin",
"interface",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/plugins.py#L25-L36 | train |
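A quick sketch of the duck-typing check; `Plugin` is the base class from this module:

```python
class NotAPlugin(object):
    pass

is_plugin(NotAPlugin)    # False: neither an instance nor a subclass of Plugin
is_plugin(NotAPlugin())  # False; the TypeError from issubclass() is swallowed
is_plugin(Plugin)        # True for the base class or any conforming subclass
```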
iopipe/iopipe-python | iopipe/plugins.py | with_metaclass | def with_metaclass(meta, *bases):
"""Python 2 and 3 compatible way to do meta classes"""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {}) | python | def with_metaclass(meta, *bases):
"""Python 2 and 3 compatible way to do meta classes"""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {}) | [
"def",
"with_metaclass",
"(",
"meta",
",",
"*",
"bases",
")",
":",
"class",
"metaclass",
"(",
"meta",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"name",
",",
"this_bases",
",",
"d",
")",
":",
"return",
"meta",
"(",
"name",
",",
"bases",
",",
"d",
")",
"return",
"type",
".",
"__new__",
"(",
"metaclass",
",",
"\"temporary_class\"",
",",
"(",
")",
",",
"{",
"}",
")"
] | Python 2 and 3 compatible way to do meta classes | [
"Python",
"2",
"and",
"3",
"compatible",
"way",
"to",
"do",
"meta",
"classes"
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/plugins.py#L39-L46 | train |
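A minimal sketch of the shim in use; `Meta` and `Base` are made up for illustration:

```python
class Meta(type):
    def __new__(mcls, name, bases, namespace):
        namespace.setdefault("registered", True)  # stamp every class we build
        return super(Meta, mcls).__new__(mcls, name, bases, namespace)

class Base(with_metaclass(Meta, object)):
    pass

print(Base.registered)     # True on both Python 2 and Python 3
print(type(Base) is Meta)  # True: the temporary class is replaced by Meta
```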
iopipe/iopipe-python | iopipe/report.py | Report.extract_context_data | def extract_context_data(self):
"""
Returns the contents of an AWS Lambda context.
:returns: A dict of relevant context data.
:rtype: dict
"""
data = {}
for k, v in {
# camel case names in the report to align with AWS standards
"functionName": "function_name",
"functionVersion": "function_version",
"memoryLimitInMB": "memory_limit_in_mb",
"invokedFunctionArn": "invoked_function_arn",
"awsRequestId": "aws_request_id",
"logGroupName": "log_group_name",
"logStreamName": "log_stream_name",
}.items():
if hasattr(self.context, v):
data[k] = getattr(self.context, v)
if (
hasattr(self.context, "invoked_function_arn")
and "AWS_SAM_LOCAL" in os.environ
):
data["invokedFunctionArn"] = (
"arn:aws:lambda:local:0:function:%s"
% data.get("functionName", "unknown")
)
if hasattr(self.context, "get_remaining_time_in_millis") and callable(
self.context.get_remaining_time_in_millis
):
data[
"getRemainingTimeInMillis"
] = self.context.get_remaining_time_in_millis()
data["traceId"] = os.getenv("_X_AMZN_TRACE_ID", "")
return data | python | def extract_context_data(self):
"""
Returns the contents of an AWS Lambda context.
:returns: A dict of relevant context data.
:rtype: dict
"""
data = {}
for k, v in {
# camel case names in the report to align with AWS standards
"functionName": "function_name",
"functionVersion": "function_version",
"memoryLimitInMB": "memory_limit_in_mb",
"invokedFunctionArn": "invoked_function_arn",
"awsRequestId": "aws_request_id",
"logGroupName": "log_group_name",
"logStreamName": "log_stream_name",
}.items():
if hasattr(self.context, v):
data[k] = getattr(self.context, v)
if (
hasattr(self.context, "invoked_function_arn")
and "AWS_SAM_LOCAL" in os.environ
):
data["invokedFunctionArn"] = (
"arn:aws:lambda:local:0:function:%s"
% data.get("functionName", "unknown")
)
if hasattr(self.context, "get_remaining_time_in_millis") and callable(
self.context.get_remaining_time_in_millis
):
data[
"getRemainingTimeInMillis"
] = self.context.get_remaining_time_in_millis()
data["traceId"] = os.getenv("_X_AMZN_TRACE_ID", "")
return data | [
"def",
"extract_context_data",
"(",
"self",
")",
":",
"data",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"{",
"# camel case names in the report to align with AWS standards",
"\"functionName\"",
":",
"\"function_name\"",
",",
"\"functionVersion\"",
":",
"\"function_version\"",
",",
"\"memoryLimitInMB\"",
":",
"\"memory_limit_in_mb\"",
",",
"\"invokedFunctionArn\"",
":",
"\"invoked_function_arn\"",
",",
"\"awsRequestId\"",
":",
"\"aws_request_id\"",
",",
"\"logGroupName\"",
":",
"\"log_group_name\"",
",",
"\"logStreamName\"",
":",
"\"log_stream_name\"",
",",
"}",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"context",
",",
"v",
")",
":",
"data",
"[",
"k",
"]",
"=",
"getattr",
"(",
"self",
".",
"context",
",",
"v",
")",
"if",
"(",
"hasattr",
"(",
"self",
".",
"context",
",",
"\"invoked_function_arn\"",
")",
"and",
"\"AWS_SAM_LOCAL\"",
"in",
"os",
".",
"environ",
")",
":",
"data",
"[",
"\"invokedFunctionArn\"",
"]",
"=",
"(",
"\"arn:aws:lambda:local:0:function:%s\"",
"%",
"data",
".",
"get",
"(",
"\"functionName\"",
",",
"\"unknown\"",
")",
")",
"if",
"hasattr",
"(",
"self",
".",
"context",
",",
"\"get_remaining_time_in_millis\"",
")",
"and",
"callable",
"(",
"self",
".",
"context",
".",
"get_remaining_time_in_millis",
")",
":",
"data",
"[",
"\"getRemainingTimeInMillis\"",
"]",
"=",
"self",
".",
"context",
".",
"get_remaining_time_in_millis",
"(",
")",
"data",
"[",
"\"traceId\"",
"]",
"=",
"os",
".",
"getenv",
"(",
"\"_X_AMZN_TRACE_ID\"",
",",
"\"\"",
")",
"return",
"data"
] | Returns the contents of a AWS Lambda context.
:returns: A dict of relevant context data.
:rtype: dict | [
"Returns",
"the",
"contents",
"of",
"a",
"AWS",
"Lambda",
"context",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L82-L117 | train |
iopipe/iopipe-python | iopipe/report.py | Report.retain_error | def retain_error(self, error, frame=None):
"""
Adds details of an error to the report.
:param error: The error exception to add to the report.
"""
if frame is None:
stack = traceback.format_exc()
self.labels.add("@iopipe/error")
else:
stack = "\n".join(traceback.format_stack(frame))
self.labels.add("@iopipe/timeout")
details = {
"name": type(error).__name__,
"message": "{}".format(error),
"stack": stack,
}
self.report["errors"] = details | python | def retain_error(self, error, frame=None):
"""
Adds details of an error to the report.
:param error: The error exception to add to the report.
"""
if frame is None:
stack = traceback.format_exc()
self.labels.add("@iopipe/error")
else:
stack = "\n".join(traceback.format_stack(frame))
self.labels.add("@iopipe/timeout")
details = {
"name": type(error).__name__,
"message": "{}".format(error),
"stack": stack,
}
self.report["errors"] = details | [
"def",
"retain_error",
"(",
"self",
",",
"error",
",",
"frame",
"=",
"None",
")",
":",
"if",
"frame",
"is",
"None",
":",
"stack",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"labels",
".",
"add",
"(",
"\"@iopipe/error\"",
")",
"else",
":",
"stack",
"=",
"\"\\n\"",
".",
"join",
"(",
"traceback",
".",
"format_stack",
"(",
"frame",
")",
")",
"self",
".",
"labels",
".",
"add",
"(",
"\"@iopipe/timeout\"",
")",
"details",
"=",
"{",
"\"name\"",
":",
"type",
"(",
"error",
")",
".",
"__name__",
",",
"\"message\"",
":",
"\"{}\"",
".",
"format",
"(",
"error",
")",
",",
"\"stack\"",
":",
"stack",
",",
"}",
"self",
".",
"report",
"[",
"\"errors\"",
"]",
"=",
"details"
] | Adds details of an error to the report.
:param error: The error exception to add to the report. | [
"Adds",
"details",
"of",
"an",
"error",
"to",
"the",
"report",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L119-L136 | train |
iopipe/iopipe-python | iopipe/report.py | Report.prepare | def prepare(self, error=None, frame=None):
"""
Prepare the report to be sent to IOpipe.
:param error: An optional error to add to report.
:param frame: A stack frame to add to report in the event of a timeout.
"""
if error:
self.retain_error(error, frame)
self.report["environment"]["host"]["boot_id"] = system.read_bootid()
# convert labels to list for sending
self.report["labels"] = list(self.labels)
meminfo = system.read_meminfo()
self.report.update(
{
"aws": self.extract_context_data(),
"timestampEnd": int(time.time() * 1000),
}
)
self.report["environment"]["os"].update(
{
"cpus": system.read_stat(),
"freemem": meminfo["MemFree"],
"hostname": system.read_hostname(),
"totalmem": meminfo["MemTotal"],
"usedmem": meminfo["MemTotal"] - meminfo["MemFree"],
}
)
self.report["environment"]["os"]["linux"]["pid"] = {
"self": {
"stat": system.read_pid_stat("self"),
"stat_start": self.stat_start,
"status": system.read_pid_status("self"),
}
}
self.report["disk"] = system.read_disk()
self.report["duration"] = int((monotonic() - self.start_time) * 1e9) | python | def prepare(self, error=None, frame=None):
"""
Prepare the report to be sent to IOpipe.
:param error: An optional error to add to report.
:param frame: A stack frame to add to report in the event of a timeout.
"""
if error:
self.retain_error(error, frame)
self.report["environment"]["host"]["boot_id"] = system.read_bootid()
# convert labels to list for sending
self.report["labels"] = list(self.labels)
meminfo = system.read_meminfo()
self.report.update(
{
"aws": self.extract_context_data(),
"timestampEnd": int(time.time() * 1000),
}
)
self.report["environment"]["os"].update(
{
"cpus": system.read_stat(),
"freemem": meminfo["MemFree"],
"hostname": system.read_hostname(),
"totalmem": meminfo["MemTotal"],
"usedmem": meminfo["MemTotal"] - meminfo["MemFree"],
}
)
self.report["environment"]["os"]["linux"]["pid"] = {
"self": {
"stat": system.read_pid_stat("self"),
"stat_start": self.stat_start,
"status": system.read_pid_status("self"),
}
}
self.report["disk"] = system.read_disk()
self.report["duration"] = int((monotonic() - self.start_time) * 1e9) | [
"def",
"prepare",
"(",
"self",
",",
"error",
"=",
"None",
",",
"frame",
"=",
"None",
")",
":",
"if",
"error",
":",
"self",
".",
"retain_error",
"(",
"error",
",",
"frame",
")",
"self",
".",
"report",
"[",
"\"environment\"",
"]",
"[",
"\"host\"",
"]",
"[",
"\"boot_id\"",
"]",
"=",
"system",
".",
"read_bootid",
"(",
")",
"# convert labels to list for sending",
"self",
".",
"report",
"[",
"\"labels\"",
"]",
"=",
"list",
"(",
"self",
".",
"labels",
")",
"meminfo",
"=",
"system",
".",
"read_meminfo",
"(",
")",
"self",
".",
"report",
".",
"update",
"(",
"{",
"\"aws\"",
":",
"self",
".",
"extract_context_data",
"(",
")",
",",
"\"timestampEnd\"",
":",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
",",
"}",
")",
"self",
".",
"report",
"[",
"\"environment\"",
"]",
"[",
"\"os\"",
"]",
".",
"update",
"(",
"{",
"\"cpus\"",
":",
"system",
".",
"read_stat",
"(",
")",
",",
"\"freemem\"",
":",
"meminfo",
"[",
"\"MemFree\"",
"]",
",",
"\"hostname\"",
":",
"system",
".",
"read_hostname",
"(",
")",
",",
"\"totalmem\"",
":",
"meminfo",
"[",
"\"MemTotal\"",
"]",
",",
"\"usedmem\"",
":",
"meminfo",
"[",
"\"MemTotal\"",
"]",
"-",
"meminfo",
"[",
"\"MemFree\"",
"]",
",",
"}",
")",
"self",
".",
"report",
"[",
"\"environment\"",
"]",
"[",
"\"os\"",
"]",
"[",
"\"linux\"",
"]",
"[",
"\"pid\"",
"]",
"=",
"{",
"\"self\"",
":",
"{",
"\"stat\"",
":",
"system",
".",
"read_pid_stat",
"(",
"\"self\"",
")",
",",
"\"stat_start\"",
":",
"self",
".",
"stat_start",
",",
"\"status\"",
":",
"system",
".",
"read_pid_status",
"(",
"\"self\"",
")",
",",
"}",
"}",
"self",
".",
"report",
"[",
"\"disk\"",
"]",
"=",
"system",
".",
"read_disk",
"(",
")",
"self",
".",
"report",
"[",
"\"duration\"",
"]",
"=",
"int",
"(",
"(",
"monotonic",
"(",
")",
"-",
"self",
".",
"start_time",
")",
"*",
"1e9",
")"
] | Prepare the report to be sent to IOpipe.
:param error: An optional error to add to report.
:param frame: A stack frame to add to report in the event of a timeout. | [
"Prepare",
"the",
"report",
"to",
"be",
"sent",
"to",
"IOpipe",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L138-L182 | train |
iopipe/iopipe-python | iopipe/report.py | Report.send | def send(self):
"""
Sends the report to IOpipe.
"""
if self.sent is True:
return
self.sent = True
logger.debug("Sending report to IOpipe:")
logger.debug(json.dumps(self.report, indent=2, sort_keys=True))
self.client.submit_future(send_report, copy.deepcopy(self.report), self.config) | python | def send(self):
"""
Sends the report to IOpipe.
"""
if self.sent is True:
return
self.sent = True
logger.debug("Sending report to IOpipe:")
logger.debug(json.dumps(self.report, indent=2, sort_keys=True))
self.client.submit_future(send_report, copy.deepcopy(self.report), self.config) | [
"def",
"send",
"(",
"self",
")",
":",
"if",
"self",
".",
"sent",
"is",
"True",
":",
"return",
"self",
".",
"sent",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"Sending report to IOpipe:\"",
")",
"logger",
".",
"debug",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"report",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")",
")",
"self",
".",
"client",
".",
"submit_future",
"(",
"send_report",
",",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"report",
")",
",",
"self",
".",
"config",
")"
] | Sends the report to IOpipe. | [
"Sends",
"the",
"report",
"to",
"IOpipe",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L184-L195 | train |
iopipe/iopipe-python | iopipe/send_report.py | send_report | def send_report(report, config):
"""
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
"""
headers = {"Authorization": "Bearer {}".format(config["token"])}
url = "https://{host}{path}".format(**config)
try:
response = session.post(
url, json=report, headers=headers, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error sending report to IOpipe: %s" % e)
else:
logger.debug("Report sent to IOpipe successfully") | python | def send_report(report, config):
"""
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
"""
headers = {"Authorization": "Bearer {}".format(config["token"])}
url = "https://{host}{path}".format(**config)
try:
response = session.post(
url, json=report, headers=headers, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error sending report to IOpipe: %s" % e)
else:
logger.debug("Report sent to IOpipe successfully") | [
"def",
"send_report",
"(",
"report",
",",
"config",
")",
":",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"config",
"[",
"\"token\"",
"]",
")",
"}",
"url",
"=",
"\"https://{host}{path}\"",
".",
"format",
"(",
"*",
"*",
"config",
")",
"try",
":",
"response",
"=",
"session",
".",
"post",
"(",
"url",
",",
"json",
"=",
"report",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error sending report to IOpipe: %s\"",
"%",
"e",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Report sent to IOpipe successfully\"",
")"
] | Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration. | [
"Sends",
"the",
"report",
"to",
"IOpipe",
"s",
"collector",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/send_report.py#L12-L30 | train |
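A hedged sketch of the config `send_report` expects; the host and path are assumptions about the default collector endpoint, and the token is a placeholder:

```python
config = {
    "token": "YOUR_IOPIPE_TOKEN",      # placeholder project token
    "host": "metrics-api.iopipe.com",  # assumed default collector host
    "path": "/v0/event",               # assumed default collector path
    "network_timeout": 5,              # seconds
}

send_report({"duration": 123456789}, config)  # fire-and-forget; errors are logged
```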
iopipe/iopipe-python | iopipe/contrib/logger/request.py | upload_log_data | def upload_log_data(url, stream_or_file, config):
"""
Uploads log data to IOpipe.
:param url: The signed URL
:param stream_or_file: The log data stream or file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading log data to IOpipe")
if isinstance(stream_or_file, StringIO):
stream_or_file.seek(0)
response = requests.put(
url, data=stream_or_file, timeout=config["network_timeout"]
)
else:
with open(stream_or_file, "rb") as data:
response = requests.put(
url, data=data, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading log data: %s", e)
logger.exception(e)
if hasattr(e, "response") and hasattr(e.response, "content"):
logger.debug(e.response.content)
else:
logger.debug("Log data uploaded successfully")
finally:
if isinstance(stream_or_file, str) and os.path.exists(stream_or_file):
os.remove(stream_or_file) | python | def upload_log_data(url, stream_or_file, config):
"""
Uploads log data to IOpipe.
:param url: The signed URL
:param stream_or_file: The log data stream or file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading log data to IOpipe")
if isinstance(stream_or_file, StringIO):
stream_or_file.seek(0)
response = requests.put(
url, data=stream_or_file, timeout=config["network_timeout"]
)
else:
with open(stream_or_file, "rb") as data:
response = requests.put(
url, data=data, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading log data: %s", e)
logger.exception(e)
if hasattr(e, "response") and hasattr(e.response, "content"):
logger.debug(e.response.content)
else:
logger.debug("Log data uploaded successfully")
finally:
if isinstance(stream_or_file, str) and os.path.exists(stream_or_file):
os.remove(stream_or_file) | [
"def",
"upload_log_data",
"(",
"url",
",",
"stream_or_file",
",",
"config",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Uploading log data to IOpipe\"",
")",
"if",
"isinstance",
"(",
"stream_or_file",
",",
"StringIO",
")",
":",
"stream_or_file",
".",
"seek",
"(",
"0",
")",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"stream_or_file",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"else",
":",
"with",
"open",
"(",
"stream_or_file",
",",
"\"rb\"",
")",
"as",
"data",
":",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error while uploading log data: %s\"",
",",
"e",
")",
"logger",
".",
"exception",
"(",
"e",
")",
"if",
"hasattr",
"(",
"e",
",",
"\"response\"",
")",
"and",
"hasattr",
"(",
"e",
".",
"response",
",",
"\"content\"",
")",
":",
"logger",
".",
"debug",
"(",
"e",
".",
"response",
".",
"content",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Log data uploaded successfully\"",
")",
"finally",
":",
"if",
"isinstance",
"(",
"stream_or_file",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"stream_or_file",
")",
":",
"os",
".",
"remove",
"(",
"stream_or_file",
")"
] | Uploads log data to IOpipe.
:param url: The signed URL
:param stream_or_file: The log data stream or file
:param config: The IOpipe config | [
"Uploads",
"log",
"data",
"to",
"IOpipe",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/logger/request.py#L14-L44 | train |
iopipe/iopipe-python | iopipe/signer.py | get_signer_hostname | def get_signer_hostname():
"""
Returns the IOpipe signer hostname for a region
:returns: The signer hostname
:rtype str
"""
region = os.getenv("AWS_REGION", "")
region = region if region and region in SUPPORTED_REGIONS else "us-west-2"
return "signer.{region}.iopipe.com".format(region=region) | python | def get_signer_hostname():
"""
Returns the IOpipe signer hostname for a region
:returns: The signer hostname
:rtype str
"""
region = os.getenv("AWS_REGION", "")
region = region if region and region in SUPPORTED_REGIONS else "us-west-2"
return "signer.{region}.iopipe.com".format(region=region) | [
"def",
"get_signer_hostname",
"(",
")",
":",
"region",
"=",
"os",
".",
"getenv",
"(",
"\"AWS_REGION\"",
",",
"\"\"",
")",
"region",
"=",
"region",
"if",
"region",
"and",
"region",
"in",
"SUPPORTED_REGIONS",
"else",
"\"us-west-2\"",
"return",
"\"signer.{region}.iopipe.com\"",
".",
"format",
"(",
"region",
"=",
"region",
")"
] | Returns the IOpipe signer hostname for a region
:returns: The signer hostname
:rtype str | [
"Returns",
"the",
"IOpipe",
"signer",
"hostname",
"for",
"a",
"region"
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/signer.py#L15-L24 | train |
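For reference, a standalone sketch of how the region fallback behaves; SUPPORTED_REGIONS is not shown in this extract, so the set below is an assumed placeholder:

    import os

    # Placeholder set -- the real SUPPORTED_REGIONS lives elsewhere in iopipe/signer.py.
    SUPPORTED_REGIONS = {"eu-west-1", "us-east-2", "us-west-1", "us-west-2"}

    def get_signer_hostname():
        region = os.getenv("AWS_REGION", "")
        region = region if region and region in SUPPORTED_REGIONS else "us-west-2"
        return "signer.{region}.iopipe.com".format(region=region)

    os.environ["AWS_REGION"] = "eu-central-1"      # not in the placeholder set
    print(get_signer_hostname())                   # signer.us-west-2.iopipe.com (fallback)
    os.environ["AWS_REGION"] = "us-east-2"
    print(get_signer_hostname())                   # signer.us-east-2.iopipe.com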
iopipe/iopipe-python | iopipe/signer.py | get_signed_request | def get_signed_request(config, context, extension):
"""
Returns a signed request URL from IOpipe
:param config: The IOpipe config
:param context: The AWS context to request a signed URL
:param extension: The extension of the file to sign
:returns: A signed request URL
:rtype: str
"""
url = "https://{hostname}/".format(hostname=get_signer_hostname())
try:
logger.debug("Requesting signed request URL from %s", url)
response = requests.post(
url,
json={
"arn": context.invoked_function_arn,
"requestId": context.aws_request_id,
"timestamp": int(time.time() * 1000),
"extension": extension,
},
headers={"Authorization": config["token"]},
timeout=config["network_timeout"],
)
response.raise_for_status()
except Exception as e:
logger.debug("Error requesting signed request URL: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
response = response.json()
logger.debug("Signed request URL received for %s", response["url"])
return response | python | def get_signed_request(config, context, extension):
"""
Returns a signed request URL from IOpipe
:param config: The IOpipe config
:param context: The AWS context to request a signed URL
:param extension: The extension of the file to sign
:returns: A signed request URL
:rtype: str
"""
url = "https://{hostname}/".format(hostname=get_signer_hostname())
try:
logger.debug("Requesting signed request URL from %s", url)
response = requests.post(
url,
json={
"arn": context.invoked_function_arn,
"requestId": context.aws_request_id,
"timestamp": int(time.time() * 1000),
"extension": extension,
},
headers={"Authorization": config["token"]},
timeout=config["network_timeout"],
)
response.raise_for_status()
except Exception as e:
logger.debug("Error requesting signed request URL: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
response = response.json()
logger.debug("Signed request URL received for %s", response["url"])
return response | [
"def",
"get_signed_request",
"(",
"config",
",",
"context",
",",
"extension",
")",
":",
"url",
"=",
"\"https://{hostname}/\"",
".",
"format",
"(",
"hostname",
"=",
"get_signer_hostname",
"(",
")",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Requesting signed request URL from %s\"",
",",
"url",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"json",
"=",
"{",
"\"arn\"",
":",
"context",
".",
"invoked_function_arn",
",",
"\"requestId\"",
":",
"context",
".",
"aws_request_id",
",",
"\"timestamp\"",
":",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
",",
"\"extension\"",
":",
"extension",
",",
"}",
",",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"config",
"[",
"\"token\"",
"]",
"}",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
",",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error requesting signed request URL: %s\"",
",",
"e",
")",
"if",
"hasattr",
"(",
"e",
",",
"\"response\"",
")",
":",
"logger",
".",
"debug",
"(",
"e",
".",
"response",
".",
"content",
")",
"else",
":",
"response",
"=",
"response",
".",
"json",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Signed request URL received for %s\"",
",",
"response",
"[",
"\"url\"",
"]",
")",
"return",
"response"
] | Returns a signed request URL from IOpipe
:param config: The IOpipe config
:param context: The AWS context to request a signed URL
:param extension: The extension of the file to sign
:returns: A signed request URL
:rtype: str | [
"Returns",
"a",
"signed",
"request",
"URL",
"from",
"IOpipe"
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/signer.py#L27-L60 | train |
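A hedged usage sketch for get_signed_request; the ARN, request id, and token below are placeholders, and the call performs a real HTTP POST to the signer endpoint, so outside AWS it will typically log an error:

    from types import SimpleNamespace
    from iopipe.signer import get_signed_request   # module path taken from this record

    context = SimpleNamespace(
        invoked_function_arn="arn:aws:lambda:us-west-2:123456789012:function:demo",  # placeholder
        aws_request_id="00000000-0000-0000-0000-000000000000",                       # placeholder
    )
    config = {"token": "example-token", "network_timeout": 5.0}

    try:
        signed = get_signed_request(config, context, ".log")
        print("signed URL:", signed["url"])
    except Exception as exc:
        print("signing failed:", exc)

Note that on the error path the function still falls through to `return response`, so callers should be prepared for a non-dict return value as well.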
iopipe/iopipe-python | acceptance/serverless-layers/handler.py | handler | def handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
"""
try:
ip = requests.get("http://checkip.amazonaws.com/")
except requests.RequestException as e:
# Send some context about this error to Lambda Logs
print(e)
raise e
return {
"statusCode": 200,
"body": json.dumps(
{"message": "hello world", "location": ip.text.replace("\n", "")}
),
} | python | def handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
"""
try:
ip = requests.get("http://checkip.amazonaws.com/")
except requests.RequestException as e:
# Send some context about this error to Lambda Logs
print(e)
raise e
return {
"statusCode": 200,
"body": json.dumps(
{"message": "hello world", "location": ip.text.replace("\n", "")}
),
} | [
"def",
"handler",
"(",
"event",
",",
"context",
")",
":",
"try",
":",
"ip",
"=",
"requests",
".",
"get",
"(",
"\"http://checkip.amazonaws.com/\"",
")",
"except",
"requests",
".",
"RequestException",
"as",
"e",
":",
"# Send some context about this error to Lambda Logs",
"print",
"(",
"e",
")",
"raise",
"e",
"return",
"{",
"\"statusCode\"",
":",
"200",
",",
"\"body\"",
":",
"json",
".",
"dumps",
"(",
"{",
"\"message\"",
":",
"\"hello world\"",
",",
"\"location\"",
":",
"ip",
".",
"text",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"}",
")",
",",
"}"
] | Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
# api-gateway-simple-proxy-for-lambda-output-format
https: // docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html | [
"Sample",
"pure",
"Lambda",
"function"
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/acceptance/serverless-layers/handler.py#L6-L87 | train |
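A minimal local smoke test for the handler above, assuming it is importable together with its module-level requests/json imports; note it performs a real HTTP GET to checkip.amazonaws.com:

    import json

    response = handler(event={}, context=None)   # both arguments are unused by the body
    print(response["statusCode"])                # 200
    print(json.loads(response["body"]))          # {'message': 'hello world', 'location': '<public IP>'}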
iopipe/iopipe-python | iopipe/system.py | read_meminfo | def read_meminfo():
"""
Returns system memory usage information.
:returns: The system memory usage.
:rtype: dict
"""
data = {}
with open("/proc/meminfo", "rb") as meminfo_file:
for row in meminfo_file:
fields = row.split()
# Example content:
# MemTotal: 3801016 kB
# MemFree: 1840972 kB
# MemAvailable: 3287752 kB
# HugePages_Total: 0
data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024
return data | python | def read_meminfo():
"""
Returns system memory usage information.
:returns: The system memory usage.
:rtype: dict
"""
data = {}
with open("/proc/meminfo", "rb") as meminfo_file:
for row in meminfo_file:
fields = row.split()
# Example content:
# MemTotal: 3801016 kB
# MemFree: 1840972 kB
# MemAvailable: 3287752 kB
# HugePages_Total: 0
data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024
return data | [
"def",
"read_meminfo",
"(",
")",
":",
"data",
"=",
"{",
"}",
"with",
"open",
"(",
"\"/proc/meminfo\"",
",",
"\"rb\"",
")",
"as",
"meminfo_file",
":",
"for",
"row",
"in",
"meminfo_file",
":",
"fields",
"=",
"row",
".",
"split",
"(",
")",
"# Example content:",
"# MemTotal: 3801016 kB",
"# MemFree: 1840972 kB",
"# MemAvailable: 3287752 kB",
"# HugePages_Total: 0",
"data",
"[",
"fields",
"[",
"0",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"int",
"(",
"fields",
"[",
"1",
"]",
")",
"*",
"1024",
"return",
"data"
] | Returns system memory usage information.
:returns: The system memory usage.
:rtype: dict | [
"Returns",
"system",
"memory",
"usage",
"information",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L50-L67 | train |
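A Linux-only usage sketch; note that the parser multiplies every second field by 1024, which is only accurate for the rows /proc/meminfo reports in kB (unit-less rows such as HugePages_Total come back inflated):

    meminfo = read_meminfo()                     # assumes iopipe.system.read_meminfo is importable
    print(meminfo["MemTotal"] // 2**20, "MiB total")
    print(meminfo.get("MemFree", 0) // 2**20, "MiB free")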
iopipe/iopipe-python | iopipe/system.py | read_pid_stat | def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
} | python | def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
} | [
"def",
"read_pid_stat",
"(",
"pid",
"=",
"\"self\"",
")",
":",
"with",
"open",
"(",
"\"/proc/%s/stat\"",
"%",
"(",
"pid",
",",
")",
",",
"\"rb\"",
")",
"as",
"f",
":",
"stat",
"=",
"f",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"return",
"{",
"\"utime\"",
":",
"int",
"(",
"stat",
"[",
"13",
"]",
")",
",",
"\"stime\"",
":",
"int",
"(",
"stat",
"[",
"14",
"]",
")",
",",
"\"cutime\"",
":",
"int",
"(",
"stat",
"[",
"15",
"]",
")",
",",
"\"cstime\"",
":",
"int",
"(",
"stat",
"[",
"16",
"]",
")",
",",
"}"
] | Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict | [
"Returns",
"system",
"process",
"stat",
"information",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L70-L85 | train |
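Usage sketch for the current process; the hard-coded field indices (13-16) assume the comm field in /proc/<pid>/stat contains no spaces, and the values are in clock ticks:

    import os

    stat = read_pid_stat()                       # pid defaults to "self"
    clk_tck = os.sysconf("SC_CLK_TCK")           # ticks per second, commonly 100
    print("user CPU seconds:", stat["utime"] / clk_tck)
    print("system CPU seconds:", stat["stime"] / clk_tck)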
iopipe/iopipe-python | iopipe/system.py | read_pid_status | def read_pid_status(pid="self"):
"""
    Returns the system process status.
:param pid: The process ID.
:returns: The system process status.
:rtype: dict
"""
data = {}
with open("/proc/%s/status" % (pid,), "rb") as status_file:
for row in status_file:
fields = row.split()
if fields and fields[0] in [b"VmRSS:", b"Threads:", b"FDSize:"]:
try:
data[fields[0].decode("ascii")[:-1]] = int(fields[1])
except ValueError:
data[fields[0].decode("ascii")[:-1]] = fields[1].decode("ascii")
return data | python | def read_pid_status(pid="self"):
"""
    Returns the system process status.
:param pid: The process ID.
:returns: The system process status.
:rtype: dict
"""
data = {}
with open("/proc/%s/status" % (pid,), "rb") as status_file:
for row in status_file:
fields = row.split()
if fields and fields[0] in [b"VmRSS:", b"Threads:", b"FDSize:"]:
try:
data[fields[0].decode("ascii")[:-1]] = int(fields[1])
except ValueError:
data[fields[0].decode("ascii")[:-1]] = fields[1].decode("ascii")
return data | [
"def",
"read_pid_status",
"(",
"pid",
"=",
"\"self\"",
")",
":",
"data",
"=",
"{",
"}",
"with",
"open",
"(",
"\"/proc/%s/status\"",
"%",
"(",
"pid",
",",
")",
",",
"\"rb\"",
")",
"as",
"status_file",
":",
"for",
"row",
"in",
"status_file",
":",
"fields",
"=",
"row",
".",
"split",
"(",
")",
"if",
"fields",
"and",
"fields",
"[",
"0",
"]",
"in",
"[",
"b\"VmRSS:\"",
",",
"b\"Threads:\"",
",",
"b\"FDSize:\"",
"]",
":",
"try",
":",
"data",
"[",
"fields",
"[",
"0",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"int",
"(",
"fields",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"data",
"[",
"fields",
"[",
"0",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"fields",
"[",
"1",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"return",
"data"
] | Returns the system process status.
:param pid: The process ID.
:returns: The system process status.
:rtype: dict | [
"Returns",
"the",
"system",
"process",
"sstatus",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L88-L105 | train |
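Usage sketch; only VmRSS, Threads, and FDSize are kept, and the kB unit on VmRSS is dropped rather than converted, so that value remains in kilobytes:

    status = read_pid_status()                   # pid defaults to "self"
    print("resident set:", status.get("VmRSS"), "kB")
    print("threads:", status.get("Threads"))
    print("fd table size:", status.get("FDSize"))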
iopipe/iopipe-python | iopipe/system.py | read_stat | def read_stat():
"""
Returns the system stat information.
:returns: The system stat information.
:rtype: list
"""
data = []
with open("/proc/stat", "rb") as stat_file:
for line in stat_file:
cpu_stat = line.split()
if cpu_stat[0][:3] != b"cpu":
break
# First cpu line is aggregation of following lines, skip it
if len(cpu_stat[0]) == 3:
continue
data.append(
{
"times": {
"user": int(cpu_stat[1]),
"nice": int(cpu_stat[2]),
"sys": int(cpu_stat[3]),
"idle": int(cpu_stat[4]),
"irq": int(cpu_stat[6]),
}
}
)
return data | python | def read_stat():
"""
Returns the system stat information.
:returns: The system stat information.
:rtype: list
"""
data = []
with open("/proc/stat", "rb") as stat_file:
for line in stat_file:
cpu_stat = line.split()
if cpu_stat[0][:3] != b"cpu":
break
# First cpu line is aggregation of following lines, skip it
if len(cpu_stat[0]) == 3:
continue
data.append(
{
"times": {
"user": int(cpu_stat[1]),
"nice": int(cpu_stat[2]),
"sys": int(cpu_stat[3]),
"idle": int(cpu_stat[4]),
"irq": int(cpu_stat[6]),
}
}
)
return data | [
"def",
"read_stat",
"(",
")",
":",
"data",
"=",
"[",
"]",
"with",
"open",
"(",
"\"/proc/stat\"",
",",
"\"rb\"",
")",
"as",
"stat_file",
":",
"for",
"line",
"in",
"stat_file",
":",
"cpu_stat",
"=",
"line",
".",
"split",
"(",
")",
"if",
"cpu_stat",
"[",
"0",
"]",
"[",
":",
"3",
"]",
"!=",
"b\"cpu\"",
":",
"break",
"# First cpu line is aggregation of following lines, skip it",
"if",
"len",
"(",
"cpu_stat",
"[",
"0",
"]",
")",
"==",
"3",
":",
"continue",
"data",
".",
"append",
"(",
"{",
"\"times\"",
":",
"{",
"\"user\"",
":",
"int",
"(",
"cpu_stat",
"[",
"1",
"]",
")",
",",
"\"nice\"",
":",
"int",
"(",
"cpu_stat",
"[",
"2",
"]",
")",
",",
"\"sys\"",
":",
"int",
"(",
"cpu_stat",
"[",
"3",
"]",
")",
",",
"\"idle\"",
":",
"int",
"(",
"cpu_stat",
"[",
"4",
"]",
")",
",",
"\"irq\"",
":",
"int",
"(",
"cpu_stat",
"[",
"6",
"]",
")",
",",
"}",
"}",
")",
"return",
"data"
] | Returns the system stat information.
:returns: The system stat information.
:rtype: list | [
"Returns",
"the",
"system",
"stat",
"information",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L108-L135 | train |
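Usage sketch; the list has one entry per CPU because the aggregate cpu line is skipped, and all counters are cumulative jiffies since boot:

    for i, cpu in enumerate(read_stat()):
        t = cpu["times"]
        busy = t["user"] + t["nice"] + t["sys"] + t["irq"]
        print("cpu%d: busy=%d idle=%d" % (i, busy, t["idle"]))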
iopipe/iopipe-python | iopipe/config.py | set_config | def set_config(**config):
"""
Returns IOpipe configuration options, setting defaults as necessary.
"""
config.setdefault("debug", bool(strtobool(os.getenv("IOPIPE_DEBUG", "false"))))
config.setdefault("enabled", bool(strtobool(os.getenv("IOPIPE_ENABLED", "true"))))
config.setdefault("host", get_hostname())
config.setdefault("install_method", os.getenv("IOPIPE_INSTALL_METHOD", "manual"))
config.setdefault("network_timeout", os.getenv("IOPIPE_NETWORK_TIMEOUT", 5000))
config.setdefault("path", get_collector_path())
config.setdefault("plugins", [])
config.setdefault("sync_http", False)
config.setdefault("timeout_window", os.getenv("IOPIPE_TIMEOUT_WINDOW", 500))
config.setdefault(
"token", os.getenv("IOPIPE_TOKEN") or os.getenv("IOPIPE_CLIENTID") or ""
)
if "client_id" in config:
config["token"] = config.pop("client_id")
if "url" in config:
url = config.pop("url")
config["host"] = get_hostname(url)
config["path"] = get_collector_path(url)
if "." in str(config["network_timeout"]):
warnings.warn(
"IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer"
)
try:
config["debug"] = bool(config["debug"])
except ValueError:
config["debug"] = False
try:
config["network_timeout"] = int(config["network_timeout"]) / 1000.0
except ValueError:
config["network_timeout"] = 5.0
if "." in str(config["timeout_window"]):
warnings.warn(
"IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer"
)
try:
config["timeout_window"] = int(config["timeout_window"]) / 1000.0
except ValueError:
config["timeout_window"] = 0.5
return config | python | def set_config(**config):
"""
Returns IOpipe configuration options, setting defaults as necessary.
"""
config.setdefault("debug", bool(strtobool(os.getenv("IOPIPE_DEBUG", "false"))))
config.setdefault("enabled", bool(strtobool(os.getenv("IOPIPE_ENABLED", "true"))))
config.setdefault("host", get_hostname())
config.setdefault("install_method", os.getenv("IOPIPE_INSTALL_METHOD", "manual"))
config.setdefault("network_timeout", os.getenv("IOPIPE_NETWORK_TIMEOUT", 5000))
config.setdefault("path", get_collector_path())
config.setdefault("plugins", [])
config.setdefault("sync_http", False)
config.setdefault("timeout_window", os.getenv("IOPIPE_TIMEOUT_WINDOW", 500))
config.setdefault(
"token", os.getenv("IOPIPE_TOKEN") or os.getenv("IOPIPE_CLIENTID") or ""
)
if "client_id" in config:
config["token"] = config.pop("client_id")
if "url" in config:
url = config.pop("url")
config["host"] = get_hostname(url)
config["path"] = get_collector_path(url)
if "." in str(config["network_timeout"]):
warnings.warn(
"IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer"
)
try:
config["debug"] = bool(config["debug"])
except ValueError:
config["debug"] = False
try:
config["network_timeout"] = int(config["network_timeout"]) / 1000.0
except ValueError:
config["network_timeout"] = 5.0
if "." in str(config["timeout_window"]):
warnings.warn(
"IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer"
)
try:
config["timeout_window"] = int(config["timeout_window"]) / 1000.0
except ValueError:
config["timeout_window"] = 0.5
return config | [
"def",
"set_config",
"(",
"*",
"*",
"config",
")",
":",
"config",
".",
"setdefault",
"(",
"\"debug\"",
",",
"bool",
"(",
"strtobool",
"(",
"os",
".",
"getenv",
"(",
"\"IOPIPE_DEBUG\"",
",",
"\"false\"",
")",
")",
")",
")",
"config",
".",
"setdefault",
"(",
"\"enabled\"",
",",
"bool",
"(",
"strtobool",
"(",
"os",
".",
"getenv",
"(",
"\"IOPIPE_ENABLED\"",
",",
"\"true\"",
")",
")",
")",
")",
"config",
".",
"setdefault",
"(",
"\"host\"",
",",
"get_hostname",
"(",
")",
")",
"config",
".",
"setdefault",
"(",
"\"install_method\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_INSTALL_METHOD\"",
",",
"\"manual\"",
")",
")",
"config",
".",
"setdefault",
"(",
"\"network_timeout\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_NETWORK_TIMEOUT\"",
",",
"5000",
")",
")",
"config",
".",
"setdefault",
"(",
"\"path\"",
",",
"get_collector_path",
"(",
")",
")",
"config",
".",
"setdefault",
"(",
"\"plugins\"",
",",
"[",
"]",
")",
"config",
".",
"setdefault",
"(",
"\"sync_http\"",
",",
"False",
")",
"config",
".",
"setdefault",
"(",
"\"timeout_window\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_TIMEOUT_WINDOW\"",
",",
"500",
")",
")",
"config",
".",
"setdefault",
"(",
"\"token\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_TOKEN\"",
")",
"or",
"os",
".",
"getenv",
"(",
"\"IOPIPE_CLIENTID\"",
")",
"or",
"\"\"",
")",
"if",
"\"client_id\"",
"in",
"config",
":",
"config",
"[",
"\"token\"",
"]",
"=",
"config",
".",
"pop",
"(",
"\"client_id\"",
")",
"if",
"\"url\"",
"in",
"config",
":",
"url",
"=",
"config",
".",
"pop",
"(",
"\"url\"",
")",
"config",
"[",
"\"host\"",
"]",
"=",
"get_hostname",
"(",
"url",
")",
"config",
"[",
"\"path\"",
"]",
"=",
"get_collector_path",
"(",
"url",
")",
"if",
"\".\"",
"in",
"str",
"(",
"config",
"[",
"\"network_timeout\"",
"]",
")",
":",
"warnings",
".",
"warn",
"(",
"\"IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer\"",
")",
"try",
":",
"config",
"[",
"\"debug\"",
"]",
"=",
"bool",
"(",
"config",
"[",
"\"debug\"",
"]",
")",
"except",
"ValueError",
":",
"config",
"[",
"\"debug\"",
"]",
"=",
"False",
"try",
":",
"config",
"[",
"\"network_timeout\"",
"]",
"=",
"int",
"(",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"/",
"1000.0",
"except",
"ValueError",
":",
"config",
"[",
"\"network_timeout\"",
"]",
"=",
"5.0",
"if",
"\".\"",
"in",
"str",
"(",
"config",
"[",
"\"timeout_window\"",
"]",
")",
":",
"warnings",
".",
"warn",
"(",
"\"IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer\"",
")",
"try",
":",
"config",
"[",
"\"timeout_window\"",
"]",
"=",
"int",
"(",
"config",
"[",
"\"timeout_window\"",
"]",
")",
"/",
"1000.0",
"except",
"ValueError",
":",
"config",
"[",
"\"timeout_window\"",
"]",
"=",
"0.5",
"return",
"config"
] | Returns IOpipe configuration options, setting defaults as necessary. | [
"Returns",
"IOpipe",
"configuration",
"options",
"setting",
"defaults",
"as",
"necessary",
"."
] | 4eb653977341bc67f8b1b87aedb3aaaefc25af61 | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/config.py#L8-L59 | train |
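A usage sketch showing the defaulting and the millisecond-to-second conversions (assuming the module's helpers get_hostname/get_collector_path resolve as in iopipe/config.py):

    from iopipe.config import set_config

    config = set_config(token="example-token", network_timeout=10000, timeout_window=250)
    assert config["network_timeout"] == 10.0     # 10000 ms -> 10.0 s
    assert config["timeout_window"] == 0.25      # 250 ms -> 0.25 s
    assert config["enabled"] is True             # default, unless IOPIPE_ENABLED is set in the environment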
pief/python-netsnmpagent | netsnmpapi.py | b | def b(s):
""" Encodes Unicode strings to byte strings, if necessary. """
return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding()) | python | def b(s):
""" Encodes Unicode strings to byte strings, if necessary. """
return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding()) | [
"def",
"b",
"(",
"s",
")",
":",
"return",
"s",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
"else",
"s",
".",
"encode",
"(",
"locale",
".",
"getpreferredencoding",
"(",
")",
")"
] | Encodes Unicode strings to byte strings, if necessary. | [
"Encodes",
"Unicode",
"strings",
"to",
"byte",
"strings",
"if",
"necessary",
"."
] | b1aad1c7f034509c40d9ab17d59be32e809bd31d | https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/netsnmpapi.py#L16-L19 | train |
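A standalone reproduction to illustrate the behavior (importing netsnmpapi itself would load the net-snmp shared libraries, so the helper is restated here):

    import locale

    def b(s):
        # bytes pass through untouched; str is encoded with the preferred locale encoding
        return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding())

    assert b(b"abc") == b"abc"
    assert b("abc") == "abc".encode(locale.getpreferredencoding())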
pief/python-netsnmpagent | examples/threading_agent.py | LogMsg | def LogMsg(msg):
""" Writes a formatted log message with a timestamp to stdout. """
global headerlogged
if headerlogged == 0:
print("{0:<8} {1:<90} {2}".format(
"Time",
"MainThread",
"UpdateSNMPObjsThread"
))
print("{0:-^120}".format("-"))
headerlogged = 1
threadname = threading.currentThread().name
funcname = sys._getframe(1).f_code.co_name
if funcname == "<module>":
funcname = "Main code path"
elif funcname == "LogNetSnmpMsg":
funcname = "net-snmp code"
else:
funcname = "{0}()".format(funcname)
if threadname == "MainThread":
logmsg = "{0} {1:<112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
else:
logmsg = "{0} {1:>112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
print(logmsg) | python | def LogMsg(msg):
""" Writes a formatted log message with a timestamp to stdout. """
global headerlogged
if headerlogged == 0:
print("{0:<8} {1:<90} {2}".format(
"Time",
"MainThread",
"UpdateSNMPObjsThread"
))
print("{0:-^120}".format("-"))
headerlogged = 1
threadname = threading.currentThread().name
funcname = sys._getframe(1).f_code.co_name
if funcname == "<module>":
funcname = "Main code path"
elif funcname == "LogNetSnmpMsg":
funcname = "net-snmp code"
else:
funcname = "{0}()".format(funcname)
if threadname == "MainThread":
logmsg = "{0} {1:<112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
else:
logmsg = "{0} {1:>112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
print(logmsg) | [
"def",
"LogMsg",
"(",
"msg",
")",
":",
"global",
"headerlogged",
"if",
"headerlogged",
"==",
"0",
":",
"print",
"(",
"\"{0:<8} {1:<90} {2}\"",
".",
"format",
"(",
"\"Time\"",
",",
"\"MainThread\"",
",",
"\"UpdateSNMPObjsThread\"",
")",
")",
"print",
"(",
"\"{0:-^120}\"",
".",
"format",
"(",
"\"-\"",
")",
")",
"headerlogged",
"=",
"1",
"threadname",
"=",
"threading",
".",
"currentThread",
"(",
")",
".",
"name",
"funcname",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_code",
".",
"co_name",
"if",
"funcname",
"==",
"\"<module>\"",
":",
"funcname",
"=",
"\"Main code path\"",
"elif",
"funcname",
"==",
"\"LogNetSnmpMsg\"",
":",
"funcname",
"=",
"\"net-snmp code\"",
"else",
":",
"funcname",
"=",
"\"{0}()\"",
".",
"format",
"(",
"funcname",
")",
"if",
"threadname",
"==",
"\"MainThread\"",
":",
"logmsg",
"=",
"\"{0} {1:<112.112}\"",
".",
"format",
"(",
"time",
".",
"strftime",
"(",
"\"%T\"",
",",
"time",
".",
"localtime",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
",",
"\"{0}: {1}\"",
".",
"format",
"(",
"funcname",
",",
"msg",
")",
")",
"else",
":",
"logmsg",
"=",
"\"{0} {1:>112.112}\"",
".",
"format",
"(",
"time",
".",
"strftime",
"(",
"\"%T\"",
",",
"time",
".",
"localtime",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
",",
"\"{0}: {1}\"",
".",
"format",
"(",
"funcname",
",",
"msg",
")",
")",
"print",
"(",
"logmsg",
")"
] | Writes a formatted log message with a timestamp to stdout. | [
"Writes",
"a",
"formatted",
"log",
"message",
"with",
"a",
"timestamp",
"to",
"stdout",
"."
] | b1aad1c7f034509c40d9ab17d59be32e809bd31d | https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/examples/threading_agent.py#L70-L104 | train |
pief/python-netsnmpagent | examples/threading_agent.py | UpdateSNMPObjs | def UpdateSNMPObjs():
""" Function that does the actual data update. """
global threadingString
LogMsg("Beginning data update.")
data = ""
# Obtain the data by calling an external command. We don't use
# subprocess.check_output() here for compatibility with Python versions
# older than 2.7.
LogMsg("Calling external command \"sleep 5; date\".")
proc = subprocess.Popen(
"sleep 5; date", shell=True, env={ "LANG": "C" },
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = proc.communicate()[0].splitlines()[0]
rc = proc.poll()
if rc != 0:
LogMsg("An error occured executing the command: {0}".format(output))
return
msg = "Updating \"threadingString\" object with data \"{0}\"."
LogMsg(msg.format(output))
threadingString.update(output)
LogMsg("Data update done, exiting thread.") | python | def UpdateSNMPObjs():
""" Function that does the actual data update. """
global threadingString
LogMsg("Beginning data update.")
data = ""
# Obtain the data by calling an external command. We don't use
# subprocess.check_output() here for compatibility with Python versions
# older than 2.7.
LogMsg("Calling external command \"sleep 5; date\".")
proc = subprocess.Popen(
"sleep 5; date", shell=True, env={ "LANG": "C" },
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = proc.communicate()[0].splitlines()[0]
rc = proc.poll()
if rc != 0:
LogMsg("An error occured executing the command: {0}".format(output))
return
msg = "Updating \"threadingString\" object with data \"{0}\"."
LogMsg(msg.format(output))
threadingString.update(output)
LogMsg("Data update done, exiting thread.") | [
"def",
"UpdateSNMPObjs",
"(",
")",
":",
"global",
"threadingString",
"LogMsg",
"(",
"\"Beginning data update.\"",
")",
"data",
"=",
"\"\"",
"# Obtain the data by calling an external command. We don't use",
"# subprocess.check_output() here for compatibility with Python versions",
"# older than 2.7.",
"LogMsg",
"(",
"\"Calling external command \\\"sleep 5; date\\\".\"",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"\"sleep 5; date\"",
",",
"shell",
"=",
"True",
",",
"env",
"=",
"{",
"\"LANG\"",
":",
"\"C\"",
"}",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"output",
"=",
"proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"rc",
"=",
"proc",
".",
"poll",
"(",
")",
"if",
"rc",
"!=",
"0",
":",
"LogMsg",
"(",
"\"An error occured executing the command: {0}\"",
".",
"format",
"(",
"output",
")",
")",
"return",
"msg",
"=",
"\"Updating \\\"threadingString\\\" object with data \\\"{0}\\\".\"",
"LogMsg",
"(",
"msg",
".",
"format",
"(",
"output",
")",
")",
"threadingString",
".",
"update",
"(",
"output",
")",
"LogMsg",
"(",
"\"Data update done, exiting thread.\"",
")"
] | Function that does the actual data update. | [
"Function",
"that",
"does",
"the",
"actual",
"data",
"update",
"."
] | b1aad1c7f034509c40d9ab17d59be32e809bd31d | https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/examples/threading_agent.py#L131-L157 | train |
pief/python-netsnmpagent | netsnmpagent.py | netsnmpAgent.getRegistered | def getRegistered(self, context = ""):
""" Returns a dictionary with the currently registered SNMP objects.
    Returned is a dictionary of objects for the specified "context",
which defaults to the default context. """
myobjs = {}
try:
# Python 2.x
objs_iterator = self._objs[context].iteritems()
except AttributeError:
# Python 3.x
objs_iterator = self._objs[context].items()
for oidstr, snmpobj in objs_iterator:
myobjs[oidstr] = {
"type": type(snmpobj).__name__,
"value": snmpobj.value()
}
return dict(myobjs) | python | def getRegistered(self, context = ""):
""" Returns a dictionary with the currently registered SNMP objects.
    Returned is a dictionary of objects for the specified "context",
which defaults to the default context. """
myobjs = {}
try:
# Python 2.x
objs_iterator = self._objs[context].iteritems()
except AttributeError:
# Python 3.x
objs_iterator = self._objs[context].items()
for oidstr, snmpobj in objs_iterator:
myobjs[oidstr] = {
"type": type(snmpobj).__name__,
"value": snmpobj.value()
}
return dict(myobjs) | [
"def",
"getRegistered",
"(",
"self",
",",
"context",
"=",
"\"\"",
")",
":",
"myobjs",
"=",
"{",
"}",
"try",
":",
"# Python 2.x",
"objs_iterator",
"=",
"self",
".",
"_objs",
"[",
"context",
"]",
".",
"iteritems",
"(",
")",
"except",
"AttributeError",
":",
"# Python 3.x",
"objs_iterator",
"=",
"self",
".",
"_objs",
"[",
"context",
"]",
".",
"items",
"(",
")",
"for",
"oidstr",
",",
"snmpobj",
"in",
"objs_iterator",
":",
"myobjs",
"[",
"oidstr",
"]",
"=",
"{",
"\"type\"",
":",
"type",
"(",
"snmpobj",
")",
".",
"__name__",
",",
"\"value\"",
":",
"snmpobj",
".",
"value",
"(",
")",
"}",
"return",
"dict",
"(",
"myobjs",
")"
] | Returns a dictionary with the currently registered SNMP objects.
Returned is a dictionary of objects for the specified "context",
which defaults to the default context. | [
"Returns",
"a",
"dictionary",
"with",
"the",
"currently",
"registered",
"SNMP",
"objects",
"."
] | b1aad1c7f034509c40d9ab17d59be32e809bd31d | https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/netsnmpagent.py#L696-L713 | train |
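A hedged sketch (requires the net-snmp libraries at import time; the OID and object type below are placeholders chosen for illustration):

    import netsnmpagent

    agent = netsnmpagent.netsnmpAgent(AgentName="DemoAgent")
    agent.OctetString(oidstr="1.3.6.1.4.1.8072.9999.1", initval="hello")   # placeholder OID
    print(agent.getRegistered())       # context defaults to ""
    # e.g. {'1.3.6.1.4.1.8072.9999.1': {'type': 'OctetString', 'value': 'hello'}}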
pief/python-netsnmpagent | netsnmpagent.py | netsnmpAgent.start | def start(self):
""" Starts the agent. Among other things, this means connecting
to the master agent, if configured that way. """
if self._status != netsnmpAgentStatus.CONNECTED \
and self._status != netsnmpAgentStatus.RECONNECTING:
self._status = netsnmpAgentStatus.FIRSTCONNECT
libnsa.init_snmp(b(self.AgentName))
if self._status == netsnmpAgentStatus.CONNECTFAILED:
msg = "Error connecting to snmpd instance at \"{0}\" -- " \
"incorrect \"MasterSocket\" or snmpd not running?"
msg = msg.format(self.MasterSocket)
raise netsnmpAgentException(msg) | python | def start(self):
""" Starts the agent. Among other things, this means connecting
to the master agent, if configured that way. """
if self._status != netsnmpAgentStatus.CONNECTED \
and self._status != netsnmpAgentStatus.RECONNECTING:
self._status = netsnmpAgentStatus.FIRSTCONNECT
libnsa.init_snmp(b(self.AgentName))
if self._status == netsnmpAgentStatus.CONNECTFAILED:
msg = "Error connecting to snmpd instance at \"{0}\" -- " \
"incorrect \"MasterSocket\" or snmpd not running?"
msg = msg.format(self.MasterSocket)
raise netsnmpAgentException(msg) | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_status",
"!=",
"netsnmpAgentStatus",
".",
"CONNECTED",
"and",
"self",
".",
"_status",
"!=",
"netsnmpAgentStatus",
".",
"RECONNECTING",
":",
"self",
".",
"_status",
"=",
"netsnmpAgentStatus",
".",
"FIRSTCONNECT",
"libnsa",
".",
"init_snmp",
"(",
"b",
"(",
"self",
".",
"AgentName",
")",
")",
"if",
"self",
".",
"_status",
"==",
"netsnmpAgentStatus",
".",
"CONNECTFAILED",
":",
"msg",
"=",
"\"Error connecting to snmpd instance at \\\"{0}\\\" -- \"",
"\"incorrect \\\"MasterSocket\\\" or snmpd not running?\"",
"msg",
"=",
"msg",
".",
"format",
"(",
"self",
".",
"MasterSocket",
")",
"raise",
"netsnmpAgentException",
"(",
"msg",
")"
] | Starts the agent. Among other things, this means connecting
to the master agent, if configured that way. | [
"Starts",
"the",
"agent",
".",
"Among",
"other",
"things",
"this",
"means",
"connecting",
"to",
"the",
"master",
"agent",
"if",
"configured",
"that",
"way",
"."
] | b1aad1c7f034509c40d9ab17d59be32e809bd31d | https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/netsnmpagent.py#L715-L726 | train |
tonycpsu/panwid | panwid/scroll.py | Scrollable._adjust_trim_top | def _adjust_trim_top(self, canv, size):
"""Adjust self._trim_top according to self._scroll_action"""
action = self._scroll_action
self._scroll_action = None
maxcol, maxrow = size
trim_top = self._trim_top
canv_rows = canv.rows()
if trim_top < 0:
# Negative trim_top values use bottom of canvas as reference
trim_top = canv_rows - maxrow + trim_top + 1
if canv_rows <= maxrow:
self._trim_top = 0 # Reset scroll position
return
def ensure_bounds(new_trim_top):
return max(0, min(canv_rows - maxrow, new_trim_top))
if action == SCROLL_LINE_UP:
self._trim_top = ensure_bounds(trim_top - 1)
elif action == SCROLL_LINE_DOWN:
self._trim_top = ensure_bounds(trim_top + 1)
elif action == SCROLL_PAGE_UP:
self._trim_top = ensure_bounds(trim_top - maxrow+1)
elif action == SCROLL_PAGE_DOWN:
self._trim_top = ensure_bounds(trim_top + maxrow-1)
elif action == SCROLL_TO_TOP:
self._trim_top = 0
elif action == SCROLL_TO_END:
self._trim_top = canv_rows - maxrow
else:
self._trim_top = ensure_bounds(trim_top)
# If the cursor was moved by the most recent keypress, adjust trim_top
# so that the new cursor position is within the displayed canvas part.
# But don't do this if the cursor is at the top/bottom edge so we can still scroll out
if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
self._old_cursor_coords = None
curscol, cursrow = canv.cursor
if cursrow < self._trim_top:
self._trim_top = cursrow
elif cursrow >= self._trim_top + maxrow:
self._trim_top = max(0, cursrow - maxrow + 1) | python | def _adjust_trim_top(self, canv, size):
"""Adjust self._trim_top according to self._scroll_action"""
action = self._scroll_action
self._scroll_action = None
maxcol, maxrow = size
trim_top = self._trim_top
canv_rows = canv.rows()
if trim_top < 0:
# Negative trim_top values use bottom of canvas as reference
trim_top = canv_rows - maxrow + trim_top + 1
if canv_rows <= maxrow:
self._trim_top = 0 # Reset scroll position
return
def ensure_bounds(new_trim_top):
return max(0, min(canv_rows - maxrow, new_trim_top))
if action == SCROLL_LINE_UP:
self._trim_top = ensure_bounds(trim_top - 1)
elif action == SCROLL_LINE_DOWN:
self._trim_top = ensure_bounds(trim_top + 1)
elif action == SCROLL_PAGE_UP:
self._trim_top = ensure_bounds(trim_top - maxrow+1)
elif action == SCROLL_PAGE_DOWN:
self._trim_top = ensure_bounds(trim_top + maxrow-1)
elif action == SCROLL_TO_TOP:
self._trim_top = 0
elif action == SCROLL_TO_END:
self._trim_top = canv_rows - maxrow
else:
self._trim_top = ensure_bounds(trim_top)
# If the cursor was moved by the most recent keypress, adjust trim_top
# so that the new cursor position is within the displayed canvas part.
# But don't do this if the cursor is at the top/bottom edge so we can still scroll out
if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
self._old_cursor_coords = None
curscol, cursrow = canv.cursor
if cursrow < self._trim_top:
self._trim_top = cursrow
elif cursrow >= self._trim_top + maxrow:
self._trim_top = max(0, cursrow - maxrow + 1) | [
"def",
"_adjust_trim_top",
"(",
"self",
",",
"canv",
",",
"size",
")",
":",
"action",
"=",
"self",
".",
"_scroll_action",
"self",
".",
"_scroll_action",
"=",
"None",
"maxcol",
",",
"maxrow",
"=",
"size",
"trim_top",
"=",
"self",
".",
"_trim_top",
"canv_rows",
"=",
"canv",
".",
"rows",
"(",
")",
"if",
"trim_top",
"<",
"0",
":",
"# Negative trim_top values use bottom of canvas as reference",
"trim_top",
"=",
"canv_rows",
"-",
"maxrow",
"+",
"trim_top",
"+",
"1",
"if",
"canv_rows",
"<=",
"maxrow",
":",
"self",
".",
"_trim_top",
"=",
"0",
"# Reset scroll position",
"return",
"def",
"ensure_bounds",
"(",
"new_trim_top",
")",
":",
"return",
"max",
"(",
"0",
",",
"min",
"(",
"canv_rows",
"-",
"maxrow",
",",
"new_trim_top",
")",
")",
"if",
"action",
"==",
"SCROLL_LINE_UP",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"-",
"1",
")",
"elif",
"action",
"==",
"SCROLL_LINE_DOWN",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"+",
"1",
")",
"elif",
"action",
"==",
"SCROLL_PAGE_UP",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"-",
"maxrow",
"+",
"1",
")",
"elif",
"action",
"==",
"SCROLL_PAGE_DOWN",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"+",
"maxrow",
"-",
"1",
")",
"elif",
"action",
"==",
"SCROLL_TO_TOP",
":",
"self",
".",
"_trim_top",
"=",
"0",
"elif",
"action",
"==",
"SCROLL_TO_END",
":",
"self",
".",
"_trim_top",
"=",
"canv_rows",
"-",
"maxrow",
"else",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
")",
"# If the cursor was moved by the most recent keypress, adjust trim_top",
"# so that the new cursor position is within the displayed canvas part.",
"# But don't do this if the cursor is at the top/bottom edge so we can still scroll out",
"if",
"self",
".",
"_old_cursor_coords",
"is",
"not",
"None",
"and",
"self",
".",
"_old_cursor_coords",
"!=",
"canv",
".",
"cursor",
":",
"self",
".",
"_old_cursor_coords",
"=",
"None",
"curscol",
",",
"cursrow",
"=",
"canv",
".",
"cursor",
"if",
"cursrow",
"<",
"self",
".",
"_trim_top",
":",
"self",
".",
"_trim_top",
"=",
"cursrow",
"elif",
"cursrow",
">=",
"self",
".",
"_trim_top",
"+",
"maxrow",
":",
"self",
".",
"_trim_top",
"=",
"max",
"(",
"0",
",",
"cursrow",
"-",
"maxrow",
"+",
"1",
")"
] | Adjust self._trim_top according to self._scroll_action | [
"Adjust",
"self",
".",
"_trim_top",
"according",
"to",
"self",
".",
"_scroll_action"
] | e83a1f612cf5c53de88a7180c1b84b3b7b85460a | https://github.com/tonycpsu/panwid/blob/e83a1f612cf5c53de88a7180c1b84b3b7b85460a/panwid/scroll.py#L177-L224 | train |
tonycpsu/panwid | panwid/scroll.py | Scrollable.rows_max | def rows_max(self, size=None, focus=False):
"""Return the number of rows for `size`
If `size` is not given, the currently rendered number of rows is returned.
"""
if size is not None:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
sizing = ow.sizing()
if FIXED in sizing:
self._rows_max_cached = ow.pack(ow_size, focus)[1]
elif FLOW in sizing:
self._rows_max_cached = ow.rows(ow_size, focus)
else:
raise RuntimeError('Not a flow/box widget: %r' % self._original_widget)
return self._rows_max_cached | python | def rows_max(self, size=None, focus=False):
"""Return the number of rows for `size`
If `size` is not given, the currently rendered number of rows is returned.
"""
if size is not None:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
sizing = ow.sizing()
if FIXED in sizing:
self._rows_max_cached = ow.pack(ow_size, focus)[1]
elif FLOW in sizing:
self._rows_max_cached = ow.rows(ow_size, focus)
else:
raise RuntimeError('Not a flow/box widget: %r' % self._original_widget)
return self._rows_max_cached | [
"def",
"rows_max",
"(",
"self",
",",
"size",
"=",
"None",
",",
"focus",
"=",
"False",
")",
":",
"if",
"size",
"is",
"not",
"None",
":",
"ow",
"=",
"self",
".",
"_original_widget",
"ow_size",
"=",
"self",
".",
"_get_original_widget_size",
"(",
"size",
")",
"sizing",
"=",
"ow",
".",
"sizing",
"(",
")",
"if",
"FIXED",
"in",
"sizing",
":",
"self",
".",
"_rows_max_cached",
"=",
"ow",
".",
"pack",
"(",
"ow_size",
",",
"focus",
")",
"[",
"1",
"]",
"elif",
"FLOW",
"in",
"sizing",
":",
"self",
".",
"_rows_max_cached",
"=",
"ow",
".",
"rows",
"(",
"ow_size",
",",
"focus",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Not a flow/box widget: %r'",
"%",
"self",
".",
"_original_widget",
")",
"return",
"self",
".",
"_rows_max_cached"
] | Return the number of rows for `size`
If `size` is not given, the currently rendered number of rows is returned. | [
"Return",
"the",
"number",
"of",
"rows",
"for",
"size"
] | e83a1f612cf5c53de88a7180c1b84b3b7b85460a | https://github.com/tonycpsu/panwid/blob/e83a1f612cf5c53de88a7180c1b84b3b7b85460a/panwid/scroll.py#L257-L272 | train |
tonycpsu/panwid | panwid/scroll.py | ScrollBar.scrolling_base_widget | def scrolling_base_widget(self):
"""Nearest `original_widget` that is compatible with the scrolling API"""
def orig_iter(w):
while hasattr(w, 'original_widget'):
w = w.original_widget
yield w
yield w
def is_scrolling_widget(w):
return hasattr(w, 'get_scrollpos') and hasattr(w, 'rows_max')
for w in orig_iter(self):
if is_scrolling_widget(w):
return w
raise ValueError('Not compatible to be wrapped by ScrollBar: %r' % w) | python | def scrolling_base_widget(self):
"""Nearest `original_widget` that is compatible with the scrolling API"""
def orig_iter(w):
while hasattr(w, 'original_widget'):
w = w.original_widget
yield w
yield w
def is_scrolling_widget(w):
return hasattr(w, 'get_scrollpos') and hasattr(w, 'rows_max')
for w in orig_iter(self):
if is_scrolling_widget(w):
return w
raise ValueError('Not compatible to be wrapped by ScrollBar: %r' % w) | [
"def",
"scrolling_base_widget",
"(",
"self",
")",
":",
"def",
"orig_iter",
"(",
"w",
")",
":",
"while",
"hasattr",
"(",
"w",
",",
"'original_widget'",
")",
":",
"w",
"=",
"w",
".",
"original_widget",
"yield",
"w",
"yield",
"w",
"def",
"is_scrolling_widget",
"(",
"w",
")",
":",
"return",
"hasattr",
"(",
"w",
",",
"'get_scrollpos'",
")",
"and",
"hasattr",
"(",
"w",
",",
"'rows_max'",
")",
"for",
"w",
"in",
"orig_iter",
"(",
"self",
")",
":",
"if",
"is_scrolling_widget",
"(",
"w",
")",
":",
"return",
"w",
"raise",
"ValueError",
"(",
"'Not compatible to be wrapped by ScrollBar: %r'",
"%",
"w",
")"
] | Nearest `original_widget` that is compatible with the scrolling API | [
"Nearest",
"original_widget",
"that",
"is",
"compatible",
"with",
"the",
"scrolling",
"API"
] | e83a1f612cf5c53de88a7180c1b84b3b7b85460a | https://github.com/tonycpsu/panwid/blob/e83a1f612cf5c53de88a7180c1b84b3b7b85460a/panwid/scroll.py#L389-L403 | train |
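A sketch of the unwrapping this accessor performs (in the panwid source it is exposed as a property; the decorator is simply not visible in this extract):

    import urwid
    from panwid.scroll import Scrollable, ScrollBar   # module path taken from this record

    pile = urwid.Pile([urwid.Text("line %d" % i) for i in range(100)])
    scrollable = Scrollable(pile)
    decorated = urwid.AttrMap(scrollable, "body")     # an extra original_widget layer
    bar = ScrollBar(decorated)
    assert bar.scrolling_base_widget is scrollable    # found by walking original_widget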
kyuupichan/aiorpcX | aiorpcx/curio.py | ignore_after | def ignore_after(seconds, coro=None, *args, timeout_result=None):
'''Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after().
'''
if coro:
return _ignore_after_func(seconds, False, coro, args, timeout_result)
return TimeoutAfter(seconds, ignore=True) | python | def ignore_after(seconds, coro=None, *args, timeout_result=None):
'''Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after().
'''
if coro:
return _ignore_after_func(seconds, False, coro, args, timeout_result)
return TimeoutAfter(seconds, ignore=True) | [
"def",
"ignore_after",
"(",
"seconds",
",",
"coro",
"=",
"None",
",",
"*",
"args",
",",
"timeout_result",
"=",
"None",
")",
":",
"if",
"coro",
":",
"return",
"_ignore_after_func",
"(",
"seconds",
",",
"False",
",",
"coro",
",",
"args",
",",
"timeout_result",
")",
"return",
"TimeoutAfter",
"(",
"seconds",
",",
"ignore",
"=",
"True",
")"
] | Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after(). | [
"Execute",
"the",
"specified",
"coroutine",
"and",
"return",
"its",
"result",
".",
"Issue",
"a",
"cancellation",
"request",
"after",
"seconds",
"have",
"elapsed",
".",
"When",
"a",
"timeout",
"occurs",
"no",
"exception",
"is",
"raised",
".",
"Instead",
"timeout_result",
"is",
"returned",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L392-L411 | train |
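A usage sketch of the context-manager form; per the docstring, the manager's expired attribute reports whether the deadline fired:

    import asyncio
    from aiorpcx.curio import ignore_after   # module path taken from this record

    async def main():
        async with ignore_after(0.05) as timeout:
            await asyncio.sleep(1)           # cancelled after 50 ms; no exception escapes
        print(timeout.expired)               # True

    asyncio.run(main())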
kyuupichan/aiorpcX | aiorpcx/curio.py | TaskGroup._add_task | def _add_task(self, task):
'''Add an already existing task to the task group.'''
if hasattr(task, '_task_group'):
raise RuntimeError('task is already part of a group')
if self._closed:
raise RuntimeError('task group is closed')
task._task_group = self
if task.done():
self._done.append(task)
else:
self._pending.add(task)
task.add_done_callback(self._on_done) | python | def _add_task(self, task):
'''Add an already existing task to the task group.'''
if hasattr(task, '_task_group'):
raise RuntimeError('task is already part of a group')
if self._closed:
raise RuntimeError('task group is closed')
task._task_group = self
if task.done():
self._done.append(task)
else:
self._pending.add(task)
task.add_done_callback(self._on_done) | [
"def",
"_add_task",
"(",
"self",
",",
"task",
")",
":",
"if",
"hasattr",
"(",
"task",
",",
"'_task_group'",
")",
":",
"raise",
"RuntimeError",
"(",
"'task is already part of a group'",
")",
"if",
"self",
".",
"_closed",
":",
"raise",
"RuntimeError",
"(",
"'task group is closed'",
")",
"task",
".",
"_task_group",
"=",
"self",
"if",
"task",
".",
"done",
"(",
")",
":",
"self",
".",
"_done",
".",
"append",
"(",
"task",
")",
"else",
":",
"self",
".",
"_pending",
".",
"add",
"(",
"task",
")",
"task",
".",
"add_done_callback",
"(",
"self",
".",
"_on_done",
")"
] | Add an already existing task to the task group. | [
"Add",
"an",
"already",
"existing",
"task",
"to",
"the",
"task",
"group",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L118-L129 | train |
kyuupichan/aiorpcX | aiorpcx/curio.py | TaskGroup.next_done | async def next_done(self):
'''Returns the next completed task. Returns None if no more tasks
remain. A TaskGroup may also be used as an asynchronous iterator.
'''
if not self._done and self._pending:
self._done_event.clear()
await self._done_event.wait()
if self._done:
return self._done.popleft()
return None | python | async def next_done(self):
'''Returns the next completed task. Returns None if no more tasks
remain. A TaskGroup may also be used as an asynchronous iterator.
'''
if not self._done and self._pending:
self._done_event.clear()
await self._done_event.wait()
if self._done:
return self._done.popleft()
return None | [
"async",
"def",
"next_done",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_done",
"and",
"self",
".",
"_pending",
":",
"self",
".",
"_done_event",
".",
"clear",
"(",
")",
"await",
"self",
".",
"_done_event",
".",
"wait",
"(",
")",
"if",
"self",
".",
"_done",
":",
"return",
"self",
".",
"_done",
".",
"popleft",
"(",
")",
"return",
"None"
] | Returns the next completed task. Returns None if no more tasks
remain. A TaskGroup may also be used as an asynchronous iterator. | [
"Returns",
"the",
"next",
"completed",
"task",
".",
"Returns",
"None",
"if",
"no",
"more",
"tasks",
"remain",
".",
"A",
"TaskGroup",
"may",
"also",
"be",
"used",
"as",
"an",
"asynchronous",
"iterator",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L155-L164 | train |
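A usage sketch of the async-iterator behavior built on next_done(); spawn is assumed to be the TaskGroup coroutine that creates and adds a task, as elsewhere in aiorpcx.curio:

    import asyncio
    from aiorpcx.curio import TaskGroup

    async def square(x):
        await asyncio.sleep(0.01 * x)
        return x * x

    async def main():
        group = TaskGroup()
        for n in (3, 1, 2):
            await group.spawn(square(n))
        async for task in group:             # yields tasks in completion order
            print(task.result())             # 1, 4, 9

    asyncio.run(main())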
kyuupichan/aiorpcX | aiorpcx/curio.py | TaskGroup.join | async def join(self):
'''Wait for tasks in the group to terminate according to the wait
policy for the group.
If the join() operation itself is cancelled, all remaining
tasks in the group are also cancelled.
If a TaskGroup is used as a context manager, the join() method
is called on context-exit.
Once join() returns, no more tasks may be added to the task
group. Tasks can be added while join() is running.
'''
def errored(task):
return not task.cancelled() and task.exception()
try:
if self._wait in (all, object):
while True:
task = await self.next_done()
if task is None:
return
if errored(task):
break
if self._wait is object:
if task.cancelled() or task.result() is not None:
return
else: # any
task = await self.next_done()
if task is None or not errored(task):
return
finally:
await self.cancel_remaining()
if errored(task):
raise task.exception() | python | async def join(self):
'''Wait for tasks in the group to terminate according to the wait
policy for the group.
If the join() operation itself is cancelled, all remaining
tasks in the group are also cancelled.
If a TaskGroup is used as a context manager, the join() method
is called on context-exit.
Once join() returns, no more tasks may be added to the task
group. Tasks can be added while join() is running.
'''
def errored(task):
return not task.cancelled() and task.exception()
try:
if self._wait in (all, object):
while True:
task = await self.next_done()
if task is None:
return
if errored(task):
break
if self._wait is object:
if task.cancelled() or task.result() is not None:
return
else: # any
task = await self.next_done()
if task is None or not errored(task):
return
finally:
await self.cancel_remaining()
if errored(task):
raise task.exception() | [
"async",
"def",
"join",
"(",
"self",
")",
":",
"def",
"errored",
"(",
"task",
")",
":",
"return",
"not",
"task",
".",
"cancelled",
"(",
")",
"and",
"task",
".",
"exception",
"(",
")",
"try",
":",
"if",
"self",
".",
"_wait",
"in",
"(",
"all",
",",
"object",
")",
":",
"while",
"True",
":",
"task",
"=",
"await",
"self",
".",
"next_done",
"(",
")",
"if",
"task",
"is",
"None",
":",
"return",
"if",
"errored",
"(",
"task",
")",
":",
"break",
"if",
"self",
".",
"_wait",
"is",
"object",
":",
"if",
"task",
".",
"cancelled",
"(",
")",
"or",
"task",
".",
"result",
"(",
")",
"is",
"not",
"None",
":",
"return",
"else",
":",
"# any",
"task",
"=",
"await",
"self",
".",
"next_done",
"(",
")",
"if",
"task",
"is",
"None",
"or",
"not",
"errored",
"(",
"task",
")",
":",
"return",
"finally",
":",
"await",
"self",
".",
"cancel_remaining",
"(",
")",
"if",
"errored",
"(",
"task",
")",
":",
"raise",
"task",
".",
"exception",
"(",
")"
] | Wait for tasks in the group to terminate according to the wait
policy for the group.
If the join() operation itself is cancelled, all remaining
tasks in the group are also cancelled.
If a TaskGroup is used as a context manager, the join() method
is called on context-exit.
Once join() returns, no more tasks may be added to the task
group. Tasks can be added while join() is running. | [
"Wait",
"for",
"tasks",
"in",
"the",
"group",
"to",
"terminate",
"according",
"to",
"the",
"wait",
"policy",
"for",
"the",
"group",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L176-L211 | train |
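Because join() runs on context-exit, the usual pattern is the async context-manager form; a sketch reusing the names from the sketches above:

    async def run_both():
        async with TaskGroup() as group:
            group.add(asyncio.ensure_future(worker(1)))
            group.add(asyncio.ensure_future(worker(2)))
        # leaving the block awaited join(): tasks are now done or cancelled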
kyuupichan/aiorpcX | aiorpcx/curio.py | TaskGroup.cancel_remaining | async def cancel_remaining(self):
'''Cancel all remaining tasks.'''
self._closed = True
task_list = list(self._pending)
for task in task_list:
task.cancel()
for task in task_list:
with suppress(CancelledError):
await task | python | async def cancel_remaining(self):
'''Cancel all remaining tasks.'''
self._closed = True
task_list = list(self._pending)
for task in task_list:
task.cancel()
for task in task_list:
with suppress(CancelledError):
await task | [
"async",
"def",
"cancel_remaining",
"(",
"self",
")",
":",
"self",
".",
"_closed",
"=",
"True",
"task_list",
"=",
"list",
"(",
"self",
".",
"_pending",
")",
"for",
"task",
"in",
"task_list",
":",
"task",
".",
"cancel",
"(",
")",
"for",
"task",
"in",
"task_list",
":",
"with",
"suppress",
"(",
"CancelledError",
")",
":",
"await",
"task"
] | Cancel all remaining tasks. | [
"Cancel",
"all",
"remaining",
"tasks",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L213-L221 | train |
kyuupichan/aiorpcX | aiorpcx/socks.py | SOCKSProxy._connect_one | async def _connect_one(self, remote_address):
'''Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
'''
loop = asyncio.get_event_loop()
for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
type=socket.SOCK_STREAM):
# This object has state so is only good for one connection
client = self.protocol(remote_address, self.auth)
sock = socket.socket(family=info[0])
try:
# A non-blocking socket is required by loop socket methods
sock.setblocking(False)
await loop.sock_connect(sock, info[4])
await self._handshake(client, sock, loop)
self.peername = sock.getpeername()
return sock
except (OSError, SOCKSProtocolError) as e:
exception = e
# Don't close the socket because of an asyncio bug
# see https://github.com/kyuupichan/aiorpcX/issues/8
return exception | python | async def _connect_one(self, remote_address):
'''Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
'''
loop = asyncio.get_event_loop()
for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
type=socket.SOCK_STREAM):
# This object has state so is only good for one connection
client = self.protocol(remote_address, self.auth)
sock = socket.socket(family=info[0])
try:
# A non-blocking socket is required by loop socket methods
sock.setblocking(False)
await loop.sock_connect(sock, info[4])
await self._handshake(client, sock, loop)
self.peername = sock.getpeername()
return sock
except (OSError, SOCKSProtocolError) as e:
exception = e
# Don't close the socket because of an asyncio bug
# see https://github.com/kyuupichan/aiorpcX/issues/8
return exception | [
"async",
"def",
"_connect_one",
"(",
"self",
",",
"remote_address",
")",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"for",
"info",
"in",
"await",
"loop",
".",
"getaddrinfo",
"(",
"str",
"(",
"self",
".",
"address",
".",
"host",
")",
",",
"self",
".",
"address",
".",
"port",
",",
"type",
"=",
"socket",
".",
"SOCK_STREAM",
")",
":",
"# This object has state so is only good for one connection",
"client",
"=",
"self",
".",
"protocol",
"(",
"remote_address",
",",
"self",
".",
"auth",
")",
"sock",
"=",
"socket",
".",
"socket",
"(",
"family",
"=",
"info",
"[",
"0",
"]",
")",
"try",
":",
"# A non-blocking socket is required by loop socket methods",
"sock",
".",
"setblocking",
"(",
"False",
")",
"await",
"loop",
".",
"sock_connect",
"(",
"sock",
",",
"info",
"[",
"4",
"]",
")",
"await",
"self",
".",
"_handshake",
"(",
"client",
",",
"sock",
",",
"loop",
")",
"self",
".",
"peername",
"=",
"sock",
".",
"getpeername",
"(",
")",
"return",
"sock",
"except",
"(",
"OSError",
",",
"SOCKSProtocolError",
")",
"as",
"e",
":",
"exception",
"=",
"e",
"# Don't close the socket because of an asyncio bug",
"# see https://github.com/kyuupichan/aiorpcX/issues/8",
"return",
"exception"
] | Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure. | [
"Connect",
"to",
"the",
"proxy",
"and",
"perform",
"a",
"handshake",
"requesting",
"a",
"connection",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L300-L323 | train |
kyuupichan/aiorpcX | aiorpcx/socks.py | SOCKSProxy._connect | async def _connect(self, remote_addresses):
'''Connect to the proxy and perform a handshake requesting a connection to each address in
addresses.
Return an (open_socket, remote_address) pair on success.
'''
assert remote_addresses
exceptions = []
for remote_address in remote_addresses:
sock = await self._connect_one(remote_address)
if isinstance(sock, socket.socket):
return sock, remote_address
exceptions.append(sock)
strings = set(f'{exc!r}' for exc in exceptions)
raise (exceptions[0] if len(strings) == 1 else
OSError(f'multiple exceptions: {", ".join(strings)}')) | python | async def _connect(self, remote_addresses):
'''Connect to the proxy and perform a handshake requesting a connection to each address in
addresses.
Return an (open_socket, remote_address) pair on success.
'''
assert remote_addresses
exceptions = []
for remote_address in remote_addresses:
sock = await self._connect_one(remote_address)
if isinstance(sock, socket.socket):
return sock, remote_address
exceptions.append(sock)
strings = set(f'{exc!r}' for exc in exceptions)
raise (exceptions[0] if len(strings) == 1 else
OSError(f'multiple exceptions: {", ".join(strings)}')) | [
"async",
"def",
"_connect",
"(",
"self",
",",
"remote_addresses",
")",
":",
"assert",
"remote_addresses",
"exceptions",
"=",
"[",
"]",
"for",
"remote_address",
"in",
"remote_addresses",
":",
"sock",
"=",
"await",
"self",
".",
"_connect_one",
"(",
"remote_address",
")",
"if",
"isinstance",
"(",
"sock",
",",
"socket",
".",
"socket",
")",
":",
"return",
"sock",
",",
"remote_address",
"exceptions",
".",
"append",
"(",
"sock",
")",
"strings",
"=",
"set",
"(",
"f'{exc!r}'",
"for",
"exc",
"in",
"exceptions",
")",
"raise",
"(",
"exceptions",
"[",
"0",
"]",
"if",
"len",
"(",
"strings",
")",
"==",
"1",
"else",
"OSError",
"(",
"f'multiple exceptions: {\", \".join(strings)}'",
")",
")"
] | Connect to the proxy and perform a handshake requesting a connection to each address in
addresses.
Return an (open_socket, remote_address) pair on success. | [
"Connect",
"to",
"the",
"proxy",
"and",
"perform",
"a",
"handshake",
"requesting",
"a",
"connection",
"to",
"each",
"address",
"in",
"addresses",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L325-L342 | train |
kyuupichan/aiorpcX | aiorpcx/socks.py | SOCKSProxy._detect_proxy | async def _detect_proxy(self):
'''Return True if it appears we can connect to a SOCKS proxy,
otherwise False.
'''
if self.protocol is SOCKS4a:
remote_address = NetAddress('www.apple.com', 80)
else:
remote_address = NetAddress('8.8.8.8', 53)
sock = await self._connect_one(remote_address)
if isinstance(sock, socket.socket):
sock.close()
return True
# SOCKSFailure indicates something failed, but that we are likely talking to a
# proxy
return isinstance(sock, SOCKSFailure) | python | async def _detect_proxy(self):
'''Return True if it appears we can connect to a SOCKS proxy,
otherwise False.
'''
if self.protocol is SOCKS4a:
remote_address = NetAddress('www.apple.com', 80)
else:
remote_address = NetAddress('8.8.8.8', 53)
sock = await self._connect_one(remote_address)
if isinstance(sock, socket.socket):
sock.close()
return True
# SOCKSFailure indicates something failed, but that we are likely talking to a
# proxy
return isinstance(sock, SOCKSFailure) | [
"async",
"def",
"_detect_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"protocol",
"is",
"SOCKS4a",
":",
"remote_address",
"=",
"NetAddress",
"(",
"'www.apple.com'",
",",
"80",
")",
"else",
":",
"remote_address",
"=",
"NetAddress",
"(",
"'8.8.8.8'",
",",
"53",
")",
"sock",
"=",
"await",
"self",
".",
"_connect_one",
"(",
"remote_address",
")",
"if",
"isinstance",
"(",
"sock",
",",
"socket",
".",
"socket",
")",
":",
"sock",
".",
"close",
"(",
")",
"return",
"True",
"# SOCKSFailure indicates something failed, but that we are likely talking to a",
"# proxy",
"return",
"isinstance",
"(",
"sock",
",",
"SOCKSFailure",
")"
] | Return True if it appears we can connect to a SOCKS proxy,
otherwise False. | [
"Return",
"True",
"if",
"it",
"appears",
"we",
"can",
"connect",
"to",
"a",
"SOCKS",
"proxy",
"otherwise",
"False",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L344-L360 | train |
kyuupichan/aiorpcX | aiorpcx/socks.py | SOCKSProxy.auto_detect_at_host | async def auto_detect_at_host(cls, host, ports, auth):
'''Try to detect a SOCKS proxy on a host on one of the ports.
Calls auto_detect_at_address for the ports in order. Returning a SOCKSProxy does not
mean it is functioning - for example, it may have no network connectivity.
If no proxy is detected return None.
'''
for port in ports:
proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth)
if proxy:
return proxy
return None | python | async def auto_detect_at_host(cls, host, ports, auth):
'''Try to detect a SOCKS proxy on a host on one of the ports.
Calls auto_detect_at_address for the ports in order. Returning a SOCKSProxy does not
mean it is functioning - for example, it may have no network connectivity.
If no proxy is detected return None.
'''
for port in ports:
proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth)
if proxy:
return proxy
return None | [
"async",
"def",
"auto_detect_at_host",
"(",
"cls",
",",
"host",
",",
"ports",
",",
"auth",
")",
":",
"for",
"port",
"in",
"ports",
":",
"proxy",
"=",
"await",
"cls",
".",
"auto_detect_at_address",
"(",
"NetAddress",
"(",
"host",
",",
"port",
")",
",",
"auth",
")",
"if",
"proxy",
":",
"return",
"proxy",
"return",
"None"
] | Try to detect a SOCKS proxy on a host on one of the ports.
Calls auto_detect_at_address for the ports in order. Returning a SOCKSProxy does not
mean it is functioning - for example, it may have no network connectivity.
If no proxy is detected return None. | [
"Try",
"to",
"detect",
"a",
"SOCKS",
"proxy",
"on",
"a",
"host",
"on",
"one",
"of",
"the",
"ports",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L380-L393 | train |
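A sketch probing the common local proxy ports; the top-level SOCKSProxy import and the port choices are assumptions:

    from aiorpcx import SOCKSProxy  # assumption: exported at top level

    async def find_local_proxy():
        # 9050 is Tor's default SOCKS port, 1080 the generic one; no auth
        proxy = await SOCKSProxy.auto_detect_at_host('localhost', [9050, 1080], None)
        if proxy is None:
            print('no SOCKS proxy detected')
        return proxy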
kyuupichan/aiorpcX | aiorpcx/session.py | Connector.create_connection | async def create_connection(self):
'''Initiate a connection.'''
connector = self.proxy or self.loop
return await connector.create_connection(
self.session_factory, self.host, self.port, **self.kwargs) | python | async def create_connection(self):
'''Initiate a connection.'''
connector = self.proxy or self.loop
return await connector.create_connection(
self.session_factory, self.host, self.port, **self.kwargs) | [
"async",
"def",
"create_connection",
"(",
"self",
")",
":",
"connector",
"=",
"self",
".",
"proxy",
"or",
"self",
".",
"loop",
"return",
"await",
"connector",
".",
"create_connection",
"(",
"self",
".",
"session_factory",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"*",
"*",
"self",
".",
"kwargs",
")"
] | Initiate a connection. | [
"Initiate",
"a",
"connection",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L73-L77 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.data_received | def data_received(self, framed_message):
'''Called by asyncio when a message comes in.'''
if self.verbosity >= 4:
self.logger.debug(f'Received framed message {framed_message}')
self.recv_size += len(framed_message)
self.bump_cost(len(framed_message) * self.bw_cost_per_byte)
self.framer.received_bytes(framed_message) | python | def data_received(self, framed_message):
'''Called by asyncio when a message comes in.'''
if self.verbosity >= 4:
self.logger.debug(f'Received framed message {framed_message}')
self.recv_size += len(framed_message)
self.bump_cost(len(framed_message) * self.bw_cost_per_byte)
self.framer.received_bytes(framed_message) | [
"def",
"data_received",
"(",
"self",
",",
"framed_message",
")",
":",
"if",
"self",
".",
"verbosity",
">=",
"4",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f'Received framed message {framed_message}'",
")",
"self",
".",
"recv_size",
"+=",
"len",
"(",
"framed_message",
")",
"self",
".",
"bump_cost",
"(",
"len",
"(",
"framed_message",
")",
"*",
"self",
".",
"bw_cost_per_byte",
")",
"self",
".",
"framer",
".",
"received_bytes",
"(",
"framed_message",
")"
] | Called by asyncio when a message comes in. | [
"Called",
"by",
"asyncio",
"when",
"a",
"message",
"comes",
"in",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L224-L230 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.pause_writing | def pause_writing(self):
'''Transport calls when the send buffer is full.'''
if not self.is_closing():
self._can_send.clear()
self.transport.pause_reading() | python | def pause_writing(self):
'''Transport calls when the send buffer is full.'''
if not self.is_closing():
self._can_send.clear()
self.transport.pause_reading() | [
"def",
"pause_writing",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_closing",
"(",
")",
":",
"self",
".",
"_can_send",
".",
"clear",
"(",
")",
"self",
".",
"transport",
".",
"pause_reading",
"(",
")"
] | Transport calls when the send buffer is full. | [
"Transport",
"calls",
"when",
"the",
"send",
"buffer",
"is",
"full",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L232-L236 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.resume_writing | def resume_writing(self):
'''Transport calls when the send buffer has room.'''
if not self._can_send.is_set():
self._can_send.set()
self.transport.resume_reading() | python | def resume_writing(self):
'''Transport calls when the send buffer has room.'''
if not self._can_send.is_set():
self._can_send.set()
self.transport.resume_reading() | [
"def",
"resume_writing",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_can_send",
".",
"is_set",
"(",
")",
":",
"self",
".",
"_can_send",
".",
"set",
"(",
")",
"self",
".",
"transport",
".",
"resume_reading",
"(",
")"
] | Transport calls when the send buffer has room. | [
"Transport",
"calls",
"when",
"the",
"send",
"buffer",
"has",
"room",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L238-L242 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.connection_made | def connection_made(self, transport):
'''Called by asyncio when a connection is established.
Derived classes overriding this method must call this first.'''
self.transport = transport
# If the Socks proxy was used then _proxy and _remote_address are already set
if self._proxy is None:
# This would throw if called on a closed SSL transport. Fixed in asyncio in
# Python 3.6.1 and 3.5.4
peername = transport.get_extra_info('peername')
self._remote_address = NetAddress(peername[0], peername[1])
self._task = spawn_sync(self._process_messages(), loop=self.loop) | python | def connection_made(self, transport):
'''Called by asyncio when a connection is established.
Derived classes overriding this method must call this first.'''
self.transport = transport
# If the Socks proxy was used then _proxy and _remote_address are already set
if self._proxy is None:
# This would throw if called on a closed SSL transport. Fixed in asyncio in
# Python 3.6.1 and 3.5.4
peername = transport.get_extra_info('peername')
self._remote_address = NetAddress(peername[0], peername[1])
self._task = spawn_sync(self._process_messages(), loop=self.loop) | [
"def",
"connection_made",
"(",
"self",
",",
"transport",
")",
":",
"self",
".",
"transport",
"=",
"transport",
"# If the Socks proxy was used then _proxy and _remote_address are already set",
"if",
"self",
".",
"_proxy",
"is",
"None",
":",
"# This would throw if called on a closed SSL transport. Fixed in asyncio in",
"# Python 3.6.1 and 3.5.4",
"peername",
"=",
"transport",
".",
"get_extra_info",
"(",
"'peername'",
")",
"self",
".",
"_remote_address",
"=",
"NetAddress",
"(",
"peername",
"[",
"0",
"]",
",",
"peername",
"[",
"1",
"]",
")",
"self",
".",
"_task",
"=",
"spawn_sync",
"(",
"self",
".",
"_process_messages",
"(",
")",
",",
"loop",
"=",
"self",
".",
"loop",
")"
] | Called by asyncio when a connection is established.
Derived classes overriding this method must call this first. | [
"Called",
"by",
"asyncio",
"when",
"a",
"connection",
"is",
"established",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L244-L255 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.connection_lost | def connection_lost(self, exc):
'''Called by asyncio when the connection closes.
Tear down things done in connection_made.'''
# Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
if self.transport:
self.transport = None
self.closed_event.set()
# Release waiting tasks
self._can_send.set()
# Cancelling directly leads to self-cancellation problems for member
# functions await-ing self.close()
self.loop.call_soon(self._task.cancel) | python | def connection_lost(self, exc):
'''Called by asyncio when the connection closes.
Tear down things done in connection_made.'''
# Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
if self.transport:
self.transport = None
self.closed_event.set()
# Release waiting tasks
self._can_send.set()
# Cancelling directly leads to self-cancellation problems for member
# functions await-ing self.close()
self.loop.call_soon(self._task.cancel) | [
"def",
"connection_lost",
"(",
"self",
",",
"exc",
")",
":",
"# Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246",
"if",
"self",
".",
"transport",
":",
"self",
".",
"transport",
"=",
"None",
"self",
".",
"closed_event",
".",
"set",
"(",
")",
"# Release waiting tasks",
"self",
".",
"_can_send",
".",
"set",
"(",
")",
"# Cancelling directly leads to self-cancellation problems for member",
"# functions await-ing self.close()",
"self",
".",
"loop",
".",
"call_soon",
"(",
"self",
".",
"_task",
".",
"cancel",
")"
] | Called by asyncio when the connection closes.
Tear down things done in connection_made. | [
"Called",
"by",
"asyncio",
"when",
"the",
"connection",
"closes",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L257-L269 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.recalc_concurrency | def recalc_concurrency(self):
'''Call to recalculate sleeps and concurrency for the session. Called automatically if
cost has drifted significantly. Otherwise can be called at regular intervals if
desired.
'''
# Refund resource usage proportionally to elapsed time; the bump passed is negative
now = time.time()
self.cost = max(0, self.cost - (now - self._cost_time) * self.cost_decay_per_sec)
self._cost_time = now
self._cost_last = self.cost
# Setting cost_hard_limit <= 0 means to not limit concurrency
value = self._incoming_concurrency.max_concurrent
cost_soft_range = self.cost_hard_limit - self.cost_soft_limit
if cost_soft_range <= 0:
return
cost = self.cost + self.extra_cost()
self._cost_fraction = max(0.0, (cost - self.cost_soft_limit) / cost_soft_range)
target = max(0, ceil((1.0 - self._cost_fraction) * self.initial_concurrent))
if abs(target - value) > 1:
self.logger.info(f'changing task concurrency from {value} to {target}')
self._incoming_concurrency.set_target(target) | python | def recalc_concurrency(self):
'''Call to recalculate sleeps and concurrency for the session. Called automatically if
cost has drifted significantly. Otherwise can be called at regular intervals if
desired.
'''
# Refund resource usage proportionally to elapsed time; the bump passed is negative
now = time.time()
self.cost = max(0, self.cost - (now - self._cost_time) * self.cost_decay_per_sec)
self._cost_time = now
self._cost_last = self.cost
# Setting cost_hard_limit <= 0 means to not limit concurrency
value = self._incoming_concurrency.max_concurrent
cost_soft_range = self.cost_hard_limit - self.cost_soft_limit
if cost_soft_range <= 0:
return
cost = self.cost + self.extra_cost()
self._cost_fraction = max(0.0, (cost - self.cost_soft_limit) / cost_soft_range)
target = max(0, ceil((1.0 - self._cost_fraction) * self.initial_concurrent))
if abs(target - value) > 1:
self.logger.info(f'changing task concurrency from {value} to {target}')
self._incoming_concurrency.set_target(target) | [
"def",
"recalc_concurrency",
"(",
"self",
")",
":",
"# Refund resource usage proportionally to elapsed time; the bump passed is negative",
"now",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"cost",
"=",
"max",
"(",
"0",
",",
"self",
".",
"cost",
"-",
"(",
"now",
"-",
"self",
".",
"_cost_time",
")",
"*",
"self",
".",
"cost_decay_per_sec",
")",
"self",
".",
"_cost_time",
"=",
"now",
"self",
".",
"_cost_last",
"=",
"self",
".",
"cost",
"# Setting cost_hard_limit <= 0 means to not limit concurrency",
"value",
"=",
"self",
".",
"_incoming_concurrency",
".",
"max_concurrent",
"cost_soft_range",
"=",
"self",
".",
"cost_hard_limit",
"-",
"self",
".",
"cost_soft_limit",
"if",
"cost_soft_range",
"<=",
"0",
":",
"return",
"cost",
"=",
"self",
".",
"cost",
"+",
"self",
".",
"extra_cost",
"(",
")",
"self",
".",
"_cost_fraction",
"=",
"max",
"(",
"0.0",
",",
"(",
"cost",
"-",
"self",
".",
"cost_soft_limit",
")",
"/",
"cost_soft_range",
")",
"target",
"=",
"max",
"(",
"0",
",",
"ceil",
"(",
"(",
"1.0",
"-",
"self",
".",
"_cost_fraction",
")",
"*",
"self",
".",
"initial_concurrent",
")",
")",
"if",
"abs",
"(",
"target",
"-",
"value",
")",
">",
"1",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f'changing task concurrency from {value} to {target}'",
")",
"self",
".",
"_incoming_concurrency",
".",
"set_target",
"(",
"target",
")"
] | Call to recalculate sleeps and concurrency for the session. Called automatically if
cost has drifted significantly. Otherwise can be called at regular intervals if
desired. | [
"Call",
"to",
"recalculate",
"sleeps",
"and",
"concurrency",
"for",
"the",
"session",
".",
"Called",
"automatically",
"if",
"cost",
"has",
"drifted",
"significantly",
".",
"Otherwise",
"can",
"be",
"called",
"at",
"regular",
"intervals",
"if",
"desired",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L282-L305 | train |
kyuupichan/aiorpcX | aiorpcx/session.py | SessionBase.close | async def close(self, *, force_after=30):
'''Close the connection and return when closed.'''
if self.transport:
self.transport.close()
try:
async with timeout_after(force_after):
await self.closed_event.wait()
except TaskTimeout:
self.abort()
await self.closed_event.wait() | python | async def close(self, *, force_after=30):
'''Close the connection and return when closed.'''
if self.transport:
self.transport.close()
try:
async with timeout_after(force_after):
await self.closed_event.wait()
except TaskTimeout:
self.abort()
await self.closed_event.wait() | [
"async",
"def",
"close",
"(",
"self",
",",
"*",
",",
"force_after",
"=",
"30",
")",
":",
"if",
"self",
".",
"transport",
":",
"self",
".",
"transport",
".",
"close",
"(",
")",
"try",
":",
"async",
"with",
"timeout_after",
"(",
"force_after",
")",
":",
"await",
"self",
".",
"closed_event",
".",
"wait",
"(",
")",
"except",
"TaskTimeout",
":",
"self",
".",
"abort",
"(",
")",
"await",
"self",
".",
"closed_event",
".",
"wait",
"(",
")"
] | Close the connection and return when closed. | [
"Close",
"the",
"connection",
"and",
"return",
"when",
"closed",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L342-L351 | train |
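Usage is one await; force_after bounds how long a graceful close may take before abort() is used. A sketch:

    async def shutdown(session):
        # allow 10 seconds for a clean close, then fall back to abort()
        await session.close(force_after=10)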
kyuupichan/aiorpcX | aiorpcx/session.py | RPCSession.send_request | async def send_request(self, method, args=()):
'''Send an RPC request over the network.'''
message, event = self.connection.send_request(Request(method, args))
return await self._send_concurrent(message, event, 1) | python | async def send_request(self, method, args=()):
'''Send an RPC request over the network.'''
message, event = self.connection.send_request(Request(method, args))
return await self._send_concurrent(message, event, 1) | [
"async",
"def",
"send_request",
"(",
"self",
",",
"method",
",",
"args",
"=",
"(",
")",
")",
":",
"message",
",",
"event",
"=",
"self",
".",
"connection",
".",
"send_request",
"(",
"Request",
"(",
"method",
",",
"args",
")",
")",
"return",
"await",
"self",
".",
"_send_concurrent",
"(",
"message",
",",
"event",
",",
"1",
")"
] | Send an RPC request over the network. | [
"Send",
"an",
"RPC",
"request",
"over",
"the",
"network",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L631-L634 | train |
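A sketch of a round trip; the method name and argument are hypothetical, not part of any particular server API:

    async def get_version(session):
        # blocks until the peer's response (or an error) arrives
        return await session.send_request('server.version', ['1.0'])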
kyuupichan/aiorpcX | aiorpcx/session.py | RPCSession.send_notification | async def send_notification(self, method, args=()):
'''Send an RPC notification over the network.'''
message = self.connection.send_notification(Notification(method, args))
await self._send_message(message) | python | async def send_notification(self, method, args=()):
'''Send an RPC notification over the network.'''
message = self.connection.send_notification(Notification(method, args))
await self._send_message(message) | [
"async",
"def",
"send_notification",
"(",
"self",
",",
"method",
",",
"args",
"=",
"(",
")",
")",
":",
"message",
"=",
"self",
".",
"connection",
".",
"send_notification",
"(",
"Notification",
"(",
"method",
",",
"args",
")",
")",
"await",
"self",
".",
"_send_message",
"(",
"message",
")"
] | Send an RPC notification over the network. | [
"Send",
"an",
"RPC",
"notification",
"over",
"the",
"network",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L636-L639 | train |
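Notifications are fire-and-forget, so there is no result to await beyond the send itself; a sketch with a hypothetical method name:

    async def heartbeat(session):
        # no response is expected for a notification
        await session.send_notification('heartbeat', [])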
kyuupichan/aiorpcX | aiorpcx/session.py | Server.close | async def close(self):
'''Close the listening socket. This does not close any ServerSession
objects created to handle incoming connections.
'''
if self.server:
self.server.close()
await self.server.wait_closed()
self.server = None | python | async def close(self):
'''Close the listening socket. This does not close any ServerSession
objects created to handle incoming connections.
'''
if self.server:
self.server.close()
await self.server.wait_closed()
self.server = None | [
"async",
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"server",
":",
"self",
".",
"server",
".",
"close",
"(",
")",
"await",
"self",
".",
"server",
".",
"wait_closed",
"(",
")",
"self",
".",
"server",
"=",
"None"
] | Close the listening socket. This does not close any ServerSession
objects created to handle incoming connections. | [
"Close",
"the",
"listening",
"socket",
".",
"This",
"does",
"not",
"close",
"any",
"ServerSession",
"objects",
"created",
"to",
"handle",
"incoming",
"connections",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L674-L681 | train |
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPC._message_to_payload | def _message_to_payload(cls, message):
'''Returns a Python object or a ProtocolError.'''
try:
return json.loads(message.decode())
except UnicodeDecodeError:
message = 'messages must be encoded in UTF-8'
except json.JSONDecodeError:
message = 'invalid JSON'
raise cls._error(cls.PARSE_ERROR, message, True, None) | python | def _message_to_payload(cls, message):
'''Returns a Python object or a ProtocolError.'''
try:
return json.loads(message.decode())
except UnicodeDecodeError:
message = 'messages must be encoded in UTF-8'
except json.JSONDecodeError:
message = 'invalid JSON'
raise cls._error(cls.PARSE_ERROR, message, True, None) | [
"def",
"_message_to_payload",
"(",
"cls",
",",
"message",
")",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"message",
".",
"decode",
"(",
")",
")",
"except",
"UnicodeDecodeError",
":",
"message",
"=",
"'messages must be encoded in UTF-8'",
"except",
"json",
".",
"JSONDecodeError",
":",
"message",
"=",
"'invalid JSON'",
"raise",
"cls",
".",
"_error",
"(",
"cls",
".",
"PARSE_ERROR",
",",
"message",
",",
"True",
",",
"None",
")"
] | Returns a Python object or a ProtocolError. | [
"Returns",
"a",
"Python",
"object",
"or",
"a",
"ProtocolError",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L223-L231 | train |
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPC.batch_message | def batch_message(cls, batch, request_ids):
'''Convert a request Batch to a message.'''
assert isinstance(batch, Batch)
if not cls.allow_batches:
raise ProtocolError.invalid_request(
'protocol does not permit batches')
id_iter = iter(request_ids)
rm = cls.request_message
nm = cls.notification_message
parts = (rm(request, next(id_iter)) if isinstance(request, Request)
else nm(request) for request in batch)
return cls.batch_message_from_parts(parts) | python | def batch_message(cls, batch, request_ids):
'''Convert a request Batch to a message.'''
assert isinstance(batch, Batch)
if not cls.allow_batches:
raise ProtocolError.invalid_request(
'protocol does not permit batches')
id_iter = iter(request_ids)
rm = cls.request_message
nm = cls.notification_message
parts = (rm(request, next(id_iter)) if isinstance(request, Request)
else nm(request) for request in batch)
return cls.batch_message_from_parts(parts) | [
"def",
"batch_message",
"(",
"cls",
",",
"batch",
",",
"request_ids",
")",
":",
"assert",
"isinstance",
"(",
"batch",
",",
"Batch",
")",
"if",
"not",
"cls",
".",
"allow_batches",
":",
"raise",
"ProtocolError",
".",
"invalid_request",
"(",
"'protocol does not permit batches'",
")",
"id_iter",
"=",
"iter",
"(",
"request_ids",
")",
"rm",
"=",
"cls",
".",
"request_message",
"nm",
"=",
"cls",
".",
"notification_message",
"parts",
"=",
"(",
"rm",
"(",
"request",
",",
"next",
"(",
"id_iter",
")",
")",
"if",
"isinstance",
"(",
"request",
",",
"Request",
")",
"else",
"nm",
"(",
"request",
")",
"for",
"request",
"in",
"batch",
")",
"return",
"cls",
".",
"batch_message_from_parts",
"(",
"parts",
")"
] | Convert a request Batch to a message. | [
"Convert",
"a",
"request",
"Batch",
"to",
"a",
"message",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L305-L316 | train |
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPC.batch_message_from_parts | def batch_message_from_parts(cls, messages):
'''Convert messages, one per batch item, into a batch message. At
least one message must be passed.
'''
# Comma-separate the messages and wrap the lot in square brackets
middle = b', '.join(messages)
if not middle:
raise ProtocolError.empty_batch()
return b''.join([b'[', middle, b']']) | python | def batch_message_from_parts(cls, messages):
'''Convert messages, one per batch item, into a batch message. At
least one message must be passed.
'''
# Comma-separate the messages and wrap the lot in square brackets
middle = b', '.join(messages)
if not middle:
raise ProtocolError.empty_batch()
return b''.join([b'[', middle, b']']) | [
"def",
"batch_message_from_parts",
"(",
"cls",
",",
"messages",
")",
":",
"# Comma-separate the messages and wrap the lot in square brackets",
"middle",
"=",
"b', '",
".",
"join",
"(",
"messages",
")",
"if",
"not",
"middle",
":",
"raise",
"ProtocolError",
".",
"empty_batch",
"(",
")",
"return",
"b''",
".",
"join",
"(",
"[",
"b'['",
",",
"middle",
",",
"b']'",
"]",
")"
] | Convert messages, one per batch item, into a batch message. At
least one message must be passed. | [
"Convert",
"messages",
"one",
"per",
"batch",
"item",
"into",
"a",
"batch",
"message",
".",
"At",
"least",
"one",
"message",
"must",
"be",
"passed",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L319-L327 | train |
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPC.encode_payload | def encode_payload(cls, payload):
'''Encode a Python object as JSON and convert it to bytes.'''
try:
return json.dumps(payload).encode()
except TypeError:
msg = f'JSON payload encoding error: {payload}'
raise ProtocolError(cls.INTERNAL_ERROR, msg) from None | python | def encode_payload(cls, payload):
'''Encode a Python object as JSON and convert it to bytes.'''
try:
return json.dumps(payload).encode()
except TypeError:
msg = f'JSON payload encoding error: {payload}'
raise ProtocolError(cls.INTERNAL_ERROR, msg) from None | [
"def",
"encode_payload",
"(",
"cls",
",",
"payload",
")",
":",
"try",
":",
"return",
"json",
".",
"dumps",
"(",
"payload",
")",
".",
"encode",
"(",
")",
"except",
"TypeError",
":",
"msg",
"=",
"f'JSON payload encoding error: {payload}'",
"raise",
"ProtocolError",
"(",
"cls",
".",
"INTERNAL_ERROR",
",",
"msg",
")",
"from",
"None"
] | Encode a Python object as JSON and convert it to bytes. | [
"Encode",
"a",
"Python",
"object",
"as",
"JSON",
"and",
"convert",
"it",
"to",
"bytes",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L330-L336 | train |
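A sketch, assuming the concrete JSONRPCv2 subclass inherits this classmethod unchanged:

    from aiorpcx.jsonrpc import JSONRPCv2

    payload = {'jsonrpc': '2.0', 'method': 'ping', 'id': 1}
    message = JSONRPCv2.encode_payload(payload)
    # -> b'{"jsonrpc": "2.0", "method": "ping", "id": 1}'
    # a payload json.dumps cannot handle (e.g. a set) raises ProtocolError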
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPCAutoDetect.detect_protocol | def detect_protocol(cls, message):
'''Attempt to detect the protocol from the message.'''
main = cls._message_to_payload(message)
def protocol_for_payload(payload):
if not isinstance(payload, dict):
return JSONRPCLoose # Will error
# Obey an explicit "jsonrpc"
version = payload.get('jsonrpc')
if version == '2.0':
return JSONRPCv2
if version == '1.0':
return JSONRPCv1
# Now to decide between JSONRPCLoose and JSONRPCv1 if possible
if 'result' in payload and 'error' in payload:
return JSONRPCv1
return JSONRPCLoose
if isinstance(main, list):
parts = set(protocol_for_payload(payload) for payload in main)
# If all same protocol, return it
if len(parts) == 1:
return parts.pop()
# If strict protocol detected, return it, preferring JSONRPCv2.
# This means a batch of JSONRPCv1 will fail
for protocol in (JSONRPCv2, JSONRPCv1):
if protocol in parts:
return protocol
# Will error if no parts
return JSONRPCLoose
return protocol_for_payload(main) | python | def detect_protocol(cls, message):
'''Attempt to detect the protocol from the message.'''
main = cls._message_to_payload(message)
def protocol_for_payload(payload):
if not isinstance(payload, dict):
return JSONRPCLoose # Will error
# Obey an explicit "jsonrpc"
version = payload.get('jsonrpc')
if version == '2.0':
return JSONRPCv2
if version == '1.0':
return JSONRPCv1
# Now to decide between JSONRPCLoose and JSONRPCv1 if possible
if 'result' in payload and 'error' in payload:
return JSONRPCv1
return JSONRPCLoose
if isinstance(main, list):
parts = set(protocol_for_payload(payload) for payload in main)
# If all same protocol, return it
if len(parts) == 1:
return parts.pop()
# If strict protocol detected, return it, preferring JSONRPCv2.
# This means a batch of JSONRPCv1 will fail
for protocol in (JSONRPCv2, JSONRPCv1):
if protocol in parts:
return protocol
# Will error if no parts
return JSONRPCLoose
return protocol_for_payload(main) | [
"def",
"detect_protocol",
"(",
"cls",
",",
"message",
")",
":",
"main",
"=",
"cls",
".",
"_message_to_payload",
"(",
"message",
")",
"def",
"protocol_for_payload",
"(",
"payload",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"dict",
")",
":",
"return",
"JSONRPCLoose",
"# Will error",
"# Obey an explicit \"jsonrpc\"",
"version",
"=",
"payload",
".",
"get",
"(",
"'jsonrpc'",
")",
"if",
"version",
"==",
"'2.0'",
":",
"return",
"JSONRPCv2",
"if",
"version",
"==",
"'1.0'",
":",
"return",
"JSONRPCv1",
"# Now to decide between JSONRPCLoose and JSONRPCv1 if possible",
"if",
"'result'",
"in",
"payload",
"and",
"'error'",
"in",
"payload",
":",
"return",
"JSONRPCv1",
"return",
"JSONRPCLoose",
"if",
"isinstance",
"(",
"main",
",",
"list",
")",
":",
"parts",
"=",
"set",
"(",
"protocol_for_payload",
"(",
"payload",
")",
"for",
"payload",
"in",
"main",
")",
"# If all same protocol, return it",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"return",
"parts",
".",
"pop",
"(",
")",
"# If strict protocol detected, return it, preferring JSONRPCv2.",
"# This means a batch of JSONRPCv1 will fail",
"for",
"protocol",
"in",
"(",
"JSONRPCv2",
",",
"JSONRPCv1",
")",
":",
"if",
"protocol",
"in",
"parts",
":",
"return",
"protocol",
"# Will error if no parts",
"return",
"JSONRPCLoose",
"return",
"protocol_for_payload",
"(",
"main",
")"
] | Attempt to detect the protocol from the message. | [
"Attempt",
"to",
"detect",
"the",
"protocol",
"from",
"the",
"message",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L544-L576 | train |
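The three single-message outcomes, traced through the branches above:

    from aiorpcx.jsonrpc import (JSONRPCAutoDetect, JSONRPCLoose,
                                 JSONRPCv1, JSONRPCv2)

    msg_v2 = b'{"jsonrpc": "2.0", "method": "m", "id": 0}'
    msg_v1 = b'{"result": 1, "error": null, "id": 0}'  # both keys present
    msg_loose = b'{"method": "m", "id": 0}'            # no version marker
    assert JSONRPCAutoDetect.detect_protocol(msg_v2) is JSONRPCv2
    assert JSONRPCAutoDetect.detect_protocol(msg_v1) is JSONRPCv1
    assert JSONRPCAutoDetect.detect_protocol(msg_loose) is JSONRPCLoose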
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPCConnection.receive_message | def receive_message(self, message):
'''Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
'''
if self._protocol is JSONRPCAutoDetect:
self._protocol = JSONRPCAutoDetect.detect_protocol(message)
try:
item, request_id = self._protocol.message_to_item(message)
except ProtocolError as e:
if e.response_msg_id is not id:
return self._receive_response(e, e.response_msg_id)
raise
if isinstance(item, Request):
item.send_result = partial(self._send_result, request_id)
return [item]
if isinstance(item, Notification):
return [item]
if isinstance(item, Response):
return self._receive_response(item.result, request_id)
assert isinstance(item, list)
if all(isinstance(payload, dict) and ('result' in payload or 'error' in payload)
for payload in item):
return self._receive_response_batch(item)
else:
return self._receive_request_batch(item) | python | def receive_message(self, message):
'''Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
'''
if self._protocol is JSONRPCAutoDetect:
self._protocol = JSONRPCAutoDetect.detect_protocol(message)
try:
item, request_id = self._protocol.message_to_item(message)
except ProtocolError as e:
if e.response_msg_id is not id:
return self._receive_response(e, e.response_msg_id)
raise
if isinstance(item, Request):
item.send_result = partial(self._send_result, request_id)
return [item]
if isinstance(item, Notification):
return [item]
if isinstance(item, Response):
return self._receive_response(item.result, request_id)
assert isinstance(item, list)
if all(isinstance(payload, dict) and ('result' in payload or 'error' in payload)
for payload in item):
return self._receive_response_batch(item)
else:
return self._receive_request_batch(item) | [
"def",
"receive_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"_protocol",
"is",
"JSONRPCAutoDetect",
":",
"self",
".",
"_protocol",
"=",
"JSONRPCAutoDetect",
".",
"detect_protocol",
"(",
"message",
")",
"try",
":",
"item",
",",
"request_id",
"=",
"self",
".",
"_protocol",
".",
"message_to_item",
"(",
"message",
")",
"except",
"ProtocolError",
"as",
"e",
":",
"if",
"e",
".",
"response_msg_id",
"is",
"not",
"id",
":",
"return",
"self",
".",
"_receive_response",
"(",
"e",
",",
"e",
".",
"response_msg_id",
")",
"raise",
"if",
"isinstance",
"(",
"item",
",",
"Request",
")",
":",
"item",
".",
"send_result",
"=",
"partial",
"(",
"self",
".",
"_send_result",
",",
"request_id",
")",
"return",
"[",
"item",
"]",
"if",
"isinstance",
"(",
"item",
",",
"Notification",
")",
":",
"return",
"[",
"item",
"]",
"if",
"isinstance",
"(",
"item",
",",
"Response",
")",
":",
"return",
"self",
".",
"_receive_response",
"(",
"item",
".",
"result",
",",
"request_id",
")",
"assert",
"isinstance",
"(",
"item",
",",
"list",
")",
"if",
"all",
"(",
"isinstance",
"(",
"payload",
",",
"dict",
")",
"and",
"(",
"'result'",
"in",
"payload",
"or",
"'error'",
"in",
"payload",
")",
"for",
"payload",
"in",
"item",
")",
":",
"return",
"self",
".",
"_receive_response_batch",
"(",
"item",
")",
"else",
":",
"return",
"self",
".",
"_receive_request_batch",
"(",
"item",
")"
] | Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error. | [
"Call",
"with",
"an",
"unframed",
"message",
"received",
"from",
"the",
"network",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L706-L737 | train |
kyuupichan/aiorpcX | aiorpcx/jsonrpc.py | JSONRPCConnection.cancel_pending_requests | def cancel_pending_requests(self):
'''Cancel all pending requests.'''
exception = CancelledError()
for _request, event in self._requests.values():
event.result = exception
event.set()
self._requests.clear() | python | def cancel_pending_requests(self):
'''Cancel all pending requests.'''
exception = CancelledError()
for _request, event in self._requests.values():
event.result = exception
event.set()
self._requests.clear() | [
"def",
"cancel_pending_requests",
"(",
"self",
")",
":",
"exception",
"=",
"CancelledError",
"(",
")",
"for",
"_request",
",",
"event",
"in",
"self",
".",
"_requests",
".",
"values",
"(",
")",
":",
"event",
".",
"result",
"=",
"exception",
"event",
".",
"set",
"(",
")",
"self",
".",
"_requests",
".",
"clear",
"(",
")"
] | Cancel all pending requests. | [
"Cancel",
"all",
"pending",
"requests",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L739-L745 | train |
kyuupichan/aiorpcX | aiorpcx/util.py | is_valid_hostname | def is_valid_hostname(hostname):
'''Return True if hostname is valid, otherwise False.'''
if not isinstance(hostname, str):
raise TypeError('hostname must be a string')
# strip exactly one dot from the right, if present
if hostname and hostname[-1] == ".":
hostname = hostname[:-1]
if not hostname or len(hostname) > 253:
return False
labels = hostname.split('.')
# the TLD must not be all-numeric
if re.match(NUMERIC_REGEX, labels[-1]):
return False
return all(LABEL_REGEX.match(label) for label in labels) | python | def is_valid_hostname(hostname):
'''Return True if hostname is valid, otherwise False.'''
if not isinstance(hostname, str):
raise TypeError('hostname must be a string')
# strip exactly one dot from the right, if present
if hostname and hostname[-1] == ".":
hostname = hostname[:-1]
if not hostname or len(hostname) > 253:
return False
labels = hostname.split('.')
# the TLD must not be all-numeric
if re.match(NUMERIC_REGEX, labels[-1]):
return False
return all(LABEL_REGEX.match(label) for label in labels) | [
"def",
"is_valid_hostname",
"(",
"hostname",
")",
":",
"if",
"not",
"isinstance",
"(",
"hostname",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'hostname must be a string'",
")",
"# strip exactly one dot from the right, if present",
"if",
"hostname",
"and",
"hostname",
"[",
"-",
"1",
"]",
"==",
"\".\"",
":",
"hostname",
"=",
"hostname",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"hostname",
"or",
"len",
"(",
"hostname",
")",
">",
"253",
":",
"return",
"False",
"labels",
"=",
"hostname",
".",
"split",
"(",
"'.'",
")",
"# the TLD must be not all-numeric",
"if",
"re",
".",
"match",
"(",
"NUMERIC_REGEX",
",",
"labels",
"[",
"-",
"1",
"]",
")",
":",
"return",
"False",
"return",
"all",
"(",
"LABEL_REGEX",
".",
"match",
"(",
"label",
")",
"for",
"label",
"in",
"labels",
")"
] | Return True if hostname is valid, otherwise False. | [
"Return",
"True",
"if",
"hostname",
"is",
"valid",
"otherwise",
"False",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L47-L60 | train |
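A few concrete cases implied by the rules above:

    from aiorpcx.util import is_valid_hostname

    assert is_valid_hostname('example.com')
    assert is_valid_hostname('example.com.')       # one trailing dot is stripped
    assert not is_valid_hostname('example.1234')   # all-numeric TLD
    assert not is_valid_hostname('a' * 254)        # longer than 253 characters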
kyuupichan/aiorpcX | aiorpcx/util.py | classify_host | def classify_host(host):
'''Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
'''
if isinstance(host, (IPv4Address, IPv6Address)):
return host
if is_valid_hostname(host):
return host
return ip_address(host) | python | def classify_host(host):
'''Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
'''
if isinstance(host, (IPv4Address, IPv6Address)):
return host
if is_valid_hostname(host):
return host
return ip_address(host) | [
"def",
"classify_host",
"(",
"host",
")",
":",
"if",
"isinstance",
"(",
"host",
",",
"(",
"IPv4Address",
",",
"IPv6Address",
")",
")",
":",
"return",
"host",
"if",
"is_valid_hostname",
"(",
"host",
")",
":",
"return",
"host",
"return",
"ip_address",
"(",
"host",
")"
] | Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname. | [
"Host",
"is",
"an",
"IPv4Address",
"IPv6Address",
"or",
"a",
"string",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L63-L77 | train |
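A sketch of the three return shapes:

    from ipaddress import IPv4Address, IPv6Address
    from aiorpcx.util import classify_host

    assert classify_host('example.com') == 'example.com'   # valid hostname, returned as-is
    assert classify_host('127.0.0.1') == IPv4Address('127.0.0.1')
    assert classify_host('::1') == IPv6Address('::1')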
kyuupichan/aiorpcX | aiorpcx/util.py | validate_port | def validate_port(port):
'''Validate port and return it as an integer.
An integer, or its representation as a string, is accepted.'''
if not isinstance(port, (str, int)):
raise TypeError(f'port must be an integer or string: {port}')
if isinstance(port, str) and port.isdigit():
port = int(port)
if isinstance(port, int) and 0 < port <= 65535:
return port
raise ValueError(f'invalid port: {port}') | python | def validate_port(port):
'''Validate port and return it as an integer.
An integer, or its representation as a string, is accepted.'''
if not isinstance(port, (str, int)):
raise TypeError(f'port must be an integer or string: {port}')
if isinstance(port, str) and port.isdigit():
port = int(port)
if isinstance(port, int) and 0 < port <= 65535:
return port
raise ValueError(f'invalid port: {port}') | [
"def",
"validate_port",
"(",
"port",
")",
":",
"if",
"not",
"isinstance",
"(",
"port",
",",
"(",
"str",
",",
"int",
")",
")",
":",
"raise",
"TypeError",
"(",
"f'port must be an integer or string: {port}'",
")",
"if",
"isinstance",
"(",
"port",
",",
"str",
")",
"and",
"port",
".",
"isdigit",
"(",
")",
":",
"port",
"=",
"int",
"(",
"port",
")",
"if",
"isinstance",
"(",
"port",
",",
"int",
")",
"and",
"0",
"<",
"port",
"<=",
"65535",
":",
"return",
"port",
"raise",
"ValueError",
"(",
"f'invalid port: {port}'",
")"
] | Validate port and return it as an integer.
An integer, or its representation as a string, is accepted. | [
"Validate",
"port",
"and",
"return",
"it",
"as",
"an",
"integer",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L80-L90 | train |
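Concrete cases:

    from aiorpcx.util import validate_port

    assert validate_port(8080) == 8080
    assert validate_port('8080') == 8080    # digit strings are converted
    # validate_port(0) raises ValueError; validate_port(1.5) raises TypeError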
kyuupichan/aiorpcX | aiorpcx/util.py | validate_protocol | def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower() | python | def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower() | [
"def",
"validate_protocol",
"(",
"protocol",
")",
":",
"if",
"not",
"re",
".",
"match",
"(",
"PROTOCOL_REGEX",
",",
"protocol",
")",
":",
"raise",
"ValueError",
"(",
"f'invalid protocol: {protocol}'",
")",
"return",
"protocol",
".",
"lower",
"(",
")"
] | Validate a protocol, a string, and return it. | [
"Validate",
"a",
"protocol",
"a",
"string",
"and",
"return",
"it",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L93-L97 | train |
kyuupichan/aiorpcX | aiorpcx/util.py | is_async_call | def is_async_call(func):
'''inspect.iscoroutinefunction that looks through partials.'''
while isinstance(func, partial):
func = func.func
return inspect.iscoroutinefunction(func) | python | def is_async_call(func):
'''inspect.iscoroutinefunction that looks through partials.'''
while isinstance(func, partial):
func = func.func
return inspect.iscoroutinefunction(func) | [
"def",
"is_async_call",
"(",
"func",
")",
":",
"while",
"isinstance",
"(",
"func",
",",
"partial",
")",
":",
"func",
"=",
"func",
".",
"func",
"return",
"inspect",
".",
"iscoroutinefunction",
"(",
"func",
")"
] | inspect.iscoroutinefunction that looks through partials. | [
"inspect",
".",
"iscoroutinefunction",
"that",
"looks",
"through",
"partials",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L261-L265 | train |
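A sketch showing the partial-unwrapping behaviour:

    from functools import partial
    from aiorpcx.util import is_async_call

    async def fetch(url, timeout):
        return url, timeout

    assert is_async_call(fetch)
    assert is_async_call(partial(partial(fetch, 'x'), timeout=5))  # nested partials unwrap
    assert not is_async_call(print)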
kyuupichan/aiorpcX | aiorpcx/util.py | Service.from_string | def from_string(cls, string, *, default_func=None):
'''Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
'''
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address) | python | def from_string(cls, string, *, default_func=None):
'''Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
'''
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address) | [
"def",
"from_string",
"(",
"cls",
",",
"string",
",",
"*",
",",
"default_func",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"f'service must be a string: {string}'",
")",
"parts",
"=",
"string",
".",
"split",
"(",
"'://'",
",",
"1",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"protocol",
",",
"address",
"=",
"parts",
"else",
":",
"item",
",",
"=",
"parts",
"protocol",
"=",
"None",
"if",
"default_func",
":",
"if",
"default_func",
"(",
"item",
",",
"ServicePart",
".",
"HOST",
")",
"and",
"default_func",
"(",
"item",
",",
"ServicePart",
".",
"PORT",
")",
":",
"protocol",
",",
"address",
"=",
"item",
",",
"''",
"else",
":",
"protocol",
",",
"address",
"=",
"default_func",
"(",
"None",
",",
"ServicePart",
".",
"PROTOCOL",
")",
",",
"item",
"if",
"not",
"protocol",
":",
"raise",
"ValueError",
"(",
"f'invalid service string: {string}'",
")",
"if",
"default_func",
":",
"default_func",
"=",
"partial",
"(",
"default_func",
",",
"protocol",
".",
"lower",
"(",
")",
")",
"address",
"=",
"NetAddress",
".",
"from_string",
"(",
"address",
",",
"default_func",
"=",
"default_func",
")",
"return",
"cls",
"(",
"protocol",
",",
"address",
")"
] | Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part. | [
"Construct",
"a",
"Service",
"from",
"a",
"string",
"."
] | 707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0 | https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L218-L244 | train |
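A sketch without a default_func, in which case the string must carry every part:

    from aiorpcx.util import Service

    service = Service.from_string('tcp://example.com:8000')
    # with no default_func a bare 'example.com:8000' carries no protocol and
    # raises ValueError('invalid service string: ...')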
monarch-initiative/dipper | dipper/sources/OMIA.py | OMIA.scrub | def scrub(self):
"""
The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
i.e.:
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return:
"""
LOG.info("Scrubbing out the nasty characters that break our parser.")
myfile = '/'.join((self.rawdir, self.files['data']['file']))
tmpfile = '/'.join((self.rawdir, self.files['data']['file']+'.tmp.gz'))
tmp = gzip.open(tmpfile, 'wb')
du = DipperUtil()
with gzip.open(myfile, 'rb') as fh:
filereader = io.TextIOWrapper(fh, newline="")
for line in filereader:
line = du.remove_control_characters(line) + '\n'
tmp.write(line.encode('utf-8'))
tmp.close()
# TEC I do not like this at all. original data must be preserved as is.
# also may be heavy handed as chars which do not break the parser
# are stripped as well (i.e. tabs and newlines)
# move the temp file
LOG.info("Replacing the original data with the scrubbed file.")
shutil.move(tmpfile, myfile)
return | python | def scrub(self):
"""
The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
        e.g.
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return:
"""
LOG.info("Scrubbing out the nasty characters that break our parser.")
myfile = '/'.join((self.rawdir, self.files['data']['file']))
tmpfile = '/'.join((self.rawdir, self.files['data']['file']+'.tmp.gz'))
tmp = gzip.open(tmpfile, 'wb')
du = DipperUtil()
with gzip.open(myfile, 'rb') as fh:
filereader = io.TextIOWrapper(fh, newline="")
for line in filereader:
line = du.remove_control_characters(line) + '\n'
tmp.write(line.encode('utf-8'))
tmp.close()
# TEC I do not like this at all. original data must be preserved as is.
# also may be heavy handed as chars which do not break the parser
# are stripped as well (i.e. tabs and newlines)
# move the temp file
LOG.info("Replacing the original data with the scrubbed file.")
shutil.move(tmpfile, myfile)
return | [
"def",
"scrub",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"\"Scrubbing out the nasty characters that break our parser.\"",
")",
"myfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'data'",
"]",
"[",
"'file'",
"]",
")",
")",
"tmpfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'data'",
"]",
"[",
"'file'",
"]",
"+",
"'.tmp.gz'",
")",
")",
"tmp",
"=",
"gzip",
".",
"open",
"(",
"tmpfile",
",",
"'wb'",
")",
"du",
"=",
"DipperUtil",
"(",
")",
"with",
"gzip",
".",
"open",
"(",
"myfile",
",",
"'rb'",
")",
"as",
"fh",
":",
"filereader",
"=",
"io",
".",
"TextIOWrapper",
"(",
"fh",
",",
"newline",
"=",
"\"\"",
")",
"for",
"line",
"in",
"filereader",
":",
"line",
"=",
"du",
".",
"remove_control_characters",
"(",
"line",
")",
"+",
"'\\n'",
"tmp",
".",
"write",
"(",
"line",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"tmp",
".",
"close",
"(",
")",
"# TEC I do not like this at all. original data must be preserved as is.",
"# also may be heavy handed as chars which do not break the parser",
"# are stripped as well (i.e. tabs and newlines)",
"# move the temp file",
"LOG",
".",
"info",
"(",
"\"Replacing the original data with the scrubbed file.\"",
")",
"shutil",
".",
"move",
"(",
"tmpfile",
",",
"myfile",
")",
"return"
] | The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
e.g.
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return: | [
"The",
"XML",
"file",
"seems",
"to",
"have",
"mixed",
"-",
"encoding",
";",
"we",
"scrub",
"out",
"the",
"control",
"characters",
"from",
"the",
"file",
"for",
"processing",
"."
] | 24cc80db355bbe15776edc5c7b41e0886959ba41 | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L202-L234 | train |
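A self-contained sketch of the scrub pattern above. DipperUtil.remove_control_characters is the project's own helper; here an assumed unicodedata-based equivalent stands in for it, and the temp-file dance mirrors the method.

import gzip
import io
import shutil
import unicodedata

def scrub_gzip(path):
    # Rewrite a gzipped text file with Unicode control characters removed,
    # then replace the original (heavy-handed: tabs and newlines go too, and
    # each line gets a fresh '\n', just as in OMIA.scrub above).
    tmp_path = path + '.tmp.gz'
    with gzip.open(path, 'rb') as src, gzip.open(tmp_path, 'wb') as dst:
        for line in io.TextIOWrapper(src, newline=""):
            clean = ''.join(c for c in line if unicodedata.category(c) != 'Cc')
            dst.write((clean + '\n').encode('utf-8'))
    shutil.move(tmp_path, path)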
monarch-initiative/dipper | dipper/sources/OMIA.py | OMIA.process_associations | def process_associations(self, limit):
"""
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
f = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(f, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
f.close()
return | python | def process_associations(self, limit):
"""
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
f = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(f, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
f.close()
return | [
"def",
"process_associations",
"(",
"self",
",",
"limit",
")",
":",
"myfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'data'",
"]",
"[",
"'file'",
"]",
")",
")",
"f",
"=",
"gzip",
".",
"open",
"(",
"myfile",
",",
"'rb'",
")",
"filereader",
"=",
"io",
".",
"TextIOWrapper",
"(",
"f",
",",
"newline",
"=",
"\"\"",
")",
"filereader",
".",
"readline",
"(",
")",
"# remove the xml declaration line",
"for",
"event",
",",
"elem",
"in",
"ET",
".",
"iterparse",
"(",
"filereader",
")",
":",
"# iterparse is not deprecated",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Article_Breed'",
",",
"self",
".",
"_process_article_breed_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Article_Phene'",
",",
"self",
".",
"_process_article_phene_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Breed_Phene'",
",",
"self",
".",
"_process_breed_phene_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Lida_Links'",
",",
"self",
".",
"_process_lida_links_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Phene_Gene'",
",",
"self",
".",
"_process_phene_gene_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Group_MPO'",
",",
"self",
".",
"_process_group_mpo_row",
",",
"limit",
")",
"f",
".",
"close",
"(",
")",
"return"
] | Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return: | [
"Loop",
"through",
"the",
"xml",
"file",
"and",
"process",
"the",
"article",
"-",
"breed",
"article",
"-",
"phene",
"breed",
"-",
"phene",
"phene",
"-",
"gene",
"associations",
"and",
"the",
"external",
"links",
"to",
"LIDA",
"."
] | 24cc80db355bbe15776edc5c7b41e0886959ba41 | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L362-L390 | train |
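A generic sketch of the streaming dispatch loop above: ET.iterparse yields each element once its end tag is seen, so a large dump never has to be loaded whole, and clearing finished tables keeps memory flat. The table/name layout and the handler mapping are assumptions about the OMIA dump, not code taken from the source.

import xml.etree.ElementTree as ET

def stream_tables(fileobj, handlers, limit=None):
    # handlers maps a table name (e.g. 'Article_Breed' or 'Phene_Gene' in the
    # method above) to a callable(elem, limit).
    for _event, elem in ET.iterparse(fileobj):   # 'end' events by default
        if elem.tag != 'table':
            continue                 # leave child rows intact until then
        handler = handlers.get(elem.get('name'))
        if handler is not None:
            handler(elem, limit)
        elem.clear()                 # free the finished table to bound memory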
monarch-initiative/dipper | dipper/sources/OMIA.py | OMIA._process_article_phene_row | def _process_article_phene_row(self, row):
"""
Linking articles to species-specific phenes.
:param row:
:return:
"""
# article_id, phene_id, added_by
# look up the article in the hashmap
phenotype_id = self.id_hash['phene'].get(row['phene_id'])
article_id = self.id_hash['article'].get(row['article_id'])
omia_id = self._get_omia_id_from_phene_id(phenotype_id)
if self.test_mode or omia_id not in self.test_ids['disease'] \
or phenotype_id is None or article_id is None:
return
# make a triple, where the article is about the phenotype
self.graph.addTriple(
article_id,
self.globaltt['is_about'], phenotype_id)
return | python | def _process_article_phene_row(self, row):
"""
Linking articles to species-specific phenes.
:param row:
:return:
"""
# article_id, phene_id, added_by
# look up the article in the hashmap
phenotype_id = self.id_hash['phene'].get(row['phene_id'])
article_id = self.id_hash['article'].get(row['article_id'])
omia_id = self._get_omia_id_from_phene_id(phenotype_id)
if self.test_mode or omia_id not in self.test_ids['disease'] \
or phenotype_id is None or article_id is None:
return
# make a triple, where the article is about the phenotype
self.graph.addTriple(
article_id,
self.globaltt['is_about'], phenotype_id)
return | [
"def",
"_process_article_phene_row",
"(",
"self",
",",
"row",
")",
":",
"# article_id, phene_id, added_by",
"# look up the article in the hashmap",
"phenotype_id",
"=",
"self",
".",
"id_hash",
"[",
"'phene'",
"]",
".",
"get",
"(",
"row",
"[",
"'phene_id'",
"]",
")",
"article_id",
"=",
"self",
".",
"id_hash",
"[",
"'article'",
"]",
".",
"get",
"(",
"row",
"[",
"'article_id'",
"]",
")",
"omia_id",
"=",
"self",
".",
"_get_omia_id_from_phene_id",
"(",
"phenotype_id",
")",
"if",
"self",
".",
"test_mode",
"or",
"omia_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'disease'",
"]",
"or",
"phenotype_id",
"is",
"None",
"or",
"article_id",
"is",
"None",
":",
"return",
"# make a triple, where the article is about the phenotype",
"self",
".",
"graph",
".",
"addTriple",
"(",
"article_id",
",",
"self",
".",
"globaltt",
"[",
"'is_about'",
"]",
",",
"phenotype_id",
")",
"return"
] | Linking articles to species-specific phenes.
:param row:
:return: | [
"Linking",
"articles",
"to",
"species",
"-",
"specific",
"phenes",
"."
] | 24cc80db355bbe15776edc5c7b41e0886959ba41 | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L645-L667 | train |
monarch-initiative/dipper | dipper/sources/OMIA.py | OMIA.filter_keep_phenotype_entry_ids | def filter_keep_phenotype_entry_ids(self, entry):
'''
doubt this should be kept
'''
omim_id = str(entry['mimNumber'])
otype = self.globaltt['obsolete']
if omim_id in self.omim_type:
otype = self.omim_type[omim_id]
if otype == self.globaltt['obsolete'] and omim_id in self.omim_replaced:
omim_id = self.omim_replaced[omim_id]
otype = self.omim_type[omim_id]
# else: # removed or multiple
if otype not in (
self.globaltt['Phenotype'], self.globaltt['has_affected_feature']):
omim_id = None
return omim_id | python | def filter_keep_phenotype_entry_ids(self, entry):
'''
doubt this should be kept
'''
omim_id = str(entry['mimNumber'])
otype = self.globaltt['obsolete']
if omim_id in self.omim_type:
otype = self.omim_type[omim_id]
if otype == self.globaltt['obsolete'] and omim_id in self.omim_replaced:
omim_id = self.omim_replaced[omim_id]
otype = self.omim_type[omim_id]
# else: # removed or multiple
if otype not in (
self.globaltt['Phenotype'], self.globaltt['has_affected_feature']):
omim_id = None
return omim_id | [
"def",
"filter_keep_phenotype_entry_ids",
"(",
"self",
",",
"entry",
")",
":",
"omim_id",
"=",
"str",
"(",
"entry",
"[",
"'mimNumber'",
"]",
")",
"otype",
"=",
"self",
".",
"globaltt",
"[",
"'obsolete'",
"]",
"if",
"omim_id",
"in",
"self",
".",
"omim_type",
":",
"otype",
"=",
"self",
".",
"omim_type",
"[",
"omim_id",
"]",
"if",
"otype",
"==",
"self",
".",
"globaltt",
"[",
"'obsolete'",
"]",
"and",
"omim_id",
"in",
"self",
".",
"omim_replaced",
":",
"omim_id",
"=",
"self",
".",
"omim_replaced",
"[",
"omim_id",
"]",
"otype",
"=",
"self",
".",
"omim_type",
"[",
"omim_id",
"]",
"# else: # removed or multiple",
"if",
"otype",
"not",
"in",
"(",
"self",
".",
"globaltt",
"[",
"'Phenotype'",
"]",
",",
"self",
".",
"globaltt",
"[",
"'has_affected_feature'",
"]",
")",
":",
"omim_id",
"=",
"None",
"return",
"omim_id"
] | doubt this should be kept | [
"doubt",
"this",
"should",
"be",
"kept"
] | 24cc80db355bbe15776edc5c7b41e0886959ba41 | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L825-L841 | train |
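A toy illustration of the obsolete-ID resolution this record performs, with made-up OMIM numbers and plain strings standing in for the globaltt lookups.

omim_type = {'100100': 'Phenotype', '100200': 'obsolete', '100300': 'gene'}
omim_replaced = {'100200': '100100'}   # obsolete id -> its replacement

def keep_phenotype_id(mim_number):
    omim_id = str(mim_number)
    otype = omim_type.get(omim_id, 'obsolete')
    if otype == 'obsolete' and omim_id in omim_replaced:
        omim_id = omim_replaced[omim_id]   # follow the replacement once
        otype = omim_type[omim_id]
    return omim_id if otype in ('Phenotype', 'has_affected_feature') else None

assert keep_phenotype_id(100200) == '100100'   # obsolete id resolves
assert keep_phenotype_id(100300) is None       # non-phenotype is dropped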
monarch-initiative/dipper | dipper/sources/ClinVarXML_alpha.py | make_spo | def make_spo(sub, prd, obj):
'''
Decorates the three given strings as a line of ntriples
'''
# To establish string as a curie and expand,
# we use a global curie_map(.yaml)
    # sub are always uri (unless a bnode)
    # prd are always uri (unless prd is 'a')
# should fail loudly if curie does not exist
if prd == 'a':
prd = 'rdf:type'
try:
(subcuri, subid) = re.split(r':', sub)
except Exception:
LOG.error("not a Subject Curie '%s'", sub)
raise ValueError
try:
(prdcuri, prdid) = re.split(r':', prd)
except Exception:
LOG.error("not a Predicate Curie '%s'", prd)
raise ValueError
objt = ''
# object is a curie or bnode or literal [string|number]
objcuri = None
match = re.match(CURIERE, obj)
if match is not None:
try:
(objcuri, objid) = re.split(r':', obj)
except ValueError:
match = None
if match is not None and objcuri in CURIEMAP:
objt = CURIEMAP[objcuri] + objid.strip()
# allow unexpanded bnodes in object
if objcuri != '_' or CURIEMAP[objcuri] != '_:b':
objt = '<' + objt + '>'
elif obj.isnumeric():
objt = '"' + obj + '"'
else:
# Literals may not contain the characters ", LF, CR '\'
# except in their escaped forms. internal quotes as well.
obj = obj.strip('"').replace('\\', '\\\\').replace('"', '\'')
obj = obj.replace('\n', '\\n').replace('\r', '\\r')
objt = '"' + obj + '"'
# allow unexpanded bnodes in subject
if subcuri is not None and subcuri in CURIEMAP and \
prdcuri is not None and prdcuri in CURIEMAP:
subjt = CURIEMAP[subcuri] + subid.strip()
if subcuri != '_' or CURIEMAP[subcuri] != '_:b':
subjt = '<' + subjt + '>'
return subjt + ' <' + CURIEMAP[prdcuri] + prdid.strip() + '> ' + objt + ' .'
else:
LOG.error(
'Cant work with: <%s> %s , <%s> %s, %s',
subcuri, subid, prdcuri, prdid, objt)
return None | python | def make_spo(sub, prd, obj):
'''
Decorates the three given strings as a line of ntriples
'''
# To establish string as a curie and expand,
# we use a global curie_map(.yaml)
    # sub are always uri (unless a bnode)
    # prd are always uri (unless prd is 'a')
# should fail loudly if curie does not exist
if prd == 'a':
prd = 'rdf:type'
try:
(subcuri, subid) = re.split(r':', sub)
except Exception:
LOG.error("not a Subject Curie '%s'", sub)
raise ValueError
try:
(prdcuri, prdid) = re.split(r':', prd)
except Exception:
LOG.error("not a Predicate Curie '%s'", prd)
raise ValueError
objt = ''
# object is a curie or bnode or literal [string|number]
objcuri = None
match = re.match(CURIERE, obj)
if match is not None:
try:
(objcuri, objid) = re.split(r':', obj)
except ValueError:
match = None
if match is not None and objcuri in CURIEMAP:
objt = CURIEMAP[objcuri] + objid.strip()
# allow unexpanded bnodes in object
if objcuri != '_' or CURIEMAP[objcuri] != '_:b':
objt = '<' + objt + '>'
elif obj.isnumeric():
objt = '"' + obj + '"'
else:
# Literals may not contain the characters ", LF, CR '\'
# except in their escaped forms. internal quotes as well.
obj = obj.strip('"').replace('\\', '\\\\').replace('"', '\'')
obj = obj.replace('\n', '\\n').replace('\r', '\\r')
objt = '"' + obj + '"'
# allow unexpanded bnodes in subject
if subcuri is not None and subcuri in CURIEMAP and \
prdcuri is not None and prdcuri in CURIEMAP:
subjt = CURIEMAP[subcuri] + subid.strip()
if subcuri != '_' or CURIEMAP[subcuri] != '_:b':
subjt = '<' + subjt + '>'
return subjt + ' <' + CURIEMAP[prdcuri] + prdid.strip() + '> ' + objt + ' .'
else:
LOG.error(
'Cant work with: <%s> %s , <%s> %s, %s',
subcuri, subid, prdcuri, prdid, objt)
return None | [
"def",
"make_spo",
"(",
"sub",
",",
"prd",
",",
"obj",
")",
":",
"# To establish string as a curie and expand,",
"# we use a global curie_map(.yaml)",
"# sub are allways uri (unless a bnode)",
"# prd are allways uri (unless prd is 'a')",
"# should fail loudly if curie does not exist",
"if",
"prd",
"==",
"'a'",
":",
"prd",
"=",
"'rdf:type'",
"try",
":",
"(",
"subcuri",
",",
"subid",
")",
"=",
"re",
".",
"split",
"(",
"r':'",
",",
"sub",
")",
"except",
"Exception",
":",
"LOG",
".",
"error",
"(",
"\"not a Subject Curie '%s'\"",
",",
"sub",
")",
"raise",
"ValueError",
"try",
":",
"(",
"prdcuri",
",",
"prdid",
")",
"=",
"re",
".",
"split",
"(",
"r':'",
",",
"prd",
")",
"except",
"Exception",
":",
"LOG",
".",
"error",
"(",
"\"not a Predicate Curie '%s'\"",
",",
"prd",
")",
"raise",
"ValueError",
"objt",
"=",
"''",
"# object is a curie or bnode or literal [string|number]",
"objcuri",
"=",
"None",
"match",
"=",
"re",
".",
"match",
"(",
"CURIERE",
",",
"obj",
")",
"if",
"match",
"is",
"not",
"None",
":",
"try",
":",
"(",
"objcuri",
",",
"objid",
")",
"=",
"re",
".",
"split",
"(",
"r':'",
",",
"obj",
")",
"except",
"ValueError",
":",
"match",
"=",
"None",
"if",
"match",
"is",
"not",
"None",
"and",
"objcuri",
"in",
"CURIEMAP",
":",
"objt",
"=",
"CURIEMAP",
"[",
"objcuri",
"]",
"+",
"objid",
".",
"strip",
"(",
")",
"# allow unexpanded bnodes in object",
"if",
"objcuri",
"!=",
"'_'",
"or",
"CURIEMAP",
"[",
"objcuri",
"]",
"!=",
"'_:b'",
":",
"objt",
"=",
"'<'",
"+",
"objt",
"+",
"'>'",
"elif",
"obj",
".",
"isnumeric",
"(",
")",
":",
"objt",
"=",
"'\"'",
"+",
"obj",
"+",
"'\"'",
"else",
":",
"# Literals may not contain the characters \", LF, CR '\\'",
"# except in their escaped forms. internal quotes as well.",
"obj",
"=",
"obj",
".",
"strip",
"(",
"'\"'",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\''",
")",
"obj",
"=",
"obj",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\n'",
")",
".",
"replace",
"(",
"'\\r'",
",",
"'\\\\r'",
")",
"objt",
"=",
"'\"'",
"+",
"obj",
"+",
"'\"'",
"# allow unexpanded bnodes in subject",
"if",
"subcuri",
"is",
"not",
"None",
"and",
"subcuri",
"in",
"CURIEMAP",
"and",
"prdcuri",
"is",
"not",
"None",
"and",
"prdcuri",
"in",
"CURIEMAP",
":",
"subjt",
"=",
"CURIEMAP",
"[",
"subcuri",
"]",
"+",
"subid",
".",
"strip",
"(",
")",
"if",
"subcuri",
"!=",
"'_'",
"or",
"CURIEMAP",
"[",
"subcuri",
"]",
"!=",
"'_:b'",
":",
"subjt",
"=",
"'<'",
"+",
"subjt",
"+",
"'>'",
"return",
"subjt",
"+",
"' <'",
"+",
"CURIEMAP",
"[",
"prdcuri",
"]",
"+",
"prdid",
".",
"strip",
"(",
")",
"+",
"'> '",
"+",
"objt",
"+",
"' .'",
"else",
":",
"LOG",
".",
"error",
"(",
"'Cant work with: <%s> %s , <%s> %s, %s'",
",",
"subcuri",
",",
"subid",
",",
"prdcuri",
",",
"prdid",
",",
"objt",
")",
"return",
"None"
] | Decorates the three given strings as a line of ntriples | [
"Decorates",
"the",
"three",
"given",
"strings",
"as",
"a",
"line",
"of",
"ntriples"
] | 24cc80db355bbe15776edc5c7b41e0886959ba41 | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ClinVarXML_alpha.py#L183-L244 | train |
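The literal-escaping rules buried in make_spo are easiest to see in isolation; this fragment repeats just that branch (curie expansion omitted) so the transformation can be checked directly.

def escape_literal(obj):
    # Mirrors the literal branch of make_spo: strip surrounding quotes,
    # double backslashes, demote internal double quotes to single quotes,
    # and escape LF/CR.
    obj = obj.strip('"').replace('\\', '\\\\').replace('"', "'")
    obj = obj.replace('\n', '\\n').replace('\r', '\\r')
    return '"' + obj + '"'

assert escape_literal('say "hi"\n') == '"say \'hi\'\\n"'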
monarch-initiative/dipper | dipper/sources/ClinVarXML_alpha.py | write_spo | def write_spo(sub, prd, obj):
'''
    write triples to a buffer in case we decide to drop them
'''
rcvtriples.append(make_spo(sub, prd, obj)) | python | def write_spo(sub, prd, obj):
'''
    write triples to a buffer in case we decide to drop them
'''
rcvtriples.append(make_spo(sub, prd, obj)) | [
"def",
"write_spo",
"(",
"sub",
",",
"prd",
",",
"obj",
")",
":",
"rcvtriples",
".",
"append",
"(",
"make_spo",
"(",
"sub",
",",
"prd",
",",
"obj",
")",
")"
] | write triples to a buffer in case we decide to drop them | [
"write",
"triples",
"to",
"a",
"buffer",
"incase",
"we",
"decide",
"to",
"drop",
"them"
] | 24cc80db355bbe15776edc5c7b41e0886959ba41 | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ClinVarXML_alpha.py#L247-L251 | train |
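A sketch of why write_spo buffers into rcvtriples instead of writing directly: the whole record's triples can be emitted or discarded at once. The validity predicate and output handle below are placeholder assumptions.

rcvtriples = []

def flush_or_drop(out_fh, record_is_valid):
    # Emit the buffered triples for this record, or silently discard them,
    # then reset the buffer either way (make_spo may append None on error,
    # so those are filtered out here).
    if record_is_valid:
        for triple in rcvtriples:
            if triple is not None:
                print(triple, file=out_fh)
    rcvtriples.clear()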