# azure-mgmt-web/azure/mgmt/web/operations/diagnostics_operations.py (JonathanGailliez/azure-sdk-for-python)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class DiagnosticsOperations(object):
"""DiagnosticsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2018-02-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-02-01"
self.config = config
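# Usage sketch (not part of the generated code): this operations class is
# normally reached through the service client rather than constructed by hand.
# The client and credential class names below are assumptions based on the
# typical azure-mgmt-web layout of this vintage; adjust to your installed
# package versions. Placeholder values are in angle brackets.
#
#     from azure.common.credentials import ServicePrincipalCredentials
#     from azure.mgmt.web import WebSiteManagementClient
#
#     credentials = ServicePrincipalCredentials(
#         client_id='<app-id>', secret='<app-secret>', tenant='<tenant-id>')
#     web_client = WebSiteManagementClient(credentials, '<subscription-id>')
#     diagnostics = web_client.diagnostics  # a DiagnosticsOperations instance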
def list_hosting_environment_detector_responses(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""List Hosting Environment Detector Responses.
List Hosting Environment Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: Site Name
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_hosting_environment_detector_responses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_hosting_environment_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors'}
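# Usage sketch (illustrative): the list operations return a paged iterator, so
# results can be consumed with a plain for-loop; following next_link is handled
# by the DetectorResponsePaged wrapper. Resource names below are placeholders.
#
#     for detector in diagnostics.list_hosting_environment_detector_responses(
#             resource_group_name='my-rg', name='my-ase'):
#         print(detector.name)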
def get_hosting_environment_detector_response(
self, resource_group_name, name, detector_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get Hosting Environment Detector Response.
Get Hosting Environment Detector Response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: App Service Environment Name
:type name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_hosting_environment_detector_response.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_hosting_environment_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors/{detectorName}'}
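# Usage sketch (illustrative): the optional time-window arguments are Python
# datetimes serialized as ISO-8601 query parameters, and time_grain must match
# the pattern PT[1-9][0-9]+[SMH] (so 'PT15M' is accepted while 'PT1H' is not).
# The detector name below is a placeholder, not a documented detector.
#
#     import datetime
#
#     end = datetime.datetime.utcnow()
#     start = end - datetime.timedelta(hours=6)
#     detector = diagnostics.get_hosting_environment_detector_response(
#         resource_group_name='my-rg', name='my-ase',
#         detector_name='<detector-name>',
#         start_time=start, end_time=end, time_grain='PT15M')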
def list_site_detector_responses(
self, resource_group_name, site_name, custom_headers=None, raw=False, **operation_config):
"""List Site Detector Responses.
List Site Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detector_responses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors'}
def get_site_detector_response(
self, resource_group_name, site_name, detector_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get site detector response.
Get site detector response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_detector_response.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors/{detectorName}'}
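# Usage sketch (illustrative): passing raw=True wraps the deserialized result
# in an msrest ClientRawResponse, which also exposes the underlying HTTP
# response. Resource names below are placeholders.
#
#     raw_result = diagnostics.get_site_detector_response(
#         resource_group_name='my-rg', site_name='my-app',
#         detector_name='<detector-name>', raw=True)
#     print(raw_result.response.status_code)  # underlying HTTP response
#     detector = raw_result.output            # deserialized DetectorResponse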
def list_site_diagnostic_categories(
self, resource_group_name, site_name, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Categories.
Get Diagnostics Categories.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DiagnosticCategory
:rtype:
~azure.mgmt.web.models.DiagnosticCategoryPaged[~azure.mgmt.web.models.DiagnosticCategory]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_diagnostic_categories.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_diagnostic_categories.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics'}
def get_site_diagnostic_category(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Category.
Get Diagnostics Category.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticCategory or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticCategory or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_diagnostic_category.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticCategory', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_diagnostic_category.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}'}
def list_site_analyses(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Site Analyses.
Get Site Analyses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis(
self, resource_group_name, site_name, diagnostic_category, analysis_name, custom_headers=None, raw=False, **operation_config):
"""Get Site Analysis.
Get Site Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param analysis_name: Analysis Name
:type analysis_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_analysis.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
def execute_site_analysis(
self, resource_group_name, site_name, diagnostic_category, analysis_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Analysis.
Execute Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param analysis_name: Analysis Resource Name
:type analysis_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_analysis.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
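# Usage sketch (illustrative): execute_site_analysis issues a POST against the
# /execute endpoint and returns a DiagnosticAnalysis; the optional time-window
# arguments behave the same way as in the detector calls above. The category
# and analysis names below are placeholders.
#
#     analysis = diagnostics.execute_site_analysis(
#         resource_group_name='my-rg', site_name='my-app',
#         diagnostic_category='<category-name>', analysis_name='<analysis-name>')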
def list_site_detectors(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Detectors.
Get Detectors.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detectors.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detectors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors'}
def get_site_detector(
self, resource_group_name, site_name, diagnostic_category, detector_name, custom_headers=None, raw=False, **operation_config):
"""Get Detector.
Get Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param detector_name: Detector Name
:type detector_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_site_detector.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
def execute_site_detector(
self, resource_group_name, site_name, detector_name, diagnostic_category, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticDetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_detector.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticDetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}
def list_site_detector_responses_slot(
self, resource_group_name, site_name, slot, custom_headers=None, raw=False, **operation_config):
"""List Site Detector Responses.
List Site Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detector_responses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detector_responses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors'}
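# Usage sketch (illustrative): the *_slot variants take the same arguments as
# their production-slot counterparts plus the deployment slot name.
#
#     for detector in diagnostics.list_site_detector_responses_slot(
#             resource_group_name='my-rg', site_name='my-app', slot='staging'):
#         print(detector.name)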
def get_site_detector_response_slot(
self, resource_group_name, site_name, detector_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get site detector response.
Get site detector response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_detector_response_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_detector_response_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors/{detectorName}'}
def list_site_diagnostic_categories_slot(
self, resource_group_name, site_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Categories.
Get Diagnostics Categories.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DiagnosticCategory
:rtype:
~azure.mgmt.web.models.DiagnosticCategoryPaged[~azure.mgmt.web.models.DiagnosticCategory]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_diagnostic_categories_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_diagnostic_categories_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics'}
def get_site_diagnostic_category_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Category.
Get Diagnostics Category.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticCategory or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticCategory or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_diagnostic_category_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticCategory', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_diagnostic_category_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}'}
def list_site_analyses_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Site Analyses.
Get Site Analyses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis_slot(
self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Site Analysis.
Get Site Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param analysis_name: Analysis Name
:type analysis_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_analysis_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
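
    # Illustrative usage sketch (placeholder names, same client assumption as above):
    # fetch a single analysis definition for a deployment slot.
    #
    #   analysis = client.diagnostics.get_site_analysis_slot(
    #       'my-resource-group', 'my-site', 'availability', 'appanalysis', 'staging')
    #   print(analysis.id)
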
def execute_site_analysis_slot(
self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Analysis.
Execute Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param analysis_name: Analysis Resource Name
:type analysis_name: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_analysis_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
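
    # Illustrative usage sketch (placeholder names and times). start_time/end_time
    # are optional datetimes serialized as ISO-8601, and time_grain, if given, must
    # match the PT[1-9][0-9]+[SMH] pattern enforced above (e.g. 'PT15M').
    #
    #   import datetime
    #   end = datetime.datetime.utcnow()
    #   start = end - datetime.timedelta(hours=24)
    #   result = client.diagnostics.execute_site_analysis_slot(
    #       'my-resource-group', 'my-site', 'availability', 'appanalysis', 'staging',
    #       start_time=start, end_time=end, time_grain='PT15M')
    #   print(result.name)
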
def list_site_detectors_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Detectors.
Get Detectors.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detectors_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detectors_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors'}
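
    # Illustrative usage sketch (placeholder names): the call returns a paged
    # iterator of DetectorDefinition objects for the given diagnostic category.
    #
    #   for detector in client.diagnostics.list_site_detectors_slot(
    #           'my-resource-group', 'my-site', 'availability', 'staging'):
    #       print(detector.name)
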
def get_site_detector_slot(
self, resource_group_name, site_name, diagnostic_category, detector_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Detector.
Get Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param detector_name: Detector Name
:type detector_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_site_detector_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
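
    # Illustrative usage sketch (placeholder names). Note that, as generated, this
    # operation also returns a paged iterator of DetectorDefinition rather than a
    # single object, so it is consumed the same way as list_site_detectors_slot.
    #
    #   for detector in client.diagnostics.get_site_detector_slot(
    #           'my-resource-group', 'my-site', 'availability', 'sitecpuanalysis',
    #           'staging'):
    #       print(detector.name)
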
def execute_site_detector_slot(
self, resource_group_name, site_name, detector_name, diagnostic_category, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticDetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_detector_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticDetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}
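
    # Illustrative usage sketch (placeholder names; the time parameters are optional
    # and follow the same conventions as execute_site_analysis_slot above).
    #
    #   response = client.diagnostics.execute_site_detector_slot(
    #       'my-resource-group', 'my-site', 'sitecpuanalysis', 'availability',
    #       'staging', time_grain='PT15M')
    #   print(response.name)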
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class DiagnosticsOperations(object):
"""DiagnosticsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2018-02-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-02-01"
self.config = config
def list_hosting_environment_detector_responses(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""List Hosting Environment Detector Responses.
List Hosting Environment Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: Site Name
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_hosting_environment_detector_responses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_hosting_environment_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors'}
def get_hosting_environment_detector_response(
self, resource_group_name, name, detector_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get Hosting Environment Detector Response.
Get Hosting Environment Detector Response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: App Service Environment Name
:type name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_hosting_environment_detector_response.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_hosting_environment_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors/{detectorName}'}
def list_site_detector_responses(
self, resource_group_name, site_name, custom_headers=None, raw=False, **operation_config):
"""List Site Detector Responses.
List Site Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detector_responses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors'}
def get_site_detector_response(
self, resource_group_name, site_name, detector_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get site detector response.
Get site detector response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_detector_response.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors/{detectorName}'}
def list_site_diagnostic_categories(
self, resource_group_name, site_name, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Categories.
Get Diagnostics Categories.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DiagnosticCategory
:rtype:
~azure.mgmt.web.models.DiagnosticCategoryPaged[~azure.mgmt.web.models.DiagnosticCategory]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_diagnostic_categories.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_diagnostic_categories.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics'}
def get_site_diagnostic_category(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Category.
Get Diagnostics Category.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticCategory or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticCategory or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_diagnostic_category.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticCategory', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_diagnostic_category.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}'}
def list_site_analyses(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Site Analyses.
Get Site Analyses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis(
self, resource_group_name, site_name, diagnostic_category, analysis_name, custom_headers=None, raw=False, **operation_config):
"""Get Site Analysis.
Get Site Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param analysis_name: Analysis Name
:type analysis_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_analysis.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
def execute_site_analysis(
self, resource_group_name, site_name, diagnostic_category, analysis_name, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Analysis.
Execute Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param analysis_name: Analysis Resource Name
:type analysis_name: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_analysis.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
def list_site_detectors(
self, resource_group_name, site_name, diagnostic_category, custom_headers=None, raw=False, **operation_config):
"""Get Detectors.
Get Detectors.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detectors.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detectors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors'}
def get_site_detector(
self, resource_group_name, site_name, diagnostic_category, detector_name, custom_headers=None, raw=False, **operation_config):
"""Get Detector.
Get Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param detector_name: Detector Name
:type detector_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_site_detector.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
def execute_site_detector(
self, resource_group_name, site_name, detector_name, diagnostic_category, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticDetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_detector.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticDetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}
def list_site_detector_responses_slot(
self, resource_group_name, site_name, slot, custom_headers=None, raw=False, **operation_config):
"""List Site Detector Responses.
List Site Detector Responses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorResponse
:rtype:
~azure.mgmt.web.models.DetectorResponsePaged[~azure.mgmt.web.models.DetectorResponse]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detector_responses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorResponsePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detector_responses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors'}
def get_site_detector_response_slot(
self, resource_group_name, site_name, detector_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Get site detector response.
Get site detector response.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_detector_response_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_detector_response_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors/{detectorName}'}
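    # --- Illustrative usage (not part of the generated code) ---
    # A minimal sketch of calling this operation through the management client.
    # It assumes `web_client` is an already-authenticated WebSiteManagementClient
    # and uses placeholder resource names; time_grain, if given, must satisfy the
    # 'PT[1-9][0-9]+[SMH]' pattern enforced above (e.g. 'PT60M').
    #
    #   detector = web_client.diagnostics.get_site_detector_response_slot(
    #       resource_group_name='my-rg',
    #       site_name='my-site',
    #       detector_name='runtimeavailability',   # placeholder detector name
    #       slot='staging',
    #       time_grain='PT60M')
    #   print(detector.name)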
def list_site_diagnostic_categories_slot(
self, resource_group_name, site_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Categories.
Get Diagnostics Categories.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DiagnosticCategory
:rtype:
~azure.mgmt.web.models.DiagnosticCategoryPaged[~azure.mgmt.web.models.DiagnosticCategory]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_diagnostic_categories_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiagnosticCategoryPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_diagnostic_categories_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics'}
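    # --- Illustrative usage (not part of the generated code) ---
    # The operation returns a DiagnosticCategoryPaged iterator that lazily follows
    # the service's next links; a sketch with placeholder names, assuming an
    # authenticated WebSiteManagementClient called `web_client`:
    #
    #   for category in web_client.diagnostics.list_site_diagnostic_categories_slot(
    #           'my-rg', 'my-site', 'staging'):
    #       print(category.name)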
def get_site_diagnostic_category_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Diagnostics Category.
Get Diagnostics Category.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticCategory or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticCategory or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_diagnostic_category_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticCategory', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_diagnostic_category_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}'}
def list_site_analyses_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Site Analyses.
Get Site Analyses.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis_slot(
self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Site Analysis.
Get Site Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param analysis_name: Analysis Name
:type analysis_name: str
        :param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.get_site_analysis_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
def execute_site_analysis_slot(
self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Analysis.
Execute Analysis.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param analysis_name: Analysis Resource Name
:type analysis_name: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticAnalysis or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_analysis_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticAnalysis', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
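    # --- Illustrative usage (not part of the generated code) ---
    # execute_site_analysis_slot POSTs to .../analyses/{analysisName}/execute and
    # deserializes the body into a DiagnosticAnalysis model. Placeholder names,
    # assuming `web_client` is an authenticated WebSiteManagementClient:
    #
    #   analysis = web_client.diagnostics.execute_site_analysis_slot(
    #       resource_group_name='my-rg',
    #       site_name='my-site',
    #       diagnostic_category='availability',
    #       analysis_name='appanalysis',           # placeholder analysis name
    #       slot='staging')
    #   print(analysis.as_dict())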
def list_site_detectors_slot(
self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
"""Get Detectors.
Get Detectors.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_detectors_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_detectors_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors'}
def get_site_detector_slot(
self, resource_group_name, site_name, diagnostic_category, detector_name, slot, custom_headers=None, raw=False, **operation_config):
"""Get Detector.
Get Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param diagnostic_category: Diagnostic Category
:type diagnostic_category: str
:param detector_name: Detector Name
:type detector_name: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DetectorDefinition
:rtype:
~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_site_detector_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
def execute_site_detector_slot(
self, resource_group_name, site_name, detector_name, diagnostic_category, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.DiagnosticDetectorResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
# Construct URL
url = self.execute_site_detector_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if start_time is not None:
query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if time_grain is not None:
query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DiagnosticDetectorResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
execute_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'} | pt | 0.154807 | 1.805375 | 2 |
Dataset/Leetcode/valid/98/736.py | kkcookies99/UAST | 0 | 14901 | <reponame>kkcookies99/UAST
class Solution:
def XXX(self, root: TreeNode) -> bool:
stack = []
cur = root
last = float("-inf")
while cur or stack:
while cur:
stack.append(cur)
cur = cur.left
cur = stack.pop()
if cur.val > last:
last = cur.val
else:
return False
cur = cur.right
return True
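        # Note: an in-order traversal of a valid BST visits keys in strictly
        # increasing order, so tracking only the previously visited value is
        # enough. For example, [2,1,3] is visited as 1,2,3 and passes, while
        # [5,1,4,null,null,3,6] is visited as 1,5,3,... and fails at 3 <= 5.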
| class Solution:
def XXX(self, root: TreeNode) -> bool:
stack = []
cur = root
last = float("-inf")
while cur or stack:
while cur:
stack.append(cur)
cur = cur.left
cur = stack.pop()
if cur.val > last:
last = cur.val
else:
return False
cur = cur.right
return True | none | 1 | 3.289557 | 3 |
data/ck/check_data.py | jorgimello/meta-learning-fer | 4 | 14902 | <reponame>jorgimello/meta-learning-fer<gh_stars>1-10
import numpy as np
import os, cv2
imgs = np.load('test_set_ck_extended_no_resize.npy')
lbls = np.load('test_labels_ck_extended_no_resize.npy')
for i in range(imgs.shape[0]):
print (lbls[i])
cv2.imshow('img', imgs[i])
cv2.waitKey(0)
| import numpy as np
import os, cv2
imgs = np.load('test_set_ck_extended_no_resize.npy')
lbls = np.load('test_labels_ck_extended_no_resize.npy')
for i in range(imgs.shape[0]):
print (lbls[i])
cv2.imshow('img', imgs[i])
cv2.waitKey(0) | none | 1 | 2.537697 | 3 |
app/db/schemas/users.py | ergo-pad/paideia-api | 0 | 14903 | <filename>app/db/schemas/users.py
from pydantic import BaseModel
import typing as t
### SCHEMAS FOR USERS ###
class UserBase(BaseModel):
alias: str
primary_wallet_address_id: t.Optional[int]
profile_img_url: t.Optional[str]
is_active: bool = True
is_superuser: bool = False
class UserOut(UserBase):
pass
class UserCreate(UserBase):
password: str
class Config:
orm_mode = True
class UserEdit(UserBase):
password: t.Optional[str] = None
class Config:
orm_mode = True
class User(UserBase):
id: int
class Config:
orm_mode = True
class CreateErgoAddress(BaseModel):
user_id: int
address: str
is_smart_contract: bool
class ErgoAddress(CreateErgoAddress):
id: int
class Config:
orm_mode = True
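# --- Illustrative usage (not part of this schema module) ---
# A rough sketch of how these pydantic models are typically used; the field
# values below are made-up placeholders.
#
#   payload = UserCreate(
#       alias="alice",
#       primary_wallet_address_id=None,
#       profile_img_url=None,
#       password="change-me",
#   )
#   # With orm_mode enabled, an ORM row can be converted directly:
#   #   user = User.from_orm(db_user)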
| <filename>app/db/schemas/users.py
from pydantic import BaseModel
import typing as t
### SCHEMAS FOR USERS ###
class UserBase(BaseModel):
alias: str
primary_wallet_address_id: t.Optional[int]
profile_img_url: t.Optional[str]
is_active: bool = True
is_superuser: bool = False
class UserOut(UserBase):
pass
class UserCreate(UserBase):
password: str
class Config:
orm_mode = True
class UserEdit(UserBase):
password: t.Optional[str] = None
class Config:
orm_mode = True
class User(UserBase):
id: int
class Config:
orm_mode = True
class CreateErgoAddress(BaseModel):
user_id: int
address: str
is_smart_contract: bool
class ErgoAddress(CreateErgoAddress):
id: int
class Config:
orm_mode = True
| en | 0.75399 | 2.483841 | 2 |
custom_components/discord_game/sensor.py | Myztillx/discord_game | 0 | 14904 | <gh_stars>0
import asyncio
import json
import logging
import re
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from discord import ActivityType, Spotify, Game, Streaming, CustomActivity, Activity, Member, User
from homeassistant.components.notify import PLATFORM_SCHEMA
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['discord.py==1.5.1']
CONF_TOKEN = 'token'
CONF_MEMBERS = 'members'
CONF_IMAGE_FORMAT = 'image_format'
DOMAIN = 'sensor'
ENTITY_ID_FORMAT = "sensor.discord_{}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_MEMBERS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_IMAGE_FORMAT, default='webp'): vol.In(['png', 'webp', 'jpeg', 'jpg']),
})
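# --- Illustrative configuration (assumption, not taken from this repository) ---
# A minimal sketch of how this platform could be declared in Home Assistant's
# configuration.yaml; the token and member entries are placeholders.
#
#   sensor:
#     - platform: discord_game
#       token: !secret discord_bot_token
#       image_format: png
#       members:
#         - "SomeUser#1234"
#         - "123456789012345678"   # a bare user id (snowflake) is also accepted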
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
import discord
token = config.get(CONF_TOKEN)
image_format = config.get(CONF_IMAGE_FORMAT)
intents = discord.Intents.default()
intents.members = True
intents.presences = True
bot = discord.Client(loop=hass.loop, intents=intents)
await bot.login(token)
async def async_stop_server(event):
await bot.logout()
async def start_server(event):
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_server)
await bot.start(token)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_server)
@bot.event
async def on_error(error, *args, **kwargs):
raise
def update_discord_entity(watcher: DiscordAsyncMemberState, discord_member: Member):
watcher._state = discord_member.status
activity_state = None
game = None
game_state = None
game_details = None
game_image_small = None
game_image_large = None
game_image_small_text = None
game_image_large_text = None
streaming = None
streaming_details = None
streaming_url = None
listening = None
listening_details = None
listening_url = None
spotify_artists = None
spotify_title = None
spotify_album = None
spotify_album_cover_url = None
spotify_track_id = None
spotify_duration = None
spotify_start = None
spotify_end = None
watching = None
watching_details = None
watching_url = None
custom_status = None
custom_emoji = None
for activity in discord_member.activities:
if activity.type == ActivityType.playing:
if isinstance(activity, Game):
activity: Game
game = activity.name
continue
else:
activity: Activity
game = activity.name
game_state = activity.state
game_details = activity.details
game_image_small = activity.small_image_url
game_image_large = activity.large_image_url
game_image_small_text = activity.small_image_text
game_image_large_text = activity.large_image_text
continue
if activity.type == ActivityType.streaming:
activity: Streaming
streaming = activity.name
streaming_details = activity.details
streaming_url = activity.url
continue
if activity.type == ActivityType.listening:
if isinstance(activity, Spotify):
activity: Spotify
listening = activity.title
spotify_artists = ", ".join(activity.artists)
spotify_title = activity.title
spotify_album = activity.album
spotify_album_cover_url = activity.album_cover_url
spotify_track_id = activity.track_id
spotify_duration = str(activity.duration)
spotify_start = str(activity.start)
spotify_end = str(activity.end)
continue
else:
activity: Activity
activity_state = activity.state
listening = activity.name
listening_details = activity.details
listening_url = activity.url
continue
if activity.type == ActivityType.watching:
activity: Activity
activity_state = activity.state
watching = activity.name
watching_details = activity.details
watching_url = activity.url
continue
if activity.type == ActivityType.custom:
activity: CustomActivity
activity_state = activity.state
custom_status = activity.name
custom_emoji = activity.emoji.name if activity.emoji else None
continue
watcher._game = game
watcher._game_state = game_state
watcher._game_details = game_details
watcher._game_image_small = game_image_small
watcher._game_image_large = game_image_large
watcher._game_image_small_text = game_image_small_text
watcher._game_image_large_text = game_image_large_text
watcher._streaming = streaming
watcher._streaming_url = streaming_url
watcher._streaming_details = streaming_details
watcher._listening = listening
watcher._listening_url = listening_url
watcher._listening_details = listening_details
watcher._spotify_artist = spotify_artists
watcher._spotify_title = spotify_title
watcher._spotify_album = spotify_album
watcher._spotify_album_cover_url = spotify_album_cover_url
watcher._spotify_track_id = spotify_track_id
watcher._spotify_duration = spotify_duration
watcher._spotify_start = spotify_start
watcher._spotify_end = spotify_end
watcher._watching = watching
watcher._watching_url = watching_url
watcher._watching_details = watching_details
watcher._activity_state = activity_state
watcher._custom_status = custom_status
watcher._custom_emoji = custom_emoji
watcher.async_schedule_update_ha_state()
def update_discord_entity_user(watcher: DiscordAsyncMemberState, discord_user: User):
watcher._avatar_url = discord_user.avatar_url_as(format=None, static_format=image_format, size=1024).__str__()
watcher._user_id = discord_user.id
watcher.async_schedule_update_ha_state(True)
@bot.event
async def on_ready():
users = {"{}".format(user): user for user in bot.users}
members = {"{}".format(member): member for member in list(bot.get_all_members())}
for name, watcher in watchers.items():
if users.get(name) is not None:
update_discord_entity_user(watcher, users.get(name))
if members.get(name) is not None:
update_discord_entity(watcher, members.get(name))
@bot.event
async def on_member_update(before: Member, after: Member):
watcher = watchers.get("{}".format(after))
if watcher is not None:
update_discord_entity(watcher, after)
@bot.event
async def on_user_update(before: User, after: User):
watcher: DiscordAsyncMemberState = watchers.get("{}".format(after))
if watcher is not None:
update_discord_entity_user(watcher, after)
watchers = {}
for member in config.get(CONF_MEMBERS):
if re.match(r"^.*#[0-9]{4}", member):
watcher: DiscordAsyncMemberState = DiscordAsyncMemberState(hass, bot, member)
watchers[watcher.name] = watcher
        elif re.match(r"^[0-9]{1,20}$", member):  # up to 20 digits, because 2^64 (snowflake length) is 20 digits long
user = await bot.fetch_user(member)
if user:
watcher: DiscordAsyncMemberState = DiscordAsyncMemberState(hass, bot, "{}#{}".format(user.name,user.discriminator))
watchers[watcher.name] = watcher
if len(watchers) > 0:
async_add_entities(watchers.values())
return True
else:
return False
class DiscordAsyncMemberState(Entity):
def __init__(self, hass, client, member):
self._member = member
self._hass = hass
self._client = client
self._state = 'unknown'
self._game = None
self._game_state = None
self._game_details = None
self._game_image_small = None
self._game_image_large = None
self._game_image_small_text = None
self._game_image_large_text = None
self._streaming = None
self._streaming_url = None
self._streaming_details = None
self._listening = None
self._listening_url = None
self._listening_details = None
self._spotify_artist = None
self._spotify_title = None
self._spotify_album = None
self._spotify_album_cover_url = None
self._spotify_track_id = None
self._spotify_duration = None
self._spotify_start = None
self._spotify_end = None
self._watching = None
self._watching_url = None
self._watching_details = None
self._avatar_url = None
self._user_id = None
self._custom_status = None
self._custom_emoji = None
@property
def should_poll(self) -> bool:
return False
@property
def state(self) -> str:
return self._state
@property
def entity_id(self):
"""Return the entity ID."""
# 1st Regex; keep a-z0-9 [](){} characters, replace with "_"
# 2nd Regex; keep only a-z0-9 and single non-leading and non-trailing "_" characters, replace everything else with ""
return ENTITY_ID_FORMAT.format(re.sub(r'([^a-z0-9_]|^_+|_+$|(_)\2+)', '', re.sub('[^a-z0-9 \[\]\(\)\{\}\"\']', '_', self._member.lower())))
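        # For example, a member named "Some User#1234" becomes
        # "sensor.discord_someuser_1234": '#' is first replaced with '_',
        # then the space is dropped by the second substitution.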
@property
def name(self):
return self._member
@property
def entity_picture(self):
return self._avatar_url
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'avatar_url': self._avatar_url,
'game': self._game,
'game_state': self._game_state,
'game_details': self._game_details,
'game_image_small': self._game_image_small,
'game_image_large': self._game_image_large,
'game_image_small_text': self._game_image_small_text,
'game_image_large_text': self._game_image_large_text,
'streaming': self._streaming,
'streaming_url': self._streaming_url,
'streaming_details': self._streaming_details,
'listening': self._listening,
'listening_url': self._listening_url,
'listening_details': self._listening_details,
'spotify_artist': self._spotify_artist,
'spotify_title': self._spotify_title,
'spotify_album': self._spotify_album,
'spotify_album_cover_url': self._spotify_album_cover_url,
'spotify_track_id': self._spotify_track_id,
'spotify_duration': self._spotify_duration,
'spotify_start': self._spotify_start,
'spotify_end': self._spotify_end,
'watching': self._watching,
'watching_url': self._watching_url,
'watching_details': self._watching_details,
'custom_status': self._custom_status,
'custom_emoji': self._custom_emoji
}
| import asyncio
import json
import logging
import re
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from discord import ActivityType, Spotify, Game, Streaming, CustomActivity, Activity, Member, User
from homeassistant.components.notify import PLATFORM_SCHEMA
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['discord.py==1.5.1']
CONF_TOKEN = 'token'
CONF_MEMBERS = 'members'
CONF_IMAGE_FORMAT = 'image_format'
DOMAIN = 'sensor'
ENTITY_ID_FORMAT = "sensor.discord_{}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_MEMBERS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_IMAGE_FORMAT, default='webp'): vol.In(['png', 'webp', 'jpeg', 'jpg']),
})
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
import discord
token = config.get(CONF_TOKEN)
image_format = config.get(CONF_IMAGE_FORMAT)
intents = discord.Intents.default()
intents.members = True
intents.presences = True
bot = discord.Client(loop=hass.loop, intents=intents)
await bot.login(token)
async def async_stop_server(event):
await bot.logout()
async def start_server(event):
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_server)
await bot.start(token)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_server)
@bot.event
async def on_error(error, *args, **kwargs):
raise
def update_discord_entity(watcher: DiscordAsyncMemberState, discord_member: Member):
watcher._state = discord_member.status
activity_state = None
game = None
game_state = None
game_details = None
game_image_small = None
game_image_large = None
game_image_small_text = None
game_image_large_text = None
streaming = None
streaming_details = None
streaming_url = None
listening = None
listening_details = None
listening_url = None
spotify_artists = None
spotify_title = None
spotify_album = None
spotify_album_cover_url = None
spotify_track_id = None
spotify_duration = None
spotify_start = None
spotify_end = None
watching = None
watching_details = None
watching_url = None
custom_status = None
custom_emoji = None
for activity in discord_member.activities:
if activity.type == ActivityType.playing:
if isinstance(activity, Game):
activity: Game
game = activity.name
continue
else:
activity: Activity
game = activity.name
game_state = activity.state
game_details = activity.details
game_image_small = activity.small_image_url
game_image_large = activity.large_image_url
game_image_small_text = activity.small_image_text
game_image_large_text = activity.large_image_text
continue
if activity.type == ActivityType.streaming:
activity: Streaming
streaming = activity.name
streaming_details = activity.details
streaming_url = activity.url
continue
if activity.type == ActivityType.listening:
if isinstance(activity, Spotify):
activity: Spotify
listening = activity.title
spotify_artists = ", ".join(activity.artists)
spotify_title = activity.title
spotify_album = activity.album
spotify_album_cover_url = activity.album_cover_url
spotify_track_id = activity.track_id
spotify_duration = str(activity.duration)
spotify_start = str(activity.start)
spotify_end = str(activity.end)
continue
else:
activity: Activity
activity_state = activity.state
listening = activity.name
listening_details = activity.details
listening_url = activity.url
continue
if activity.type == ActivityType.watching:
activity: Activity
activity_state = activity.state
watching = activity.name
watching_details = activity.details
watching_url = activity.url
continue
if activity.type == ActivityType.custom:
activity: CustomActivity
activity_state = activity.state
custom_status = activity.name
custom_emoji = activity.emoji.name if activity.emoji else None
continue
watcher._game = game
watcher._game_state = game_state
watcher._game_details = game_details
watcher._game_image_small = game_image_small
watcher._game_image_large = game_image_large
watcher._game_image_small_text = game_image_small_text
watcher._game_image_large_text = game_image_large_text
watcher._streaming = streaming
watcher._streaming_url = streaming_url
watcher._streaming_details = streaming_details
watcher._listening = listening
watcher._listening_url = listening_url
watcher._listening_details = listening_details
watcher._spotify_artist = spotify_artists
watcher._spotify_title = spotify_title
watcher._spotify_album = spotify_album
watcher._spotify_album_cover_url = spotify_album_cover_url
watcher._spotify_track_id = spotify_track_id
watcher._spotify_duration = spotify_duration
watcher._spotify_start = spotify_start
watcher._spotify_end = spotify_end
watcher._watching = watching
watcher._watching_url = watching_url
watcher._watching_details = watching_details
watcher._activity_state = activity_state
watcher._custom_status = custom_status
watcher._custom_emoji = custom_emoji
watcher.async_schedule_update_ha_state()
def update_discord_entity_user(watcher: DiscordAsyncMemberState, discord_user: User):
watcher._avatar_url = discord_user.avatar_url_as(format=None, static_format=image_format, size=1024).__str__()
watcher._user_id = discord_user.id
watcher.async_schedule_update_ha_state(True)
@bot.event
async def on_ready():
users = {"{}".format(user): user for user in bot.users}
members = {"{}".format(member): member for member in list(bot.get_all_members())}
for name, watcher in watchers.items():
if users.get(name) is not None:
update_discord_entity_user(watcher, users.get(name))
if members.get(name) is not None:
update_discord_entity(watcher, members.get(name))
@bot.event
async def on_member_update(before: Member, after: Member):
watcher = watchers.get("{}".format(after))
if watcher is not None:
update_discord_entity(watcher, after)
@bot.event
async def on_user_update(before: User, after: User):
watcher: DiscordAsyncMemberState = watchers.get("{}".format(after))
if watcher is not None:
update_discord_entity_user(watcher, after)
watchers = {}
for member in config.get(CONF_MEMBERS):
if re.match(r"^.*#[0-9]{4}", member):
watcher: DiscordAsyncMemberState = DiscordAsyncMemberState(hass, bot, member)
watchers[watcher.name] = watcher
        elif re.match(r"^[0-9]{1,20}$", member):  # up to 20 digits, because 2^64 (snowflake length) is 20 digits long
user = await bot.fetch_user(member)
if user:
watcher: DiscordAsyncMemberState = DiscordAsyncMemberState(hass, bot, "{}#{}".format(user.name,user.discriminator))
watchers[watcher.name] = watcher
if len(watchers) > 0:
async_add_entities(watchers.values())
return True
else:
return False
class DiscordAsyncMemberState(Entity):
def __init__(self, hass, client, member):
self._member = member
self._hass = hass
self._client = client
self._state = 'unknown'
self._game = None
self._game_state = None
self._game_details = None
self._game_image_small = None
self._game_image_large = None
self._game_image_small_text = None
self._game_image_large_text = None
self._streaming = None
self._streaming_url = None
self._streaming_details = None
self._listening = None
self._listening_url = None
self._listening_details = None
self._spotify_artist = None
self._spotify_title = None
self._spotify_album = None
self._spotify_album_cover_url = None
self._spotify_track_id = None
self._spotify_duration = None
self._spotify_start = None
self._spotify_end = None
self._watching = None
self._watching_url = None
self._watching_details = None
self._avatar_url = None
self._user_id = None
self._custom_status = None
self._custom_emoji = None
@property
def should_poll(self) -> bool:
return False
@property
def state(self) -> str:
return self._state
@property
def entity_id(self):
"""Return the entity ID."""
# 1st Regex; keep a-z0-9 [](){} characters, replace with "_"
# 2nd Regex; keep only a-z0-9 and single non-leading and non-trailing "_" characters, replace everything else with ""
return ENTITY_ID_FORMAT.format(re.sub(r'([^a-z0-9_]|^_+|_+$|(_)\2+)', '', re.sub('[^a-z0-9 \[\]\(\)\{\}\"\']', '_', self._member.lower())))
@property
def name(self):
return self._member
@property
def entity_picture(self):
return self._avatar_url
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'avatar_url': self._avatar_url,
'game': self._game,
'game_state': self._game_state,
'game_details': self._game_details,
'game_image_small': self._game_image_small,
'game_image_large': self._game_image_large,
'game_image_small_text': self._game_image_small_text,
'game_image_large_text': self._game_image_large_text,
'streaming': self._streaming,
'streaming_url': self._streaming_url,
'streaming_details': self._streaming_details,
'listening': self._listening,
'listening_url': self._listening_url,
'listening_details': self._listening_details,
'spotify_artist': self._spotify_artist,
'spotify_title': self._spotify_title,
'spotify_album': self._spotify_album,
'spotify_album_cover_url': self._spotify_album_cover_url,
'spotify_track_id': self._spotify_track_id,
'spotify_duration': self._spotify_duration,
'spotify_start': self._spotify_start,
'spotify_end': self._spotify_end,
'watching': self._watching,
'watching_url': self._watching_url,
'watching_details': self._watching_details,
'custom_status': self._custom_status,
'custom_emoji': self._custom_emoji
} | it | 0.176269 | 2.091811 | 2 |
Mundo 1/ex_014.py | Shock3/Python_Exercicios | 0 | 14905 | """
Escreva um programa que leia uma temperatura em graus Celsius
e a converta para graus Fahrenheit e também para Kelvin.
"""
celsius = int(input('Digite a temperatura: '))
fahrenheit = (celsius / 5) * 9 + 32
Kelvin = celsius + 273
print(f'A temperatura {celsius}°C em Fahrenheit é {fahrenheit}°F')
print(f'E em Kelvin fica {Kelvin} K')
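# Exemplo: para 30 °C -> fahrenheit = (30 / 5) * 9 + 32 = 86.0 °F e Kelvin = 30 + 273 = 303 K.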
| """
Escreva um programa que leia uma temperatura em graus Celsius
e a converta para graus Fahrenheit e também para Kelvin.
"""
celsius = int(input('Digite a temperatura: '))
fahrenheit = (celsius / 5) * 9 + 32
Kelvin = celsius + 273
print(f'A temperatura {celsius}°C em Fahrenheit é {fahrenheit}°F')
print(f'E em Kelvin fica {Kelvin} K')
| pt | 0.211639 | 4.224044 | 4 |
opensecrets/__init__.py | ndanielsen/py-opensecrets | 1 | 14906 | <reponame>ndanielsen/py-opensecrets<filename>opensecrets/__init__.py
from .crpapi import CRP
| from .crpapi import CRP | none | 1 | 1.096217 | 1 |
fumblr/services/imgur.py | jonoco/fumblr | 2 | 14907 | <gh_stars>1-10
from fumblr.keys import IMGUR_SECRET, IMGUR_ID
from imgurpython import ImgurClient, helpers
import os
import base64
API_URL = 'https://api.imgur.com/3/'
def get_client():
"""
Get an API client for Imgur
Returns:
Imgur client if it is available
"""
try:
return ImgurClient(IMGUR_ID, IMGUR_SECRET)
except helpers.error.ImgurClientError:
print(f'Error: imgur client error - id: {IMGUR_ID} secret: {IMGUR_SECRET}')
def delete_image(deletehash):
"""
Delete image from Imgur with given deletehash
Args:
deletehash: Hash id of image to delete
Returns:
Response from Imgur of image deletion if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.delete_image(deletehash)
except:
return False
def upload_image(path):
"""
Upload image at system path to Imgur
Example of response data from Imgur upload:
{'size': 3527,
'title': None,
'animated': False,
'deletehash': 'YkK79ucEtDDn1b9',
'views': 0,
'width': 187,
'account_url': None,
'in_gallery': False,
'name': '',
'section': None,
'account_id': 0,
'type': 'image/png',
'datetime': 1473926225,
'description': None,
'height': 242,
'bandwidth': 0,
'id': 'AEvnA7h',
'favorite': False,
'nsfw': None,
'link': 'http://i.imgur.com/AEvnA7h.png',
'is_ad': False,
'vote': None}
Args:
path: System path of image
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_path = os.path.abspath(path)
upload = client.upload_from_path(image_path)
return upload
def upload(image):
"""
Upload image to Imgur from file
Args:
image: File object
Returns:
Imgur response object
"""
client = get_client()
if client:
contents = image.read()
b64 = base64.b64encode(contents)
data = {
'image': b64,
'type': 'base64'
}
return client.make_request('POST', 'upload', data, True)
def upload_from_url(url):
"""
Upload image to Imgur from url
Args:
url: URL of image
Returns:
Imgur Response object if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.upload_from_url(url)
except helpers.error.ImgurClientError:
print('Error: imgur client error')
return False
def get_image(id):
"""
Return image data for image with given id
Args:
id: Imgur image id
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_data = client.get_image(id)
return image_data
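# --- Illustrative usage (not part of this module) ---
# A rough sketch of calling these helpers; the URL is a placeholder and the
# IMGUR_ID / IMGUR_SECRET values are assumed to be configured in fumblr.keys.
#
#   uploaded = upload_from_url('https://example.com/picture.png')
#   if uploaded:
#       print(uploaded['link'])                # direct link, as in the response shown above
#       delete_image(uploaded['deletehash'])   # clean up again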
| from fumblr.keys import IMGUR_SECRET, IMGUR_ID
from imgurpython import ImgurClient, helpers
import os
import base64
API_URL = 'https://api.imgur.com/3/'
def get_client():
"""
Get an API client for Imgur
Returns:
Imgur client if it is available
"""
try:
return ImgurClient(IMGUR_ID, IMGUR_SECRET)
except helpers.error.ImgurClientError:
print(f'Error: imgur client error - id: {IMGUR_ID} secret: {IMGUR_SECRET}')
def delete_image(deletehash):
"""
Delete image from Imgur with given deletehash
Args:
deletehash: Hash id of image to delete
Returns:
Response from Imgur of image deletion if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.delete_image(deletehash)
except:
return False
def upload_image(path):
"""
Upload image at system path to Imgur
Example of response data from Imgur upload:
{'size': 3527,
'title': None,
'animated': False,
'deletehash': 'YkK79ucEtDDn1b9',
'views': 0,
'width': 187,
'account_url': None,
'in_gallery': False,
'name': '',
'section': None,
'account_id': 0,
'type': 'image/png',
'datetime': 1473926225,
'description': None,
'height': 242,
'bandwidth': 0,
'id': 'AEvnA7h',
'favorite': False,
'nsfw': None,
'link': 'http://i.imgur.com/AEvnA7h.png',
'is_ad': False,
'vote': None}
Args:
path: System path of image
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_path = os.path.abspath(path)
upload = client.upload_from_path(image_path)
return upload
def upload(image):
"""
Upload image to Imgur from file
Args:
image: File object
Returns:
Imgur response object
"""
client = get_client()
if client:
contents = image.read()
b64 = base64.b64encode(contents)
data = {
'image': b64,
'type': 'base64'
}
return client.make_request('POST', 'upload', data, True)
def upload_from_url(url):
"""
Upload image to Imgur from url
Args:
url: URL of image
Returns:
Imgur Response object if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.upload_from_url(url)
except helpers.error.ImgurClientError:
print('Error: imgur client error')
return False
def get_image(id):
"""
Return image data for image with given id
Args:
id: Imgur image id
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_data = client.get_image(id)
return image_data | pt | 0.10028 | 2.798728 | 3 |
source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py | awslabs/quantum-ready-solution-for-drug-discovery | 10 | 14908 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
import botocore
import os
import logging
import time
import json
import datetime
log = logging.getLogger()
log.setLevel('INFO')
bucket = os.environ['BUCKET']
region = os.environ['AWS_REGION']
solution_version = os.environ.get('SOLUTION_VERSION', 'v1.0.0')
solution_id = os.environ.get('SOLUTION_ID')
user_agent_config = {
'user_agent_extra': f'AwsSolution/{solution_id}/{solution_version}',
'region_name': region
}
default_config = botocore.config.Config(**user_agent_config)
athena_client = boto3.client('athena', config=default_config)
def handler(event, context):
s3_prefix = event['s3_prefix']
table_prefix = event["stackName"]
log.info(f"table_prefix: {table_prefix}, s3_prefix: {s3_prefix}")
table_name = f"{table_prefix}_qc_batch_evaluation_metrics_hist"
view_name = f"{table_prefix}_qc_batch_evaluation_metrics"
ATHENA_OUTPUT_LOCATION = f"s3://{bucket}/{s3_prefix}/athena-out/"
location = f"s3://{bucket}/{s3_prefix}/batch_evaluation_metrics/"
createDBSql = "CREATE DATABASE IF NOT EXISTS qc_db"
dropTableSql = f"DROP TABLE IF EXISTS qc_db.{table_name}"
createTableSql = f'''
CREATE EXTERNAL TABLE IF NOT EXISTS qc_db.{table_name} (
Execution_Id string,
Compute_Type string,
Resolver string,
Complexity integer,
End_To_End_Time float,
Running_Time float,
Time_Info string,
Start_Time string,
Experiment_Name string,
Task_Id string,
Model_Name string,
Model_FileName string,
Scenario string,
Resource string,
Model_Param string,
Opt_Param string,
Create_Time string,
Result_Detail string,
Result_Location string
) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n' LOCATION '{location}'
'''
createViewSql = f"CREATE OR REPLACE VIEW qc_db.{view_name} AS SELECT h1.* FROM qc_db.{table_name} h1, (SELECT DISTINCT Execution_Id, Start_Time FROM qc_db.{table_name} ORDER BY Start_Time DESC LIMIT 20) h2 WHERE (h1.Execution_Id = h2.Execution_Id)" #nosec B608
querySql = f"SELECT * FROM qc_db.{view_name}" #nosec B608
sqlStmSeq = [createDBSql, dropTableSql, createTableSql, createViewSql, querySql]
for sqlStm in sqlStmSeq:
log.info(sqlStm)
response = athena_client.start_query_execution(
QueryString=sqlStm,
ResultConfiguration={
'OutputLocation': ATHENA_OUTPUT_LOCATION
}
)
execution_id = response['QueryExecutionId']
wait_for_complete(execution_id)
log.info("all done")
return {
'queryResult': ATHENA_OUTPUT_LOCATION,
'endTime': datetime.datetime.utcnow().isoformat()
}
def wait_for_complete(execution_id):
log.info("execution_id:{}".format(execution_id))
response = athena_client.get_query_execution(
QueryExecutionId=execution_id
)
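    # Poll the execution status every few seconds until it reaches a terminal state.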
while True:
status = response['QueryExecution']['Status']
log.info("State: {}".format(status['State']))
if status['State'] == 'SUCCEEDED':
return status
elif status['State'] in ['QUEUED', 'RUNNING']:
time.sleep(3)
response = athena_client.get_query_execution(
QueryExecutionId=execution_id
)
else:
log.error(json.dumps(response, default=str))
raise Exception(json.dumps(response, default=str))
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
import botocore
import os
import logging
import time
import json
import datetime
log = logging.getLogger()
log.setLevel('INFO')
bucket = os.environ['BUCKET']
region = os.environ['AWS_REGION']
solution_version = os.environ.get('SOLUTION_VERSION', 'v1.0.0')
solution_id = os.environ.get('SOLUTION_ID')
user_agent_config = {
'user_agent_extra': f'AwsSolution/{solution_id}/{solution_version}',
'region_name': region
}
default_config = botocore.config.Config(**user_agent_config)
athena_client = boto3.client('athena', config=default_config)
def handler(event, context):
s3_prefix = event['s3_prefix']
table_prefix = event["stackName"]
log.info(f"table_prefix: {table_prefix}, s3_prefix: {s3_prefix}")
table_name = f"{table_prefix}_qc_batch_evaluation_metrics_hist"
view_name = f"{table_prefix}_qc_batch_evaluation_metrics"
ATHENA_OUTPUT_LOCATION = f"s3://{bucket}/{s3_prefix}/athena-out/"
location = f"s3://{bucket}/{s3_prefix}/batch_evaluation_metrics/"
createDBSql = "CREATE DATABASE IF NOT EXISTS qc_db"
dropTableSql = f"DROP TABLE IF EXISTS qc_db.{table_name}"
createTableSql = f'''
CREATE EXTERNAL TABLE IF NOT EXISTS qc_db.{table_name} (
Execution_Id string,
Compute_Type string,
Resolver string,
Complexity integer,
End_To_End_Time float,
Running_Time float,
Time_Info string,
Start_Time string,
Experiment_Name string,
Task_Id string,
Model_Name string,
Model_FileName string,
Scenario string,
Resource string,
Model_Param string,
Opt_Param string,
Create_Time string,
Result_Detail string,
Result_Location string
) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n' LOCATION '{location}'
'''
createViewSql = f"CREATE OR REPLACE VIEW qc_db.{view_name} AS SELECT h1.* FROM qc_db.{table_name} h1, (SELECT DISTINCT Execution_Id, Start_Time FROM qc_db.{table_name} ORDER BY Start_Time DESC LIMIT 20) h2 WHERE (h1.Execution_Id = h2.Execution_Id)" #nosec B608
querySql = f"SELECT * FROM qc_db.{view_name}" #nosec B608
sqlStmSeq = [createDBSql, dropTableSql, createTableSql, createViewSql, querySql]
for sqlStm in sqlStmSeq:
log.info(sqlStm)
response = athena_client.start_query_execution(
QueryString=sqlStm,
ResultConfiguration={
'OutputLocation': ATHENA_OUTPUT_LOCATION
}
)
execution_id = response['QueryExecutionId']
wait_for_complete(execution_id)
log.info("all done")
return {
'queryResult': ATHENA_OUTPUT_LOCATION,
'endTime': datetime.datetime.utcnow().isoformat()
}
def wait_for_complete(execution_id):
log.info("execution_id:{}".format(execution_id))
response = athena_client.get_query_execution(
QueryExecutionId=execution_id
)
while True:
status = response['QueryExecution']['Status']
log.info("State: {}".format(status['State']))
if status['State'] == 'SUCCEEDED':
return status
elif status['State'] in ['QUEUED', 'RUNNING']:
time.sleep(3)
response = athena_client.get_query_execution(
QueryExecutionId=execution_id
)
else:
log.error(json.dumps(response, default=str))
raise Exception(json.dumps(response, default=str))
| pt | 0.181007 | 1.765784 | 2 |
osf/management/commands/populate_custom_taxonomies.py | gaybro8777/osf.io | 628 | 14909 | import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.items():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.items():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in Preprint.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
if isinstance(custom_provider, PreprintProvider):
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
# This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
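    # A subject can only be created once its parent exists, so retry the remaining subjects over multiple passes (bounded at 10).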
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
                logger.warning('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
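    # Same fixed-point retry as above: a custom subject is only created after its parent has been mapped.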
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.items():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
                logger.warning('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in Preprint.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
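        # Replace each old leaf subject with its provider-specific alias (or merge target) and rebuild the subject set from those hierarchies.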
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
    assert not custom_provider.subjects.exists(), 'Provider already has a custom taxonomy'
if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'
'\n"exclude": [<list of children to exclude from included trees>],'
'\n"custom": [{"<Custom Name": {"parent": <Parent text>", "bepress": "<Bepress Name>"}}, ...]'
'\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)", ...}}',
)
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
parser.add_argument(
'--type',
action='store',
type=str,
dest='provider_type',
help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
provider_type = options.get('provider_type') or 'osf.preprintprovider'
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
| import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.items():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.items():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in Preprint.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
if isinstance(custom_provider, PreprintProvider):
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
# This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
                logger.warning('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.items():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
                logger.warning('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in Preprint.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
    assert not custom_provider.subjects.exists(), 'Provider already has a custom taxonomy'
if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'
'\n"exclude": [<list of children to exclude from included trees>],'
'\n"custom": [{"<Custom Name": {"parent": <Parent text>", "bepress": "<Bepress Name>"}}, ...]'
'\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)", ...}}',
)
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
parser.add_argument(
'--type',
action='store',
type=str,
dest='provider_type',
help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
provider_type = options.get('provider_type') or 'osf.preprintprovider'
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
| pt | 0.180776 | 1.922264 | 2 |
examples/animated_rsh.py | sophiaas/e3nn | 1 | 14910 | <gh_stars>1-10
# pylint: disable=not-callable, no-member, invalid-name, missing-docstring, line-too-long
import math
import os
import subprocess
import argparse
import shutil
import tqdm
import plotly.graph_objs as go
import torch
from e3nn import o3, rsh
def rsh_surface(l, m, scale, tr, rot):
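    # Radius encodes |Y_lm| over the sphere; the surface color encodes the signed value after applying the rotation given by rot.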
n = 50
a = torch.linspace(0, 2 * math.pi, 2 * n)
b = torch.linspace(0, math.pi, n)
a, b = torch.meshgrid(a, b)
f = rsh.spherical_harmonics_alpha_beta([l], a, b)
f = torch.einsum('ij,...j->...i', o3.irr_repr(l, *rot), f)
f = f[..., l + m]
r = o3.angles_to_xyz(a, b)
x, y, z = r[:, :, 0], r[:, :, 1], r[:, :, 2]
r = f.abs()
x = scale * r * x + tr[0]
y = scale * r * y + tr[1]
z = scale * r * z + tr[2]
max_value = 0.5
return go.Surface(
x=x.numpy(),
y=y.numpy(),
z=z.numpy(),
surfacecolor=f.numpy(),
showscale=False,
cmin=-max_value,
cmax=max_value,
colorscale=[[0, 'rgb(0,50,255)'], [0.5, 'rgb(200,200,200)'], [1, 'rgb(255,50,0)']],
)
def main(lmax, resolution, steps):
scale = 0.5 * math.sqrt(4 * math.pi) / math.sqrt(2 * lmax + 1)
axis = dict(
showbackground=False,
showticklabels=False,
showgrid=False,
zeroline=False,
title='',
nticks=3,
range=[-lmax / 2 - 0.5, lmax / 2 + 0.5]
)
layout = dict(
width=resolution,
height=resolution,
scene=dict(
xaxis=axis,
yaxis=axis,
zaxis=axis,
aspectmode='manual',
aspectratio=dict(x=1, y=1, z=1),
camera=dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=0, y=-1.3, z=0),
projection=dict(type='perspective'),
),
),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin=dict(l=0, r=0, t=0, b=0)
)
if os.path.exists('sh'):
shutil.rmtree('sh')
os.makedirs('sh')
for i in tqdm.tqdm(range(steps)):
rot = 2 * math.pi * i / steps
a, b, c = 0, math.pi / 4, 0
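        # Conjugate the z-rotation by the fixed Euler angles (a, b, c) so the harmonics appear to spin about a tilted axis.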
abc = o3.compose(-c, -b, -a, *o3.compose(0, 0, rot, a, b, c))
surfaces = [
rsh_surface(l, m, scale, [l + (m if m < 0 else 0) - lmax / 2, 0, lmax / 2 - l + (m if m > 0 else 0)], abc)
for l in range(lmax + 1)
for m in range(-l, l + 1)
]
fig = go.Figure(surfaces, layout=layout)
fig.write_image('sh/{:03d}.png'.format(i))
subprocess.check_output(["convert", "-delay", "3", "-loop", "0", "-dispose", "2", "sh/*.png", "output.gif"])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--lmax", type=int, default=2)
parser.add_argument("--resolution", type=int, default=500)
parser.add_argument("--steps", type=int, default=30)
args = parser.parse_args()
main(args.lmax, args.resolution, args.steps)
| # pylint: disable=not-callable, no-member, invalid-name, missing-docstring, line-too-long
import math
import os
import subprocess
import argparse
import shutil
import tqdm
import plotly.graph_objs as go
import torch
from e3nn import o3, rsh
def rsh_surface(l, m, scale, tr, rot):
n = 50
a = torch.linspace(0, 2 * math.pi, 2 * n)
b = torch.linspace(0, math.pi, n)
a, b = torch.meshgrid(a, b)
f = rsh.spherical_harmonics_alpha_beta([l], a, b)
f = torch.einsum('ij,...j->...i', o3.irr_repr(l, *rot), f)
f = f[..., l + m]
r = o3.angles_to_xyz(a, b)
x, y, z = r[:, :, 0], r[:, :, 1], r[:, :, 2]
r = f.abs()
x = scale * r * x + tr[0]
y = scale * r * y + tr[1]
z = scale * r * z + tr[2]
max_value = 0.5
return go.Surface(
x=x.numpy(),
y=y.numpy(),
z=z.numpy(),
surfacecolor=f.numpy(),
showscale=False,
cmin=-max_value,
cmax=max_value,
colorscale=[[0, 'rgb(0,50,255)'], [0.5, 'rgb(200,200,200)'], [1, 'rgb(255,50,0)']],
)
def main(lmax, resolution, steps):
scale = 0.5 * math.sqrt(4 * math.pi) / math.sqrt(2 * lmax + 1)
axis = dict(
showbackground=False,
showticklabels=False,
showgrid=False,
zeroline=False,
title='',
nticks=3,
range=[-lmax / 2 - 0.5, lmax / 2 + 0.5]
)
layout = dict(
width=resolution,
height=resolution,
scene=dict(
xaxis=axis,
yaxis=axis,
zaxis=axis,
aspectmode='manual',
aspectratio=dict(x=1, y=1, z=1),
camera=dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=0, y=-1.3, z=0),
projection=dict(type='perspective'),
),
),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
margin=dict(l=0, r=0, t=0, b=0)
)
if os.path.exists('sh'):
shutil.rmtree('sh')
os.makedirs('sh')
for i in tqdm.tqdm(range(steps)):
rot = 2 * math.pi * i / steps
a, b, c = 0, math.pi / 4, 0
abc = o3.compose(-c, -b, -a, *o3.compose(0, 0, rot, a, b, c))
surfaces = [
rsh_surface(l, m, scale, [l + (m if m < 0 else 0) - lmax / 2, 0, lmax / 2 - l + (m if m > 0 else 0)], abc)
for l in range(lmax + 1)
for m in range(-l, l + 1)
]
fig = go.Figure(surfaces, layout=layout)
fig.write_image('sh/{:03d}.png'.format(i))
subprocess.check_output(["convert", "-delay", "3", "-loop", "0", "-dispose", "2", "sh/*.png", "output.gif"])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--lmax", type=int, default=2)
parser.add_argument("--resolution", type=int, default=500)
parser.add_argument("--steps", type=int, default=30)
args = parser.parse_args()
main(args.lmax, args.resolution, args.steps) | it | 0.136778 | 2.135426 | 2 |
errores.py | fbzavaleta/DS_Software_Stack | 0 | 14911 | #
E_LEN = "No es posible operar vectores de diferente módulo"
| #
E_LEN = "No es posible operar vectores de diferente módulo"
| en | 0.426104 | 1.090533 | 1 |
dataset.py | gzaraunitn/TA3N | 0 | 14912 | import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
import random
from os import listdir
from os.path import join, splitext
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image, ImageFilter, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
init(autoreset=True)
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file, num_dataload,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.t7', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.num_dataload = num_dataload
if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list() # read all the video files
def _load_feature(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
feat_path = os.path.join(directory, self.image_tmpl.format(idx))
try:
feat = [torch.load(feat_path)]
            except Exception:
                print(Back.RED + feat_path)
                raise  # re-raise so the failure isn't masked by a NameError on the return below
return feat
elif self.modality == 'Flow':
x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
return [x_feat, y_feat]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
# repeat the list if the length is less than num_dataload (especially for target data)
n_repeat = self.num_dataload//len(self.video_list)
n_left = self.num_dataload%len(self.video_list)
self.video_list = self.video_list*n_repeat + self.video_list[:n_left]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
#np.random.seed(1)
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
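        # TSN-style sparse sampling: split the clip into num_segments equal chunks and draw one random frame from each.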
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1]
offsets = np.append(id_select, id_expand)
return offsets + 1
def __getitem__(self, index):
record = self.video_list[index]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
frames = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_feats = self._load_feature(record.path, p)
frames.extend(seg_feats)
if p < record.num_frames:
p += 1
# process_data = self.transform(frames)
process_data = torch.stack(frames)
return process_data, record.label
def __len__(self):
return len(self.video_list)
class VideoDataset(data.Dataset):
def __init__(
self,
folder,
n_frames,
frame_size=224,
separator="_"
):
self.folder = folder
self.num_segments = n_frames
self.frame_size = frame_size
self.data_transform = transforms.Compose(
[
transforms.Resize(self.frame_size),
transforms.CenterCrop(self.frame_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
self.separator = separator
self.classes = [c for c in sorted(listdir(folder))]
self.videos_with_classes = []
for c_index, c in enumerate(self.classes):
c_path = join(self.folder, c)
videos = listdir(c_path)
for v in videos:
v_path = join(c_path, v)
num_frames = len(listdir(v_path))
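                # Keep only clips long enough to provide num_segments frames.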
if num_frames >= self.num_segments:
pair = (v_path, c_index)
self.videos_with_classes.append(pair)
def _get_test_indices(self, num_frames):
num_min = self.num_segments
num_select = num_frames
if num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]
) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = (
np.ones(self.num_segments - num_select, dtype=int)
* id_select[id_select[0] - 1]
)
offsets = np.append(id_select, id_expand)
return offsets
def __getitem__(self, index):
video, label = self.videos_with_classes[index]
frames_temp = sorted(
listdir(video),
key=lambda path: int(path.split(self.separator)[-1].split(".")[0]),
)
frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')]
num_frames = len(frames)
data = []
segment_indices = self._get_test_indices(num_frames)
for index in segment_indices:
frame = frames[index]
frame_path = join(video, frame)
frame_img = Image.open(frame_path)
frame_feat = self.data_transform(frame_img)
data.append(frame_feat)
tensor = torch.stack(data)
return tensor, label
def __len__(self):
return len(self.videos_with_classes) | import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
import random
from os import listdir
from os.path import join, splitext
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image, ImageFilter, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
init(autoreset=True)
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file, num_dataload,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.t7', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.num_dataload = num_dataload
if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list() # read all the video files
def _load_feature(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
feat_path = os.path.join(directory, self.image_tmpl.format(idx))
try:
feat = [torch.load(feat_path)]
            except Exception:
                print(Back.RED + feat_path)
                raise  # re-raise so the failure isn't masked by a NameError on the return below
return feat
elif self.modality == 'Flow':
x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
return [x_feat, y_feat]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
# repeat the list if the length is less than num_dataload (especially for target data)
n_repeat = self.num_dataload//len(self.video_list)
n_left = self.num_dataload%len(self.video_list)
self.video_list = self.video_list*n_repeat + self.video_list[:n_left]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
#np.random.seed(1)
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1]
offsets = np.append(id_select, id_expand)
return offsets + 1
def __getitem__(self, index):
record = self.video_list[index]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
frames = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_feats = self._load_feature(record.path, p)
frames.extend(seg_feats)
if p < record.num_frames:
p += 1
# process_data = self.transform(frames)
process_data = torch.stack(frames)
return process_data, record.label
def __len__(self):
return len(self.video_list)
class VideoDataset(data.Dataset):
def __init__(
self,
folder,
n_frames,
frame_size=224,
separator="_"
):
self.folder = folder
self.num_segments = n_frames
self.frame_size = frame_size
self.data_transform = transforms.Compose(
[
transforms.Resize(self.frame_size),
transforms.CenterCrop(self.frame_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
self.separator = separator
self.classes = [c for c in sorted(listdir(folder))]
self.videos_with_classes = []
for c_index, c in enumerate(self.classes):
c_path = join(self.folder, c)
videos = listdir(c_path)
for v in videos:
v_path = join(c_path, v)
num_frames = len(listdir(v_path))
if num_frames >= self.num_segments:
pair = (v_path, c_index)
self.videos_with_classes.append(pair)
def _get_test_indices(self, num_frames):
num_min = self.num_segments
num_select = num_frames
if num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]
) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = (
np.ones(self.num_segments - num_select, dtype=int)
* id_select[id_select[0] - 1]
)
offsets = np.append(id_select, id_expand)
return offsets
def __getitem__(self, index):
video, label = self.videos_with_classes[index]
frames_temp = sorted(
listdir(video),
key=lambda path: int(path.split(self.separator)[-1].split(".")[0]),
)
frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')]
num_frames = len(frames)
data = []
segment_indices = self._get_test_indices(num_frames)
for index in segment_indices:
frame = frames[index]
frame_path = join(video, frame)
frame_img = Image.open(frame_path)
frame_feat = self.data_transform(frame_img)
data.append(frame_feat)
tensor = torch.stack(data)
return tensor, label
def __len__(self):
return len(self.videos_with_classes) | pt | 0.192342 | 2.125883 | 2 |
edm_web1/middleware/errors.py | zhouli121018/nodejsgm | 0 | 14913 | # -*- coding: utf-8 -*-
from django.http import HttpResponseForbidden
from django.template import loader
from django.utils.translation import ugettext_lazy as _
# Ordinary user
def _requred_forbid(msg):
t = loader.get_template('limit_ip.html')
content = t.render({'message': msg })
return HttpResponseForbidden(content)
_msg = _(u'请求太频繁,请等待30s后重试(Request too often)。')  # "Requests are too frequent; please wait 30s and retry."
limitip_requred_forbid = _requred_forbid(_msg)
| # -*- coding: utf-8 -*-
from django.http import HttpResponseForbidden
from django.template import loader
from django.utils.translation import ugettext_lazy as _
# Ordinary user
def _requred_forbid(msg):
t = loader.get_template('limit_ip.html')
content = t.render({'message': msg })
return HttpResponseForbidden(content)
_msg = _(u'请求太频繁,请等待30s后重试(Request too often)。')  # "Requests are too frequent; please wait 30s and retry."
limitip_requred_forbid = _requred_forbid(_msg)
| zh | 0.46406 | 1.956479 | 2 |
data/windows/dr16/mask.py | dnidever/apogee | 5 | 14914 | <filename>data/windows/dr16/mask.py
from apogee.aspcap import aspcap
from apogee.aspcap import mask
els = aspcap.elems()
for el in els[0]:
    mask.mkmask(el, globalmask='mask_v02_aspcap.txt')
| <filename>data/windows/dr16/mask.py
from apogee.aspcap import aspcap
from apogee.aspcap import mask
els = aspcap.elems()
for el in els[0]:
    mask.mkmask(el, globalmask='mask_v02_aspcap.txt')
| none | 1 | 2.081232 | 2 |
dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py | anonymous-submission000/mobo | 1 | 14915 | <reponame>anonymous-submission000/mobo
"""
Unit tests for Random CP optimiser on Cartesian product domains.
-- <EMAIL>
"""
# pylint: disable=invalid-name
# pylint: disable=abstract-class-little-used
import os
from . import random_multiobjective_optimiser
from ..exd.cp_domain_utils import get_raw_point_from_processed_point, \
load_config_file
from ..exd.experiment_caller import get_multifunction_caller_from_config
from ..exd.worker_manager import SyntheticWorkerManager
# Local imports
from ..test_data.multiobjective_hartmann.multiobjective_hartmann \
import objectives as moo_hartmann
from ..test_data.multiobjective_park.multiobjective_park \
import objectives as moo_park
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.reporters import get_reporter
class CPMultiObjectiveOptimiserBaseTestCase(object):
""" Base test class for optimisers on Cartesian product spaces. """
# pylint: disable=no-member
def setUp(self):
""" Set up. """
self.max_capital = 20
self._child_set_up()
self.worker_manager_1 = SyntheticWorkerManager(1, time_distro='const')
self.worker_manager_3 = SyntheticWorkerManager(3, time_distro='halfnormal')
file_dir = os.path.dirname(os.path.realpath(__file__))
test_data_pardir = os.path.dirname(file_dir)
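    # Each test problem pairs a domain config file with a tuple holding its raw objective functions.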
self.opt_problems = [
(test_data_pardir + '/test_data/multiobjective_hartmann/config.json',
(moo_hartmann,)),
(test_data_pardir + '/test_data/multiobjective_park/config.json', (moo_park,)),
]
def _child_set_up(self):
""" Child set up. """
pass
@classmethod
def _child_instantiate_optimiser(cls, func_caller, worker_manager, options, reporter):
""" Instantiate the optimiser. """
raise NotImplementedError('Implement in a child class.')
@classmethod
def _run_optimiser(cls, raw_funcs, domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs):
""" Run the optimiser from given args. """
raise NotImplementedError('Implement in a child class.')
def test_instantiation(self):
""" Tests instantiation of the optimiser. """
self.report('Test instantiation of multi-objective optimiser.')
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing instantiation of optimiser for %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
optimiser = self._child_instantiate_optimiser(
multi_func_caller, self.worker_manager_3, options=None,
reporter=get_reporter('silent'))
self.report('Instantiated %s object.' % (type(optimiser)))
for attr in dir(optimiser):
if not attr.startswith('_'):
self.report('optimiser.%s = %s' % (attr, str(getattr(optimiser, attr))),
'test_result')
def _test_optimiser_results(self, raw_prob_funcs, pareto_vals, pareto_points,
history, dcf):
""" Tests optimiser results. """
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
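    # Recover the raw-domain representation of each Pareto point for reporting.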
raw_pareto_points = [get_raw_point_from_processed_point(pop, config.domain,
config.domain_orderings.index_ordering,
config.domain_orderings.dim_ordering)
for pop in pareto_points]
self.report('Pareto opt point [-1]: proc=%s, raw=%s.' % (pareto_points[-1],
raw_pareto_points[-1]))
saved_in_history = [key for key, _ in list(history.__dict__.items()) if not
key.startswith('__')]
self.report('Stored in history: %s.' % (saved_in_history), 'test_result')
assert len(history.curr_pareto_vals) == len(history.curr_pareto_points)
for val in pareto_vals:
assert len(val) == multi_func_caller.num_funcs
for pt in pareto_points:
assert len(pt) == config.domain.num_domains
self.report('Pareto optimal points: %s.' % (pareto_points))
self.report('Pareto optimal values: %s.' % (pareto_vals))
def test_optimisation_single(self):
""" Test optimisation with a single worker. """
self.report('')
self.report('Testing %s with one worker.' % (type(self)))
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 1 worker on %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_1.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_1, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
def test_optimisation_asynchronous(self):
""" Testing random optimiser with three asynchronous workers. """
self.report('')
self.report('Testing %s with three asynchronous workers.' % (type(self)))
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 3 asynchronous workers on %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_3.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_3, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
class CPRandomMultiObjectiveOptimiserTestCase(
CPMultiObjectiveOptimiserBaseTestCase, BaseTestClass):
""" Unit tests for random multi-objective optimisation. """
@classmethod
def _child_instantiate_optimiser(cls, multi_func_caller, worker_manager, options,
reporter):
""" Instantiate optimiser. """
return random_multiobjective_optimiser.CPRandomMultiObjectiveOptimiser(
multi_func_caller, worker_manager, options, reporter)
@classmethod
def _run_optimiser(cls, raw_prob_funcs, domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs):
""" Runs multi-objective optimiser. """
rmoo = random_multiobjective_optimiser
return rmoo.cp_random_multiobjective_optimisation_from_raw_args(raw_prob_funcs,
domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs)
if __name__ == '__main__':
execute_tests()
| """
Unit tests for Random CP optimiser on Cartesian product domains.
-- <EMAIL>
"""
# pylint: disable=invalid-name
# pylint: disable=abstract-class-little-used
import os
from . import random_multiobjective_optimiser
from ..exd.cp_domain_utils import get_raw_point_from_processed_point, \
load_config_file
from ..exd.experiment_caller import get_multifunction_caller_from_config
from ..exd.worker_manager import SyntheticWorkerManager
# Local imports
from ..test_data.multiobjective_hartmann.multiobjective_hartmann \
import objectives as moo_hartmann
from ..test_data.multiobjective_park.multiobjective_park \
import objectives as moo_park
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.reporters import get_reporter
class CPMultiObjectiveOptimiserBaseTestCase(object):
""" Base test class for optimisers on Cartesian product spaces. """
# pylint: disable=no-member
def setUp(self):
""" Set up. """
self.max_capital = 20
self._child_set_up()
self.worker_manager_1 = SyntheticWorkerManager(1, time_distro='const')
self.worker_manager_3 = SyntheticWorkerManager(3, time_distro='halfnormal')
file_dir = os.path.dirname(os.path.realpath(__file__))
test_data_pardir = os.path.dirname(file_dir)
self.opt_problems = [
(test_data_pardir + '/test_data/multiobjective_hartmann/config.json',
(moo_hartmann,)),
(test_data_pardir + '/test_data/multiobjective_park/config.json', (moo_park,)),
]
def _child_set_up(self):
""" Child set up. """
pass
@classmethod
def _child_instantiate_optimiser(cls, func_caller, worker_manager, options, reporter):
""" Instantiate the optimiser. """
raise NotImplementedError('Implement in a child class.')
@classmethod
def _run_optimiser(cls, raw_funcs, domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs):
""" Run the optimiser from given args. """
raise NotImplementedError('Implement in a child class.')
def test_instantiation(self):
""" Tests instantiation of the optimiser. """
self.report('Test instantiation of multi-objective optimiser.')
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing instantiation of optimiser for %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
optimiser = self._child_instantiate_optimiser(
multi_func_caller, self.worker_manager_3, options=None,
reporter=get_reporter('silent'))
self.report('Instantiated %s object.' % (type(optimiser)))
for attr in dir(optimiser):
if not attr.startswith('_'):
self.report('optimiser.%s = %s' % (attr, str(getattr(optimiser, attr))),
'test_result')
def _test_optimiser_results(self, raw_prob_funcs, pareto_vals, pareto_points,
history, dcf):
""" Tests optimiser results. """
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
raw_pareto_points = [get_raw_point_from_processed_point(pop, config.domain,
config.domain_orderings.index_ordering,
config.domain_orderings.dim_ordering)
for pop in pareto_points]
self.report('Pareto opt point [-1]: proc=%s, raw=%s.' % (pareto_points[-1],
raw_pareto_points[-1]))
saved_in_history = [key for key, _ in list(history.__dict__.items()) if not
key.startswith('__')]
self.report('Stored in history: %s.' % (saved_in_history), 'test_result')
assert len(history.curr_pareto_vals) == len(history.curr_pareto_points)
for val in pareto_vals:
assert len(val) == multi_func_caller.num_funcs
for pt in pareto_points:
assert len(pt) == config.domain.num_domains
self.report('Pareto optimal points: %s.' % (pareto_points))
self.report('Pareto optimal values: %s.' % (pareto_vals))
def test_optimisation_single(self):
""" Test optimisation with a single worker. """
self.report('')
self.report('Testing %s with one worker.' % (type(self)))
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 1 worker on %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_1.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_1, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
def test_optimisation_asynchronous(self):
""" Testing random optimiser with three asynchronous workers. """
self.report('')
self.report('Testing %s with three asynchronous workers.' % (type(self)))
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 3 asynchronous workers on %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_3.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_3, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
class CPRandomMultiObjectiveOptimiserTestCase(
CPMultiObjectiveOptimiserBaseTestCase, BaseTestClass):
""" Unit tests for random multi-objective optimisation. """
@classmethod
def _child_instantiate_optimiser(cls, multi_func_caller, worker_manager, options,
reporter):
""" Instantiate optimiser. """
return random_multiobjective_optimiser.CPRandomMultiObjectiveOptimiser(
multi_func_caller, worker_manager, options, reporter)
@classmethod
def _run_optimiser(cls, raw_prob_funcs, domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs):
""" Runs multi-objective optimiser. """
rmoo = random_multiobjective_optimiser
return rmoo.cp_random_multiobjective_optimisation_from_raw_args(raw_prob_funcs,
domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs)
if __name__ == '__main__':
execute_tests() | pt | 0.140272 | 2.179605 | 2 |
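The two test cases above ultimately drive cp_random_multiobjective_optimisation_from_raw_args through _run_optimiser. A direct call would look roughly like the sketch below; it assumes the code is installed as the dragonfly package (the tests themselves use package-relative imports) and reuses the test-data config path as a placeholder, so treat the import paths as assumptions rather than a documented entry point.

# Sketch only -- import paths are assumed from the package layout implied by the
# relative imports in the test module; the config path is a placeholder.
from dragonfly.exd.worker_manager import SyntheticWorkerManager
from dragonfly.opt import random_multiobjective_optimiser as rmoo
from dragonfly.test_data.multiobjective_hartmann.multiobjective_hartmann import \
    objectives as moo_hartmann

worker_manager = SyntheticWorkerManager(1, time_distro='const')
pareto_vals, pareto_points, history = \
    rmoo.cp_random_multiobjective_optimisation_from_raw_args(
        moo_hartmann,                                               # raw objective functions
        'dragonfly/test_data/multiobjective_hartmann/config.json',  # domain description (placeholder path)
        worker_manager, 20, 'asy')                                  # max_capital, mode
print('Number of Pareto-optimal points found:', len(pareto_points))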
corehq/apps/app_manager/app_schemas/casedb_schema.py | dimagilg/commcare-hq | 1 | 14916 | <filename>corehq/apps/app_manager/app_schemas/casedb_schema.py
from corehq import toggles
from corehq.apps.app_manager.app_schemas.case_properties import (
ParentCasePropertyBuilder,
get_usercase_properties,
)
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.app_manager.util import is_usercase_in_use
from corehq.apps.data_dictionary.util import get_case_property_description_dict
def get_casedb_schema(form):
"""Get case database schema definition for vellum to display as an external data source.
This lists all case types and their properties for the given app.
"""
app = form.get_app()
base_case_type = form.get_module().case_type if form.requires_case() else None
builder = ParentCasePropertyBuilder.for_app(app, ['case_name'], include_parent_properties=False)
related = builder.get_parent_type_map(None)
map = builder.get_properties_by_case_type()
descriptions_dict = get_case_property_description_dict(app.domain)
if base_case_type:
# Generate hierarchy of case types, represented as a list of lists of strings:
# [[base_case_type], [parent_type1, parent_type2...], [grandparent_type1, grandparent_type2...]]
# Vellum case management only supports three levels
generation_names = ['case', 'parent', 'grandparent']
generations = [[] for g in generation_names]
def _add_ancestors(ctype, generation):
if generation < len(generation_names):
generations[generation].append(ctype)
for parent in related.get(ctype, {}).get('parent', []):
_add_ancestors(parent, generation + 1)
_add_ancestors(base_case_type, 0)
# Remove any duplicate types or empty generations
generations = [set(g) for g in generations if len(g)]
else:
generations = []
subsets = [{
"id": generation_names[i],
"name": "{} ({})".format(generation_names[i], " or ".join(ctypes)) if i > 0 else base_case_type,
"structure": {
p: {"description": descriptions_dict.get(t, {}).get(p, '')}
for t in ctypes for p in map[t]},
"related": {"parent": {
"hashtag": "#case/" + generation_names[i + 1],
"subset": generation_names[i + 1],
"key": "@case_id",
}} if i < len(generations) - 1 else None,
} for i, ctypes in enumerate(generations)]
if is_usercase_in_use(app.domain):
subsets.append({
"id": USERCASE_TYPE,
"name": "user",
"key": "@case_type",
"structure": {p: {} for p in get_usercase_properties(app)[USERCASE_TYPE]},
})
return {
"id": "casedb",
"uri": "jr://instance/casedb",
"name": "case",
"path": "/casedb/case",
"structure": {},
"subsets": subsets,
}
| <filename>corehq/apps/app_manager/app_schemas/casedb_schema.py
from corehq import toggles
from corehq.apps.app_manager.app_schemas.case_properties import (
ParentCasePropertyBuilder,
get_usercase_properties,
)
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.app_manager.util import is_usercase_in_use
from corehq.apps.data_dictionary.util import get_case_property_description_dict
def get_casedb_schema(form):
"""Get case database schema definition for vellum to display as an external data source.
This lists all case types and their properties for the given app.
"""
app = form.get_app()
base_case_type = form.get_module().case_type if form.requires_case() else None
builder = ParentCasePropertyBuilder.for_app(app, ['case_name'], include_parent_properties=False)
related = builder.get_parent_type_map(None)
map = builder.get_properties_by_case_type()
descriptions_dict = get_case_property_description_dict(app.domain)
if base_case_type:
# Generate hierarchy of case types, represented as a list of lists of strings:
# [[base_case_type], [parent_type1, parent_type2...], [grandparent_type1, grandparent_type2...]]
# Vellum case management only supports three levels
generation_names = ['case', 'parent', 'grandparent']
generations = [[] for g in generation_names]
def _add_ancestors(ctype, generation):
if generation < len(generation_names):
generations[generation].append(ctype)
for parent in related.get(ctype, {}).get('parent', []):
_add_ancestors(parent, generation + 1)
_add_ancestors(base_case_type, 0)
# Remove any duplicate types or empty generations
generations = [set(g) for g in generations if len(g)]
else:
generations = []
subsets = [{
"id": generation_names[i],
"name": "{} ({})".format(generation_names[i], " or ".join(ctypes)) if i > 0 else base_case_type,
"structure": {
p: {"description": descriptions_dict.get(t, {}).get(p, '')}
for t in ctypes for p in map[t]},
"related": {"parent": {
"hashtag": "#case/" + generation_names[i + 1],
"subset": generation_names[i + 1],
"key": "@case_id",
}} if i < len(generations) - 1 else None,
} for i, ctypes in enumerate(generations)]
if is_usercase_in_use(app.domain):
subsets.append({
"id": USERCASE_TYPE,
"name": "user",
"key": "@case_type",
"structure": {p: {} for p in get_usercase_properties(app)[USERCASE_TYPE]},
})
return {
"id": "casedb",
"uri": "jr://instance/casedb",
"name": "case",
"path": "/casedb/case",
"structure": {},
"subsets": subsets,
}
| pt | 0.154037 | 1.787946 | 2 |
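The generation-building walk inside get_casedb_schema is easiest to see in isolation. The sketch below repeats just the _add_ancestors logic on an invented parent map; the case-type names are made up and do not come from a real CommCare app.

# Toy version of the case-type hierarchy walk in get_casedb_schema above;
# 'visit' -> 'patient' -> 'household' is an invented three-level hierarchy.
generation_names = ['case', 'parent', 'grandparent']
generations = [[] for _ in generation_names]
related = {
    'visit': {'parent': ['patient']},
    'patient': {'parent': ['household']},
}

def _add_ancestors(ctype, generation):
    if generation < len(generation_names):
        generations[generation].append(ctype)
        for parent in related.get(ctype, {}).get('parent', []):
            _add_ancestors(parent, generation + 1)

_add_ancestors('visit', 0)
generations = [set(g) for g in generations if len(g)]
print(generations)  # [{'visit'}, {'patient'}, {'household'}]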
python/testData/keywordCompletion/noMatchInCondition.py | 06needhamt/intellij-community | 0 | 14917 | <gh_stars>0
matches = True
if mat<caret> | matches = True
if mat<caret> | none | 1 | 1.189679 | 1 |
bookshelf/bookshelf/model_aerospike.py | fakeskimo/as2bt | 0 | 14918 | import math
import aerospike
from aerospike import predicates as p
from aerospike import exception as ex
from flask import current_app
aerospike_host = current_app.config['AEROSPIKE_HOST']
aerospike_port = current_app.config['AEROSPIKE_PORT']
namespace = current_app.config['AEROSPIKE_NAMESPACE']
set_name = current_app.config['AEROSPIKE_SET_NAME']
n_replicas = 1
config = {
'hosts': [
(aerospike_host, aerospike_port)
],
'policies': {
'timeout': 1000 # milliseconds
}
}
client = aerospike.client(config).connect()
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
def init_app(app):
pass
# if there is no more record, return -1 as next
def list(limit=10, cursor=None):
if cursor:
start = int(cursor)
else:
start = 0
end = start + limit
records = []
for i in range(start, end):
rec = read(str(i))
if rec:
records.append(rec)
if end >= __get_objs_cnt__():
next_key = -1
else:
next_key = len(records)
return records, next_key
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
# if there is no more record, return -1 as next
def list_by_user(user_id, limit=10, cursor=None):
    if cursor:
        start = cursor
    else:
        start = 0
    query = client.query(namespace, set_name)
    query.where(p.equals('createdById', user_id))
    results = query.results()
    cnt = 0
    records = []
for i, result in enumerate(results):
if cnt >= limit:
break
if i < start:
continue
else:
rec = result[2]
records.append(rec)
cnt += 1
    if cnt == limit:
        # next page starts where this one stopped
        next_key = start + cnt
    else:
        next_key = -1
return records, next_key
def __get_objs_cnt__():
info = client.info("sets" + "/" + namespace + "/" + set_name)
for value in info.values():
info_str = value[1]
try:
start_idx = info_str.index("=") + 1
end_idx = info_str.index(":")
n_str = info_str[start_idx:end_idx]
return math.ceil(int(n_str) / n_replicas)
except ValueError:
return 0
def create(data, id=None):
if id:
key = str(id)
else:
key = str(__get_objs_cnt__())
data['id'] = key
client.put((namespace, set_name, key), data)
return read(key)
def read(id):
try:
(key, metadata) = client.exists((namespace, set_name, id))
(key, metadata, record) = client.get((namespace, set_name, id))
return record
except ex.RecordNotFound:
print("Record not found:", id)
return None
except ex.AerospikeError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
return None
def update(data, id):
if client.exists((namespace, set_name, id)):
delete(id)
return create(data, id)
def delete(id):
client.remove((namespace, set_name, id))
| import math
import aerospike
from aerospike import predicates as p
from aerospike import exception as ex
from flask import current_app
aerospike_host = current_app.config['AEROSPIKE_HOST']
aerospike_port = current_app.config['AEROSPIKE_PORT']
namespace = current_app.config['AEROSPIKE_NAMESPACE']
set_name = current_app.config['AEROSPIKE_SET_NAME']
n_replicas = 1
config = {
'hosts': [
(aerospike_host, aerospike_port)
],
'policies': {
'timeout': 1000 # milliseconds
}
}
client = aerospike.client(config).connect()
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
def init_app(app):
pass
# if there is no more record, return -1 as next
def list(limit=10, cursor=None):
if cursor:
start = int(cursor)
else:
start = 0
end = start + limit
records = []
for i in range(start, end):
rec = read(str(i))
if rec:
records.append(rec)
if end >= __get_objs_cnt__():
next_key = -1
else:
next_key = len(records)
return records, next_key
# cannot limit the number of rows, only percent
# there is no start offset option
# https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2
# https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532
# https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query
# if there is no more record, return -1 as next
def list_by_user(user_id, limit=10, cursor=None):
    if cursor:
        start = cursor
    else:
        start = 0
    query = client.query(namespace, set_name)
    query.where(p.equals('createdById', user_id))
    results = query.results()
    cnt = 0
    records = []
for i, result in enumerate(results):
if cnt >= limit:
break
if i < start:
continue
else:
rec = result[2]
records.append(rec)
cnt += 1
    if cnt == limit:
        # next page starts where this one stopped
        next_key = start + cnt
    else:
        next_key = -1
return records, next_key
def __get_objs_cnt__():
info = client.info("sets" + "/" + namespace + "/" + set_name)
for value in info.values():
info_str = value[1]
try:
start_idx = info_str.index("=") + 1
end_idx = info_str.index(":")
n_str = info_str[start_idx:end_idx]
return math.ceil(int(n_str) / n_replicas)
except ValueError:
return 0
def create(data, id=None):
if id:
key = str(id)
else:
key = str(__get_objs_cnt__())
data['id'] = key
client.put((namespace, set_name, key), data)
return read(key)
def read(id):
try:
(key, metadata) = client.exists((namespace, set_name, id))
(key, metadata, record) = client.get((namespace, set_name, id))
return record
except ex.RecordNotFound:
print("Record not found:", id)
return None
except ex.AerospikeError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
return None
def update(data, id):
if client.exists((namespace, set_name, id)):
delete(id)
return create(data, id)
def delete(id):
client.remove((namespace, set_name, id))
| pt | 0.129269 | 2.412918 | 2 |
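Since list() hands back -1 once the key range is exhausted, a caller is expected to keep feeding the returned value back in as the cursor. A minimal paging loop over the module above might look like this; it assumes the file is importable as model_aerospike and that the Aerospike server named in its Flask configuration is actually reachable.

# Hypothetical caller of the module above; requires a running Aerospike server
# and the Flask app context that the module reads its configuration from.
import model_aerospike

cursor = None
while True:
    records, cursor = model_aerospike.list(limit=10, cursor=cursor)
    for rec in records:
        print(rec['id'])
    if cursor == -1:   # no more records
        break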
examples/EC2.py | nimRobotics/fnirslib | 0 | 14919 | """
author: @nimrobotics
description: calculates the effective connectivity between regions and plots them
"""
import numpy as np
import scipy.io
import glob
import sys
sys.path.append('../utils')
from plots import plotData
dir = "./process3/" #directory of the data
outdir = 'process3/' #directory to save the plots
regions = 3 #number of regions
files = glob.glob(dir+'/*_.mat') # get all the files in the directory
for file in files:
print('Processing condition: ', file)
data = scipy.io.loadmat(file) #load data from the directory
fval = data['fval'] #fval
pval = data['pval'] #pval
sig = data['sig'] #sig
cd = data['cd'] #cd
print('fval shape: ',fval.shape)
print('\nfval \n',fval)
print('pval shape: ',pval.shape)
print('sig shape: ',sig.shape)
print('\nsig \n',sig)
print(cd.shape)
# elementwise multiplication of fval and sig(0/1)
fval_sig = np.multiply(fval, sig)
print(fval_sig.shape)
print('\nfval_sig \n',fval_sig)
# fval_sig = np.mean(fval_sig, axis=2) # average over files
# print(fval_sig.shape)
# fval = np.mean(fval, axis=2)
labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions
condition = file.split('/')[-1].split('.')[0] #get the condition name
plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png')
plot.matrixPlot()
plot.circularPlot()
| """
author: @nimrobotics
description: calculates the effective connectivity between regions and plots them
"""
import numpy as np
import scipy.io
import glob
import sys
sys.path.append('../utils')
from plots import plotData
dir = "./process3/" #directory of the data
outdir = 'process3/' #directory to save the plots
regions = 3 #number of regions
files = glob.glob(dir+'/*_.mat') # get all the files in the directory
for file in files:
print('Processing condition: ', file)
data = scipy.io.loadmat(file) #load data from the directory
fval = data['fval'] #fval
pval = data['pval'] #pval
sig = data['sig'] #sig
cd = data['cd'] #cd
print('fval shape: ',fval.shape)
print('\nfval \n',fval)
print('pval shape: ',pval.shape)
print('sig shape: ',sig.shape)
print('\nsig \n',sig)
print(cd.shape)
# elementwise multiplication of fval and sig(0/1)
fval_sig = np.multiply(fval, sig)
print(fval_sig.shape)
print('\nfval_sig \n',fval_sig)
# fval_sig = np.mean(fval_sig, axis=2) # average over files
# print(fval_sig.shape)
# fval = np.mean(fval, axis=2)
labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions
condition = file.split('/')[-1].split('.')[0] #get the condition name
plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png')
plot.matrixPlot()
plot.circularPlot()
| pt | 0.142855 | 2.544826 | 3 |
feed/models.py | Lisgevan/DJANGO-101-PROJECT-COPY | 0 | 14920 | from django.db import models
from sorl.thumbnail import ImageField
# Create your models here.
class Post(models.Model):
text = models.CharField(max_length=140, blank=False, null=False)
image = ImageField()
def __str__(self):
return self.text | from django.db import models
from sorl.thumbnail import ImageField
# Create your models here.
class Post(models.Model):
text = models.CharField(max_length=140, blank=False, null=False)
image = ImageField()
def __str__(self):
return self.text | it | 0.231347 | 2.320508 | 2 |
gcp/docker/infrastructure/rapids_lib.py | ethem-kinginthenorth/cloud-ml-examples | 1 | 14921 | <filename>gcp/docker/infrastructure/rapids_lib.py
# os
import sys, os, time, logging
# CPU DS stack
import pandas as pd
import numpy as np
import sklearn
# GPU DS stack [ rapids ]
import gcsfs
# scaling library
import dask
# data ingestion [ CPU ]
from pyarrow import orc as pyarrow_orc
# ML models
from sklearn import ensemble
import xgboost
# data set splits
from sklearn.model_selection import train_test_split as sklearn_train_test_split
# device query
##hack
try:
import cudf, cuml
from cuml.preprocessing.model_selection import train_test_split as cuml_train_test_split
import pynvml
import cupy
except:
print("Caught import failures -- probably missing GPU")
# memory query
import psutil
# i/o
import logging, json, pprint
default_sagemaker_paths = {
'base': '/opt/ml',
'code': '/opt/ml/code',
'data': '/opt/ml/input',
'train_data': '/opt/ml/input/data/training',
'hyperparams': '/opt/ml/input/config/hyperparameters.json',
'model': '/opt/ml/model',
'output': '/opt/ml/output',
}
class RapidsCloudML(object):
def __init__(self, cloud_type='AWS',
model_type='XGBoost',
data_type='ORC',
compute_type='single-GPU',
n_workers=-1,
verbose_estimator=False,
CSP_paths=default_sagemaker_paths):
self.CSP_paths = CSP_paths
self.cloud_type = cloud_type
self.model_type = model_type
self.data_type = data_type
self.compute_type = compute_type
self.verbose_estimator = verbose_estimator
self.n_workers = self.parse_compute(n_workers)
self.query_memory()
def _read_orc(self, filename):
if ('CPU' in self.compute_type):
if (filename.startswith('gs://')):
fs = gcsfs.GCSFileSystem()
with fs.open(filename, mode='rb') as file:
dataset = pyarrow_orc.ORCFile(file).read().to_pandas()
else:
with open(filename, mode='rb') as file:
dataset = pyarrow_orc.ORCFile(file).read().to_pandas()
elif ('GPU' in self.compute_type):
dataset = cudf.read_orc(filename)
return dataset
def _read_csv(self, filename, col_labels):
if ('CPU' in self.compute_type):
dataset = pd.read_csv(filename, names=col_labels)
elif ('GPU' in self.compute_type):
dataset = cudf.read_csv(filename, names=col_labels)
return dataset
def load_data(self, filename='dataset.orc', col_labels=None, y_label='ArrDelayBinary'):
target_filename = self.CSP_paths['train_data'] + '/' + filename
self.log_to_file(f'\n> loading dataset from {target_filename}...\n')
with PerfTimer() as ingestion_timer:
if 'ORC' in self.data_type:
dataset = self._read_orc(target_filename)
elif 'CSV' in self.data_type:
                dataset = self._read_csv(target_filename, col_labels)
self.log_to_file(f'ingestion completed in {ingestion_timer.duration}')
self.log_to_file(f'dataset descriptors: {dataset.shape}\n {dataset.dtypes}\n {dataset.columns}\n')
return dataset, col_labels, y_label, ingestion_timer.duration
def split_data(self, dataset, y_label, train_size=.8, random_state=0, shuffle=True):
"""
split dataset into train and test subset
NOTE: assumes the first column of the dataset is the classification labels
! in the case of sklearn, we manually filter this column in the split call
! in the case of cuml, the filtering happens internally
"""
self.log_to_file('\tsplitting train and test data')
start_time = time.perf_counter()
with PerfTimer() as split_timer:
if 'CPU' in self.compute_type:
X_train, X_test, y_train, y_test = sklearn_train_test_split(dataset.loc[:, dataset.columns != y_label],
dataset[y_label], train_size=train_size,
shuffle=shuffle, random_state=random_state)
elif 'GPU' in self.compute_type:
X_train, X_test, y_train, y_test = cuml_train_test_split(X=dataset, y=y_label, train_size=train_size,
shuffle=shuffle, random_state=random_state)
self.log_to_file(f'\t> split completed in {split_timer.duration}')
return X_train, X_test, y_train, y_test, split_timer.duration
def train_model(self, X_train, y_train, model_params):
self.log_to_file(f'\ttraining {self.model_type} estimator w/ hyper-params')
pprint.pprint(model_params, indent=10)
print(f"model type: {self.model_type}\n compute type: {self.compute_type}\n dataset dtype: {type(X_train)}")
try:
if self.model_type == 'XGBoost':
trained_model, training_time = self.fit_xgboost(X_train, y_train, model_params)
elif self.model_type == 'RandomForest':
trained_model, training_time = self.fit_random_forest(X_train, y_train, model_params)
except Exception as error:
self.log_to_file('!error during model training: ' + str(error))
raise
self.log_to_file(f'\t> finished training in {training_time:.4f} s')
return trained_model, training_time
# train dlmc.xgboost model
def fit_xgboost(self, X_train, y_train, model_params):
with PerfTimer() as train_timer:
train_DMatrix = xgboost.DMatrix(data=X_train, label=y_train)
trained_model = xgboost.train(dtrain=train_DMatrix,
params=model_params,
num_boost_round=model_params['num_boost_round'],
verbose_eval=self.verbose_estimator)
return trained_model, train_timer.duration
# fit_xgboost_multi_GPU ()
# fit_random_forest_multi_GPU ()
# train cuml.random-forest model
def fit_random_forest(self, X_train, y_train, model_params):
if 'CPU' in self.compute_type:
rf_model = sklearn.ensemble.RandomForestClassifier(n_estimators=model_params['n_estimators'],
max_depth=model_params['max_depth'],
max_features=model_params['max_features'],
n_jobs=int(self.n_workers),
verbose=self.verbose_estimator)
elif 'GPU' in self.compute_type:
rf_model = cuml.ensemble.RandomForestClassifier(n_estimators=model_params['n_estimators'],
max_depth=model_params['max_depth'],
n_bins=model_params['n_bins'],
max_features=model_params['max_features'],
verbose=self.verbose_estimator)
with PerfTimer() as train_timer:
trained_model = rf_model.fit(X_train, y_train)
return trained_model, train_timer.duration
def evaluate_test_perf(self, trained_model, X_test, y_test):
self.log_to_file(f'\tinferencing on test set')
with PerfTimer() as inference_timer:
try:
if self.model_type == 'XGBoost':
test_DMatrix = xgboost.DMatrix(data=X_test, label=y_test)
test_accuracy = 1 - float(trained_model.eval(test_DMatrix).split(':')[1])
elif self.model_type == 'RandomForest':
# y_test = cudf.DataFrame({'label': y_test.astype('int32') })
test_accuracy = trained_model.score(X_test, y_test.astype('int32'))
except Exception as error:
self.log_to_file('!error during inference: ' + str(error))
raise
self.log_to_file(f'\t> finished inference in {inference_timer.duration:.4f} s')
return test_accuracy, inference_timer.duration
# TODO: FIL inference [ ? ]
# evaluate_perf_FIL(self, trained_model, X_test, y_test ):
# TODO: global_best_model.save()
def save_best_model(self, global_best_model=None):
pass
# ------------------------------------------------------
# end of data science logic
# ------------------------------------------------------
def parse_compute(self, n_workers=None):
if 'CPU' in self.compute_type or 'GPU' in self.compute_type:
available_devices = self.query_compute()
if n_workers == -1:
n_workers = available_devices
assert (n_workers <= available_devices)
self.log_to_file(f'compute type: {self.compute_type}, n_workers: {n_workers}')
else:
raise Exception('unsupported compute type')
return n_workers
def query_compute(self):
available_devices = None
if 'CPU' in self.compute_type:
available_devices = os.cpu_count()
self.log_to_file(f'detected {available_devices} CPUs')
elif 'GPU' in self.compute_type:
available_devices = cupy.cuda.runtime.getDeviceCount()
self.log_to_file(f'detected {available_devices} GPUs')
return available_devices
# TODO: enumerate all visible GPUs [ ? ]
def query_memory(self):
def print_device_memory(memory, device_ID=-1):
memory_free_GB = np.array(memory.free) / np.array(10e8)
memory_used_GB = np.array(memory.used) / np.array(10e8)
memory_total_GB = np.array(memory.total) / np.array(10e8)
if device_ID != -1:
self.log_to_file(f'device ID = {device_ID}')
self.log_to_file(f'memory free, used, total: {memory_free_GB}, {memory_used_GB}, {memory_total_GB}')
if 'CPU' in self.compute_type:
print_device_memory(psutil.virtual_memory())
elif 'GPU' in self.compute_type:
pynvml.nvmlInit()
for iGPU in range(self.n_workers):
handle = pynvml.nvmlDeviceGetHandleByIndex(iGPU)
print_device_memory(pynvml.nvmlDeviceGetMemoryInfo(handle))
def set_up_logging(self):
logging_path = self.CSP_paths['output'] + '/log.txt'
logging.basicConfig(filename=logging_path,
level=logging.INFO)
def log_to_file(self, text):
logging.info(text)
print(text)
def environment_check(self):
self.check_dirs()
if self.cloud_type == 'AWS':
try:
self.list_files('/opt/ml')
self.log_to_file(os.environ['SM_NUM_GPUS'])
self.log_to_file(os.environ['SM_TRAINING_ENV'])
self.log_to_file(os.environ['SM_CHANNEL_TRAIN'])
self.log_to_file(os.environ['SM_HPS'])
except:
pass
else:
pass
def check_dirs(self):
self.log_to_file('\n> checking for sagemaker paths...\n')
directories_to_check = self.CSP_paths
for iDir, val in directories_to_check.items():
self.log_to_file(f'{val}, exists : {os.path.exists(val)}')
self.log_to_file(f'working directory = {os.getcwd()}')
def list_files(self, startpath):
print(f'\n> listing contents of {startpath}\n')
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
# perf_counter = highest available timer resolution
class PerfTimer:
def __init__(self):
self.start = None
self.duration = None
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.duration = time.perf_counter() - self.start
'''
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier.fit
n_estimators=100,
criterion='gini',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None
'''
| <filename>gcp/docker/infrastructure/rapids_lib.py
# os
import sys, os, time, logging
# CPU DS stack
import pandas as pd
import numpy as np
import sklearn
# GPU DS stack [ rapids ]
import gcsfs
# scaling library
import dask
# data ingestion [ CPU ]
from pyarrow import orc as pyarrow_orc
# ML models
from sklearn import ensemble
import xgboost
# data set splits
from sklearn.model_selection import train_test_split as sklearn_train_test_split
# device query
##hack
try:
import cudf, cuml
from cuml.preprocessing.model_selection import train_test_split as cuml_train_test_split
import pynvml
import cupy
except:
print("Caught import failures -- probably missing GPU")
# memory query
import psutil
# i/o
import logging, json, pprint
default_sagemaker_paths = {
'base': '/opt/ml',
'code': '/opt/ml/code',
'data': '/opt/ml/input',
'train_data': '/opt/ml/input/data/training',
'hyperparams': '/opt/ml/input/config/hyperparameters.json',
'model': '/opt/ml/model',
'output': '/opt/ml/output',
}
class RapidsCloudML(object):
def __init__(self, cloud_type='AWS',
model_type='XGBoost',
data_type='ORC',
compute_type='single-GPU',
n_workers=-1,
verbose_estimator=False,
CSP_paths=default_sagemaker_paths):
self.CSP_paths = CSP_paths
self.cloud_type = cloud_type
self.model_type = model_type
self.data_type = data_type
self.compute_type = compute_type
self.verbose_estimator = verbose_estimator
self.n_workers = self.parse_compute(n_workers)
self.query_memory()
def _read_orc(self, filename):
if ('CPU' in self.compute_type):
if (filename.startswith('gs://')):
fs = gcsfs.GCSFileSystem()
with fs.open(filename, mode='rb') as file:
dataset = pyarrow_orc.ORCFile(file).read().to_pandas()
else:
with open(filename, mode='rb') as file:
dataset = pyarrow_orc.ORCFile(file).read().to_pandas()
elif ('GPU' in self.compute_type):
dataset = cudf.read_orc(filename)
return dataset
def _read_csv(self, filename, col_labels):
if ('CPU' in self.compute_type):
dataset = pd.read_csv(filename, names=col_labels)
elif ('GPU' in self.compute_type):
dataset = cudf.read_csv(filename, names=col_labels)
return dataset
def load_data(self, filename='dataset.orc', col_labels=None, y_label='ArrDelayBinary'):
target_filename = self.CSP_paths['train_data'] + '/' + filename
self.log_to_file(f'\n> loading dataset from {target_filename}...\n')
with PerfTimer() as ingestion_timer:
if 'ORC' in self.data_type:
dataset = self._read_orc(target_filename)
elif 'CSV' in self.data_type:
                dataset = self._read_csv(target_filename, col_labels)
self.log_to_file(f'ingestion completed in {ingestion_timer.duration}')
self.log_to_file(f'dataset descriptors: {dataset.shape}\n {dataset.dtypes}\n {dataset.columns}\n')
return dataset, col_labels, y_label, ingestion_timer.duration
def split_data(self, dataset, y_label, train_size=.8, random_state=0, shuffle=True):
"""
split dataset into train and test subset
NOTE: assumes the first column of the dataset is the classification labels
! in the case of sklearn, we manually filter this column in the split call
! in the case of cuml, the filtering happens internally
"""
self.log_to_file('\tsplitting train and test data')
start_time = time.perf_counter()
with PerfTimer() as split_timer:
if 'CPU' in self.compute_type:
X_train, X_test, y_train, y_test = sklearn_train_test_split(dataset.loc[:, dataset.columns != y_label],
dataset[y_label], train_size=train_size,
shuffle=shuffle, random_state=random_state)
elif 'GPU' in self.compute_type:
X_train, X_test, y_train, y_test = cuml_train_test_split(X=dataset, y=y_label, train_size=train_size,
shuffle=shuffle, random_state=random_state)
self.log_to_file(f'\t> split completed in {split_timer.duration}')
return X_train, X_test, y_train, y_test, split_timer.duration
def train_model(self, X_train, y_train, model_params):
self.log_to_file(f'\ttraining {self.model_type} estimator w/ hyper-params')
pprint.pprint(model_params, indent=10)
print(f"model type: {self.model_type}\n compute type: {self.compute_type}\n dataset dtype: {type(X_train)}")
try:
if self.model_type == 'XGBoost':
trained_model, training_time = self.fit_xgboost(X_train, y_train, model_params)
elif self.model_type == 'RandomForest':
trained_model, training_time = self.fit_random_forest(X_train, y_train, model_params)
except Exception as error:
self.log_to_file('!error during model training: ' + str(error))
raise
self.log_to_file(f'\t> finished training in {training_time:.4f} s')
return trained_model, training_time
# train dlmc.xgboost model
def fit_xgboost(self, X_train, y_train, model_params):
with PerfTimer() as train_timer:
train_DMatrix = xgboost.DMatrix(data=X_train, label=y_train)
trained_model = xgboost.train(dtrain=train_DMatrix,
params=model_params,
num_boost_round=model_params['num_boost_round'],
verbose_eval=self.verbose_estimator)
return trained_model, train_timer.duration
# fit_xgboost_multi_GPU ()
# fit_random_forest_multi_GPU ()
# train cuml.random-forest model
def fit_random_forest(self, X_train, y_train, model_params):
if 'CPU' in self.compute_type:
rf_model = sklearn.ensemble.RandomForestClassifier(n_estimators=model_params['n_estimators'],
max_depth=model_params['max_depth'],
max_features=model_params['max_features'],
n_jobs=int(self.n_workers),
verbose=self.verbose_estimator)
elif 'GPU' in self.compute_type:
rf_model = cuml.ensemble.RandomForestClassifier(n_estimators=model_params['n_estimators'],
max_depth=model_params['max_depth'],
n_bins=model_params['n_bins'],
max_features=model_params['max_features'],
verbose=self.verbose_estimator)
with PerfTimer() as train_timer:
trained_model = rf_model.fit(X_train, y_train)
return trained_model, train_timer.duration
def evaluate_test_perf(self, trained_model, X_test, y_test):
self.log_to_file(f'\tinferencing on test set')
with PerfTimer() as inference_timer:
try:
if self.model_type == 'XGBoost':
test_DMatrix = xgboost.DMatrix(data=X_test, label=y_test)
test_accuracy = 1 - float(trained_model.eval(test_DMatrix).split(':')[1])
elif self.model_type == 'RandomForest':
# y_test = cudf.DataFrame({'label': y_test.astype('int32') })
test_accuracy = trained_model.score(X_test, y_test.astype('int32'))
except Exception as error:
self.log_to_file('!error during inference: ' + str(error))
raise
self.log_to_file(f'\t> finished inference in {inference_timer.duration:.4f} s')
return test_accuracy, inference_timer.duration
# TODO: FIL inference [ ? ]
# evaluate_perf_FIL(self, trained_model, X_test, y_test ):
# TODO: global_best_model.save()
def save_best_model(self, global_best_model=None):
pass
# ------------------------------------------------------
# end of data science logic
# ------------------------------------------------------
def parse_compute(self, n_workers=None):
if 'CPU' in self.compute_type or 'GPU' in self.compute_type:
available_devices = self.query_compute()
if n_workers == -1:
n_workers = available_devices
assert (n_workers <= available_devices)
self.log_to_file(f'compute type: {self.compute_type}, n_workers: {n_workers}')
else:
raise Exception('unsupported compute type')
return n_workers
def query_compute(self):
available_devices = None
if 'CPU' in self.compute_type:
available_devices = os.cpu_count()
self.log_to_file(f'detected {available_devices} CPUs')
elif 'GPU' in self.compute_type:
available_devices = cupy.cuda.runtime.getDeviceCount()
self.log_to_file(f'detected {available_devices} GPUs')
return available_devices
# TODO: enumerate all visible GPUs [ ? ]
def query_memory(self):
def print_device_memory(memory, device_ID=-1):
memory_free_GB = np.array(memory.free) / np.array(10e8)
memory_used_GB = np.array(memory.used) / np.array(10e8)
memory_total_GB = np.array(memory.total) / np.array(10e8)
if device_ID != -1:
self.log_to_file(f'device ID = {device_ID}')
self.log_to_file(f'memory free, used, total: {memory_free_GB}, {memory_used_GB}, {memory_total_GB}')
if 'CPU' in self.compute_type:
print_device_memory(psutil.virtual_memory())
elif 'GPU' in self.compute_type:
pynvml.nvmlInit()
for iGPU in range(self.n_workers):
handle = pynvml.nvmlDeviceGetHandleByIndex(iGPU)
print_device_memory(pynvml.nvmlDeviceGetMemoryInfo(handle))
def set_up_logging(self):
logging_path = self.CSP_paths['output'] + '/log.txt'
logging.basicConfig(filename=logging_path,
level=logging.INFO)
def log_to_file(self, text):
logging.info(text)
print(text)
def environment_check(self):
self.check_dirs()
if self.cloud_type == 'AWS':
try:
self.list_files('/opt/ml')
self.log_to_file(os.environ['SM_NUM_GPUS'])
self.log_to_file(os.environ['SM_TRAINING_ENV'])
self.log_to_file(os.environ['SM_CHANNEL_TRAIN'])
self.log_to_file(os.environ['SM_HPS'])
except:
pass
else:
pass
def check_dirs(self):
self.log_to_file('\n> checking for sagemaker paths...\n')
directories_to_check = self.CSP_paths
for iDir, val in directories_to_check.items():
self.log_to_file(f'{val}, exists : {os.path.exists(val)}')
self.log_to_file(f'working directory = {os.getcwd()}')
def list_files(self, startpath):
print(f'\n> listing contents of {startpath}\n')
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
# perf_counter = highest available timer resolution
class PerfTimer:
def __init__(self):
self.start = None
self.duration = None
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.duration = time.perf_counter() - self.start
'''
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier.fit
n_estimators=100,
criterion='gini',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='auto',
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None
'''
| pt | 0.19506 | 2.015011 | 2 |
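Every stage in the class above is timed with the PerfTimer context manager defined at the bottom of the module. Its pattern is independent of the RAPIDS stack and can be exercised on its own:

import time

class PerfTimer:
    """Same pattern as the PerfTimer above: record how long a with-block took."""
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.duration = time.perf_counter() - self.start

with PerfTimer() as timer:
    sum(i * i for i in range(1_000_000))   # stand-in workload
print(f'finished in {timer.duration:.4f} s')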
docs/making_widgets_from_scratch/line_clock.py | Rahuum/glooey | 86 | 14922 | #!/usr/bin/env python3
import pyglet
import glooey
import autoprop
import datetime
from pyglet.gl import *
from vecrec import Vector, Rect
@autoprop
class LineClock(glooey.Widget):
custom_radius = 50
custom_color = 'green'
custom_hour_hand_width = 3
custom_minute_hand_width = 2
custom_second_hand_width = 1
custom_face_border_width = 3
def __init__(self):
super().__init__()
# User-controlled attributes:
self._radius = self.custom_radius
self._color = self.custom_color
# Internal attributes:
self._face = None
self._hands = {
'hour': glooey.drawing.Rectangle(),
'min': glooey.drawing.Rectangle(),
'sec': glooey.drawing.Rectangle(),
}
def get_radius(self):
return self._radius
def set_radius(self, radius):
self._radius = radius
self._repack()
def get_color(self):
return self._color
def set_color(self, color):
self._color = color
self._draw()
def on_update(self, dt):
self._draw()
def do_attach(self):
# Update the clock ten times a second.
pyglet.clock.schedule_interval(self.on_update, 1/10)
def do_detach(self):
pyglet.clock.unschedule(self.on_update)
def do_claim(self):
width = height = 2 * self.radius
return width, height
def do_regroup(self):
if self._face is not None:
self.batch.migrate(
self._face, GL_TRIANGLE_STRIP, self.group, self.batch)
for k in self._hands:
self._hands[k].batch = self.batch
self._hands[k].group = HandGroup(self)
def do_draw(self):
self.do_draw_face()
self.do_draw_hands()
def do_draw_face(self):
N = 48
vertices = []
for i in range(N + 2):
direction = Vector.from_degrees(360 * i / N)
radius = self._radius - (i % 2 * self.custom_face_border_width)
vertex = self.rect.center + radius * direction
vertices += vertex.tuple
# Insert duplicate vertices at the beginning and end of the list,
# otherwise this triangle strip will end up connected to any other
# triangle strips in the scene.
vertices = vertices[:2] + vertices + vertices[-2:]
num_vertices = len(vertices) // 2
color = glooey.drawing.Color.from_anything(self._color)
colors = num_vertices * color.rgb
# The vertex list for the face may or may not exist yet, e.g. if the
# clock is being drawn for the first time or was previously being
# hidden. So create the vertex list if we need to, otherwise just
# update its coordinates.
if self._face is None:
self._face = self.batch.add(
num_vertices,
GL_TRIANGLE_STRIP,
self.group,
('v2f', vertices),
('c3B', colors),
)
else:
self._face.vertices = vertices
self._face.colors = colors
def do_draw_hands(self):
# We're hard-coding the radii of the hands here. Probably it would be
# better to make separate attributes for these, but I think that would
# start to detract from the clarity of the example.
rects = {
'hour': Rect.from_size(self.custom_hour_hand_width, self.radius/2),
'min': Rect.from_size(self.custom_minute_hand_width, self.radius),
'sec': Rect.from_size(self.custom_second_hand_width, self.radius),
}
# The clock hands all start pointing towards 12:00, and the rotations
# are clockwise, so 90° is 3:00, 180° is 6:00, 270° is 9:00, etc.
now = datetime.datetime.now()
angles = {
'hour': 360 * now.hour / 12,
'min': 360 * now.minute / 60,
'sec': 360 * now.second / 60,
}
for k in self._hands:
rects[k].bottom = 0
rects[k].center_x = 0
self._hands[k].rect = rects[k]
self._hands[k].group.angle = angles[k]
self._hands[k].color = self._color
self._hands[k].show()
def do_undraw(self):
if self._face is not None:
self._face.delete()
self._face = None
for k in self._hands:
self._hands[k].hide()
class HandGroup(pyglet.graphics.Group):
def __init__(self, clock):
super().__init__(parent=clock.group)
self.clock = clock
self.angle = 0
def set_state(self):
x, y = self.clock.rect.center
clockwise = -1
glPushMatrix()
glLoadIdentity()
glTranslatef(x, y, 0)
glRotatef(self.angle, 0, 0, clockwise)
def unset_state(self):
glPopMatrix()
window = pyglet.window.Window()
gui = glooey.Gui(window)
gui.add(LineClock())
pyglet.app.run()
| #!/usr/bin/env python3
import pyglet
import glooey
import autoprop
import datetime
from pyglet.gl import *
from vecrec import Vector, Rect
@autoprop
class LineClock(glooey.Widget):
custom_radius = 50
custom_color = 'green'
custom_hour_hand_width = 3
custom_minute_hand_width = 2
custom_second_hand_width = 1
custom_face_border_width = 3
def __init__(self):
super().__init__()
# User-controlled attributes:
self._radius = self.custom_radius
self._color = self.custom_color
# Internal attributes:
self._face = None
self._hands = {
'hour': glooey.drawing.Rectangle(),
'min': glooey.drawing.Rectangle(),
'sec': glooey.drawing.Rectangle(),
}
def get_radius(self):
return self._radius
def set_radius(self, radius):
self._radius = radius
self._repack()
def get_color(self):
return self._color
def set_color(self, color):
self._color = color
self._draw()
def on_update(self, dt):
self._draw()
def do_attach(self):
# Update the clock ten times a second.
pyglet.clock.schedule_interval(self.on_update, 1/10)
def do_detach(self):
pyglet.clock.unschedule(self.on_update)
def do_claim(self):
width = height = 2 * self.radius
return width, height
def do_regroup(self):
if self._face is not None:
self.batch.migrate(
self._face, GL_TRIANGLE_STRIP, self.group, self.batch)
for k in self._hands:
self._hands[k].batch = self.batch
self._hands[k].group = HandGroup(self)
def do_draw(self):
self.do_draw_face()
self.do_draw_hands()
def do_draw_face(self):
N = 48
vertices = []
for i in range(N + 2):
direction = Vector.from_degrees(360 * i / N)
radius = self._radius - (i % 2 * self.custom_face_border_width)
vertex = self.rect.center + radius * direction
vertices += vertex.tuple
# Insert duplicate vertices at the beginning and end of the list,
# otherwise this triangle strip will end up connected to any other
# triangle strips in the scene.
vertices = vertices[:2] + vertices + vertices[-2:]
num_vertices = len(vertices) // 2
color = glooey.drawing.Color.from_anything(self._color)
colors = num_vertices * color.rgb
# The vertex list for the face may or may not exist yet, e.g. if the
# clock is being drawn for the first time or was previously being
# hidden. So create the vertex list if we need to, otherwise just
# update its coordinates.
if self._face is None:
self._face = self.batch.add(
num_vertices,
GL_TRIANGLE_STRIP,
self.group,
('v2f', vertices),
('c3B', colors),
)
else:
self._face.vertices = vertices
self._face.colors = colors
def do_draw_hands(self):
# We're hard-coding the radii of the hands here. Probably it would be
# better to make separate attributes for these, but I think that would
# start to detract from the clarity of the example.
rects = {
'hour': Rect.from_size(self.custom_hour_hand_width, self.radius/2),
'min': Rect.from_size(self.custom_minute_hand_width, self.radius),
'sec': Rect.from_size(self.custom_second_hand_width, self.radius),
}
# The clock hands all start pointing towards 12:00, and the rotations
# are clockwise, so 90° is 3:00, 180° is 6:00, 270° is 9:00, etc.
now = datetime.datetime.now()
angles = {
'hour': 360 * now.hour / 12,
'min': 360 * now.minute / 60,
'sec': 360 * now.second / 60,
}
for k in self._hands:
rects[k].bottom = 0
rects[k].center_x = 0
self._hands[k].rect = rects[k]
self._hands[k].group.angle = angles[k]
self._hands[k].color = self._color
self._hands[k].show()
def do_undraw(self):
if self._face is not None:
self._face.delete()
self._face = None
for k in self._hands:
self._hands[k].hide()
class HandGroup(pyglet.graphics.Group):
def __init__(self, clock):
super().__init__(parent=clock.group)
self.clock = clock
self.angle = 0
def set_state(self):
x, y = self.clock.rect.center
clockwise = -1
glPushMatrix()
glLoadIdentity()
glTranslatef(x, y, 0)
glRotatef(self.angle, 0, 0, clockwise)
def unset_state(self):
glPopMatrix()
window = pyglet.window.Window()
gui = glooey.Gui(window)
gui.add(LineClock())
pyglet.app.run()
| it | 0.125168 | 2.425898 | 2 |
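The only arithmetic in do_draw_hands is turning wall-clock time into rotation angles, with 0 degrees pointing at 12:00 and rotation running clockwise. Stripped of the drawing code, the same computation is:

import datetime

now = datetime.datetime.now()
angles = {
    'hour': 360 * now.hour / 12,     # e.g. 15:00 -> 450, i.e. 90 degrees past 12
    'min': 360 * now.minute / 60,
    'sec': 360 * now.second / 60,
}
print(angles)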
forms_builder/forms/migrations/0004_auto_20180727_1256.py | maqmigh/django-forms-builder | 0 | 14923 | # coding=utf-8
# Generated by Django 2.0.7 on 2018-07-27 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forms', '0003_auto_20180522_0820'),
]
operations = [
migrations.AlterField(
model_name='field',
name='help_text',
field=models.CharField(blank=True, max_length=2000, verbose_name='Help text'),
),
migrations.AlterField(
model_name='form',
name='slug',
field=models.SlugField(max_length=100, unique=True, verbose_name='Slug'),
),
]
| # coding=utf-8
# Generated by Django 2.0.7 on 2018-07-27 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forms', '0003_auto_20180522_0820'),
]
operations = [
migrations.AlterField(
model_name='field',
name='help_text',
field=models.CharField(blank=True, max_length=2000, verbose_name='Help text'),
),
migrations.AlterField(
model_name='form',
name='slug',
field=models.SlugField(max_length=100, unique=True, verbose_name='Slug'),
),
]
| it | 0.131873 | 1.602473 | 2 |
scripts/sighan/generate.py | piglaker/SpecialEdition | 2 | 14924 | import os
import re
import sys
import json
#upper import
sys.path.append("../../")
from utils import levenshtein
from utils.io import load_json, write_to
def strQ2B(ustring):
"""全角转半角"""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
if inside_code == 12288: #全角空格直接转换
inside_code = 32
elif (inside_code >= 65281 and inside_code <= 65374): #全角字符(除空格)根据关系转化
inside_code -= 65248
rstring += chr(inside_code)
return rstring
def get_sighan_from_json():
all_data = {
"train":None,
"dev":None,
"test":None,
"test14":None,
"test15":None,
}
data_dir = "../../data/rawdata/sighan/csc/"
train_file1 = os.path.join(data_dir, "train_dev.json")
train_file2 = os.path.join(data_dir, "train131415.json")
test14_file = os.path.join(data_dir, "test14.json")
test15_file = os.path.join(data_dir, "test15.json")
#test15_file = "../../data/rawdata/sighan/enchanted/test15.enc.json"
all_data["train"] = load_json(train_file1)
all_data["train"].extend(load_json(train_file2))
all_data["train"] = all_data["train"]
all_data["valid14"] = load_json(test14_file)
all_data["valid"] = load_json(test15_file)
#all_data["test"].extend(load_json(test15_file))
return all_data
def preprocess(sentence):
s = strQ2B(sentence)
back_num = re.findall('\d+', s)
back_eng = re.findall(r'[a-zA-Z]+', s)
#s = re.sub(r'[a-zA-Z]+', 'e', s)
#s = re.sub('\d+', 'n', s)
return s
def json2list(data, need_preprocess):
source, target = [], []
for i, element in enumerate(data):
if need_preprocess:
source.append(preprocess(element["original_text"]))
target.append(preprocess(element["correct_text"]))
assert len(preprocess(element["original_text"])) == len(preprocess(element["correct_text"])), preprocess(element["original_text"])+preprocess(element["correct_text"])
else:
print("ERROR: ABORT !")
exit(0)
source.append(strQ2B((element["original_text"])))
target.append(strQ2B((element["correct_text"])))
return source, target
def generate(need_preprocess=True):
"""
split raw data(train.json) to preprocessed target
"""
#file = open("../../data/rawdata/ctc2021/train.json", 'r', encoding='utf-8')
data = get_sighan_from_json()
train_source, train_target = json2list(data["train"], need_preprocess)
valid14_source, valid14_target = json2list(data["valid14"], need_preprocess)
valid_source, valid_target = json2list(data["valid"], need_preprocess)
print(train_source[:3], train_target[:3])
print(len(train_source), len(train_target))
print(valid_source[:3], valid_target[:3])
print(len(valid_source), len(valid_target))
need_remove = {}
# cluster all need_remove
for i, sample in enumerate(valid_source):
for j, char in enumerate(sample):
tgt = valid_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
for i, sample in enumerate(valid14_source):
for j, char in enumerate(sample):
tgt = valid14_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
#remove
remove_count = 0
new_train_source, new_train_target = [], []
for i, sample in enumerate(train_source):
skip = False
for j, char in enumerate(sample):
tgt = train_target[i][j]
if char != tgt:
key = (char, tgt)
if key in need_remove:
skip = True
remove_count += 1
break
if not skip:
new_train_source.append(sample)
new_train_target.append(train_target[i])
print("Total Skip: ", remove_count)
train_source, train_target = new_train_source, new_train_target
#f_src = levenstein.tokenize(source, vocab_file_path="vocab.txt")
train_through = levenshtein.convert_from_sentpair_through(train_source, train_target, train_source)
valid14_through = levenshtein.convert_from_sentpair_through(valid14_source, valid14_target, valid14_source)
valid_through = levenshtein.convert_from_sentpair_through(valid_source, valid_target, valid_source)
#print(train_through[0], valid_through[0])
#output_name = "enchanted"
#output_name = "raw"
output_name = "holy"
write_to("../../data/rawdata/sighan/" + output_name + "/train.src", "\n".join(train_source))
write_to("../../data/rawdata/sighan/"+output_name+"/train.tgt", "\n".join(train_target))
#write_to("../../data/rawdata/sighan/std/train.through", "\n".join(train_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.src", "\n".join(valid14_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.tgt", "\n".join(valid14_target))
#write_to("../../data/rawdata/sighan/std/valid14.through", "\n".join(valid14_through))
write_to("../../data/rawdata/sighan/"+output_name+"/test.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/test.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/test.through", "\n".join(valid_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/valid.through", "\n".join(valid_through[:500]))
if __name__ == "__main__":
generate()
| import os
import re
import sys
import json
#upper import
sys.path.append("../../")
from utils import levenshtein
from utils.io import load_json, write_to
def strQ2B(ustring):
"""全角转半角"""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
if inside_code == 12288: #全角空格直接转换
inside_code = 32
elif (inside_code >= 65281 and inside_code <= 65374): #全角字符(除空格)根据关系转化
inside_code -= 65248
rstring += chr(inside_code)
return rstring
def get_sighan_from_json():
all_data = {
"train":None,
"dev":None,
"test":None,
"test14":None,
"test15":None,
}
data_dir = "../../data/rawdata/sighan/csc/"
train_file1 = os.path.join(data_dir, "train_dev.json")
train_file2 = os.path.join(data_dir, "train131415.json")
test14_file = os.path.join(data_dir, "test14.json")
test15_file = os.path.join(data_dir, "test15.json")
#test15_file = "../../data/rawdata/sighan/enchanted/test15.enc.json"
all_data["train"] = load_json(train_file1)
all_data["train"].extend(load_json(train_file2))
all_data["train"] = all_data["train"]
all_data["valid14"] = load_json(test14_file)
all_data["valid"] = load_json(test15_file)
#all_data["test"].extend(load_json(test15_file))
return all_data
def preprocess(sentence):
s = strQ2B(sentence)
back_num = re.findall('\d+', s)
back_eng = re.findall(r'[a-zA-Z]+', s)
#s = re.sub(r'[a-zA-Z]+', 'e', s)
#s = re.sub('\d+', 'n', s)
return s
def json2list(data, need_preprocess):
source, target = [], []
for i, element in enumerate(data):
if need_preprocess:
source.append(preprocess(element["original_text"]))
target.append(preprocess(element["correct_text"]))
assert len(preprocess(element["original_text"])) == len(preprocess(element["correct_text"])), preprocess(element["original_text"])+preprocess(element["correct_text"])
else:
print("ERROR: ABORT !")
exit(0)
source.append(strQ2B((element["original_text"])))
target.append(strQ2B((element["correct_text"])))
return source, target
def generate(need_preprocess=True):
"""
split raw data(train.json) to preprocessed target
"""
#file = open("../../data/rawdata/ctc2021/train.json", 'r', encoding='utf-8')
data = get_sighan_from_json()
train_source, train_target = json2list(data["train"], need_preprocess)
valid14_source, valid14_target = json2list(data["valid14"], need_preprocess)
valid_source, valid_target = json2list(data["valid"], need_preprocess)
print(train_source[:3], train_target[:3])
print(len(train_source), len(train_target))
print(valid_source[:3], valid_target[:3])
print(len(valid_source), len(valid_target))
need_remove = {}
# cluster all need_remove
for i, sample in enumerate(valid_source):
for j, char in enumerate(sample):
tgt = valid_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
for i, sample in enumerate(valid14_source):
for j, char in enumerate(sample):
tgt = valid14_target[i][j]
if char != tgt:
need_remove[ (char, tgt) ] = 0
#remove
remove_count = 0
new_train_source, new_train_target = [], []
for i, sample in enumerate(train_source):
skip = False
for j, char in enumerate(sample):
tgt = train_target[i][j]
if char != tgt:
key = (char, tgt)
if key in need_remove:
skip = True
remove_count += 1
break
if not skip:
new_train_source.append(sample)
new_train_target.append(train_target[i])
print("Total Skip: ", remove_count)
train_source, train_target = new_train_source, new_train_target
#f_src = levenstein.tokenize(source, vocab_file_path="vocab.txt")
train_through = levenshtein.convert_from_sentpair_through(train_source, train_target, train_source)
valid14_through = levenshtein.convert_from_sentpair_through(valid14_source, valid14_target, valid14_source)
valid_through = levenshtein.convert_from_sentpair_through(valid_source, valid_target, valid_source)
#print(train_through[0], valid_through[0])
#output_name = "enchanted"
#output_name = "raw"
output_name = "holy"
write_to("../../data/rawdata/sighan/" + output_name + "/train.src", "\n".join(train_source))
write_to("../../data/rawdata/sighan/"+output_name+"/train.tgt", "\n".join(train_target))
#write_to("../../data/rawdata/sighan/std/train.through", "\n".join(train_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.src", "\n".join(valid14_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid14.tgt", "\n".join(valid14_target))
#write_to("../../data/rawdata/sighan/std/valid14.through", "\n".join(valid14_through))
write_to("../../data/rawdata/sighan/"+output_name+"/test.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/test.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/test.through", "\n".join(valid_through))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.src", "\n".join(valid_source))
write_to("../../data/rawdata/sighan/"+output_name+"/valid.tgt", "\n".join(valid_target))
#write_to("../../data/rawdata/sighan/std/valid.through", "\n".join(valid_through[:500]))
if __name__ == "__main__":
generate()
| en | 0.107543 | 2.565735 | 3 |
keras_retinanet/backend/__init__.py | mj-haghighi/keras-retinanet | 0 | 14925 | <filename>keras_retinanet/backend/__init__.py
# from .backend import * # noqa: F401,F403
from .sbackend import * | <filename>keras_retinanet/backend/__init__.py
# from .backend import * # noqa: F401,F403
from .sbackend import * | en | 0.094181 | 1.139824 | 1 |
docs/_docs/bash/az3166_patch_binary.py | skolbin-ssi/azure-iot-developer-kit | 43 | 14926 | <filename>docs/_docs/bash/az3166_patch_binary.py
# ----------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license.
# ----------------------------------------------------------------------------
import os
import binascii
import struct
import shutil
import inspect
import sys
def binary_hook(binf, outf):
with open(binf,'rb') as f:
appbin = f.read()
with open('boot.bin', 'rb') as f:
bootbin = f.read()
with open(outf ,'wb') as f:
        f.write(bootbin + (b'\xFF' * (0xc000 - len(bootbin))) + appbin)
if __name__ == '__main__':
binary_hook(sys.argv[1], sys.argv[2]) | <filename>docs/_docs/bash/az3166_patch_binary.py
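# Typical invocation of this script (the file names are placeholders): boot.bin
# is padded with 0xFF up to offset 0xc000, then the application image is
# appended.
#
#     python az3166_patch_binary.py app.bin combined.bin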
# ----------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license.
# ----------------------------------------------------------------------------
import os
import binascii
import struct
import shutil
import inspect
import sys
def binary_hook(binf, outf):
with open(binf,'rb') as f:
appbin = f.read()
with open('boot.bin', 'rb') as f:
bootbin = f.read()
with open(outf ,'wb') as f:
f.write(bootbin + ('\xFF' * (0xc000 - len(bootbin))) + appbin)
if __name__ == '__main__':
binary_hook(sys.argv[1], sys.argv[2]) | it | 0.498812 | 2.00186 | 2 |
setup.py | FireXStuff/firex-bundle-ci | 1 | 14927 | <gh_stars>1-10
import versioneer
from setuptools import setup
setup(name='firex-bundle-ci',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='FireX CI services',
url='https://github.com/FireXStuff/firex-bundle-ci.git',
author='<NAME>',
author_email='<EMAIL>',
license='BSD-3-Clause',
packages=['firex_bundle_ci'],
zip_safe=True,
install_requires=[
"firexapp",
"firex-keeper",
"lxml",
"xunitmerge",
"unittest-xml-reporting"
],
)
| import versioneer
from setuptools import setup
setup(name='firex-bundle-ci',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='FireX CI services',
url='https://github.com/FireXStuff/firex-bundle-ci.git',
author='<NAME>',
author_email='<EMAIL>',
license='BSD-3-Clause',
packages=['firex_bundle_ci'],
zip_safe=True,
install_requires=[
"firexapp",
"firex-keeper",
"lxml",
"xunitmerge",
"unittest-xml-reporting"
],
) | none | 1 | 1.116093 | 1 |
meta_middleware/meta_middleware/middleware.py | kevin-wyx/ProxyFS | 0 | 14928 | # Copyright (c) 2016 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MetaMiddleware(object):
def __init__(self, app, conf):
self.app = app
def __call__(self, env, start_response):
hToDel = list()
vToAdd = list()
for h in env:
if h.upper() == 'HTTP_X_PROXYFS_BIMODAL':
hToDel.append(h)
vToAdd.append(env[h])
for h in hToDel:
del env[h]
for v in vToAdd:
env['HTTP_X_ACCOUNT_SYSMETA_PROXYFS_BIMODAL'] = v # only last one, if multiple, will determine value
def meta_response(status, response_headers, exc_info=None):
hvToDel = list()
vToAdd = list()
for (h,v) in response_headers:
if h.upper() == 'X-ACCOUNT-SYSMETA-PROXYFS-BIMODAL':
hvToDel.append((h,v))
vToAdd.append(v)
for hv in hvToDel:
response_headers.remove(hv)
for v in vToAdd:
response_headers.append(('X-ProxyFS-BiModal',v)) # potentially multiple instances of same header
return start_response(status, response_headers, exc_info)
return self.app(env, meta_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def meta_filter(app):
return MetaMiddleware(app, conf)
return meta_filter
| # Copyright (c) 2016 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MetaMiddleware(object):
def __init__(self, app, conf):
self.app = app
def __call__(self, env, start_response):
hToDel = list()
vToAdd = list()
for h in env:
if h.upper() == 'HTTP_X_PROXYFS_BIMODAL':
hToDel.append(h)
vToAdd.append(env[h])
for h in hToDel:
del env[h]
for v in vToAdd:
env['HTTP_X_ACCOUNT_SYSMETA_PROXYFS_BIMODAL'] = v # only last one, if multiple, will determine value
def meta_response(status, response_headers, exc_info=None):
hvToDel = list()
vToAdd = list()
for (h,v) in response_headers:
if h.upper() == 'X-ACCOUNT-SYSMETA-PROXYFS-BIMODAL':
hvToDel.append((h,v))
vToAdd.append(v)
for hv in hvToDel:
response_headers.remove(hv)
for v in vToAdd:
response_headers.append(('X-ProxyFS-BiModal',v)) # potentially multiple instances of same header
return start_response(status, response_headers, exc_info)
return self.app(env, meta_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def meta_filter(app):
return MetaMiddleware(app, conf)
return meta_filter
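# Sketch of typical use (the trivial WSGI app below is hypothetical): the
# middleware rewrites an incoming X-ProxyFS-BiModal request header to
# X-Account-Sysmeta-ProxyFS-BiModal and surfaces the sysmeta response header
# back to the client as X-ProxyFS-BiModal.
def _example_wrap():
    def app(env, start_response):
        start_response('200 OK',
                       [('X-Account-Sysmeta-ProxyFS-BiModal', 'true')])
        return [b'']
    return filter_factory({})(app)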
| pt | 0.183851 | 1.699192 | 2 |
distpy/util/__init__.py | CU-NESS/distpy | 0 | 14929 | <reponame>CU-NESS/distpy
"""
Introduces utilities used throughout the package, including:
- interfaces for making objects `distpy.util.Savable.Savable` and
`distpy.util.Loadable.Loadable` in binary hdf5 files using h5py
- helper methods for using h5py to save and load variables and arrays
(`h5py_extensions`)
- type category definitions (`distpy.util.TypeCategories`)
- functions for making univariate histograms, bivariate histograms, and
triangle plots (`distpy.util.TrianglePlot`)
- a class that uses strings to represent a `distpy.util.Expression.Expression`
  that can be modified and have arguments passed to it before being evaluated
- a class that represents a sparse, square, block diagonal matrix
  (`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`)
**File**: $DISTPY/distpy/util/\\_\\_init\\_\\_.py
**Author**: <NAME>
**Date**: 14 May 2021
"""
from distpy.util.Savable import Savable
from distpy.util.Loadable import Loadable
from distpy.util.TypeCategories import bool_types, int_types, float_types,\
real_numerical_types, complex_numerical_types, numerical_types,\
sequence_types
from distpy.util.h5py_extensions import create_hdf5_dataset, get_hdf5_value,\
HDF5Link, save_dictionary, load_dictionary
from distpy.util.TrianglePlot import univariate_histogram,\
confidence_contour_2D, bivariate_histogram, triangle_plot
from distpy.util.Expression import Expression
from distpy.util.SparseSquareBlockDiagonalMatrix import\
SparseSquareBlockDiagonalMatrix
| """
Introduces utilities used throughout the package, including:
- interfaces for making objects `distpy.util.Savable.Savable` and
`distpy.util.Loadable.Loadable` in binary hdf5 files using h5py
- helper methods for using h5py to save and load variables and arrays
(`h5py_extensions`)
- type category definitions (`distpy.util.TypeCategories`)
- functions for making univariate histograms, bivariate histograms, and
triangle plots (`distpy.util.TrianglePlot`)
- a class that uses strings to represent a `distpy.util.Expression.Expression`
  that can be modified and have arguments passed to it before being evaluated
- a class that represents a sparse, square, block diagonal matrix
  (`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`)
**File**: $DISTPY/distpy/util/\\_\\_init\\_\\_.py
**Author**: <NAME>
**Date**: 14 May 2021
"""
from distpy.util.Savable import Savable
from distpy.util.Loadable import Loadable
from distpy.util.TypeCategories import bool_types, int_types, float_types,\
real_numerical_types, complex_numerical_types, numerical_types,\
sequence_types
from distpy.util.h5py_extensions import create_hdf5_dataset, get_hdf5_value,\
HDF5Link, save_dictionary, load_dictionary
from distpy.util.TrianglePlot import univariate_histogram,\
confidence_contour_2D, bivariate_histogram, triangle_plot
from distpy.util.Expression import Expression
from distpy.util.SparseSquareBlockDiagonalMatrix import\
SparseSquareBlockDiagonalMatrix | pt | 0.102088 | 2.22312 | 2 |
Crypto-hardRSA/flag.py | JSW2020/hsctf-2019-freshmen | 16 | 14930 | flag = "flag{b3453333-9da9-49ae-b4ed-0017c392d58e}"
e1 = 65537
e2 = 368273 | flag = "flag{b3453333-9da9-49ae-b4ed-0017c392d58e}"
e1 = 65537
e2 = 368273 | none | 1 | 0.923095 | 1 |
toast/decorators/__init__.py | joshuaskelly/Toast | 0 | 14931 | <gh_stars>0
class call_if(object):
def __init__(self, cond):
self.condition = cond
def __call__(self, func):
def inner(*args, **kwargs):
if getattr(args[0], self.condition):
return func(*args, **kwargs)
else:
return None
return inner | class call_if(object):
def __init__(self, cond):
self.condition = cond
def __call__(self, func):
def inner(*args, **kwargs):
if getattr(args[0], self.condition):
return func(*args, **kwargs)
else:
return None
return inner | none | 1 | 3.078993 | 3 |
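# Illustrative sketch (the _Sprite class is hypothetical): call_if gates a
# method on a named boolean instance attribute, so draw() below runs only
# while self.visible is truthy and otherwise returns None.
class _Sprite(object):
    def __init__(self):
        self.visible = True

    @call_if('visible')
    def draw(self):
        return 'drawn'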
drogher/package/fedex.py | thisisnotmyuserid/drogher | 13 | 14932 | <filename>drogher/package/fedex.py
import itertools
from .base import Package
class FedEx(Package):
shipper = 'FedEx'
class FedExExpress(FedEx):
barcode_pattern = r'^\d{34}$'
@property
def tracking_number(self):
return self.barcode[20:22].lstrip('0') + self.barcode[22:]
@property
def valid_checksum(self):
chars, check_digit = self.tracking_number[:-1], self.tracking_number[-1]
total = 0
for digit, char in zip(itertools.cycle([1, 3, 7]), reversed(chars)):
total += int(char) * digit
return total % 11 % 10 == int(check_digit)
class FedExGround96(FedEx):
barcode_pattern = r'^96\d{20}$'
@property
def tracking_number(self):
return self.barcode[7:]
@property
def valid_checksum(self):
chars, check_digit = self.tracking_number[:-1], self.tracking_number[-1]
odd = even = 0
for i, char in enumerate(reversed(chars)):
if i & 0x1:
odd += int(char)
else:
even += int(char)
check = ((even * 3) + odd) % 10
if check != 0:
check = 10 - check
return check == int(check_digit)
| <filename>drogher/package/fedex.py
import itertools
from .base import Package
class FedEx(Package):
shipper = 'FedEx'
class FedExExpress(FedEx):
barcode_pattern = r'^\d{34}$'
@property
def tracking_number(self):
return self.barcode[20:22].lstrip('0') + self.barcode[22:]
@property
def valid_checksum(self):
chars, check_digit = self.tracking_number[:-1], self.tracking_number[-1]
total = 0
for digit, char in zip(itertools.cycle([1, 3, 7]), reversed(chars)):
total += int(char) * digit
return total % 11 % 10 == int(check_digit)
class FedExGround96(FedEx):
barcode_pattern = r'^96\d{20}$'
@property
def tracking_number(self):
return self.barcode[7:]
@property
def valid_checksum(self):
chars, check_digit = self.tracking_number[:-1], self.tracking_number[-1]
odd = even = 0
for i, char in enumerate(reversed(chars)):
if i & 0x1:
odd += int(char)
else:
even += int(char)
check = ((even * 3) + odd) % 10
if check != 0:
check = 10 - check
return check == int(check_digit)
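# Standalone sketch of the Express check-digit rule implemented above (any
# digit string can be passed; no real tracking number is implied): digits are
# weighted 1, 3, 7 cyclically from the right, summed, and the number passes
# when (total % 11) % 10 equals the final check digit.
def _express_checksum_ok(tracking_number):
    chars, check_digit = tracking_number[:-1], tracking_number[-1]
    total = 0
    for weight, char in zip(itertools.cycle([1, 3, 7]), reversed(chars)):
        total += int(char) * weight
    return total % 11 % 10 == int(check_digit)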
| none | 1 | 3.350061 | 3 |
nipy/labs/spatial_models/tests/test_bsa_io.py | arokem/nipy | 0 | 14933 | from __future__ import with_statement
from nose.tools import assert_true
from os.path import exists
import numpy as np
from nibabel import Nifti1Image
from numpy.testing import assert_equal
from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
from ..bsa_io import make_bsa_image
from nibabel.tmpdirs import InTemporaryDirectory
def test_parcel_intra_from_3d_images_list():
"""Test that a parcellation is generated, starting from a list of 3D images
"""
# Generate an image
shape = (5, 5, 5)
contrast_id = 'plop'
mask_image = Nifti1Image(np.ones(shape), np.eye(4))
#mask_images = [mask_image for _ in range(5)]
with InTemporaryDirectory() as dir_context:
data_image = ['image_%d.nii' % i for i in range(5)]
for datim in data_image:
surrogate_3d_dataset(mask=mask_image, out_image_file=datim)
#run the algo
landmark, hrois = make_bsa_image(
mask_image, data_image, threshold=10., smin=0, sigma=1.,
prevalence_threshold=0, prevalence_pval=0.5, write_dir=dir_context,
algorithm='density', contrast_id=contrast_id)
assert_equal(landmark, None)
assert_equal(len(hrois), 5)
assert_true(exists('density_%s.nii' % contrast_id))
assert_true(exists('prevalence_%s.nii' % contrast_id))
assert_true(exists('AR_%s.nii' % contrast_id))
assert_true(exists('CR_%s.nii' % contrast_id))
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| from __future__ import with_statement
from nose.tools import assert_true
from os.path import exists
import numpy as np
from nibabel import Nifti1Image
from numpy.testing import assert_equal
from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
from ..bsa_io import make_bsa_image
from nibabel.tmpdirs import InTemporaryDirectory
def test_parcel_intra_from_3d_images_list():
"""Test that a parcellation is generated, starting from a list of 3D images
"""
# Generate an image
shape = (5, 5, 5)
contrast_id = 'plop'
mask_image = Nifti1Image(np.ones(shape), np.eye(4))
#mask_images = [mask_image for _ in range(5)]
with InTemporaryDirectory() as dir_context:
data_image = ['image_%d.nii' % i for i in range(5)]
for datim in data_image:
surrogate_3d_dataset(mask=mask_image, out_image_file=datim)
#run the algo
landmark, hrois = make_bsa_image(
mask_image, data_image, threshold=10., smin=0, sigma=1.,
prevalence_threshold=0, prevalence_pval=0.5, write_dir=dir_context,
algorithm='density', contrast_id=contrast_id)
assert_equal(landmark, None)
assert_equal(len(hrois), 5)
assert_true(exists('density_%s.nii' % contrast_id))
assert_true(exists('prevalence_%s.nii' % contrast_id))
assert_true(exists('AR_%s.nii' % contrast_id))
assert_true(exists('CR_%s.nii' % contrast_id))
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| pt | 0.216359 | 2.044467 | 2 |
MFSDA/MFSDA_run.py | bpaniagua/MFSDA_Python | 3 | 14934 | <reponame>bpaniagua/MFSDA_Python<gh_stars>1-10
#!/usr/bin/env python-real
# -*- coding: utf-8 -*-
"""
Run script: multivariate functional shape data analysis (MFSDA).
Author: <NAME> (<EMAIL>)
Last update: 2017-08-14
"""
import sys,os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),os.path.join('Resources','Libraries')))
import numpy as np
from scipy import stats
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from stat_read_x import read_x
from stat_lpks import lpks
from stat_sif import sif
from stat_wald_ht import wald_ht
from stat_bstrp_pvalue import bstrp_pvalue
import MFSDA_stat as mfsda
import timeit
import vtk
import argparse
import os
import json
"""installed all the libraries above"""
def main():
parser = argparse.ArgumentParser(description='Multivariate Functional Shape Data Analysis (MFSDA)')
parser.add_argument('--shapeData', type=str, help='Text file list with vtk filenames, 1 file per line', required=True)
parser.add_argument('--coordData', type=str, help='filename, .vtk shape template', required=True)
parser.add_argument('--outputDir', help='output directory', default='./output')
args = parser.parse_args()
start_all = timeit.default_timer()
run_script(args)
stop_all = timeit.default_timer()
delta_time_all = str(stop_all - start_all)
print("The total elapsed time is " + delta_time_all)
def run_script(args):
"""
Run the commandline script for MFSDA.
"""
"""+++++++++++++++++++++++++++++++++++"""
"""Step 1. load dataset """
print("loading data ......")
print("+++++++Read the surface shape data+++++++")
    fh = open(args.shapeData, 'r')
y_design = []
nshape = 0
numpoints = -1
header = fh.readline()
toks = header.split(sep=',')
covs_tmp = []
for line in fh.readlines():
toks = line.strip().split(sep=',')
# Read VTK file
vtkfilename = toks[0].rstrip()
print("Reading {}".format(vtkfilename))
reader = vtk.vtkPolyDataReader()
reader.SetFileName(vtkfilename)
reader.Update()
shapedata = reader.GetOutput()
shapedatapoints = shapedata.GetPoints()
y_design.append([])
if numpoints == -1:
numpoints = shapedatapoints.GetNumberOfPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The number of points is not the same for the shape:", vtkfilename)
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
y_design[nshape].append(p)
nshape += 1
# Build covariate matrix
covs_tmp.append(toks[1:])
    y_design = np.array(y_design).reshape(nshape, numpoints, 3)
print("The dimension of shape matrix is " + str(y_design.shape))
print("+++++++Read the sphere coordinate data+++++++")
print("Reading", args.coordData)
reader = vtk.vtkPolyDataReader()
reader.SetFileName(args.coordData)
reader.Update()
coordData = reader.GetOutput()
shapedatapoints = coordData.GetPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The template does not have the same number of points as the shapes")
coord_mat = []
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
coord_mat.append(p)
coord_mat = np.array(coord_mat)
# Set up design matrix
design_data = np.array(covs_tmp,dtype=float)
# read the covariate type
var_type = getCovariateType(design_data)
"""+++++++++++++++++++++++++++++++++++"""
"""Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing"""
gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)
"""+++++++++++++++++++++++++++++++++++"""
"""Step3. Save all the results"""
if not os.path.exists(args.outputDir):
os.makedirs(args.outputDir)
pvalues = {}
pvalues['Gpvals'] = gpvals.tolist()
pvalues['clu_pvals'] = clu_pvals.tolist()
pvalues['Lpvals_fdr'] = lpvals_fdr.tolist()
with open(os.path.join(args.outputDir,'pvalues.json'), 'w') as outfile:
json.dump(pvalues, outfile)
efit = {}
efit['efitBetas'] = efit_beta.tolist()
efit['efitYdesign'] = efity_design.tolist()
efit['efitEtas'] = efit_eta.tolist()
with open(os.path.join(args.outputDir,'efit.json'), 'w') as outfile:
json.dump(efit, outfile)
def getCovariateType(design_data):
(row,column)=design_data.shape
cov_types=[]
for c in range(column):
cov_col=design_data[:,c]
cov_type = 0. #int
for i in range(len(cov_col)):
if int(cov_col[i])!=cov_col[i]:
cov_type = 1. #double
break
cov_types.append(cov_type)
cov_types = np.array(cov_types)
return cov_types
if __name__ == '__main__':
main()
| #!/usr/bin/env python-real
# -*- coding: utf-8 -*-
"""
Run script: multivariate functional shape data analysis (MFSDA).
Author: <NAME> (<EMAIL>)
Last update: 2017-08-14
"""
import sys,os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),os.path.join('Resources','Libraries')))
import numpy as np
from scipy import stats
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from stat_read_x import read_x
from stat_lpks import lpks
from stat_sif import sif
from stat_wald_ht import wald_ht
from stat_bstrp_pvalue import bstrp_pvalue
import MFSDA_stat as mfsda
import timeit
import vtk
import argparse
import os
import json
"""installed all the libraries above"""
def main():
parser = argparse.ArgumentParser(description='Multivariate Functional Shape Data Analysis (MFSDA)')
parser.add_argument('--shapeData', type=str, help='Text file list with vtk filenames, 1 file per line', required=True)
parser.add_argument('--coordData', type=str, help='filename, .vtk shape template', required=True)
parser.add_argument('--outputDir', help='output directory', default='./output')
args = parser.parse_args()
start_all = timeit.default_timer()
run_script(args)
stop_all = timeit.default_timer()
delta_time_all = str(stop_all - start_all)
print("The total elapsed time is " + delta_time_all)
def run_script(args):
"""
Run the commandline script for MFSDA.
"""
"""+++++++++++++++++++++++++++++++++++"""
"""Step 1. load dataset """
print("loading data ......")
print("+++++++Read the surface shape data+++++++")
    fh = open(args.shapeData, 'r')
y_design = []
nshape = 0
numpoints = -1
header = fh.readline()
toks = header.split(sep=',')
covs_tmp = []
for line in fh.readlines():
toks = line.strip().split(sep=',')
# Read VTK file
vtkfilename = toks[0].rstrip()
print("Reading {}".format(vtkfilename))
reader = vtk.vtkPolyDataReader()
reader.SetFileName(vtkfilename)
reader.Update()
shapedata = reader.GetOutput()
shapedatapoints = shapedata.GetPoints()
y_design.append([])
if numpoints == -1:
numpoints = shapedatapoints.GetNumberOfPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The number of points is not the same for the shape:", vtkfilename)
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
y_design[nshape].append(p)
nshape += 1
# Build covariate matrix
covs_tmp.append(toks[1:])
    y_design = np.array(y_design).reshape(nshape, numpoints, 3)
print("The dimension of shape matrix is " + str(y_design.shape))
print("+++++++Read the sphere coordinate data+++++++")
print("Reading", args.coordData)
reader = vtk.vtkPolyDataReader()
reader.SetFileName(args.coordData)
reader.Update()
coordData = reader.GetOutput()
shapedatapoints = coordData.GetPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The template does not have the same number of points as the shapes")
coord_mat = []
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
coord_mat.append(p)
coord_mat = np.array(coord_mat)
# Set up design matrix
design_data = np.array(covs_tmp,dtype=float)
# read the covariate type
var_type = getCovariateType(design_data)
"""+++++++++++++++++++++++++++++++++++"""
"""Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing"""
gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)
"""+++++++++++++++++++++++++++++++++++"""
"""Step3. Save all the results"""
if not os.path.exists(args.outputDir):
os.makedirs(args.outputDir)
pvalues = {}
pvalues['Gpvals'] = gpvals.tolist()
pvalues['clu_pvals'] = clu_pvals.tolist()
pvalues['Lpvals_fdr'] = lpvals_fdr.tolist()
with open(os.path.join(args.outputDir,'pvalues.json'), 'w') as outfile:
json.dump(pvalues, outfile)
efit = {}
efit['efitBetas'] = efit_beta.tolist()
efit['efitYdesign'] = efity_design.tolist()
efit['efitEtas'] = efit_eta.tolist()
with open(os.path.join(args.outputDir,'efit.json'), 'w') as outfile:
json.dump(efit, outfile)
def getCovariateType(design_data):
(row,column)=design_data.shape
cov_types=[]
for c in range(column):
cov_col=design_data[:,c]
cov_type = 0. #int
for i in range(len(cov_col)):
if int(cov_col[i])!=cov_col[i]:
cov_type = 1. #double
break
cov_types.append(cov_type)
cov_types = np.array(cov_types)
return cov_types
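# Quick illustration (the design matrix below is made up): integer-valued
# columns are flagged 0.0 and real-valued columns 1.0 by getCovariateType.
def _covariate_type_example():
    demo = np.array([[1.0, 0.5], [2.0, 1.5]])
    return getCovariateType(demo)  # expected: array([0., 1.])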
if __name__ == '__main__':
main() | en | 0.117024 | 2.501441 | 3 |
modules/mongodb_atlas/mongodb_atlas.py | riddopic/opta | 595 | 14935 | <filename>modules/mongodb_atlas/mongodb_atlas.py
import os
from typing import TYPE_CHECKING
from modules.base import ModuleProcessor
from opta.core.terraform import get_terraform_outputs
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
class MongodbAtlasProcessor(ModuleProcessor):
def __init__(self, module: "Module", layer: "Layer"):
if module.data["type"] != "mongodb-atlas":
raise Exception(
f"The module {module.name} was expected to be of type mongodb-atlas"
)
super(MongodbAtlasProcessor, self).__init__(module, layer)
def pre_hook(self, module_idx: int) -> None:
required_env_set = set(["MONGODB_ATLAS_PUBLIC_KEY", "MONGODB_ATLAS_PRIVATE_KEY"])
if not required_env_set.issubset(set(os.environ.keys())):
raise UserErrors(
"Opta did not find environment variable(s), please set them and retry: {}".format(
required_env_set - set(os.environ.keys())
)
)
super(MongodbAtlasProcessor, self).pre_hook(module_idx)
def process(self, module_idx: int) -> None:
self.module.data["cloud_provider"] = self.layer.cloud.upper()
if self.module.data["cloud_provider"] == "LOCAL":
self.module.data["cloud_provider"] = "AWS" # For local, always spin up in AWS
self.module.data["region"] = "US_EAST_1"
base_layer = self.layer.root()
root_outputs = get_terraform_outputs(base_layer)
self.module.data["public_nat_ips"] = root_outputs["public_nat_ips"]
super(MongodbAtlasProcessor, self).process(module_idx)
| <filename>modules/mongodb_atlas/mongodb_atlas.py
import os
from typing import TYPE_CHECKING
from modules.base import ModuleProcessor
from opta.core.terraform import get_terraform_outputs
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
class MongodbAtlasProcessor(ModuleProcessor):
def __init__(self, module: "Module", layer: "Layer"):
if module.data["type"] != "mongodb-atlas":
raise Exception(
f"The module {module.name} was expected to be of type mongodb-atlas"
)
super(MongodbAtlasProcessor, self).__init__(module, layer)
def pre_hook(self, module_idx: int) -> None:
required_env_set = set(["MONGODB_ATLAS_PUBLIC_KEY", "MONGODB_ATLAS_PRIVATE_KEY"])
if not required_env_set.issubset(set(os.environ.keys())):
raise UserErrors(
"Opta did not find environment variable(s), please set them and retry: {}".format(
required_env_set - set(os.environ.keys())
)
)
super(MongodbAtlasProcessor, self).pre_hook(module_idx)
def process(self, module_idx: int) -> None:
self.module.data["cloud_provider"] = self.layer.cloud.upper()
if self.module.data["cloud_provider"] == "LOCAL":
self.module.data["cloud_provider"] = "AWS" # For local, always spin up in AWS
self.module.data["region"] = "US_EAST_1"
base_layer = self.layer.root()
root_outputs = get_terraform_outputs(base_layer)
self.module.data["public_nat_ips"] = root_outputs["public_nat_ips"]
super(MongodbAtlasProcessor, self).process(module_idx)
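# The pre_hook above aborts unless both Atlas API keys are present in the
# environment; for example (the values are placeholders):
#
#     export MONGODB_ATLAS_PUBLIC_KEY=<public key>
#     export MONGODB_ATLAS_PRIVATE_KEY=<private key>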
| es | 0.187235 | 2.053182 | 2 |
python/tests/testdata/region_HU.py | kevin-brown/python-phonenumbers | 1 | 14936 | <reponame>kevin-brown/python-phonenumbers
"""Auto-generated file, do not edit by hand. HU metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HU = PhoneMetadata(id='HU', country_code=36, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='30\\d{7}', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='30\\d{7}', example_number='301234567', possible_length=(9,)),
national_prefix='06',
national_prefix_for_parsing='06')
| """Auto-generated file, do not edit by hand. HU metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HU = PhoneMetadata(id='HU', country_code=36, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='30\\d{7}', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='30\\d{7}', example_number='301234567', possible_length=(9,)),
national_prefix='06',
national_prefix_for_parsing='06') | pt | 0.179005 | 2.090074 | 2 |
lambdas/budget-handler/lambda_handler.py | weAllWeGot/personal_financial_engine | 2 | 14937 | <filename>lambdas/budget-handler/lambda_handler.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import boto3
import csv
import json
import logging
from budget_retrieval import get_budget
from budget_placement import place_budget
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': err.message if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
"Access-Control-Allow-Origin": "*", # Required for CORS support to work
"Access-Control-Allow-Credentials": True
},
}
def lambda_handler(event: dict, context: dict) -> dict:
'''Demonstrates a simple HTTP endpoint using API Gateway. You have full
access to the request and response payload, including headers and
status code.
'''
path = event['path']
user_uid = event['requestContext']['authorizer']['claims']['sub']
body = json.loads(event['body'])
path = '/retrieve' if body['RetrieveOrPlace'].endswith('retrieve') else '/place'
entity = 'budget' if body['Entity'].endswith('budget') else 'account'
print(path)
if path.endswith('/retrieve'):
response = get_budget(user_uid, entity)
elif path.endswith('/place'):
response = place_budget(user_uid, body, entity)
return respond(err=None, res=response)
# with open('event.json') as f:
# e = json.load(f)
# lambda_handler(e, {})
| <filename>lambdas/budget-handler/lambda_handler.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import boto3
import csv
import json
import logging
from budget_retrieval import get_budget
from budget_placement import place_budget
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': err.message if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
"Access-Control-Allow-Origin": "*", # Required for CORS support to work
"Access-Control-Allow-Credentials": True
},
}
def lambda_handler(event: dict, context: dict) -> dict:
'''Demonstrates a simple HTTP endpoint using API Gateway. You have full
access to the request and response payload, including headers and
status code.
'''
path = event['path']
user_uid = event['requestContext']['authorizer']['claims']['sub']
body = json.loads(event['body'])
path = '/retrieve' if body['RetrieveOrPlace'].endswith('retrieve') else '/place'
entity = 'budget' if body['Entity'].endswith('budget') else 'account'
print(path)
if path.endswith('/retrieve'):
response = get_budget(user_uid, entity)
elif path.endswith('/place'):
response = place_budget(user_uid, body, entity)
return respond(err=None, res=response)
# with open('event.json') as f:
# e = json.load(f)
# lambda_handler(e, {})
| pt | 0.16257 | 2.275915 | 2 |
src/documenteer/stackdocs/doxygentag.py | lsst-sqre/sphinxkit | 3 | 14938 | """Utilities for working with Doxygen tag files.
"""
__all__ = ["get_tag_entity_names"]
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional, Sequence, Union
try:
from sphinxcontrib.doxylink import doxylink
except ImportError:
print(
"sphinxcontrib.doxylink is missing. Install documenteer with the "
"pipelines extra:\n\n pip install documenteer[pipelines]"
)
def get_tag_entity_names(
tag_path: Union[str, Path], kinds: Optional[Sequence[str]] = None
) -> List[str]:
"""Get the list of API names in a Doxygen tag file.
Parameters
----------
tag_path : `str` or `~pathlib.Path`
File path of the Doxygen tag file.
kinds : sequence of `str`, optional
If provided, a sequence of API kinds to include in the listing.
Doxygen types are:
- namespace
- struct
- class
- file
- define
- group
- variable
- typedef
- enumeration
- function
Returns
-------
names : `list` of `str`
List of API names.
"""
doc = ET.parse(str(tag_path))
symbol_map = doxylink.SymbolMap(doc)
keys = []
for key in symbol_map._mapping.keys():
entry = symbol_map[key]
if kinds:
if entry.kind in kinds:
keys.append(key)
else:
keys.append(key)
keys.sort()
return keys
| """Utilities for working with Doxygen tag files.
"""
__all__ = ["get_tag_entity_names"]
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional, Sequence, Union
try:
from sphinxcontrib.doxylink import doxylink
except ImportError:
print(
"sphinxcontrib.doxylink is missing. Install documenteer with the "
"pipelines extra:\n\n pip install documenteer[pipelines]"
)
def get_tag_entity_names(
tag_path: Union[str, Path], kinds: Optional[Sequence[str]] = None
) -> List[str]:
"""Get the list of API names in a Doxygen tag file.
Parameters
----------
tag_path : `str` or `~pathlib.Path`
File path of the Doxygen tag file.
kinds : sequence of `str`, optional
If provided, a sequence of API kinds to include in the listing.
Doxygen types are:
- namespace
- struct
- class
- file
- define
- group
- variable
- typedef
- enumeration
- function
Returns
-------
names : `list` of `str`
List of API names.
"""
doc = ET.parse(str(tag_path))
symbol_map = doxylink.SymbolMap(doc)
keys = []
for key in symbol_map._mapping.keys():
entry = symbol_map[key]
if kinds:
if entry.kind in kinds:
keys.append(key)
else:
keys.append(key)
keys.sort()
return keys
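# Example wrapper (the tag file path supplied by callers is arbitrary): list
# only the classes and structs recorded in a Doxygen tag file.
def _list_classes_and_structs(tag_path):
    return get_tag_entity_names(tag_path, kinds=["class", "struct"])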
| pt | 0.183369 | 2.349449 | 2 |
src/gamesbyexample/shellgame.py | skinzor/PythonStdioGames | 1 | 14939 | # Shell Game, by <NAME> <EMAIL>
# A random gambling game.
import random, time, sys
print('''SHELL GAME
By <NAME> <EMAIL>
Try to find the diamond!
Press Enter to continue...''')
input()
CUPS = ['diamond', 'pocket lint', 'nothing']
while True:
print()
print('Shuffling the cups', end='')
random.shuffle(CUPS) # This happens instantly.
# We add fake pauses to make it seem more interesting:
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print()
while True:
print('Okay! Pick a cup 1-{}'.format(len(CUPS)))
pickedCup = input()
if pickedCup.isdecimal() and 1 <= int(pickedCup) <= len(CUPS):
break
print('Type a number between 1 and {}.'.format(len(CUPS)))
print()
if CUPS[int(pickedCup) - 1] == 'diamond':
print('You found the cup with the diamond!')
else:
print('Nope! You picked the cup that had {} in it.'.format(CUPS[int(pickedCup) - 1]))
print('Would you like to play again? Y/N')
response = input().upper()
if not response.startswith('Y'):
print('Thanks for playing!')
sys.exit()
| # Shell Game, by <NAME> <EMAIL>
# A random gambling game.
import random, time, sys
print('''SHELL GAME
By <NAME> <EMAIL>
Try to find the diamond!
Press Enter to continue...''')
input()
CUPS = ['diamond', 'pocket lint', 'nothing']
while True:
print()
print('Shuffling the cups', end='')
random.shuffle(CUPS) # This happens instantly.
# We add fake pauses to make it seem more interesting:
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print('.', end='')
time.sleep(0.3)
print()
while True:
print('Okay! Pick a cup 1-{}'.format(len(CUPS)))
pickedCup = input()
if pickedCup.isdecimal() and 1 <= int(pickedCup) <= len(CUPS):
break
print('Type a number between 1 and {}.'.format(len(CUPS)))
print()
if CUPS[int(pickedCup) - 1] == 'diamond':
print('You found the cup with the diamond!')
else:
print('Nope! You picked the cup that had {} in it.'.format(CUPS[int(pickedCup) - 1]))
print('Would you like to play again? Y/N')
response = input().upper()
if not response.startswith('Y'):
print('Thanks for playing!')
sys.exit()
| pt | 0.198141 | 4.02356 | 4 |
main.py | mathew4STAR/GPT-3_based_AI | 0 | 14940 | import pyttsx3
import speech_recognition as sr
import openai as op
import os
op.api_key = os.getenv("OPENAI_API_KEY")
engine = pyttsx3.init()
engine.setProperty('rate', 150)
engine.setProperty('volume', 1.0)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
def tell(text):
engine.say(text)
engine.runAndWait()
def takecommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print("Please repeat")
return "Nothing"
return query
while True:
query = takecommand()
response = op.Completion.create(
engine="text-davinci-001",
prompt="The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: " + query + "\nAI: ",
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
)
presponse= response["choices"][0]["text"]
print(presponse)
tell(presponse) | import pyttsx3
import speech_recognition as sr
import openai as op
import os
op.api_key = os.getenv("OPENAI_API_KEY")
engine = pyttsx3.init()
engine.setProperty('rate', 150)
engine.setProperty('volume', 1.0)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
def tell(text):
engine.say(text)
engine.runAndWait()
def takecommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print("Please repeat")
return "Nothing"
return query
while True:
query = takecommand()
response = op.Completion.create(
engine="text-davinci-001",
prompt="The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: " + query + "\nAI: ",
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
)
presponse= response["choices"][0]["text"]
print(presponse)
tell(presponse) | none | 1 | 2.858399 | 3 |
sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py | polivbr/pulumi-azure-native | 0 | 14941 | <filename>sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'CreationDataArgs',
'DataDiskImageEncryptionArgs',
'DisallowedArgs',
'DiskSkuArgs',
'EncryptionImagesArgs',
'EncryptionSetIdentityArgs',
'EncryptionSettingsCollectionArgs',
'EncryptionSettingsElementArgs',
'EncryptionArgs',
'ExtendedLocationArgs',
'GalleryApplicationVersionPublishingProfileArgs',
'GalleryArtifactVersionSourceArgs',
'GalleryDataDiskImageArgs',
'GalleryImageFeatureArgs',
'GalleryImageIdentifierArgs',
'GalleryImageVersionPublishingProfileArgs',
'GalleryImageVersionStorageProfileArgs',
'GalleryOSDiskImageArgs',
'ImageDiskReferenceArgs',
'ImagePurchasePlanArgs',
'KeyForDiskEncryptionSetArgs',
'KeyVaultAndKeyReferenceArgs',
'KeyVaultAndSecretReferenceArgs',
'OSDiskImageEncryptionArgs',
'PrivateLinkServiceConnectionStateArgs',
'PurchasePlanArgs',
'RecommendedMachineConfigurationArgs',
'ResourceRangeArgs',
'SharingProfileArgs',
'SnapshotSkuArgs',
'SourceVaultArgs',
'TargetRegionArgs',
'UserArtifactManageArgs',
'UserArtifactSourceArgs',
]
@pulumi.input_type
class CreationDataArgs:
def __init__(__self__, *,
create_option: pulumi.Input[Union[str, 'DiskCreateOption']],
gallery_image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,
image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,
logical_sector_size: Optional[pulumi.Input[int]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_id: Optional[pulumi.Input[str]] = None,
upload_size_bytes: Optional[pulumi.Input[float]] = None):
"""
Data used when creating a disk.
:param pulumi.Input[Union[str, 'DiskCreateOption']] create_option: This enumerates the possible sources of a disk's creation.
        :param pulumi.Input['ImageDiskReferenceArgs'] gallery_image_reference: Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared gallery image version from which to create a disk.
:param pulumi.Input['ImageDiskReferenceArgs'] image_reference: Disk source information.
        :param pulumi.Input[int] logical_sector_size: Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
:param pulumi.Input[str] source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot or disk.
:param pulumi.Input[str] source_uri: If createOption is Import, this is the URI of a blob to be imported into a managed disk.
:param pulumi.Input[str] storage_account_id: Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk.
:param pulumi.Input[float] upload_size_bytes: If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
"""
pulumi.set(__self__, "create_option", create_option)
if gallery_image_reference is not None:
pulumi.set(__self__, "gallery_image_reference", gallery_image_reference)
if image_reference is not None:
pulumi.set(__self__, "image_reference", image_reference)
if logical_sector_size is not None:
pulumi.set(__self__, "logical_sector_size", logical_sector_size)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if source_uri is not None:
pulumi.set(__self__, "source_uri", source_uri)
if storage_account_id is not None:
pulumi.set(__self__, "storage_account_id", storage_account_id)
if upload_size_bytes is not None:
pulumi.set(__self__, "upload_size_bytes", upload_size_bytes)
@property
@pulumi.getter(name="createOption")
def create_option(self) -> pulumi.Input[Union[str, 'DiskCreateOption']]:
"""
This enumerates the possible sources of a disk's creation.
"""
return pulumi.get(self, "create_option")
@create_option.setter
def create_option(self, value: pulumi.Input[Union[str, 'DiskCreateOption']]):
pulumi.set(self, "create_option", value)
@property
@pulumi.getter(name="galleryImageReference")
def gallery_image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]:
"""
        Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared gallery image version from which to create a disk.
"""
return pulumi.get(self, "gallery_image_reference")
@gallery_image_reference.setter
def gallery_image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]):
pulumi.set(self, "gallery_image_reference", value)
@property
@pulumi.getter(name="imageReference")
def image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]:
"""
Disk source information.
"""
return pulumi.get(self, "image_reference")
@image_reference.setter
def image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]):
pulumi.set(self, "image_reference", value)
@property
@pulumi.getter(name="logicalSectorSize")
def logical_sector_size(self) -> Optional[pulumi.Input[int]]:
"""
        Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
"""
return pulumi.get(self, "logical_sector_size")
@logical_sector_size.setter
def logical_sector_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "logical_sector_size", value)
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Copy, this is the ARM id of the source snapshot or disk.
"""
return pulumi.get(self, "source_resource_id")
@source_resource_id.setter
def source_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_resource_id", value)
@property
@pulumi.getter(name="sourceUri")
def source_uri(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Import, this is the URI of a blob to be imported into a managed disk.
"""
return pulumi.get(self, "source_uri")
@source_uri.setter
def source_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_uri", value)
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk.
"""
return pulumi.get(self, "storage_account_id")
@storage_account_id.setter
def storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_id", value)
@property
@pulumi.getter(name="uploadSizeBytes")
def upload_size_bytes(self) -> Optional[pulumi.Input[float]]:
"""
If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
"""
return pulumi.get(self, "upload_size_bytes")
@upload_size_bytes.setter
def upload_size_bytes(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "upload_size_bytes", value)
@pulumi.input_type
class DataDiskImageEncryptionArgs:
def __init__(__self__, *,
lun: pulumi.Input[int],
disk_encryption_set_id: Optional[pulumi.Input[str]] = None):
"""
Contains encryption settings for a data disk image.
:param pulumi.Input[int] lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
:param pulumi.Input[str] disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set.
"""
pulumi.set(__self__, "lun", lun)
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
@property
@pulumi.getter
def lun(self) -> pulumi.Input[int]:
"""
This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: pulumi.Input[int]):
pulumi.set(self, "lun", value)
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:
"""
A relative URI containing the resource ID of the disk encryption set.
"""
return pulumi.get(self, "disk_encryption_set_id")
@disk_encryption_set_id.setter
def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_encryption_set_id", value)
@pulumi.input_type
class DisallowedArgs:
def __init__(__self__, *,
disk_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Describes the disallowed disk types.
:param pulumi.Input[Sequence[pulumi.Input[str]]] disk_types: A list of disk types.
"""
if disk_types is not None:
pulumi.set(__self__, "disk_types", disk_types)
@property
@pulumi.getter(name="diskTypes")
def disk_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of disk types.
"""
return pulumi.get(self, "disk_types")
@disk_types.setter
def disk_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "disk_types", value)
@pulumi.input_type
class DiskSkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]] = None):
"""
The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
:param pulumi.Input[Union[str, 'DiskStorageAccountTypes']] name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class EncryptionImagesArgs:
def __init__(__self__, *,
data_disk_images: Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]] = None,
os_disk_image: Optional[pulumi.Input['OSDiskImageEncryptionArgs']] = None):
"""
Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
:param pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]] data_disk_images: A list of encryption specifications for data disk images.
:param pulumi.Input['OSDiskImageEncryptionArgs'] os_disk_image: Contains encryption settings for an OS disk image.
"""
if data_disk_images is not None:
pulumi.set(__self__, "data_disk_images", data_disk_images)
if os_disk_image is not None:
pulumi.set(__self__, "os_disk_image", os_disk_image)
@property
@pulumi.getter(name="dataDiskImages")
def data_disk_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]]:
"""
A list of encryption specifications for data disk images.
"""
return pulumi.get(self, "data_disk_images")
@data_disk_images.setter
def data_disk_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]]):
pulumi.set(self, "data_disk_images", value)
@property
@pulumi.getter(name="osDiskImage")
def os_disk_image(self) -> Optional[pulumi.Input['OSDiskImageEncryptionArgs']]:
"""
Contains encryption settings for an OS disk image.
"""
return pulumi.get(self, "os_disk_image")
@os_disk_image.setter
def os_disk_image(self, value: Optional[pulumi.Input['OSDiskImageEncryptionArgs']]):
pulumi.set(self, "os_disk_image", value)
@pulumi.input_type
class EncryptionSetIdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]] = None):
"""
The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks.
:param pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']] type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys.
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]:
"""
The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class EncryptionSettingsCollectionArgs:
def __init__(__self__, *,
enabled: pulumi.Input[bool],
encryption_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]] = None,
encryption_settings_version: Optional[pulumi.Input[str]] = None):
"""
Encryption settings for disk or snapshot
:param pulumi.Input[bool] enabled: Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
:param pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]] encryption_settings: A collection of encryption settings, one for each disk volume.
:param pulumi.Input[str] encryption_settings_version: Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption.
"""
pulumi.set(__self__, "enabled", enabled)
if encryption_settings is not None:
pulumi.set(__self__, "encryption_settings", encryption_settings)
if encryption_settings_version is not None:
pulumi.set(__self__, "encryption_settings_version", encryption_settings_version)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="encryptionSettings")
def encryption_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]:
"""
A collection of encryption settings, one for each disk volume.
"""
return pulumi.get(self, "encryption_settings")
@encryption_settings.setter
def encryption_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]):
pulumi.set(self, "encryption_settings", value)
@property
@pulumi.getter(name="encryptionSettingsVersion")
def encryption_settings_version(self) -> Optional[pulumi.Input[str]]:
"""
Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption.
"""
return pulumi.get(self, "encryption_settings_version")
@encryption_settings_version.setter
def encryption_settings_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_settings_version", value)
@pulumi.input_type
class EncryptionSettingsElementArgs:
def __init__(__self__, *,
disk_encryption_key: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']] = None,
key_encryption_key: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']] = None):
"""
Encryption settings for one disk volume.
:param pulumi.Input['KeyVaultAndSecretReferenceArgs'] disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key
:param pulumi.Input['KeyVaultAndKeyReferenceArgs'] key_encryption_key: Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
"""
if disk_encryption_key is not None:
pulumi.set(__self__, "disk_encryption_key", disk_encryption_key)
if key_encryption_key is not None:
pulumi.set(__self__, "key_encryption_key", key_encryption_key)
@property
@pulumi.getter(name="diskEncryptionKey")
def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:
"""
Key Vault Secret Url and vault id of the disk encryption key
"""
return pulumi.get(self, "disk_encryption_key")
@disk_encryption_key.setter
def disk_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]):
pulumi.set(self, "disk_encryption_key", value)
@property
@pulumi.getter(name="keyEncryptionKey")
def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:
"""
Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
"""
return pulumi.get(self, "key_encryption_key")
@key_encryption_key.setter
def key_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]):
pulumi.set(self, "key_encryption_key", value)
@pulumi.input_type
class EncryptionArgs:
def __init__(__self__, *,
disk_encryption_set_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'EncryptionType']]] = None):
"""
Encryption at rest settings for disk or snapshot
:param pulumi.Input[str] disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling encryption at rest.
:param pulumi.Input[Union[str, 'EncryptionType']] type: The type of key used to encrypt the data of the disk.
"""
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:
"""
ResourceId of the disk encryption set to use for enabling encryption at rest.
"""
return pulumi.get(self, "disk_encryption_set_id")
@disk_encryption_set_id.setter
def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_encryption_set_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'EncryptionType']]]:
"""
The type of key used to encrypt the data of the disk.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'EncryptionType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ExtendedLocationArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]] = None):
"""
The complex type of the extended location.
:param pulumi.Input[str] name: The name of the extended location.
:param pulumi.Input[Union[str, 'ExtendedLocationTypes']] type: The type of the extended location.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the extended location.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]:
"""
The type of the extended location.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class GalleryApplicationVersionPublishingProfileArgs:
def __init__(__self__, *,
source: pulumi.Input['UserArtifactSourceArgs'],
enable_health_check: Optional[pulumi.Input[bool]] = None,
end_of_life_date: Optional[pulumi.Input[str]] = None,
exclude_from_latest: Optional[pulumi.Input[bool]] = None,
manage_actions: Optional[pulumi.Input['UserArtifactManageArgs']] = None,
replica_count: Optional[pulumi.Input[int]] = None,
storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None,
target_regions: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]] = None):
"""
The publishing profile of a gallery image version.
:param pulumi.Input['UserArtifactSourceArgs'] source: The source image from which the Image Version is going to be created.
:param pulumi.Input[bool] enable_health_check: Optional. Whether or not this application reports health.
:param pulumi.Input[str] end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
:param pulumi.Input[bool] exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
:param pulumi.Input[int] replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
:param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
:param pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
pulumi.set(__self__, "source", source)
if enable_health_check is not None:
pulumi.set(__self__, "enable_health_check", enable_health_check)
if end_of_life_date is not None:
pulumi.set(__self__, "end_of_life_date", end_of_life_date)
if exclude_from_latest is not None:
pulumi.set(__self__, "exclude_from_latest", exclude_from_latest)
if manage_actions is not None:
pulumi.set(__self__, "manage_actions", manage_actions)
if replica_count is not None:
pulumi.set(__self__, "replica_count", replica_count)
if storage_account_type is not None:
pulumi.set(__self__, "storage_account_type", storage_account_type)
if target_regions is not None:
pulumi.set(__self__, "target_regions", target_regions)
@property
@pulumi.getter
def source(self) -> pulumi.Input['UserArtifactSourceArgs']:
"""
The source image from which the Image Version is going to be created.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: pulumi.Input['UserArtifactSourceArgs']):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="enableHealthCheck")
def enable_health_check(self) -> Optional[pulumi.Input[bool]]:
"""
Optional. Whether or not this application reports health.
"""
return pulumi.get(self, "enable_health_check")
@enable_health_check.setter
def enable_health_check(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_health_check", value)
@property
@pulumi.getter(name="endOfLifeDate")
def end_of_life_date(self) -> Optional[pulumi.Input[str]]:
"""
The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
"""
return pulumi.get(self, "end_of_life_date")
@end_of_life_date.setter
def end_of_life_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_of_life_date", value)
@property
@pulumi.getter(name="excludeFromLatest")
def exclude_from_latest(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
"""
return pulumi.get(self, "exclude_from_latest")
@exclude_from_latest.setter
def exclude_from_latest(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclude_from_latest", value)
@property
@pulumi.getter(name="manageActions")
def manage_actions(self) -> Optional[pulumi.Input['UserArtifactManageArgs']]:
return pulumi.get(self, "manage_actions")
@manage_actions.setter
def manage_actions(self, value: Optional[pulumi.Input['UserArtifactManageArgs']]):
pulumi.set(self, "manage_actions", value)
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
"""
return pulumi.get(self, "replica_count")
@replica_count.setter
def replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replica_count", value)
@property
@pulumi.getter(name="storageAccountType")
def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]:
"""
Specifies the storage account type to be used to store the image. This property is not updatable.
"""
return pulumi.get(self, "storage_account_type")
@storage_account_type.setter
def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]):
pulumi.set(self, "storage_account_type", value)
@property
@pulumi.getter(name="targetRegions")
def target_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]:
"""
The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
return pulumi.get(self, "target_regions")
@target_regions.setter
def target_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]):
pulumi.set(self, "target_regions", value)
@pulumi.input_type
class GalleryArtifactVersionSourceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None):
"""
The gallery artifact version source.
:param pulumi.Input[str] id: The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
:param pulumi.Input[str] uri: The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class GalleryDataDiskImageArgs:
def __init__(__self__, *,
lun: pulumi.Input[int],
host_caching: Optional[pulumi.Input['HostCaching']] = None,
source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None):
"""
This is the data disk image.
:param pulumi.Input[int] lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
:param pulumi.Input['HostCaching'] host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
:param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source.
"""
pulumi.set(__self__, "lun", lun)
if host_caching is not None:
pulumi.set(__self__, "host_caching", host_caching)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter
def lun(self) -> pulumi.Input[int]:
"""
This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: pulumi.Input[int]):
pulumi.set(self, "lun", value)
@property
@pulumi.getter(name="hostCaching")
def host_caching(self) -> Optional[pulumi.Input['HostCaching']]:
"""
The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
"""
return pulumi.get(self, "host_caching")
@host_caching.setter
def host_caching(self, value: Optional[pulumi.Input['HostCaching']]):
pulumi.set(self, "host_caching", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]:
"""
The gallery artifact version source.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]):
pulumi.set(self, "source", value)
@pulumi.input_type
class GalleryImageFeatureArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
A feature for gallery image.
:param pulumi.Input[str] name: The name of the gallery image feature.
:param pulumi.Input[str] value: The value of the gallery image feature.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the gallery image feature.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the gallery image feature.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class GalleryImageIdentifierArgs:
def __init__(__self__, *,
offer: pulumi.Input[str],
publisher: pulumi.Input[str],
sku: pulumi.Input[str]):
"""
This is the gallery image definition identifier.
:param pulumi.Input[str] offer: The name of the gallery image definition offer.
:param pulumi.Input[str] publisher: The name of the gallery image definition publisher.
:param pulumi.Input[str] sku: The name of the gallery image definition SKU.
"""
pulumi.set(__self__, "offer", offer)
pulumi.set(__self__, "publisher", publisher)
pulumi.set(__self__, "sku", sku)
@property
@pulumi.getter
def offer(self) -> pulumi.Input[str]:
"""
The name of the gallery image definition offer.
"""
return pulumi.get(self, "offer")
@offer.setter
def offer(self, value: pulumi.Input[str]):
pulumi.set(self, "offer", value)
@property
@pulumi.getter
def publisher(self) -> pulumi.Input[str]:
"""
The name of the gallery image definition publisher.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: pulumi.Input[str]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input[str]:
"""
The name of the gallery image definition SKU.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input[str]):
pulumi.set(self, "sku", value)
@pulumi.input_type
class GalleryImageVersionPublishingProfileArgs:
def __init__(__self__, *,
end_of_life_date: Optional[pulumi.Input[str]] = None,
exclude_from_latest: Optional[pulumi.Input[bool]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None,
target_regions: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]] = None):
"""
The publishing profile of a gallery image Version.
:param pulumi.Input[str] end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
:param pulumi.Input[bool] exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
:param pulumi.Input[int] replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
:param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
:param pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
if end_of_life_date is not None:
pulumi.set(__self__, "end_of_life_date", end_of_life_date)
if exclude_from_latest is not None:
pulumi.set(__self__, "exclude_from_latest", exclude_from_latest)
if replica_count is not None:
pulumi.set(__self__, "replica_count", replica_count)
if storage_account_type is not None:
pulumi.set(__self__, "storage_account_type", storage_account_type)
if target_regions is not None:
pulumi.set(__self__, "target_regions", target_regions)
@property
@pulumi.getter(name="endOfLifeDate")
def end_of_life_date(self) -> Optional[pulumi.Input[str]]:
"""
The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
"""
return pulumi.get(self, "end_of_life_date")
@end_of_life_date.setter
def end_of_life_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_of_life_date", value)
@property
@pulumi.getter(name="excludeFromLatest")
def exclude_from_latest(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
"""
return pulumi.get(self, "exclude_from_latest")
@exclude_from_latest.setter
def exclude_from_latest(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclude_from_latest", value)
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
"""
return pulumi.get(self, "replica_count")
@replica_count.setter
def replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replica_count", value)
@property
@pulumi.getter(name="storageAccountType")
def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]:
"""
Specifies the storage account type to be used to store the image. This property is not updatable.
"""
return pulumi.get(self, "storage_account_type")
@storage_account_type.setter
def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]):
pulumi.set(self, "storage_account_type", value)
@property
@pulumi.getter(name="targetRegions")
def target_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]:
"""
The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
return pulumi.get(self, "target_regions")
@target_regions.setter
def target_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]):
pulumi.set(self, "target_regions", value)
@pulumi.input_type
class GalleryImageVersionStorageProfileArgs:
def __init__(__self__, *,
data_disk_images: Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]] = None,
os_disk_image: Optional[pulumi.Input['GalleryOSDiskImageArgs']] = None,
source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None):
"""
This is the storage profile of a Gallery Image Version.
:param pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]] data_disk_images: A list of data disk images.
:param pulumi.Input['GalleryOSDiskImageArgs'] os_disk_image: This is the OS disk image.
:param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source.
"""
if data_disk_images is not None:
pulumi.set(__self__, "data_disk_images", data_disk_images)
if os_disk_image is not None:
pulumi.set(__self__, "os_disk_image", os_disk_image)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter(name="dataDiskImages")
def data_disk_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]]:
"""
A list of data disk images.
"""
return pulumi.get(self, "data_disk_images")
@data_disk_images.setter
def data_disk_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]]):
pulumi.set(self, "data_disk_images", value)
@property
@pulumi.getter(name="osDiskImage")
def os_disk_image(self) -> Optional[pulumi.Input['GalleryOSDiskImageArgs']]:
"""
This is the OS disk image.
"""
return pulumi.get(self, "os_disk_image")
@os_disk_image.setter
def os_disk_image(self, value: Optional[pulumi.Input['GalleryOSDiskImageArgs']]):
pulumi.set(self, "os_disk_image", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]:
"""
The gallery artifact version source.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]):
pulumi.set(self, "source", value)
@pulumi.input_type
class GalleryOSDiskImageArgs:
def __init__(__self__, *,
host_caching: Optional[pulumi.Input['HostCaching']] = None,
source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None):
"""
This is the OS disk image.
:param pulumi.Input['HostCaching'] host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
:param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source.
"""
if host_caching is not None:
pulumi.set(__self__, "host_caching", host_caching)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter(name="hostCaching")
def host_caching(self) -> Optional[pulumi.Input['HostCaching']]:
"""
The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
"""
return pulumi.get(self, "host_caching")
@host_caching.setter
def host_caching(self, value: Optional[pulumi.Input['HostCaching']]):
pulumi.set(self, "host_caching", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]:
"""
The gallery artifact version source.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]):
pulumi.set(self, "source", value)
@pulumi.input_type
class ImageDiskReferenceArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
lun: Optional[pulumi.Input[int]] = None):
"""
The source image used for creating the disk.
:param pulumi.Input[str] id: A relative uri containing either a Platform Image Repository or user image reference.
:param pulumi.Input[int] lun: If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
"""
pulumi.set(__self__, "id", id)
if lun is not None:
pulumi.set(__self__, "lun", lun)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
A relative uri containing either a Platform Image Repository or user image reference.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def lun(self) -> Optional[pulumi.Input[int]]:
"""
If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lun", value)
@pulumi.input_type
class ImagePurchasePlanArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
publisher: Optional[pulumi.Input[str]] = None):
"""
Describes the gallery image definition purchase plan. This is used by marketplace images.
:param pulumi.Input[str] name: The plan ID.
:param pulumi.Input[str] product: The product ID.
:param pulumi.Input[str] publisher: The publisher ID.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if product is not None:
pulumi.set(__self__, "product", product)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The plan ID.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
"""
The product ID.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter
def publisher(self) -> Optional[pulumi.Input[str]]:
"""
The publisher ID.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publisher", value)
@pulumi.input_type
class KeyForDiskEncryptionSetArgs:
def __init__(__self__, *,
key_url: pulumi.Input[str],
source_vault: Optional[pulumi.Input['SourceVaultArgs']] = None):
"""
Key Vault Key Url to be used for server side encryption of Managed Disks and Snapshots
:param pulumi.Input[str] key_url: Fully versioned Key Url pointing to a key in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription.
"""
pulumi.set(__self__, "key_url", key_url)
if source_vault is not None:
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="keyUrl")
def key_url(self) -> pulumi.Input[str]:
"""
Fully versioned Key Url pointing to a key in KeyVault
"""
return pulumi.get(self, "key_url")
@key_url.setter
def key_url(self, value: pulumi.Input[str]):
pulumi.set(self, "key_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> Optional[pulumi.Input['SourceVaultArgs']]:
"""
Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription.
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: Optional[pulumi.Input['SourceVaultArgs']]):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class KeyVaultAndKeyReferenceArgs:
def __init__(__self__, *,
key_url: pulumi.Input[str],
source_vault: pulumi.Input['SourceVaultArgs']):
"""
Key Vault Key Url and vault id of KeK. KeK is optional and, when provided, is used to unwrap the encryptionKey.
:param pulumi.Input[str] key_url: Url pointing to a key or secret in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret
"""
pulumi.set(__self__, "key_url", key_url)
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="keyUrl")
def key_url(self) -> pulumi.Input[str]:
"""
Url pointing to a key or secret in KeyVault
"""
return pulumi.get(self, "key_url")
@key_url.setter
def key_url(self, value: pulumi.Input[str]):
pulumi.set(self, "key_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SourceVaultArgs']:
"""
Resource id of the KeyVault containing the key or secret
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SourceVaultArgs']):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class KeyVaultAndSecretReferenceArgs:
def __init__(__self__, *,
secret_url: pulumi.Input[str],
source_vault: pulumi.Input['SourceVaultArgs']):
"""
Key Vault Secret Url and vault id of the encryption key
:param pulumi.Input[str] secret_url: Url pointing to a key or secret in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret
"""
pulumi.set(__self__, "secret_url", secret_url)
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="secretUrl")
def secret_url(self) -> pulumi.Input[str]:
"""
Url pointing to a key or secret in KeyVault
"""
return pulumi.get(self, "secret_url")
@secret_url.setter
def secret_url(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SourceVaultArgs']:
"""
Resource id of the KeyVault containing the key or secret
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SourceVaultArgs']):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class OSDiskImageEncryptionArgs:
def __init__(__self__, *,
disk_encryption_set_id: Optional[pulumi.Input[str]] = None):
"""
Contains encryption settings for an OS disk image.
:param pulumi.Input[str] disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set.
"""
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:
"""
A relative URI containing the resource ID of the disk encryption set.
"""
return pulumi.get(self, "disk_encryption_set_id")
@disk_encryption_set_id.setter
def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_encryption_set_id", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
actions_required: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
"""
A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param pulumi.Input[str] description: The reason for approval/rejection of the connection.
:param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[pulumi.Input[str]]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@actions_required.setter
def actions_required(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "actions_required", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The reason for approval/rejection of the connection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class PurchasePlanArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
product: pulumi.Input[str],
publisher: pulumi.Input[str],
promotion_code: Optional[pulumi.Input[str]] = None):
"""
Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
:param pulumi.Input[str] name: The plan ID.
:param pulumi.Input[str] product: Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
:param pulumi.Input[str] publisher: The publisher ID.
:param pulumi.Input[str] promotion_code: The Offer Promotion Code.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "product", product)
pulumi.set(__self__, "publisher", publisher)
if promotion_code is not None:
pulumi.set(__self__, "promotion_code", promotion_code)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The plan ID.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> pulumi.Input[str]:
"""
Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: pulumi.Input[str]):
pulumi.set(self, "product", value)
@property
@pulumi.getter
def publisher(self) -> pulumi.Input[str]:
"""
The publisher ID.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: pulumi.Input[str]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter(name="promotionCode")
def promotion_code(self) -> Optional[pulumi.Input[str]]:
"""
The Offer Promotion Code.
"""
return pulumi.get(self, "promotion_code")
@promotion_code.setter
def promotion_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "promotion_code", value)
@pulumi.input_type
class RecommendedMachineConfigurationArgs:
def __init__(__self__, *,
memory: Optional[pulumi.Input['ResourceRangeArgs']] = None,
v_cpus: Optional[pulumi.Input['ResourceRangeArgs']] = None):
"""
The properties describe the recommended machine configuration for this Image Definition. These properties are updatable.
:param pulumi.Input['ResourceRangeArgs'] memory: Describes the resource range.
:param pulumi.Input['ResourceRangeArgs'] v_cpus: Describes the resource range.
"""
if memory is not None:
pulumi.set(__self__, "memory", memory)
if v_cpus is not None:
pulumi.set(__self__, "v_cpus", v_cpus)
@property
@pulumi.getter
def memory(self) -> Optional[pulumi.Input['ResourceRangeArgs']]:
"""
Describes the resource range.
"""
return pulumi.get(self, "memory")
@memory.setter
def memory(self, value: Optional[pulumi.Input['ResourceRangeArgs']]):
pulumi.set(self, "memory", value)
@property
@pulumi.getter(name="vCPUs")
def v_cpus(self) -> Optional[pulumi.Input['ResourceRangeArgs']]:
"""
Describes the resource range.
"""
return pulumi.get(self, "v_cpus")
@v_cpus.setter
def v_cpus(self, value: Optional[pulumi.Input['ResourceRangeArgs']]):
pulumi.set(self, "v_cpus", value)
@pulumi.input_type
class ResourceRangeArgs:
def __init__(__self__, *,
max: Optional[pulumi.Input[int]] = None,
min: Optional[pulumi.Input[int]] = None):
"""
Describes the resource range.
:param pulumi.Input[int] max: The maximum number of the resource.
:param pulumi.Input[int] min: The minimum number of the resource.
"""
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of the resource.
"""
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input[int]]:
"""
The minimum number of the resource.
"""
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min", value)
@pulumi.input_type
class SharingProfileArgs:
def __init__(__self__, *,
permissions: Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]] = None):
"""
Profile for gallery sharing to subscription or tenant
:param pulumi.Input[Union[str, 'GallerySharingPermissionTypes']] permissions: This property allows you to specify the permissions for sharing the gallery. Possible values are: **Private**, **Groups**
"""
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]]:
"""
This property allows you to specify the permissions for sharing the gallery. Possible values are: **Private**, **Groups**
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]]):
pulumi.set(self, "permissions", value)
@pulumi.input_type
class SnapshotSkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]] = None):
"""
The snapshot's SKU name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an optional parameter for incremental snapshots; by default, the SKU is set to the same SKU as the previous snapshot.
:param pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']] name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SourceVaultArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
:param pulumi.Input[str] id: Resource Id
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class TargetRegionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
encryption: Optional[pulumi.Input['EncryptionImagesArgs']] = None,
regional_replica_count: Optional[pulumi.Input[int]] = None,
storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None):
"""
Describes the target region information.
:param pulumi.Input[str] name: The name of the region.
:param pulumi.Input['EncryptionImagesArgs'] encryption: Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
:param pulumi.Input[int] regional_replica_count: The number of replicas of the Image Version to be created per region. This property is updatable.
:param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
"""
pulumi.set(__self__, "name", name)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if regional_replica_count is not None:
pulumi.set(__self__, "regional_replica_count", regional_replica_count)
if storage_account_type is not None:
pulumi.set(__self__, "storage_account_type", storage_account_type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the region.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['EncryptionImagesArgs']]:
"""
Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['EncryptionImagesArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter(name="regionalReplicaCount")
def regional_replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the Image Version to be created per region. This property is updatable.
"""
return pulumi.get(self, "regional_replica_count")
@regional_replica_count.setter
def regional_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "regional_replica_count", value)
@property
@pulumi.getter(name="storageAccountType")
def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]:
"""
Specifies the storage account type to be used to store the image. This property is not updatable.
"""
return pulumi.get(self, "storage_account_type")
@storage_account_type.setter
def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]):
pulumi.set(self, "storage_account_type", value)
@pulumi.input_type
class UserArtifactManageArgs:
def __init__(__self__, *,
install: pulumi.Input[str],
remove: pulumi.Input[str],
update: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] install: Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
:param pulumi.Input[str] remove: Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
:param pulumi.Input[str] update: Optional. The path and arguments to update the gallery application. If not present, the update operation will invoke the remove command on the previous version and the install command on the current version of the gallery application. This is limited to 4096 characters.
"""
pulumi.set(__self__, "install", install)
pulumi.set(__self__, "remove", remove)
if update is not None:
pulumi.set(__self__, "update", update)
@property
@pulumi.getter
def install(self) -> pulumi.Input[str]:
"""
Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
"""
return pulumi.get(self, "install")
@install.setter
def install(self, value: pulumi.Input[str]):
pulumi.set(self, "install", value)
@property
@pulumi.getter
def remove(self) -> pulumi.Input[str]:
"""
Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
"""
return pulumi.get(self, "remove")
@remove.setter
def remove(self, value: pulumi.Input[str]):
pulumi.set(self, "remove", value)
@property
@pulumi.getter
def update(self) -> Optional[pulumi.Input[str]]:
"""
Optional. The path and arguments to update the gallery application. If not present, the update operation will invoke the remove command on the previous version and the install command on the current version of the gallery application. This is limited to 4096 characters.
"""
return pulumi.get(self, "update")
@update.setter
def update(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update", value)
@pulumi.input_type
class UserArtifactSourceArgs:
def __init__(__self__, *,
media_link: pulumi.Input[str],
default_configuration_link: Optional[pulumi.Input[str]] = None):
"""
The source image from which the Image Version is going to be created.
:param pulumi.Input[str] media_link: Required. The mediaLink of the artifact, must be a readable storage page blob.
:param pulumi.Input[str] default_configuration_link: Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
"""
pulumi.set(__self__, "media_link", media_link)
if default_configuration_link is not None:
pulumi.set(__self__, "default_configuration_link", default_configuration_link)
@property
@pulumi.getter(name="mediaLink")
def media_link(self) -> pulumi.Input[str]:
"""
Required. The mediaLink of the artifact, must be a readable storage page blob.
"""
return pulumi.get(self, "media_link")
@media_link.setter
def media_link(self, value: pulumi.Input[str]):
pulumi.set(self, "media_link", value)
@property
@pulumi.getter(name="defaultConfigurationLink")
def default_configuration_link(self) -> Optional[pulumi.Input[str]]:
"""
Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
"""
return pulumi.get(self, "default_configuration_link")
@default_configuration_link.setter
def default_configuration_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_configuration_link", value)
<filename>sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'CreationDataArgs',
'DataDiskImageEncryptionArgs',
'DisallowedArgs',
'DiskSkuArgs',
'EncryptionImagesArgs',
'EncryptionSetIdentityArgs',
'EncryptionSettingsCollectionArgs',
'EncryptionSettingsElementArgs',
'EncryptionArgs',
'ExtendedLocationArgs',
'GalleryApplicationVersionPublishingProfileArgs',
'GalleryArtifactVersionSourceArgs',
'GalleryDataDiskImageArgs',
'GalleryImageFeatureArgs',
'GalleryImageIdentifierArgs',
'GalleryImageVersionPublishingProfileArgs',
'GalleryImageVersionStorageProfileArgs',
'GalleryOSDiskImageArgs',
'ImageDiskReferenceArgs',
'ImagePurchasePlanArgs',
'KeyForDiskEncryptionSetArgs',
'KeyVaultAndKeyReferenceArgs',
'KeyVaultAndSecretReferenceArgs',
'OSDiskImageEncryptionArgs',
'PrivateLinkServiceConnectionStateArgs',
'PurchasePlanArgs',
'RecommendedMachineConfigurationArgs',
'ResourceRangeArgs',
'SharingProfileArgs',
'SnapshotSkuArgs',
'SourceVaultArgs',
'TargetRegionArgs',
'UserArtifactManageArgs',
'UserArtifactSourceArgs',
]
@pulumi.input_type
class CreationDataArgs:
def __init__(__self__, *,
create_option: pulumi.Input[Union[str, 'DiskCreateOption']],
gallery_image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,
image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,
logical_sector_size: Optional[pulumi.Input[int]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_id: Optional[pulumi.Input[str]] = None,
upload_size_bytes: Optional[pulumi.Input[float]] = None):
"""
Data used when creating a disk.
:param pulumi.Input[Union[str, 'DiskCreateOption']] create_option: This enumerates the possible sources of a disk's creation.
:param pulumi.Input['ImageDiskReferenceArgs'] gallery_image_reference: Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared gallery image version from which to create a disk.
:param pulumi.Input['ImageDiskReferenceArgs'] image_reference: Disk source information.
:param pulumi.Input[int] logical_sector_size: Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
:param pulumi.Input[str] source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot or disk.
:param pulumi.Input[str] source_uri: If createOption is Import, this is the URI of a blob to be imported into a managed disk.
:param pulumi.Input[str] storage_account_id: Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk.
:param pulumi.Input[float] upload_size_bytes: If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
"""
pulumi.set(__self__, "create_option", create_option)
if gallery_image_reference is not None:
pulumi.set(__self__, "gallery_image_reference", gallery_image_reference)
if image_reference is not None:
pulumi.set(__self__, "image_reference", image_reference)
if logical_sector_size is not None:
pulumi.set(__self__, "logical_sector_size", logical_sector_size)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if source_uri is not None:
pulumi.set(__self__, "source_uri", source_uri)
if storage_account_id is not None:
pulumi.set(__self__, "storage_account_id", storage_account_id)
if upload_size_bytes is not None:
pulumi.set(__self__, "upload_size_bytes", upload_size_bytes)
@property
@pulumi.getter(name="createOption")
def create_option(self) -> pulumi.Input[Union[str, 'DiskCreateOption']]:
"""
This enumerates the possible sources of a disk's creation.
"""
return pulumi.get(self, "create_option")
@create_option.setter
def create_option(self, value: pulumi.Input[Union[str, 'DiskCreateOption']]):
pulumi.set(self, "create_option", value)
@property
@pulumi.getter(name="galleryImageReference")
def gallery_image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]:
"""
Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared gallery image version from which to create a disk.
"""
return pulumi.get(self, "gallery_image_reference")
@gallery_image_reference.setter
def gallery_image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]):
pulumi.set(self, "gallery_image_reference", value)
@property
@pulumi.getter(name="imageReference")
def image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]:
"""
Disk source information.
"""
return pulumi.get(self, "image_reference")
@image_reference.setter
def image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]):
pulumi.set(self, "image_reference", value)
@property
@pulumi.getter(name="logicalSectorSize")
def logical_sector_size(self) -> Optional[pulumi.Input[int]]:
"""
Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
"""
return pulumi.get(self, "logical_sector_size")
@logical_sector_size.setter
def logical_sector_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "logical_sector_size", value)
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Copy, this is the ARM id of the source snapshot or disk.
"""
return pulumi.get(self, "source_resource_id")
@source_resource_id.setter
def source_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_resource_id", value)
@property
@pulumi.getter(name="sourceUri")
def source_uri(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Import, this is the URI of a blob to be imported into a managed disk.
"""
return pulumi.get(self, "source_uri")
@source_uri.setter
def source_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_uri", value)
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk.
"""
return pulumi.get(self, "storage_account_id")
@storage_account_id.setter
def storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_id", value)
@property
@pulumi.getter(name="uploadSizeBytes")
def upload_size_bytes(self) -> Optional[pulumi.Input[float]]:
"""
If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
"""
return pulumi.get(self, "upload_size_bytes")
@upload_size_bytes.setter
def upload_size_bytes(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "upload_size_bytes", value)
@pulumi.input_type
class DataDiskImageEncryptionArgs:
def __init__(__self__, *,
lun: pulumi.Input[int],
disk_encryption_set_id: Optional[pulumi.Input[str]] = None):
"""
Contains encryption settings for a data disk image.
:param pulumi.Input[int] lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
:param pulumi.Input[str] disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set.
"""
pulumi.set(__self__, "lun", lun)
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
@property
@pulumi.getter
def lun(self) -> pulumi.Input[int]:
"""
This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: pulumi.Input[int]):
pulumi.set(self, "lun", value)
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:
"""
A relative URI containing the resource ID of the disk encryption set.
"""
return pulumi.get(self, "disk_encryption_set_id")
@disk_encryption_set_id.setter
def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_encryption_set_id", value)
@pulumi.input_type
class DisallowedArgs:
def __init__(__self__, *,
disk_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Describes the disallowed disk types.
:param pulumi.Input[Sequence[pulumi.Input[str]]] disk_types: A list of disk types.
"""
if disk_types is not None:
pulumi.set(__self__, "disk_types", disk_types)
@property
@pulumi.getter(name="diskTypes")
def disk_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of disk types.
"""
return pulumi.get(self, "disk_types")
@disk_types.setter
def disk_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "disk_types", value)
@pulumi.input_type
class DiskSkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]] = None):
"""
The disk's SKU name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
:param pulumi.Input[Union[str, 'DiskStorageAccountTypes']] name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class EncryptionImagesArgs:
def __init__(__self__, *,
data_disk_images: Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]] = None,
os_disk_image: Optional[pulumi.Input['OSDiskImageEncryptionArgs']] = None):
"""
Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
:param pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]] data_disk_images: A list of encryption specifications for data disk images.
:param pulumi.Input['OSDiskImageEncryptionArgs'] os_disk_image: Contains encryption settings for an OS disk image.
"""
if data_disk_images is not None:
pulumi.set(__self__, "data_disk_images", data_disk_images)
if os_disk_image is not None:
pulumi.set(__self__, "os_disk_image", os_disk_image)
@property
@pulumi.getter(name="dataDiskImages")
def data_disk_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]]:
"""
A list of encryption specifications for data disk images.
"""
return pulumi.get(self, "data_disk_images")
@data_disk_images.setter
def data_disk_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]]):
pulumi.set(self, "data_disk_images", value)
@property
@pulumi.getter(name="osDiskImage")
def os_disk_image(self) -> Optional[pulumi.Input['OSDiskImageEncryptionArgs']]:
"""
Contains encryption settings for an OS disk image.
"""
return pulumi.get(self, "os_disk_image")
@os_disk_image.setter
def os_disk_image(self, value: Optional[pulumi.Input['OSDiskImageEncryptionArgs']]):
pulumi.set(self, "os_disk_image", value)
@pulumi.input_type
class EncryptionSetIdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]] = None):
"""
The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks.
:param pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']] type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys.
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]:
"""
The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class EncryptionSettingsCollectionArgs:
def __init__(__self__, *,
enabled: pulumi.Input[bool],
encryption_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]] = None,
encryption_settings_version: Optional[pulumi.Input[str]] = None):
"""
Encryption settings for disk or snapshot
:param pulumi.Input[bool] enabled: Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
:param pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]] encryption_settings: A collection of encryption settings, one for each disk volume.
        :param pulumi.Input[str] encryption_settings_version: Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app. '1.1' corresponds to Azure Disk Encryption.
"""
pulumi.set(__self__, "enabled", enabled)
if encryption_settings is not None:
pulumi.set(__self__, "encryption_settings", encryption_settings)
if encryption_settings_version is not None:
pulumi.set(__self__, "encryption_settings_version", encryption_settings_version)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="encryptionSettings")
def encryption_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]:
"""
A collection of encryption settings, one for each disk volume.
"""
return pulumi.get(self, "encryption_settings")
@encryption_settings.setter
def encryption_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]):
pulumi.set(self, "encryption_settings", value)
@property
@pulumi.getter(name="encryptionSettingsVersion")
def encryption_settings_version(self) -> Optional[pulumi.Input[str]]:
"""
        Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app. '1.1' corresponds to Azure Disk Encryption.
"""
return pulumi.get(self, "encryption_settings_version")
@encryption_settings_version.setter
def encryption_settings_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_settings_version", value)
@pulumi.input_type
class EncryptionSettingsElementArgs:
def __init__(__self__, *,
disk_encryption_key: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']] = None,
key_encryption_key: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']] = None):
"""
Encryption settings for one disk volume.
:param pulumi.Input['KeyVaultAndSecretReferenceArgs'] disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key
:param pulumi.Input['KeyVaultAndKeyReferenceArgs'] key_encryption_key: Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
"""
if disk_encryption_key is not None:
pulumi.set(__self__, "disk_encryption_key", disk_encryption_key)
if key_encryption_key is not None:
pulumi.set(__self__, "key_encryption_key", key_encryption_key)
@property
@pulumi.getter(name="diskEncryptionKey")
def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:
"""
Key Vault Secret Url and vault id of the disk encryption key
"""
return pulumi.get(self, "disk_encryption_key")
@disk_encryption_key.setter
def disk_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]):
pulumi.set(self, "disk_encryption_key", value)
@property
@pulumi.getter(name="keyEncryptionKey")
def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:
"""
Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
"""
return pulumi.get(self, "key_encryption_key")
@key_encryption_key.setter
def key_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]):
pulumi.set(self, "key_encryption_key", value)
@pulumi.input_type
class EncryptionArgs:
def __init__(__self__, *,
disk_encryption_set_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'EncryptionType']]] = None):
"""
Encryption at rest settings for disk or snapshot
:param pulumi.Input[str] disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling encryption at rest.
:param pulumi.Input[Union[str, 'EncryptionType']] type: The type of key used to encrypt the data of the disk.
"""
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:
"""
ResourceId of the disk encryption set to use for enabling encryption at rest.
"""
return pulumi.get(self, "disk_encryption_set_id")
@disk_encryption_set_id.setter
def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_encryption_set_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'EncryptionType']]]:
"""
The type of key used to encrypt the data of the disk.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'EncryptionType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ExtendedLocationArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]] = None):
"""
The complex type of the extended location.
:param pulumi.Input[str] name: The name of the extended location.
:param pulumi.Input[Union[str, 'ExtendedLocationTypes']] type: The type of the extended location.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the extended location.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]:
"""
The type of the extended location.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class GalleryApplicationVersionPublishingProfileArgs:
def __init__(__self__, *,
source: pulumi.Input['UserArtifactSourceArgs'],
enable_health_check: Optional[pulumi.Input[bool]] = None,
end_of_life_date: Optional[pulumi.Input[str]] = None,
exclude_from_latest: Optional[pulumi.Input[bool]] = None,
manage_actions: Optional[pulumi.Input['UserArtifactManageArgs']] = None,
replica_count: Optional[pulumi.Input[int]] = None,
storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None,
target_regions: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]] = None):
"""
The publishing profile of a gallery image version.
:param pulumi.Input['UserArtifactSourceArgs'] source: The source image from which the Image Version is going to be created.
:param pulumi.Input[bool] enable_health_check: Optional. Whether or not this application reports health.
:param pulumi.Input[str] end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
:param pulumi.Input[bool] exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
:param pulumi.Input[int] replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
:param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
:param pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
pulumi.set(__self__, "source", source)
if enable_health_check is not None:
pulumi.set(__self__, "enable_health_check", enable_health_check)
if end_of_life_date is not None:
pulumi.set(__self__, "end_of_life_date", end_of_life_date)
if exclude_from_latest is not None:
pulumi.set(__self__, "exclude_from_latest", exclude_from_latest)
if manage_actions is not None:
pulumi.set(__self__, "manage_actions", manage_actions)
if replica_count is not None:
pulumi.set(__self__, "replica_count", replica_count)
if storage_account_type is not None:
pulumi.set(__self__, "storage_account_type", storage_account_type)
if target_regions is not None:
pulumi.set(__self__, "target_regions", target_regions)
@property
@pulumi.getter
def source(self) -> pulumi.Input['UserArtifactSourceArgs']:
"""
The source image from which the Image Version is going to be created.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: pulumi.Input['UserArtifactSourceArgs']):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="enableHealthCheck")
def enable_health_check(self) -> Optional[pulumi.Input[bool]]:
"""
Optional. Whether or not this application reports health.
"""
return pulumi.get(self, "enable_health_check")
@enable_health_check.setter
def enable_health_check(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_health_check", value)
@property
@pulumi.getter(name="endOfLifeDate")
def end_of_life_date(self) -> Optional[pulumi.Input[str]]:
"""
The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
"""
return pulumi.get(self, "end_of_life_date")
@end_of_life_date.setter
def end_of_life_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_of_life_date", value)
@property
@pulumi.getter(name="excludeFromLatest")
def exclude_from_latest(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
"""
return pulumi.get(self, "exclude_from_latest")
@exclude_from_latest.setter
def exclude_from_latest(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclude_from_latest", value)
@property
@pulumi.getter(name="manageActions")
def manage_actions(self) -> Optional[pulumi.Input['UserArtifactManageArgs']]:
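        """
        The install, remove, and optional update actions used to manage the gallery application (see UserArtifactManageArgs).
        """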
return pulumi.get(self, "manage_actions")
@manage_actions.setter
def manage_actions(self, value: Optional[pulumi.Input['UserArtifactManageArgs']]):
pulumi.set(self, "manage_actions", value)
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
"""
return pulumi.get(self, "replica_count")
@replica_count.setter
def replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replica_count", value)
@property
@pulumi.getter(name="storageAccountType")
def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]:
"""
Specifies the storage account type to be used to store the image. This property is not updatable.
"""
return pulumi.get(self, "storage_account_type")
@storage_account_type.setter
def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]):
pulumi.set(self, "storage_account_type", value)
@property
@pulumi.getter(name="targetRegions")
def target_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]:
"""
The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
return pulumi.get(self, "target_regions")
@target_regions.setter
def target_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]):
pulumi.set(self, "target_regions", value)
@pulumi.input_type
class GalleryArtifactVersionSourceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None):
"""
The gallery artifact version source.
:param pulumi.Input[str] id: The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
:param pulumi.Input[str] uri: The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class GalleryDataDiskImageArgs:
def __init__(__self__, *,
lun: pulumi.Input[int],
host_caching: Optional[pulumi.Input['HostCaching']] = None,
source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None):
"""
This is the data disk image.
:param pulumi.Input[int] lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
:param pulumi.Input['HostCaching'] host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
:param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source.
"""
pulumi.set(__self__, "lun", lun)
if host_caching is not None:
pulumi.set(__self__, "host_caching", host_caching)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter
def lun(self) -> pulumi.Input[int]:
"""
This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: pulumi.Input[int]):
pulumi.set(self, "lun", value)
@property
@pulumi.getter(name="hostCaching")
def host_caching(self) -> Optional[pulumi.Input['HostCaching']]:
"""
The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
"""
return pulumi.get(self, "host_caching")
@host_caching.setter
def host_caching(self, value: Optional[pulumi.Input['HostCaching']]):
pulumi.set(self, "host_caching", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]:
"""
The gallery artifact version source.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]):
pulumi.set(self, "source", value)
@pulumi.input_type
class GalleryImageFeatureArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
A feature for gallery image.
:param pulumi.Input[str] name: The name of the gallery image feature.
:param pulumi.Input[str] value: The value of the gallery image feature.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the gallery image feature.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the gallery image feature.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class GalleryImageIdentifierArgs:
def __init__(__self__, *,
offer: pulumi.Input[str],
publisher: pulumi.Input[str],
sku: pulumi.Input[str]):
"""
This is the gallery image definition identifier.
:param pulumi.Input[str] offer: The name of the gallery image definition offer.
:param pulumi.Input[str] publisher: The name of the gallery image definition publisher.
:param pulumi.Input[str] sku: The name of the gallery image definition SKU.
"""
pulumi.set(__self__, "offer", offer)
pulumi.set(__self__, "publisher", publisher)
pulumi.set(__self__, "sku", sku)
@property
@pulumi.getter
def offer(self) -> pulumi.Input[str]:
"""
The name of the gallery image definition offer.
"""
return pulumi.get(self, "offer")
@offer.setter
def offer(self, value: pulumi.Input[str]):
pulumi.set(self, "offer", value)
@property
@pulumi.getter
def publisher(self) -> pulumi.Input[str]:
"""
The name of the gallery image definition publisher.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: pulumi.Input[str]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input[str]:
"""
The name of the gallery image definition SKU.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input[str]):
pulumi.set(self, "sku", value)
@pulumi.input_type
class GalleryImageVersionPublishingProfileArgs:
def __init__(__self__, *,
end_of_life_date: Optional[pulumi.Input[str]] = None,
exclude_from_latest: Optional[pulumi.Input[bool]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None,
target_regions: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]] = None):
"""
The publishing profile of a gallery image Version.
:param pulumi.Input[str] end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
:param pulumi.Input[bool] exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
:param pulumi.Input[int] replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
:param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
:param pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
if end_of_life_date is not None:
pulumi.set(__self__, "end_of_life_date", end_of_life_date)
if exclude_from_latest is not None:
pulumi.set(__self__, "exclude_from_latest", exclude_from_latest)
if replica_count is not None:
pulumi.set(__self__, "replica_count", replica_count)
if storage_account_type is not None:
pulumi.set(__self__, "storage_account_type", storage_account_type)
if target_regions is not None:
pulumi.set(__self__, "target_regions", target_regions)
@property
@pulumi.getter(name="endOfLifeDate")
def end_of_life_date(self) -> Optional[pulumi.Input[str]]:
"""
The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
"""
return pulumi.get(self, "end_of_life_date")
@end_of_life_date.setter
def end_of_life_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_of_life_date", value)
@property
@pulumi.getter(name="excludeFromLatest")
def exclude_from_latest(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
"""
return pulumi.get(self, "exclude_from_latest")
@exclude_from_latest.setter
def exclude_from_latest(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exclude_from_latest", value)
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
"""
return pulumi.get(self, "replica_count")
@replica_count.setter
def replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replica_count", value)
@property
@pulumi.getter(name="storageAccountType")
def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]:
"""
Specifies the storage account type to be used to store the image. This property is not updatable.
"""
return pulumi.get(self, "storage_account_type")
@storage_account_type.setter
def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]):
pulumi.set(self, "storage_account_type", value)
@property
@pulumi.getter(name="targetRegions")
def target_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]:
"""
The target regions where the Image Version is going to be replicated to. This property is updatable.
"""
return pulumi.get(self, "target_regions")
@target_regions.setter
def target_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]):
pulumi.set(self, "target_regions", value)
@pulumi.input_type
class GalleryImageVersionStorageProfileArgs:
def __init__(__self__, *,
data_disk_images: Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]] = None,
os_disk_image: Optional[pulumi.Input['GalleryOSDiskImageArgs']] = None,
source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None):
"""
This is the storage profile of a Gallery Image Version.
:param pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]] data_disk_images: A list of data disk images.
:param pulumi.Input['GalleryOSDiskImageArgs'] os_disk_image: This is the OS disk image.
:param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source.
"""
if data_disk_images is not None:
pulumi.set(__self__, "data_disk_images", data_disk_images)
if os_disk_image is not None:
pulumi.set(__self__, "os_disk_image", os_disk_image)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter(name="dataDiskImages")
def data_disk_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]]:
"""
A list of data disk images.
"""
return pulumi.get(self, "data_disk_images")
@data_disk_images.setter
def data_disk_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]]):
pulumi.set(self, "data_disk_images", value)
@property
@pulumi.getter(name="osDiskImage")
def os_disk_image(self) -> Optional[pulumi.Input['GalleryOSDiskImageArgs']]:
"""
This is the OS disk image.
"""
return pulumi.get(self, "os_disk_image")
@os_disk_image.setter
def os_disk_image(self, value: Optional[pulumi.Input['GalleryOSDiskImageArgs']]):
pulumi.set(self, "os_disk_image", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]:
"""
The gallery artifact version source.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]):
pulumi.set(self, "source", value)
@pulumi.input_type
class GalleryOSDiskImageArgs:
def __init__(__self__, *,
host_caching: Optional[pulumi.Input['HostCaching']] = None,
source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None):
"""
This is the OS disk image.
:param pulumi.Input['HostCaching'] host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
:param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source.
"""
if host_caching is not None:
pulumi.set(__self__, "host_caching", host_caching)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter(name="hostCaching")
def host_caching(self) -> Optional[pulumi.Input['HostCaching']]:
"""
The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
"""
return pulumi.get(self, "host_caching")
@host_caching.setter
def host_caching(self, value: Optional[pulumi.Input['HostCaching']]):
pulumi.set(self, "host_caching", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]:
"""
The gallery artifact version source.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]):
pulumi.set(self, "source", value)
@pulumi.input_type
class ImageDiskReferenceArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
lun: Optional[pulumi.Input[int]] = None):
"""
The source image used for creating the disk.
:param pulumi.Input[str] id: A relative uri containing either a Platform Image Repository or user image reference.
:param pulumi.Input[int] lun: If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
"""
pulumi.set(__self__, "id", id)
if lun is not None:
pulumi.set(__self__, "lun", lun)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
A relative uri containing either a Platform Image Repository or user image reference.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def lun(self) -> Optional[pulumi.Input[int]]:
"""
If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lun", value)
@pulumi.input_type
class ImagePurchasePlanArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
publisher: Optional[pulumi.Input[str]] = None):
"""
Describes the gallery image definition purchase plan. This is used by marketplace images.
:param pulumi.Input[str] name: The plan ID.
:param pulumi.Input[str] product: The product ID.
:param pulumi.Input[str] publisher: The publisher ID.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if product is not None:
pulumi.set(__self__, "product", product)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The plan ID.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
"""
The product ID.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter
def publisher(self) -> Optional[pulumi.Input[str]]:
"""
The publisher ID.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publisher", value)
@pulumi.input_type
class KeyForDiskEncryptionSetArgs:
def __init__(__self__, *,
key_url: pulumi.Input[str],
source_vault: Optional[pulumi.Input['SourceVaultArgs']] = None):
"""
Key Vault Key Url to be used for server side encryption of Managed Disks and Snapshots
:param pulumi.Input[str] key_url: Fully versioned Key Url pointing to a key in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription.
"""
pulumi.set(__self__, "key_url", key_url)
if source_vault is not None:
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="keyUrl")
def key_url(self) -> pulumi.Input[str]:
"""
Fully versioned Key Url pointing to a key in KeyVault
"""
return pulumi.get(self, "key_url")
@key_url.setter
def key_url(self, value: pulumi.Input[str]):
pulumi.set(self, "key_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> Optional[pulumi.Input['SourceVaultArgs']]:
"""
Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription.
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: Optional[pulumi.Input['SourceVaultArgs']]):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class KeyVaultAndKeyReferenceArgs:
def __init__(__self__, *,
key_url: pulumi.Input[str],
source_vault: pulumi.Input['SourceVaultArgs']):
"""
Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the encryptionKey
:param pulumi.Input[str] key_url: Url pointing to a key or secret in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret
"""
pulumi.set(__self__, "key_url", key_url)
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="keyUrl")
def key_url(self) -> pulumi.Input[str]:
"""
Url pointing to a key or secret in KeyVault
"""
return pulumi.get(self, "key_url")
@key_url.setter
def key_url(self, value: pulumi.Input[str]):
pulumi.set(self, "key_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SourceVaultArgs']:
"""
Resource id of the KeyVault containing the key or secret
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SourceVaultArgs']):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class KeyVaultAndSecretReferenceArgs:
def __init__(__self__, *,
secret_url: pulumi.Input[str],
source_vault: pulumi.Input['SourceVaultArgs']):
"""
Key Vault Secret Url and vault id of the encryption key
:param pulumi.Input[str] secret_url: Url pointing to a key or secret in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret
"""
pulumi.set(__self__, "secret_url", secret_url)
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="secretUrl")
def secret_url(self) -> pulumi.Input[str]:
"""
Url pointing to a key or secret in KeyVault
"""
return pulumi.get(self, "secret_url")
@secret_url.setter
def secret_url(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SourceVaultArgs']:
"""
Resource id of the KeyVault containing the key or secret
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SourceVaultArgs']):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class OSDiskImageEncryptionArgs:
def __init__(__self__, *,
disk_encryption_set_id: Optional[pulumi.Input[str]] = None):
"""
Contains encryption settings for an OS disk image.
:param pulumi.Input[str] disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set.
"""
if disk_encryption_set_id is not None:
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
@property
@pulumi.getter(name="diskEncryptionSetId")
def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:
"""
A relative URI containing the resource ID of the disk encryption set.
"""
return pulumi.get(self, "disk_encryption_set_id")
@disk_encryption_set_id.setter
def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_encryption_set_id", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
actions_required: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
"""
A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param pulumi.Input[str] description: The reason for approval/rejection of the connection.
:param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[pulumi.Input[str]]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@actions_required.setter
def actions_required(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "actions_required", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The reason for approval/rejection of the connection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class PurchasePlanArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
product: pulumi.Input[str],
publisher: pulumi.Input[str],
promotion_code: Optional[pulumi.Input[str]] = None):
"""
Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
:param pulumi.Input[str] name: The plan ID.
:param pulumi.Input[str] product: Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
:param pulumi.Input[str] publisher: The publisher ID.
:param pulumi.Input[str] promotion_code: The Offer Promotion Code.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "product", product)
pulumi.set(__self__, "publisher", publisher)
if promotion_code is not None:
pulumi.set(__self__, "promotion_code", promotion_code)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The plan ID.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> pulumi.Input[str]:
"""
Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
"""
return pulumi.get(self, "product")
@product.setter
def product(self, value: pulumi.Input[str]):
pulumi.set(self, "product", value)
@property
@pulumi.getter
def publisher(self) -> pulumi.Input[str]:
"""
The publisher ID.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: pulumi.Input[str]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter(name="promotionCode")
def promotion_code(self) -> Optional[pulumi.Input[str]]:
"""
The Offer Promotion Code.
"""
return pulumi.get(self, "promotion_code")
@promotion_code.setter
def promotion_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "promotion_code", value)
@pulumi.input_type
class RecommendedMachineConfigurationArgs:
def __init__(__self__, *,
memory: Optional[pulumi.Input['ResourceRangeArgs']] = None,
v_cpus: Optional[pulumi.Input['ResourceRangeArgs']] = None):
"""
The properties describe the recommended machine configuration for this Image Definition. These properties are updatable.
:param pulumi.Input['ResourceRangeArgs'] memory: Describes the resource range.
:param pulumi.Input['ResourceRangeArgs'] v_cpus: Describes the resource range.
"""
if memory is not None:
pulumi.set(__self__, "memory", memory)
if v_cpus is not None:
pulumi.set(__self__, "v_cpus", v_cpus)
@property
@pulumi.getter
def memory(self) -> Optional[pulumi.Input['ResourceRangeArgs']]:
"""
Describes the resource range.
"""
return pulumi.get(self, "memory")
@memory.setter
def memory(self, value: Optional[pulumi.Input['ResourceRangeArgs']]):
pulumi.set(self, "memory", value)
@property
@pulumi.getter(name="vCPUs")
def v_cpus(self) -> Optional[pulumi.Input['ResourceRangeArgs']]:
"""
Describes the resource range.
"""
return pulumi.get(self, "v_cpus")
@v_cpus.setter
def v_cpus(self, value: Optional[pulumi.Input['ResourceRangeArgs']]):
pulumi.set(self, "v_cpus", value)
@pulumi.input_type
class ResourceRangeArgs:
def __init__(__self__, *,
max: Optional[pulumi.Input[int]] = None,
min: Optional[pulumi.Input[int]] = None):
"""
Describes the resource range.
:param pulumi.Input[int] max: The maximum number of the resource.
:param pulumi.Input[int] min: The minimum number of the resource.
"""
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of the resource.
"""
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input[int]]:
"""
The minimum number of the resource.
"""
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min", value)
@pulumi.input_type
class SharingProfileArgs:
def __init__(__self__, *,
permissions: Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]] = None):
"""
Profile for gallery sharing to subscription or tenant
:param pulumi.Input[Union[str, 'GallerySharingPermissionTypes']] permissions: This property allows you to specify the permission of sharing gallery. <br><br> Possible values are: <br><br> **Private** <br><br> **Groups**
"""
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]]:
"""
This property allows you to specify the permission of sharing gallery. <br><br> Possible values are: <br><br> **Private** <br><br> **Groups**
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]]):
pulumi.set(self, "permissions", value)
@pulumi.input_type
class SnapshotSkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]] = None):
"""
The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an optional parameter for incremental snapshot and the default behavior is the SKU will be set to the same sku as the previous snapshot
:param pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']] name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SourceVaultArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
:param pulumi.Input[str] id: Resource Id
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class TargetRegionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
encryption: Optional[pulumi.Input['EncryptionImagesArgs']] = None,
regional_replica_count: Optional[pulumi.Input[int]] = None,
storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None):
"""
Describes the target region information.
:param pulumi.Input[str] name: The name of the region.
:param pulumi.Input['EncryptionImagesArgs'] encryption: Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
:param pulumi.Input[int] regional_replica_count: The number of replicas of the Image Version to be created per region. This property is updatable.
:param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable.
"""
pulumi.set(__self__, "name", name)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if regional_replica_count is not None:
pulumi.set(__self__, "regional_replica_count", regional_replica_count)
if storage_account_type is not None:
pulumi.set(__self__, "storage_account_type", storage_account_type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the region.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['EncryptionImagesArgs']]:
"""
Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['EncryptionImagesArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter(name="regionalReplicaCount")
def regional_replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the Image Version to be created per region. This property is updatable.
"""
return pulumi.get(self, "regional_replica_count")
@regional_replica_count.setter
def regional_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "regional_replica_count", value)
@property
@pulumi.getter(name="storageAccountType")
def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]:
"""
Specifies the storage account type to be used to store the image. This property is not updatable.
"""
return pulumi.get(self, "storage_account_type")
@storage_account_type.setter
def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]):
pulumi.set(self, "storage_account_type", value)
@pulumi.input_type
class UserArtifactManageArgs:
def __init__(__self__, *,
install: pulumi.Input[str],
remove: pulumi.Input[str],
update: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] install: Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
:param pulumi.Input[str] remove: Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
:param pulumi.Input[str] update: Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters.
"""
pulumi.set(__self__, "install", install)
pulumi.set(__self__, "remove", remove)
if update is not None:
pulumi.set(__self__, "update", update)
@property
@pulumi.getter
def install(self) -> pulumi.Input[str]:
"""
Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
"""
return pulumi.get(self, "install")
@install.setter
def install(self, value: pulumi.Input[str]):
pulumi.set(self, "install", value)
@property
@pulumi.getter
def remove(self) -> pulumi.Input[str]:
"""
Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
"""
return pulumi.get(self, "remove")
@remove.setter
def remove(self, value: pulumi.Input[str]):
pulumi.set(self, "remove", value)
@property
@pulumi.getter
def update(self) -> Optional[pulumi.Input[str]]:
"""
Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters.
"""
return pulumi.get(self, "update")
@update.setter
def update(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update", value)
@pulumi.input_type
class UserArtifactSourceArgs:
def __init__(__self__, *,
media_link: pulumi.Input[str],
default_configuration_link: Optional[pulumi.Input[str]] = None):
"""
The source image from which the Image Version is going to be created.
:param pulumi.Input[str] media_link: Required. The mediaLink of the artifact, must be a readable storage page blob.
:param pulumi.Input[str] default_configuration_link: Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
"""
pulumi.set(__self__, "media_link", media_link)
if default_configuration_link is not None:
pulumi.set(__self__, "default_configuration_link", default_configuration_link)
@property
@pulumi.getter(name="mediaLink")
def media_link(self) -> pulumi.Input[str]:
"""
Required. The mediaLink of the artifact, must be a readable storage page blob.
"""
return pulumi.get(self, "media_link")
@media_link.setter
def media_link(self, value: pulumi.Input[str]):
pulumi.set(self, "media_link", value)
@property
@pulumi.getter(name="defaultConfigurationLink")
def default_configuration_link(self) -> Optional[pulumi.Input[str]]:
"""
Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
"""
return pulumi.get(self, "default_configuration_link")
@default_configuration_link.setter
def default_configuration_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_configuration_link", value)
bgp_adjacencies/BGP_check_job.py | KamyarZiabari/solutions_examples | 59 | 14942 | # To run the job:
# pyats run job BGP_check_job.py --testbed-file <testbed_file.yaml>
# Description: This job file checks that all BGP neighbors are in Established state
import os
# All run() must be inside a main function
def main(runtime):
# Find the location of the script in relation to the job file
bgp_tests = os.path.join(os.path.dirname(__file__),
'BGP_Neighbors_Established.py')
# Execute the testscript
runtime.tasks.run(testscript=bgp_tests)
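# Illustrative sketch only: BGP_Neighbors_Established.py is a separate file that is not shown
# here. A minimal pyATS/aetest testscript compatible with the job above could look roughly
# like the commented outline below (device connection and parser details are assumptions):
#
# from pyats import aetest
#
# class CheckBgpNeighbors(aetest.Testcase):
#     @aetest.test
#     def neighbors_established(self, testbed):
#         for device in testbed.devices.values():
#             device.connect(log_stdout=False)
#             output = device.parse('show ip bgp summary')
#             # iterate over the parsed neighbors and fail the test if any
#             # session is not in the Established state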
tvae/utils/logging.py | ReallyAnonNeurips2021/TopographicVAE | 57 | 14943 | import os
def get_dirs():
cwd = os.path.dirname(os.path.realpath(__file__))
local_savedir = cwd
local_datadir = cwd
local_wandbdir = cwd
return local_savedir, local_datadir, local_wandbdir
def configure_logging(config, name, model):
if config['wandb_on']:
import wandb
wandb.init(name=name,
project='YOUR_PROJECT_NAME',
entity='YOUR_ENTITY_NAME',
dir=config['wandb_dir'],
config=config)
wandb.watch(model)
def log(key, val):
print(f"{key}: {val}")
wandb.log({key: val})
checkpoint_path = os.path.join(wandb.run.dir, 'checkpoint.tar')
else:
def log(key, val):
print(f"{key}: {val}")
checkpoint_path = './checkpoint.tar'
return log, checkpoint_path
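# Example usage (illustrative sketch): with 'wandb_on' set to False neither the model nor a
# wandb account is needed, so the helper can be exercised locally.
if __name__ == '__main__':
    example_config = {'wandb_on': False, 'wandb_dir': '.'}
    log, checkpoint_path = configure_logging(example_config, name='debug-run', model=None)
    log('loss', 0.123)
    print('checkpoint path:', checkpoint_path)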
create_lesson_plan/admin.py | rishabhranawat/CrowdPlatform | 1 | 14944 | <filename>create_lesson_plan/admin.py
from django.contrib import admin
from create_lesson_plan.models import *
admin.site.register(lesson)
admin.site.register(lesson_plan)
admin.site.register(Engage_Urls)
admin.site.register(Explain_Urls)
admin.site.register(Evaluate_Urls)
admin.site.register(MCQ)
admin.site.register(FITB)
admin.site.register(Engage_Images)
admin.site.register(Explain_Images)
admin.site.register(Evaluate_Images)
admin.site.register(Document)
admin.site.register(Image)
admin.site.register(TestScore)
admin.site.register(OfflineDocument)
src/models/modules/visual_bert_classifier.py | inzva/emotion-recognition-drawings | 10 | 14945 | import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np
class VisualBertClassifier(nn.Module):
def __init__(self,
visual_bert_model,
num_classes: int = 8,
initial_visual_embedding_dim: int = 96,
final_dropout_rate: float = 0.1):
"""
pooler_output (torch.FloatTensor of shape (batch_size, hidden_size))
— Last layer hidden-state of the first token of the sequence (classification token)
after further processing through the layers used for the auxiliary pretraining task.
E.g. for BERT-family of models, this returns the classification token after processing through
a linear layer and a tanh activation function.
The linear layer weights are trained from the next sentence prediction (classification) objective
during pretraining.
@param initial_visual_embedding_dim:
"""
super().__init__()
self.visual_embedding_projection = nn.Linear(initial_visual_embedding_dim, 2048)
self.visual_bert = visual_bert_model
self.final_dropout = nn.Dropout(final_dropout_rate)
self.out = nn.Linear(768, num_classes)
def forward(self,
text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask
):
visual_embeds = self.visual_embedding_projection(visual_embeds)
output = self.visual_bert(input_ids=text_input_ids,
token_type_ids=text_token_type_ids,
attention_mask=text_attention_mask,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
visual_attention_mask=visual_attention_mask)
output = self.final_dropout(output.pooler_output)
output = self.out(output)
return output
if __name__ == '__main__':
bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
text_input_ids = inputs.data['input_ids'].to('cuda')
text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
text_attention_mask = inputs.data['attention_mask'].to('cuda')
sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
sample_face_body_embedding = np.load(sample_face_body_embedding_path)
visual_embeds = torch.from_numpy(sample_face_body_embedding)
visual_embeds = visual_embeds.to('cuda')
visual_embeds = torch.unsqueeze(visual_embeds, 0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # VisualBertClassifier needs a backbone VisualBertModel passed in as its first argument;
    # the checkpoint name below is an assumption (any pretrained VisualBERT checkpoint whose
    # visual embedding size is 2048 should work).
    visual_bert_model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
    classifier = VisualBertClassifier(visual_bert_model)
classifier.to('cuda')
classifier.forward(text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask)
narrative2vec/logging_instance/pose.py | code-iai/narrative2vec | 0 | 14946 | from narrative2vec.logging_instance.logging_instance import LoggingInstance, _get_first_rdf_query_result
from narrative2vec.logging_instance.reasoning_task import ReasoningTask
from narrative2vec.ontology.neemNarrativeDefinitions import QUATERNION
from narrative2vec.ontology.ontologyHandler import get_knowrob_uri
class Pose(LoggingInstance):
def get_translation(self):
read_translation = self._get_property_('translation')
return read_translation.strip().split()
def get_quaternion(self):
read_orientation = self._get_property_(QUATERNION)
return read_orientation.strip().split()
def get_reasoning_task__id(self):
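        # Find the reasoning task that references this pose through the knowrob 'parameter2' property.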
reasoning_task_property = self._graph_.subjects(get_knowrob_uri('parameter2'), self.context)
reasoning_task = _get_first_rdf_query_result(reasoning_task_property)
if reasoning_task and not reasoning_task.startswith('file://'):
return ReasoningTask(reasoning_task, self._graph_).get_id()
return '' | from narrative2vec.logging_instance.logging_instance import LoggingInstance, _get_first_rdf_query_result
from narrative2vec.logging_instance.reasoning_task import ReasoningTask
from narrative2vec.ontology.neemNarrativeDefinitions import QUATERNION
from narrative2vec.ontology.ontologyHandler import get_knowrob_uri
class Pose(LoggingInstance):
def get_translation(self):
read_translation = self._get_property_('translation')
return read_translation.strip().split()
def get_quaternion(self):
read_orientation = self._get_property_(QUATERNION)
return read_orientation.strip().split()
def get_reasoning_task__id(self):
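        # Find the reasoning task that references this pose through the knowrob 'parameter2' property.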
reasoning_task_property = self._graph_.subjects(get_knowrob_uri('parameter2'), self.context)
reasoning_task = _get_first_rdf_query_result(reasoning_task_property)
if reasoning_task and not reasoning_task.startswith('file://'):
return ReasoningTask(reasoning_task, self._graph_).get_id()
return '' | none | 1 | 2.329421 | 2 |
kobra/settings/development.py | karservice/kobra | 4 | 14947 | # -*- coding: utf-8 -*-
from . import *
SECRET_KEY = env.str('KOBRA_SECRET_KEY',
'Unsafe_development_key._Never_use_in_production.')
DEBUG = env.bool('KOBRA_DEBUG_MODE', True)
DATABASES = {
'default': env.db_url('KOBRA_DATABASE_URL', 'sqlite:///db.sqlite3')
}
| # -*- coding: utf-8 -*-
from . import *
SECRET_KEY = env.str('KOBRA_SECRET_KEY',
'Unsafe_development_key._Never_use_in_production.')
DEBUG = env.bool('KOBRA_DEBUG_MODE', True)
DATABASES = {
'default': env.db_url('KOBRA_DATABASE_URL', 'sqlite:///db.sqlite3')
}
| fr | 0.176995 | 1.408682 | 1 |
foobot_grapher.py | jpwright/foobot-slack | 1 | 14948 | <filename>foobot_grapher.py
#!/usr/bin/env python
from pyfoobot import Foobot
import requests
import matplotlib
matplotlib.use('Agg')
import matplotlib.dates
import matplotlib.pyplot
import datetime
from imgurpython import ImgurClient
import ConfigParser
def getSensorReadings(notify):
config = ConfigParser.ConfigParser()
config.read("config.txt")
settings = {
'foobot_api_key': '',
'foobot_email': '',
'foobot_password': '',
'imgur_id': '',
'imgur_secret': '',
'slack_webhook': '',
'averaging_period': 15,
'periods_to_graph': 12,
'threshold_pm': 25.0,
'threshold_temperature': 26.5,
'threshold_humidity': 60.0,
'threshold_co2': 30000.0,
'threshold_tvoc': 500.0
}
for settings_key in settings:
try:
value_to_set = config.get('default', settings_key)
settings[settings_key] = value_to_set
except:
pass
imgur_supported = False
if (len(settings['imgur_id']) > 0 and len(settings['imgur_secret']) > 0):
imgur_supported = True
imgur = ImgurClient(settings['imgur_id'], settings['imgur_secret'])
fb = Foobot(settings['foobot_api_key'], settings['foobot_email'], settings['foobot_password'])
devices = fb.devices()
device = devices[0]
measurement_interval = 60*(int(settings['averaging_period']) * int(settings['periods_to_graph']))
data = device.data_period(measurement_interval, 0)
alerts = []
labels = ["PM2.5", "Temperature", "Humidity", "CO2", "tVOC"]
units = ["ug/m3", "C", "%", "ppm", "ppb"]
max_vals = [0, 0, 0, 0, 0]
sums = [0, 0, 0, 0, 0]
datapoints = [[], [], [], [], []]
timeseries = []
thresholds = [
float(settings['threshold_pm']),
float(settings['threshold_temperature']),
float(settings['threshold_humidity']),
float(settings['threshold_co2']),
float(settings['threshold_tvoc'])
]
num_averaging_samples = int(len(data['datapoints']) / int(settings['periods_to_graph']))
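    # Only the most recent averaging period (the last num_averaging_samples points) feeds the alert
    # averages below; the plots still cover the full measurement_interval window.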
for i in range(0, len(data['datapoints'])):
datapoint = data['datapoints'][i]
time = datapoint[0]
pm = datapoint[1]
tmp = datapoint[2]
hum = datapoint[3]
co2 = datapoint[4]
voc = datapoint[5]
allpollu = datapoint[6]
for j in range(0, 5):
datapoints[j].append(datapoint[j+1])
if (i >= (len(data['datapoints']) - num_averaging_samples)):
sums[j] += datapoint[j+1]
                if datapoint[j+1] > max_vals[j]:
max_vals[j] = datapoint[j+1]
timeseries.append(datetime.datetime.fromtimestamp(time))
hours = matplotlib.dates.HourLocator()
minutes = matplotlib.dates.MinuteLocator(interval = 10)
hoursFmt = matplotlib.dates.DateFormatter('%-I:%M')
if notify:
for i in range(0, 5):
sums[i] = sums[i] / num_averaging_samples
if sums[i] > thresholds[i]:
print("Sending alert for "+labels[i])
fig, ax = matplotlib.pyplot.subplots()
ax.plot(timeseries, datapoints[i])
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.grid(True)
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel(labels[i] + " ("+units[i]+")")
fig.autofmt_xdate()
matplotlib.pyplot.savefig("figure.png")
if imgur_supported:
image = imgur.upload_from_path("figure.png", anon=True)
else:
image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"}
payload = '{"text": "Warning: '+labels[i]+' levels at '+"{0:.2f}".format(sums[i])+' '+units[i]+'.", "attachments": [{"fallback": "Graph.", "image_url": "'+image["link"]+'"}]}'
r = requests.post("https://hooks.slack.com/services/"+settings['slack_webhook'], data={"payload": payload})
else:
fig, axarr = matplotlib.pyplot.subplots(1,5)
for i in range(0, 5):
ax = axarr[i]
ax.plot(timeseries, datapoints[i])
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.grid(True)
ax.set_xlabel("Time")
ax.set_title(labels[i] + " ("+units[i]+")")
fig.autofmt_xdate()
fig.set_size_inches(18, 4)
matplotlib.pyplot.savefig("figure.png", bbox_inches='tight')
if (imgur_supported):
image = imgur.upload_from_path("figure.png", anon=True)
else:
image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"}
return image["link"]
if __name__ == "__main__":
getSensorReadings(True)
| <filename>foobot_grapher.py
#!/usr/bin/env python
from pyfoobot import Foobot
import requests
import matplotlib
matplotlib.use('Agg')
import matplotlib.dates
import matplotlib.pyplot
import datetime
from imgurpython import ImgurClient
import ConfigParser
def getSensorReadings(notify):
config = ConfigParser.ConfigParser()
config.read("config.txt")
settings = {
'foobot_api_key': '',
'foobot_email': '',
'foobot_password': '',
'imgur_id': '',
'imgur_secret': '',
'slack_webhook': '',
'averaging_period': 15,
'periods_to_graph': 12,
'threshold_pm': 25.0,
'threshold_temperature': 26.5,
'threshold_humidity': 60.0,
'threshold_co2': 30000.0,
'threshold_tvoc': 500.0
}
for settings_key in settings:
try:
value_to_set = config.get('default', settings_key)
settings[settings_key] = value_to_set
except:
pass
imgur_supported = False
if (len(settings['imgur_id']) > 0 and len(settings['imgur_secret']) > 0):
imgur_supported = True
imgur = ImgurClient(settings['imgur_id'], settings['imgur_secret'])
fb = Foobot(settings['foobot_api_key'], settings['foobot_email'], settings['foobot_password'])
devices = fb.devices()
device = devices[0]
measurement_interval = 60*(int(settings['averaging_period']) * int(settings['periods_to_graph']))
data = device.data_period(measurement_interval, 0)
alerts = []
labels = ["PM2.5", "Temperature", "Humidity", "CO2", "tVOC"]
units = ["ug/m3", "C", "%", "ppm", "ppb"]
max_vals = [0, 0, 0, 0, 0]
sums = [0, 0, 0, 0, 0]
datapoints = [[], [], [], [], []]
timeseries = []
thresholds = [
float(settings['threshold_pm']),
float(settings['threshold_temperature']),
float(settings['threshold_humidity']),
float(settings['threshold_co2']),
float(settings['threshold_tvoc'])
]
num_averaging_samples = int(len(data['datapoints']) / int(settings['periods_to_graph']))
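    # Only the most recent averaging period (the last num_averaging_samples points) feeds the alert
    # averages below; the plots still cover the full measurement_interval window.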
for i in range(0, len(data['datapoints'])):
datapoint = data['datapoints'][i]
time = datapoint[0]
pm = datapoint[1]
tmp = datapoint[2]
hum = datapoint[3]
co2 = datapoint[4]
voc = datapoint[5]
allpollu = datapoint[6]
for j in range(0, 5):
datapoints[j].append(datapoint[j+1])
if (i >= (len(data['datapoints']) - num_averaging_samples)):
sums[j] += datapoint[j+1]
                if datapoint[j+1] > max_vals[j]:
max_vals[j] = datapoint[j+1]
timeseries.append(datetime.datetime.fromtimestamp(time))
hours = matplotlib.dates.HourLocator()
minutes = matplotlib.dates.MinuteLocator(interval = 10)
hoursFmt = matplotlib.dates.DateFormatter('%-I:%M')
if notify:
for i in range(0, 5):
sums[i] = sums[i] / num_averaging_samples
if sums[i] > thresholds[i]:
print("Sending alert for "+labels[i])
fig, ax = matplotlib.pyplot.subplots()
ax.plot(timeseries, datapoints[i])
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.grid(True)
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel(labels[i] + " ("+units[i]+")")
fig.autofmt_xdate()
matplotlib.pyplot.savefig("figure.png")
if imgur_supported:
image = imgur.upload_from_path("figure.png", anon=True)
else:
image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"}
payload = '{"text": "Warning: '+labels[i]+' levels at '+"{0:.2f}".format(sums[i])+' '+units[i]+'.", "attachments": [{"fallback": "Graph.", "image_url": "'+image["link"]+'"}]}'
r = requests.post("https://hooks.slack.com/services/"+settings['slack_webhook'], data={"payload": payload})
else:
fig, axarr = matplotlib.pyplot.subplots(1,5)
for i in range(0, 5):
ax = axarr[i]
ax.plot(timeseries, datapoints[i])
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.grid(True)
ax.set_xlabel("Time")
ax.set_title(labels[i] + " ("+units[i]+")")
fig.autofmt_xdate()
fig.set_size_inches(18, 4)
matplotlib.pyplot.savefig("figure.png", bbox_inches='tight')
if (imgur_supported):
image = imgur.upload_from_path("figure.png", anon=True)
else:
image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"}
return image["link"]
if __name__ == "__main__":
getSensorReadings(True)
| es | 0.159471 | 2.526967 | 3 |
safexl/__init__.py | ThePoetCoder/safexl | 6 | 14949 | <reponame>ThePoetCoder/safexl<gh_stars>1-10
# Copyright (c) 2020 safexl
from safexl.toolkit import *
import safexl.xl_constants as xl_constants
import safexl.colors as colors
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__version__ = "0.0.7"
| # Copyright (c) 2020 safexl
from safexl.toolkit import *
import safexl.xl_constants as xl_constants
import safexl.colors as colors
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__version__ = "0.0.7" | es | 0.186649 | 1.008552 | 1 |
musicdb/restapi/migrations/0001_initial.py | alexebaker/django-music_database | 0 | 14950 | <filename>musicdb/restapi/migrations/0001_initial.py
# Generated by Django 2.0.4 on 2018-05-01 05:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('year', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='AlbumArt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='album_art')),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='album_art', to='restapi.Album')),
],
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Style',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('duration', models.TimeField()),
('position', models.CharField(max_length=3)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tracks', to='restapi.Album')),
],
),
migrations.AddField(
model_name='album',
name='artist',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to='restapi.Artist'),
),
migrations.AddField(
model_name='album',
name='genres',
field=models.ManyToManyField(blank=True, related_name='albums', to='restapi.Genre'),
),
migrations.AddField(
model_name='album',
name='styles',
field=models.ManyToManyField(blank=True, related_name='albums', to='restapi.Style'),
),
]
| <filename>musicdb/restapi/migrations/0001_initial.py
# Generated by Django 2.0.4 on 2018-05-01 05:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('year', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='AlbumArt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='album_art')),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='album_art', to='restapi.Album')),
],
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Style',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('duration', models.TimeField()),
('position', models.CharField(max_length=3)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tracks', to='restapi.Album')),
],
),
migrations.AddField(
model_name='album',
name='artist',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to='restapi.Artist'),
),
migrations.AddField(
model_name='album',
name='genres',
field=models.ManyToManyField(blank=True, related_name='albums', to='restapi.Genre'),
),
migrations.AddField(
model_name='album',
name='styles',
field=models.ManyToManyField(blank=True, related_name='albums', to='restapi.Style'),
),
]
| es | 0.137229 | 1.704907 | 2 |
ballot_source/sources/migrations/0004_auto_20200824_1444.py | Ballot-Drop/ballot-source | 3 | 14951 | <reponame>Ballot-Drop/ballot-source
# Generated by Django 3.0.9 on 2020-08-24 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sources', '0003_sourcedetail_last_pull'),
]
operations = [
migrations.AlterField(
model_name='sourcedetail',
name='diff',
field=models.TextField(blank=True, null=True),
),
]
| # Generated by Django 3.0.9 on 2020-08-24 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sources', '0003_sourcedetail_last_pull'),
]
operations = [
migrations.AlterField(
model_name='sourcedetail',
name='diff',
field=models.TextField(blank=True, null=True),
),
] | es | 0.126069 | 1.343567 | 1 |
src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py | Idate96/Mimetic-Fem | 0 | 14952 | # -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME>. Created on Mon Jul 10 20:12:27 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME> (张仪). Created on Thu Jul 6 16:00:33 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from assemble import assemble
from _assembling import assemble_, integral1d_
import matplotlib.pyplot as plt
from quadrature import extended_gauss_quad
from scipy.integrate import quad
from sympy import Matrix
import scipy.io
from scipy import sparse
import scipy as sp
from inner_product import inner
# %% exact solution define
# u^{(1)} = { u, v }^T
def u(x,y):
return +np.cos(np.pi*x) * np.sin(np.pi*y)
def v(x,y):
return -np.sin(np.pi*x) * np.cos(np.pi*y)
def r_u(x,y):
return -2* np.pi**2 * np.cos(np.pi*x) * np.sin(np.pi*y)
def r_v(x,y):
return 2* np.pi**2 * np.sin(np.pi*x) * np.cos(np.pi*y)
# %% define the mesh
mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 )
func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False)
func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False)
form_1_gauss = Form(func_space_gauss1)
form_1_lobatto = Form(func_space_lobatto1)
M = inner(form_1_lobatto.basis,form_1_gauss.basis)
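# M is the mixed inner-product (mass) matrix between the Lobatto and Gauss 1-form bases.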
| # -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME>. Created on Mon Jul 10 20:12:27 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME> (张仪). Created on Thu Jul 6 16:00:33 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from assemble import assemble
from _assembling import assemble_, integral1d_
import matplotlib.pyplot as plt
from quadrature import extended_gauss_quad
from scipy.integrate import quad
from sympy import Matrix
import scipy.io
from scipy import sparse
import scipy as sp
from inner_product import inner
# %% exact solution define
# u^{(1)} = { u, v }^T
def u(x,y):
return +np.cos(np.pi*x) * np.sin(np.pi*y)
def v(x,y):
return -np.sin(np.pi*x) * np.cos(np.pi*y)
def r_u(x,y):
return -2* np.pi**2 * np.cos(np.pi*x) * np.sin(np.pi*y)
def r_v(x,y):
return 2* np.pi**2 * np.sin(np.pi*x) * np.cos(np.pi*y)
# %% define the mesh
mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 )
func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False)
func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False)
form_1_gauss = Form(func_space_gauss1)
form_1_lobatto = Form(func_space_lobatto1)
M = inner(form_1_lobatto.basis,form_1_gauss.basis)
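# M is the mixed inner-product (mass) matrix between the Lobatto and Gauss 1-form bases.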
| pt | 0.25409 | 2.832893 | 3 |
src/quality_control/bin/createSpotDetectionQCHTML.py | WoutDavid/ST-nextflow-pipeline | 0 | 14953 | <filename>src/quality_control/bin/createSpotDetectionQCHTML.py
import json
from bs4 import BeautifulSoup
import pandas as pd
import sys
# Argparsing
argument_index = 1
template = sys.argv[argument_index]
argument_index +=1
recall_json = sys.argv[argument_index]
argument_index +=1
recall_plot = sys.argv[argument_index]
argument_index +=1
precision_jsons_list = [sys.argv[i] for i in range(argument_index, len(sys.argv))]
precision_rows_list = []
# convert jsons back to dicts for html conversion
for json_path in precision_jsons_list:
with open(json_path, 'r') as json_file:
data = json.load(json_file)
precision_rows_list.append(data)
precision_df = pd.DataFrame(precision_rows_list)
precision_df = precision_df.sort_values(by='Round #')
precision_html_table = precision_df.to_html(index=False)
# Same for recall json
recall_rows_list = []
with open(recall_json, 'r') as json_file:
data=json.load(json_file)
recall_rows_list.append(data)
recall_df = pd.DataFrame(recall_rows_list)
recall_html_table = recall_df.to_html(index=False)
# Create html
with open(template, 'r') as template_file:
contents = template_file.read()
template_soup = BeautifulSoup(contents, features="html.parser")
p_list = template_soup.find_all('p')
p_index = 0
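# The template's <p> tags act as ordered insertion anchors: each table or plot is inserted right after the next one in turn.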
# Read recall table tag
recall_soup = BeautifulSoup(recall_html_table, features="html.parser")
table_tag = recall_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
image_tag = template_soup.new_tag('img')
image_tag['src']= f"./recall/{recall_plot}"
image_tag['width']= 700
image_tag['height']= 500
p_list[p_index].insert_after(image_tag)
p_index+=1
precision_soup = BeautifulSoup(precision_html_table, features="html.parser")
table_tag = precision_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
with open('spot_detection_qc_report.html', 'w') as result_file:
result_file.write(str( template_soup ))
| <filename>src/quality_control/bin/createSpotDetectionQCHTML.py
import json
from bs4 import BeautifulSoup
import pandas as pd
import sys
# Argparsing
argument_index = 1
template = sys.argv[argument_index]
argument_index +=1
recall_json = sys.argv[argument_index]
argument_index +=1
recall_plot = sys.argv[argument_index]
argument_index +=1
precision_jsons_list = [sys.argv[i] for i in range(argument_index, len(sys.argv))]
precision_rows_list = []
# convert jsons back to dicts for html conversion
for json_path in precision_jsons_list:
with open(json_path, 'r') as json_file:
data = json.load(json_file)
precision_rows_list.append(data)
precision_df = pd.DataFrame(precision_rows_list)
precision_df = precision_df.sort_values(by='Round #')
precision_html_table = precision_df.to_html(index=False)
# Same for recall json
recall_rows_list = []
with open(recall_json, 'r') as json_file:
data=json.load(json_file)
recall_rows_list.append(data)
recall_df = pd.DataFrame(recall_rows_list)
recall_html_table = recall_df.to_html(index=False)
# Create html
with open(template, 'r') as template_file:
contents = template_file.read()
template_soup = BeautifulSoup(contents, features="html.parser")
p_list = template_soup.find_all('p')
p_index = 0
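# The template's <p> tags act as ordered insertion anchors: each table or plot is inserted right after the next one in turn.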
# Read recall table tag
recall_soup = BeautifulSoup(recall_html_table, features="html.parser")
table_tag = recall_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
image_tag = template_soup.new_tag('img')
image_tag['src']= f"./recall/{recall_plot}"
image_tag['width']= 700
image_tag['height']= 500
p_list[p_index].insert_after(image_tag)
p_index+=1
precision_soup = BeautifulSoup(precision_html_table, features="html.parser")
table_tag = precision_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
with open('spot_detection_qc_report.html', 'w') as result_file:
result_file.write(str( template_soup ))
| pt | 0.179029 | 2.729368 | 3 |
test/torch/mpc/test_fss.py | NicoSerranoP/PySyft | 3 | 14954 | <reponame>NicoSerranoP/PySyft
import pytest
import torch as th
from syft.frameworks.torch.mpc.fss import DPF, DIF, n
@pytest.mark.parametrize("op", ["eq", "le"])
def test_fss_class(op):
class_ = {"eq": DPF, "le": DIF}[op]
th_op = {"eq": th.eq, "le": th.le}[op]
gather_op = {"eq": "__add__", "le": "__xor__"}[op]
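    # Shares from the two evaluators reconstruct additively for DPF (eq) and via XOR for DIF (le).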
# single value
primitive = class_.keygen(n_values=1)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([0])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 1D tensor
primitive = class_.keygen(n_values=3)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([0, 2, -2])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 2D tensor
primitive = class_.keygen(n_values=4)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([[0, 2], [-2, 0]])
x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 3D tensor
primitive = class_.keygen(n_values=8)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([[[0, 2], [-2, 0]], [[0, 2], [-2, 0]]])
x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
| import pytest
import torch as th
from syft.frameworks.torch.mpc.fss import DPF, DIF, n
@pytest.mark.parametrize("op", ["eq", "le"])
def test_fss_class(op):
class_ = {"eq": DPF, "le": DIF}[op]
th_op = {"eq": th.eq, "le": th.le}[op]
gather_op = {"eq": "__add__", "le": "__xor__"}[op]
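    # Shares from the two evaluators reconstruct additively for DPF (eq) and via XOR for DIF (le).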
# single value
primitive = class_.keygen(n_values=1)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([0])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 1D tensor
primitive = class_.keygen(n_values=3)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([0, 2, -2])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 2D tensor
primitive = class_.keygen(n_values=4)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([[0, 2], [-2, 0]])
x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 3D tensor
primitive = class_.keygen(n_values=8)
alpha, s_00, s_01, *CW = primitive
mask = th.randint(0, 2 ** n, alpha.shape)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = th.tensor([[[0, 2], [-2, 0]], [[0, 2], [-2, 0]]])
x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all() | it | 0.312759 | 2.220209 | 2 |
allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py | donna-legal/allennlp | 1 | 14955 | from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerTokenizer(AllenNlpTestCase):
def test_splits_roberta(self):
tokenizer = PretrainedTransformerTokenizer("roberta-base")
sentence = "A, <mask> AllenNLP sentence."
expected_tokens = ["<s>", "A", ",", "<mask>", "Allen", "N", "LP", "Ġsentence", ".", "</s>"]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
# sentence pair
sentence_1 = "A, <mask> AllenNLP sentence."
sentence_2 = "A sentence."
expected_tokens = [
"<s>",
"A",
",",
"<mask>",
"Allen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
"</s>",
"A",
"Ġsentence",
".",
"</s>",
]
tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)]
assert tokens == expected_tokens
def test_splits_cased_bert(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
# sentence pair
sentence_1 = "A, [MASK] AllenNLP sentence."
sentence_2 = "A sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
"A",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)]
assert tokens == expected_tokens
def test_splits_uncased_bert(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
| from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerTokenizer(AllenNlpTestCase):
def test_splits_roberta(self):
tokenizer = PretrainedTransformerTokenizer("roberta-base")
sentence = "A, <mask> AllenNLP sentence."
expected_tokens = ["<s>", "A", ",", "<mask>", "Allen", "N", "LP", "Ġsentence", ".", "</s>"]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
# sentence pair
sentence_1 = "A, <mask> AllenNLP sentence."
sentence_2 = "A sentence."
expected_tokens = [
"<s>",
"A",
",",
"<mask>",
"Allen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
"</s>",
"A",
"Ġsentence",
".",
"</s>",
]
tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)]
assert tokens == expected_tokens
def test_splits_cased_bert(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
# sentence pair
sentence_1 = "A, [MASK] AllenNLP sentence."
sentence_2 = "A sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
"A",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)]
assert tokens == expected_tokens
def test_splits_uncased_bert(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
| it | 0.317631 | 2.812523 | 3 |
openmdao/api.py | ryanfarr01/blue | 0 | 14956 | """Key OpenMDAO classes can be imported from here."""
# Core
from openmdao.core.problem import Problem
from openmdao.core.group import Group
from openmdao.core.parallel_group import ParallelGroup
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.implicitcomponent import ImplicitComponent
from openmdao.core.indepvarcomp import IndepVarComp
from openmdao.core.analysis_error import AnalysisError
# Components
from openmdao.components.deprecated_component import Component
from openmdao.components.exec_comp import ExecComp
from openmdao.components.linear_system_comp import LinearSystemComp
from openmdao.components.meta_model import MetaModel
from openmdao.components.multifi_meta_model import MultiFiMetaModel
# Solvers
from openmdao.solvers.linear.linear_block_gs import LinearBlockGS
from openmdao.solvers.linear.linear_block_jac import LinearBlockJac
from openmdao.solvers.linear.direct import DirectSolver
from openmdao.solvers.linear.petsc_ksp import PetscKSP
from openmdao.solvers.linear.linear_runonce import LinearRunOnce
from openmdao.solvers.linear.scipy_iter_solver import ScipyIterativeSolver
from openmdao.solvers.linesearch.backtracking import ArmijoGoldsteinLS
from openmdao.solvers.linesearch.backtracking import BoundsEnforceLS
from openmdao.solvers.nonlinear.nonlinear_block_gs import NonlinearBlockGS
from openmdao.solvers.nonlinear.nonlinear_block_jac import NonlinearBlockJac
from openmdao.solvers.nonlinear.newton import NewtonSolver
from openmdao.solvers.nonlinear.nonlinear_runonce import NonLinearRunOnce
# Surrogate Models
from openmdao.surrogate_models.kriging import KrigingSurrogate, FloatKrigingSurrogate
from openmdao.surrogate_models.multifi_cokriging import MultiFiCoKrigingSurrogate, \
FloatMultiFiCoKrigingSurrogate
from openmdao.surrogate_models.nearest_neighbor import NearestNeighbor
from openmdao.surrogate_models.response_surface import ResponseSurface
from openmdao.surrogate_models.surrogate_model import SurrogateModel, \
MultiFiSurrogateModel
# Vectors
from openmdao.vectors.default_vector import DefaultVector
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
# Developer Tools
from openmdao.devtools.problem_viewer.problem_viewer import view_model
from openmdao.devtools.viewconns import view_connections
# Derivative Specification
from openmdao.jacobians.assembled_jacobian import AssembledJacobian, \
DenseJacobian, COOJacobian, CSRJacobian, CSCJacobian
# Drivers
try:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
except ImportError:
pass
from openmdao.drivers.scipy_optimizer import ScipyOptimizer
# System-Building Tools
from openmdao.utils.options_dictionary import OptionsDictionary
# Recorders
from openmdao.recorders.sqlite_recorder import SqliteRecorder
from openmdao.recorders.openmdao_server_recorder import OpenMDAOServerRecorder
| """Key OpenMDAO classes can be imported from here."""
# Core
from openmdao.core.problem import Problem
from openmdao.core.group import Group
from openmdao.core.parallel_group import ParallelGroup
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.implicitcomponent import ImplicitComponent
from openmdao.core.indepvarcomp import IndepVarComp
from openmdao.core.analysis_error import AnalysisError
# Components
from openmdao.components.deprecated_component import Component
from openmdao.components.exec_comp import ExecComp
from openmdao.components.linear_system_comp import LinearSystemComp
from openmdao.components.meta_model import MetaModel
from openmdao.components.multifi_meta_model import MultiFiMetaModel
# Solvers
from openmdao.solvers.linear.linear_block_gs import LinearBlockGS
from openmdao.solvers.linear.linear_block_jac import LinearBlockJac
from openmdao.solvers.linear.direct import DirectSolver
from openmdao.solvers.linear.petsc_ksp import PetscKSP
from openmdao.solvers.linear.linear_runonce import LinearRunOnce
from openmdao.solvers.linear.scipy_iter_solver import ScipyIterativeSolver
from openmdao.solvers.linesearch.backtracking import ArmijoGoldsteinLS
from openmdao.solvers.linesearch.backtracking import BoundsEnforceLS
from openmdao.solvers.nonlinear.nonlinear_block_gs import NonlinearBlockGS
from openmdao.solvers.nonlinear.nonlinear_block_jac import NonlinearBlockJac
from openmdao.solvers.nonlinear.newton import NewtonSolver
from openmdao.solvers.nonlinear.nonlinear_runonce import NonLinearRunOnce
# Surrogate Models
from openmdao.surrogate_models.kriging import KrigingSurrogate, FloatKrigingSurrogate
from openmdao.surrogate_models.multifi_cokriging import MultiFiCoKrigingSurrogate, \
FloatMultiFiCoKrigingSurrogate
from openmdao.surrogate_models.nearest_neighbor import NearestNeighbor
from openmdao.surrogate_models.response_surface import ResponseSurface
from openmdao.surrogate_models.surrogate_model import SurrogateModel, \
MultiFiSurrogateModel
# Vectors
from openmdao.vectors.default_vector import DefaultVector
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
# Developer Tools
from openmdao.devtools.problem_viewer.problem_viewer import view_model
from openmdao.devtools.viewconns import view_connections
# Derivative Specification
from openmdao.jacobians.assembled_jacobian import AssembledJacobian, \
DenseJacobian, COOJacobian, CSRJacobian, CSCJacobian
# Drivers
try:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
except ImportError:
pass
from openmdao.drivers.scipy_optimizer import ScipyOptimizer
# System-Building Tools
from openmdao.utils.options_dictionary import OptionsDictionary
# Recorders
from openmdao.recorders.sqlite_recorder import SqliteRecorder
from openmdao.recorders.openmdao_server_recorder import OpenMDAOServerRecorder
| pt | 0.297112 | 1.312523 | 1 |
code-buddy.py | xl3ehindTim/Code-buddy | 8 | 14957 | import os
from getArgs import getArgs
from modules import python, javascript, html, php, bootstrap, cca
# from folder import file
# code-buddy.py create (file type) (directory name)
# Checks for "create"
if getArgs(1) == "create":
# Checks for which file type
projectType = getArgs(2)
# Checks for file name
if projectType == "python":
name = getArgs(3)
python.createPythonProject(name)
print("Folder created succesfully")
elif projectType == "javascript":
name = getArgs(3)
javascript.createJavascriptProject(name)
print("Folder created succesfully")
elif projectType == "html":
name = getArgs(3)
html.createHtmlProject(name)
print("Folder created succesfully")
elif projectType == "php":
name = getArgs(3)
php.createPhpProject(name)
print("Folder created succesfully")
elif projectType == "bootstrap":
name = getArgs(3)
bootstrap.createPhpProject(name)
print("Folder created succesfully")
    elif projectType == "cca":
name = getArgs(3)
cca.createCcaProject(name)
print("Folder created succesfully")
# If not valid file type
else:
print(f"argument {getArgs(2)} is unknown, try: 'python, javascript, html, php or bootstrap'")
else:
# If invalid "create"
print(f"argument {getArgs(1)} is unknown, use 'create' to create a folder")
| import os
from getArgs import getArgs
from modules import python, javascript, html, php, bootstrap, cca
# from folder import file
# code-buddy.py create (file type) (directory name)
# Checks for "create"
if getArgs(1) == "create":
# Checks for which file type
projectType = getArgs(2)
# Checks for file name
if projectType == "python":
name = getArgs(3)
python.createPythonProject(name)
print("Folder created succesfully")
elif projectType == "javascript":
name = getArgs(3)
javascript.createJavascriptProject(name)
print("Folder created succesfully")
elif projectType == "html":
name = getArgs(3)
html.createHtmlProject(name)
print("Folder created succesfully")
elif projectType == "php":
name = getArgs(3)
php.createPhpProject(name)
print("Folder created succesfully")
elif projectType == "bootstrap":
name = getArgs(3)
bootstrap.createPhpProject(name)
print("Folder created succesfully")
    elif projectType == "cca":
name = getArgs(3)
cca.createCcaProject(name)
print("Folder created succesfully")
# If not valid file type
else:
print(f"argument {getArgs(2)} is unknown, try: 'python, javascript, html, php or bootstrap'")
else:
# If invalid "create"
print(f"argument {getArgs(1)} is unknown, use 'create' to create a folder")
| pt | 0.16101 | 2.882814 | 3 |
compyle/tests/test_ext_module.py | manish364824/compyle | 1 | 14958 | <filename>compyle/tests/test_ext_module.py
from contextlib import contextmanager
from distutils.sysconfig import get_config_var
from io import open as io_open
import os
from os.path import join, exists
import shutil
import sys
import tempfile
from textwrap import dedent
from multiprocessing import Pool
from unittest import TestCase, main
try:
from unittest import mock
except ImportError:
import mock
from ..ext_module import get_md5, ExtModule, get_ext_extension, get_unicode
def _check_write_source(root):
"""Used to create an ExtModule and test if a file was opened.
It returns the number of times "open" was called.
"""
m = mock.mock_open()
orig_side_effect = m.side_effect
def _side_effect(*args, **kw):
with io_open(*args, **kw) as fp:
fp.write(get_unicode("junk"))
return orig_side_effect(*args, **kw)
m.side_effect = _side_effect
with mock.patch('compyle.ext_module.io.open', m, create=True):
s = ExtModule("print('hello')", root=root)
s.write_source()
return m.call_count
def _check_compile(root):
with mock.patch('shutil.copy') as m:
s = ExtModule("print('hello')", root=root)
s.write_and_build()
if m.called:
# If it was called, do the copy to mimic the action.
shutil.copy(*m.call_args[0])
return m.call_count
class TestMiscExtMod(TestCase):
def test_md5(self):
data = "hello world"
# Two calls with same data produce same result
self.assertEqual(get_md5(data), get_md5(data))
# Two calls with different data produce different md5sums.
self.assertNotEqual(get_md5(data), get_md5(data + ' '))
class TestExtModule(TestCase):
def setUp(self):
self.root = tempfile.mkdtemp()
self.data = dedent('''\
# cython: language_level=3
def f():
return "hello world"
''')
def tearDown(self):
if sys.platform.startswith('win'):
try:
shutil.rmtree(self.root)
except WindowsError:
pass
else:
shutil.rmtree(self.root)
def test_constructor(self):
data = self.data
s = ExtModule(data, root=self.root)
self.assertTrue(exists(join(self.root, 'build')))
self.assertEqual(s.hash, get_md5(data))
self.assertEqual(s.code, data)
expect_name = 'm_%s' % (s.hash)
self.assertEqual(s.name, expect_name)
self.assertEqual(s.src_path, join(self.root, expect_name + '.pyx'))
self.assertEqual(s.ext_path,
join(self.root, expect_name + get_ext_extension()))
s.write_source()
self.assertTrue(exists(s.src_path))
self.assertEqual(data, open(s.src_path).read())
def test_default_root(self):
try:
data = self.data
s = ExtModule(data)
s.write_source()
self.assertTrue(exists(join(s.root, 'build')))
self.assertEqual(s.hash, get_md5(data))
self.assertEqual(s.code, data)
self.assertTrue(exists(s.src_path))
self.assertEqual(data, open(s.src_path).read())
finally:
os.unlink(s.src_path)
def test_load_module(self):
data = self.data
s = ExtModule(data, root=self.root)
mod = s.load()
self.assertEqual(mod.f(), "hello world")
self.assertTrue(exists(s.ext_path))
def _create_dummy_module(self):
code = "# cython: language_level=3\ndef hello(): return 'hello'"
modname = 'test_rebuild.py'
f = join(self.root, modname)
with open(f, 'w') as fp:
fp.write(code)
return f
@contextmanager
def _add_root_to_sys_path(self):
import sys
if self.root not in sys.path:
sys.path.insert(0, self.root)
try:
yield
finally:
sys.path.remove(self.root)
def test_rebuild_when_dependencies_change(self):
# Given.
data = self.data
depends = ["test_rebuild"]
s = ExtModule(data, root=self.root, depends=depends)
fname = self._create_dummy_module()
f_stat = os.stat(fname)
with self._add_root_to_sys_path():
# When
self.assertTrue(s.should_recompile())
s.write_and_build()
# Then.
self.assertFalse(s.should_recompile())
# Now lets re-create the module and try again.
# When.
fname = self._create_dummy_module()
# Update the timestamp to make it newer, otherwise we need to
# sleep.
os.utime(fname, (f_stat.st_atime, f_stat.st_mtime + 10))
# Then.
self.assertTrue(s.should_recompile())
def test_that_multiple_writes_do_not_occur_for_same_source(self):
# Given
n_proc = 5
p = Pool(n_proc)
# When
# Note that _create_extension cannot be defined here or even in the
# class as a nested function or instance method cannot be pickled.
result = p.map(_check_write_source, [self.root]*n_proc)
p.close()
# Then
# The file should have been opened only once.
self.assertEqual(sum(result), 1)
def test_that_multiple_compiles_do_not_occur_for_same_source(self):
# Given
n_proc = 5
p = Pool(n_proc)
# When
# Note that _check_compile cannot be defined here or even in the
# class as a nested function or instance method cannot be pickled.
result = p.map(_check_compile, [self.root]*n_proc)
p.close()
# Then
# The shutil.copy should have been run only once.
self.assertEqual(sum(result), 1)
if __name__ == '__main__':
main()
| <filename>compyle/tests/test_ext_module.py
from contextlib import contextmanager
from distutils.sysconfig import get_config_var
from io import open as io_open
import os
from os.path import join, exists
import shutil
import sys
import tempfile
from textwrap import dedent
from multiprocessing import Pool
from unittest import TestCase, main
try:
from unittest import mock
except ImportError:
import mock
from ..ext_module import get_md5, ExtModule, get_ext_extension, get_unicode
def _check_write_source(root):
"""Used to create an ExtModule and test if a file was opened.
It returns the number of times "open" was called.
"""
m = mock.mock_open()
orig_side_effect = m.side_effect
def _side_effect(*args, **kw):
with io_open(*args, **kw) as fp:
fp.write(get_unicode("junk"))
return orig_side_effect(*args, **kw)
m.side_effect = _side_effect
with mock.patch('compyle.ext_module.io.open', m, create=True):
s = ExtModule("print('hello')", root=root)
s.write_source()
return m.call_count
def _check_compile(root):
with mock.patch('shutil.copy') as m:
s = ExtModule("print('hello')", root=root)
s.write_and_build()
if m.called:
# If it was called, do the copy to mimic the action.
shutil.copy(*m.call_args[0])
return m.call_count
class TestMiscExtMod(TestCase):
def test_md5(self):
data = "hello world"
# Two calls with same data produce same result
self.assertEqual(get_md5(data), get_md5(data))
# Two calls with different data produce different md5sums.
self.assertNotEqual(get_md5(data), get_md5(data + ' '))
class TestExtModule(TestCase):
def setUp(self):
self.root = tempfile.mkdtemp()
self.data = dedent('''\
# cython: language_level=3
def f():
return "hello world"
''')
def tearDown(self):
if sys.platform.startswith('win'):
try:
shutil.rmtree(self.root)
except WindowsError:
pass
else:
shutil.rmtree(self.root)
def test_constructor(self):
data = self.data
s = ExtModule(data, root=self.root)
self.assertTrue(exists(join(self.root, 'build')))
self.assertEqual(s.hash, get_md5(data))
self.assertEqual(s.code, data)
expect_name = 'm_%s' % (s.hash)
self.assertEqual(s.name, expect_name)
self.assertEqual(s.src_path, join(self.root, expect_name + '.pyx'))
self.assertEqual(s.ext_path,
join(self.root, expect_name + get_ext_extension()))
s.write_source()
self.assertTrue(exists(s.src_path))
self.assertEqual(data, open(s.src_path).read())
def test_default_root(self):
try:
data = self.data
s = ExtModule(data)
s.write_source()
self.assertTrue(exists(join(s.root, 'build')))
self.assertEqual(s.hash, get_md5(data))
self.assertEqual(s.code, data)
self.assertTrue(exists(s.src_path))
self.assertEqual(data, open(s.src_path).read())
finally:
os.unlink(s.src_path)
def test_load_module(self):
data = self.data
s = ExtModule(data, root=self.root)
mod = s.load()
self.assertEqual(mod.f(), "hello world")
self.assertTrue(exists(s.ext_path))
def _create_dummy_module(self):
code = "# cython: language_level=3\ndef hello(): return 'hello'"
modname = 'test_rebuild.py'
f = join(self.root, modname)
with open(f, 'w') as fp:
fp.write(code)
return f
@contextmanager
def _add_root_to_sys_path(self):
import sys
if self.root not in sys.path:
sys.path.insert(0, self.root)
try:
yield
finally:
sys.path.remove(self.root)
def test_rebuild_when_dependencies_change(self):
# Given.
data = self.data
depends = ["test_rebuild"]
s = ExtModule(data, root=self.root, depends=depends)
fname = self._create_dummy_module()
f_stat = os.stat(fname)
with self._add_root_to_sys_path():
# When
self.assertTrue(s.should_recompile())
s.write_and_build()
# Then.
self.assertFalse(s.should_recompile())
# Now lets re-create the module and try again.
# When.
fname = self._create_dummy_module()
# Update the timestamp to make it newer, otherwise we need to
# sleep.
os.utime(fname, (f_stat.st_atime, f_stat.st_mtime + 10))
# Then.
self.assertTrue(s.should_recompile())
def test_that_multiple_writes_do_not_occur_for_same_source(self):
# Given
n_proc = 5
p = Pool(n_proc)
# When
# Note that _create_extension cannot be defined here or even in the
# class as a nested function or instance method cannot be pickled.
result = p.map(_check_write_source, [self.root]*n_proc)
p.close()
# Then
# The file should have been opened only once.
self.assertEqual(sum(result), 1)
def test_that_multiple_compiles_do_not_occur_for_same_source(self):
# Given
n_proc = 5
p = Pool(n_proc)
# When
# Note that _check_compile cannot be defined here or even in the
# class as a nested function or instance method cannot be pickled.
result = p.map(_check_compile, [self.root]*n_proc)
p.close()
# Then
# The shutil.copy should have been run only once.
self.assertEqual(sum(result), 1)
if __name__ == '__main__':
main()
| it | 0.216031 | 2.37275 | 2 |
tma/collector/xhn.py | hebpmo/TMA | 2 | 14959 | # -*- coding: UTF-8 -*-
"""
collector.xhn - Xinhua News (xinhuanet) data collection
Official site: http://www.xinhuanet.com/
API notes:
1. Fetch the article list
http://qc.wa.news.cn/nodeart/list?nid=115093&pgnum=1&cnt=10000
Xinhua all-media headlines
http://www.xinhuanet.com/politics/qmtt/index.htm
====================================================================
"""
import requests
import re
from datetime import datetime
from bs4 import BeautifulSoup
from zb.crawlers.utils import get_header
import traceback
import pandas as pd
from tqdm import tqdm
import tma
home_url = "http://www.xinhuanet.com/"
def get_website_map():
wzdt_url = "http://www.xinhuanet.com/wzdt2014.htm"
html = requests.get(wzdt_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
map_raw = bsobj.find('div', {'class': "content_left"})
raise NotImplementedError
def get_special_topics(pgnum=1):
    """Fetch the list of special topics."""
url = "http://qc.wa.news.cn/nodeart/list?" \
"nid=115093&pgnum=%s&cnt=200" % str(pgnum)
res = requests.get(url).text
res = res.replace("null", "\'\'")
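    # NOTE: the endpoint returns JSON-like text; eval() below parses it after the "null" substitution,
    # but evaluating remote text is unsafe; json.loads would be a safer alternative.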
res = eval(res)
    assert res['status'] == 0, "Failed to fetch the article list"
data = res['data']['list']
specials = []
for a in data:
special = {
"Abstract": a['Abstract'],
"Author": a['Author'],
"LinkUrl": a['LinkUrl'],
"PubTime": a['PubTime'],
"Title": a['Title'],
"allPics": a['allPics'],
}
specials.append(special)
return specials
def get_article_detail(article_url):
    """Fetch the article content from a xinhuanet article_url.
    :param article_url: article url
:return:
{
"url": article_url,
"title": title,
"pub_time": pub_time,
"source": source,
"content": content
}
"""
# article_url = "http://www.xinhuanet.com/fortune/2018-06/20/c_129897476.htm"
html = requests.get(article_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
    # Parse the title block
cols = bsobj.find('div', {"class": "h-news"}).text.strip().split("\r\n")
title = cols[0].strip()
pub_time = cols[1].strip()
source = cols[-1].strip()
    # Parse the article body
content = bsobj.find('div', {"id": "p-detail"}).text.strip()
content = content.replace("\u3000\u3000", "")
content = [x.strip() for x in content.split("\n")]
content = [x for x in content if x != ""]
content = "\n".join(content)
return {
"url": article_url,
"title": title,
"pub_time": pub_time,
"source": source,
"content": content
}
class HomePage(object):
    """xinhuanet home page."""
def __init__(self):
self.home_url = "http://www.xinhuanet.com/"
@staticmethod
def _get_date_from_url(url):
        pat = re.compile(r"(\d{4}-\d{2}[/-]\d{2})")
res = pat.findall(url)
if res is not None and len(res) == 1:
return res[0].replace('/', "-")
else:
return None
    def get_article_list(self, d=None):
        """Fetch the list of headline articles on the home page."""
html = requests.get(self.home_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
a_list = []
for a in bsobj.find_all("a"):
try:
url = a['href']
title = a.text.strip()
date_ = self._get_date_from_url(url)
a_list.append([url, title, date_])
except:
if tma.DEBUG:
traceback.print_exc()
continue
a_list = [a for a in a_list if
a[0] != ""
and a[0].strip("/") != "http://xhgy.xinhuanet.com"
and a[0].startswith("http")
and a[1] != ""
and a[1] != "视频MP4地址"
and "c_" in a[0]
and a[2] != ""
# and 'photo' not in a[0]
# and 'video' not in a[0]
]
        # De-duplicate by url
df = pd.DataFrame(a_list, columns=['url', 'title', 'date'])
df.drop_duplicates('url', inplace=True)
res = [list(x) for x in list(df.values)]
if d is None:
date_list = [datetime.now().date().__str__()]
else:
date_list = d
res = [a for a in res if a[2] in date_list]
res = sorted(res, key=lambda x: x[2], reverse=True)
return res
    def get_articles(self, d=None):
        """Fetch the content of the home-page articles.
        :param d: list
            Dates to restrict fetching to; defaults to today's date, and several discrete dates may be given.
:return: list
"""
        # Get the home-page article URLs, filter by publication date, de-duplicate by URL
res = self.get_article_list(d)
a_list = [a[0] for a in res]
a_list = list(set(a_list))
articles = []
for a in tqdm(a_list, ncols=100, desc="xhn.get_articles"):
try:
article = get_article_detail(a)
articles.append(article)
except:
if tma.DEBUG:
traceback.print_exc()
return articles
class Fortune(object):
def __init__(self):
self.url1 = "http://www.xinhuanet.com/fortune/"
self.url2 = "http://www.xinhuanet.com/fortune/caiyan.htm"
self.url3 = "http://www.xinhuanet.com/fortune/cfx.htm"
self.url4 = "http://www.xinhuanet.com/fortune/bcxc.htm"
| # -*- coding: UTF-8 -*-
"""
collector.xhn - Xinhua News (xinhuanet) data collection
Official site: http://www.xinhuanet.com/
API notes:
1. Fetch the article list
http://qc.wa.news.cn/nodeart/list?nid=115093&pgnum=1&cnt=10000
Xinhua all-media headlines
http://www.xinhuanet.com/politics/qmtt/index.htm
====================================================================
"""
import requests
import re
from datetime import datetime
from bs4 import BeautifulSoup
from zb.crawlers.utils import get_header
import traceback
import pandas as pd
from tqdm import tqdm
import tma
home_url = "http://www.xinhuanet.com/"
def get_website_map():
wzdt_url = "http://www.xinhuanet.com/wzdt2014.htm"
html = requests.get(wzdt_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
map_raw = bsobj.find('div', {'class': "content_left"})
raise NotImplementedError
def get_special_topics(pgnum=1):
    """Fetch the list of special topics."""
url = "http://qc.wa.news.cn/nodeart/list?" \
"nid=115093&pgnum=%s&cnt=200" % str(pgnum)
res = requests.get(url).text
res = res.replace("null", "\'\'")
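    # NOTE: the endpoint returns JSON-like text; eval() below parses it after the "null" substitution,
    # but evaluating remote text is unsafe; json.loads would be a safer alternative.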
res = eval(res)
    assert res['status'] == 0, "Failed to fetch the article list"
data = res['data']['list']
specials = []
for a in data:
special = {
"Abstract": a['Abstract'],
"Author": a['Author'],
"LinkUrl": a['LinkUrl'],
"PubTime": a['PubTime'],
"Title": a['Title'],
"allPics": a['allPics'],
}
specials.append(special)
return specials
def get_article_detail(article_url):
    """Fetch the article content from a xinhuanet article_url.
    :param article_url: article url
:return:
{
"url": article_url,
"title": title,
"pub_time": pub_time,
"source": source,
"content": content
}
"""
# article_url = "http://www.xinhuanet.com/fortune/2018-06/20/c_129897476.htm"
html = requests.get(article_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
    # Parse the title block
cols = bsobj.find('div', {"class": "h-news"}).text.strip().split("\r\n")
title = cols[0].strip()
pub_time = cols[1].strip()
source = cols[-1].strip()
    # Parse the article body
content = bsobj.find('div', {"id": "p-detail"}).text.strip()
content = content.replace("\u3000\u3000", "")
content = [x.strip() for x in content.split("\n")]
content = [x for x in content if x != ""]
content = "\n".join(content)
return {
"url": article_url,
"title": title,
"pub_time": pub_time,
"source": source,
"content": content
}
class HomePage(object):
    """xinhuanet home page."""
def __init__(self):
self.home_url = "http://www.xinhuanet.com/"
@staticmethod
def _get_date_from_url(url):
        pat = re.compile(r"(\d{4}-\d{2}[/-]\d{2})")
res = pat.findall(url)
if res is not None and len(res) == 1:
return res[0].replace('/', "-")
else:
return None
    def get_article_list(self, d=None):
        """Fetch the list of headline articles on the home page."""
html = requests.get(self.home_url, headers=get_header())
bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')
a_list = []
for a in bsobj.find_all("a"):
try:
url = a['href']
title = a.text.strip()
date_ = self._get_date_from_url(url)
a_list.append([url, title, date_])
except:
if tma.DEBUG:
traceback.print_exc()
continue
a_list = [a for a in a_list if
a[0] != ""
and a[0].strip("/") != "http://xhgy.xinhuanet.com"
and a[0].startswith("http")
and a[1] != ""
and a[1] != "视频MP4地址"
and "c_" in a[0]
and a[2] != ""
# and 'photo' not in a[0]
# and 'video' not in a[0]
]
        # De-duplicate by url
df = pd.DataFrame(a_list, columns=['url', 'title', 'date'])
df.drop_duplicates('url', inplace=True)
res = [list(x) for x in list(df.values)]
if d is None:
date_list = [datetime.now().date().__str__()]
else:
date_list = d
res = [a for a in res if a[2] in date_list]
res = sorted(res, key=lambda x: x[2], reverse=True)
return res
def get_articles(self, d=None):
"""获取首页文章内容
:param d: list
限定获取文章的日期,默认是当日日期,可以指定多个离散的日期
:return: list
"""
# Fetch homepage article URLs, filter by publication date, and deduplicate by URL
res = self.get_article_list(d)
a_list = [a[0] for a in res]
a_list = list(set(a_list))
articles = []
for a in tqdm(a_list, ncols=100, desc="xhn.get_articles"):
try:
article = get_article_detail(a)
articles.append(article)
except:
if tma.DEBUG:
traceback.print_exc()
return articles
class Fortune(object):
def __init__(self):
self.url1 = "http://www.xinhuanet.com/fortune/"
self.url2 = "http://www.xinhuanet.com/fortune/caiyan.htm"
self.url3 = "http://www.xinhuanet.com/fortune/cfx.htm"
self.url4 = "http://www.xinhuanet.com/fortune/bcxc.htm"
| zh | 0.57782 | 2.358544 | 2 |
tests/test_onetv.py | unlocKing/plugins | 2 | 14960 | import unittest
from plugins.onetv import OneTV
class TestPluginPerviyKanal(unittest.TestCase):
def test_can_handle_url(self):
regex_test_list = [
"https://media.1tv.ru/embed/ctcmedia/ctc-che.html?start=auto",
"https://media.1tv.ru/embed/ctcmedia/ctc-dom.html?start=auto",
"https://media.1tv.ru/embed/ctcmedia/ctc-love.html?start=auto",
"https://stream.1tv.ru/live",
"https://www.1tv.ru/embedlive?start=auto",
"https://www.1tv.ru/live",
"https://www.chetv.ru/online/",
"https://www.ctc.ru/online/",
"https://www.ctclove.ru/online/",
"https://domashniy.ru/online",
"https://ren.tv/live",
"https://media.1tv.ru/embed/nmg/nmg-ren.html",
"https://www.5-tv.ru/live/",
"https://media.1tv.ru/embed/nmg/nmg-5tv.html",
]
for url in regex_test_list:
self.assertTrue(OneTV.can_handle_url(url))
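# --- Illustrative sketch (added; NOT the plugin's actual implementation) ---
# OneTV.can_handle_url() is exercised above but defined in plugins.onetv. A
# typical Streamlink-style plugin matches the URL against a class-level regex;
# the pattern below is only a guess that covers the test URLs listed above.
import re
_ONETV_URL_RE = re.compile(
    r"https?://(?:www\.)?"
    r"(?:media\.1tv\.ru/embed/"
    r"|stream\.1tv\.ru/live"
    r"|1tv\.ru/(?:live|embedlive)"
    r"|chetv\.ru/online"
    r"|ctc\.ru/online"
    r"|ctclove\.ru/online"
    r"|domashniy\.ru/online"
    r"|ren\.tv/live"
    r"|5-tv\.ru/live)"
)
def can_handle_url_sketch(url):
    return _ONETV_URL_RE.search(url) is not None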
| import unittest
from plugins.onetv import OneTV
class TestPluginPerviyKanal(unittest.TestCase):
def test_can_handle_url(self):
regex_test_list = [
"https://media.1tv.ru/embed/ctcmedia/ctc-che.html?start=auto",
"https://media.1tv.ru/embed/ctcmedia/ctc-dom.html?start=auto",
"https://media.1tv.ru/embed/ctcmedia/ctc-love.html?start=auto",
"https://stream.1tv.ru/live",
"https://www.1tv.ru/embedlive?start=auto",
"https://www.1tv.ru/live",
"https://www.chetv.ru/online/",
"https://www.ctc.ru/online/",
"https://www.ctclove.ru/online/",
"https://domashniy.ru/online",
"https://ren.tv/live",
"https://media.1tv.ru/embed/nmg/nmg-ren.html",
"https://www.5-tv.ru/live/",
"https://media.1tv.ru/embed/nmg/nmg-5tv.html",
]
for url in regex_test_list:
self.assertTrue(OneTV.can_handle_url(url))
| none | 1 | 2.790812 | 3 |
tests/test_plugins/pytester_example_dir/test_file_1.py | MORSECorp/snappiershot | 27 | 14961 | """ This is a test file used for testing the pytest plugin. """
def test_function_passed(snapshot):
""" The snapshot for this function is expected to exist. """
snapshot.assert_match(3 + 4j)
def test_function_new(snapshot):
""" The snapshot for this function is expected to exist, but only one assertion is expected. """
snapshot.assert_match(3 + 4j)
snapshot.assert_match(3 + 4j)
| """ This is a test file used for testing the pytest plugin. """
def test_function_passed(snapshot):
""" The snapshot for this function is expected to exist. """
snapshot.assert_match(3 + 4j)
def test_function_new(snapshot):
""" The snapshot for this function is expected to exist, but only one assertion is expected. """
snapshot.assert_match(3 + 4j)
snapshot.assert_match(3 + 4j)
| pt | 0.152163 | 1.650558 | 2 |
src/compas/geometry/pointclouds/pointcloud.py | Sam-Bouten/compas | 0 | 14962 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from random import uniform
from compas.geometry import transform_points
from compas.geometry import centroid_points
from compas.geometry import bounding_box
from compas.geometry import Primitive
from compas.geometry import Point
__all__ = ['Pointcloud']
class Pointcloud(Primitive):
"""Class for working with pointclouds.
Parameters
----------
points : sequence[point]
A sequence of points to add to the cloud.
**kwargs : dict[str, Any], optional
Additional keyword arguments collected in a dict.
Attributes
----------
points : list[:class:`compas.geometry.Point`]
The points of the cloud.
Examples
--------
>>>
"""
def __init__(self, points, **kwargs):
super(Pointcloud, self).__init__(**kwargs)
self._points = None
self.points = points
@property
def DATASCHEMA(self):
from schema import Schema
from compas.data import is_float3
return Schema({
'points': lambda points: all(is_float3(point) for point in points)
})
@property
def JSONSCHEMANAME(self):
return 'pointcloud'
@property
def data(self):
return {'points': [point.data for point in self.points]}
@data.setter
def data(self, data):
self._points = [Point.from_data(point) for point in data['points']]
@classmethod
def from_data(cls, data):
return cls(data['points'])
# ==========================================================================
# properties
# ==========================================================================
@property
def points(self):
return self._points
@points.setter
def points(self, points):
self._points = [Point(*point) for point in points]
@property
def centroid(self):
return centroid_points(self.points)
@property
def bounding_box(self):
return bounding_box(self.points)
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Pointcloud({0!r})'.format(self.points)
def __len__(self):
return len(self.points)
def __getitem__(self, key):
if key > len(self) - 1:
raise KeyError
return self.points[key]
def __setitem__(self, key, value):
if key > len(self) - 1:
raise KeyError
self.points[key] = value
def __iter__(self):
return iter(self.points)
def __eq__(self, other):
"""Is this pointcloud equal to the other pointcloud?
Two pointclouds are considered equal if they have the same number of points
and if the XYZ coordinates of the corresponding points are identical.
Parameters
----------
other : :class:`compas.geometry.Pointcloud` | list[[float, float, float] | :class:`compas.geometry.Point`]
The pointcloud to compare.
Returns
-------
bool
True if the pointclouds are equal.
False otherwise.
"""
if len(self) != len(other):
return False
A = sorted(self, key=lambda point: (point[0], point[1], point[2]))
B = sorted(other, key=lambda point: (point[0], point[1], point[2]))
return all(a == b for a, b in zip(A, B))
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_ply(cls, filepath):
"""Construct a pointcloud from a PLY file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PLY file.
Returns
-------
:class:`compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_pcd(cls, filepath):
"""Construct a pointcloud from a PCD file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PCD file.
Returns
-------
:class:`compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_bounds(cls, x, y, z, n):
"""Construct a point cloud within a given box.
Parameters
----------
x : float | tuple[float, float]
Size of the cloud in the X direction.
If a single value, the size is (0, x).
If a pair of values, the size is (x[0], x[1]).
y : float | tuple[float, float]
Size of the cloud in the Y direction.
If a single value, the size is (0, y).
If a pair of values, the size is (y[0], y[1]).
z : float | tuple[float, float]
Size of the cloud in the Z direction.
If a single value, the size is (0, z).
If a pair of values, the size is (z[0], z[1]).
n : int
The number of points in the cloud.
Returns
-------
:class:`compas.geometry.Pointcloud`
Notes
-----
The XYZ coordinates of the `n` points are randomly chosen within the provided `x`, `y`, and `z` bounds.
Therefore, there is no guarantee that the bounds themselves are part of the resulting coordinates.
Examples
--------
>>>
"""
try:
len(x)
except TypeError:
xmin = 0
xmax = x
else:
xmin, xmax = x
try:
len(y)
except TypeError:
ymin = 0
ymax = y
else:
ymin, ymax = y
try:
len(z)
except TypeError:
zmin = 0
zmax = z
else:
zmin, zmax = z
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
@classmethod
def from_box(cls, box, n):
"""Construct a point cloud within a given box.
Parameters
----------
box: :class:`compas.geometry.Box`
The axis aligned bounding box of the cloud.
n: int
The number of points in the cloud.
Returns
-------
:class:`compas.geometry.Pointcloud`
Examples
--------
>>> from compas.geometry import Box
>>> cloud = Pointcloud.from_box(Box.from_width_height_depth(10, 3, 5), 100)
>>> all((-5 < x < +5) and (-2.5 < y < +2.5) and (-1.5 < z < +1.5) for x, y, z in cloud.points)
True
"""
points = box.points
x, y, z = zip(*points)
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
zmin, zmax = min(z), max(z)
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
# ==========================================================================
# methods
# ==========================================================================
def transform(self, T):
"""Apply a transformation to the pointcloud.
Parameters
----------
T : :class:`compas.geometry.Transformation`
The transformation.
Returns
-------
None
The cloud is modified in place.
"""
for index, point in enumerate(transform_points(self.points, T)):
self.points[index].x = point[0]
self.points[index].y = point[1]
self.points[index].z = point[2]
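# --- Usage sketch (added for illustration; not part of the original module) ---
# A quick demonstration of the constructors and properties defined above:
# build a random cloud inside given bounds and inspect its size, centroid,
# and axis-aligned bounding box. The points are random, so only the ranges
# are predictable.
if __name__ == "__main__":
    cloud = Pointcloud.from_bounds(10, 5, 3, n=100)
    print(len(cloud))           # 100
    print(cloud.centroid)       # roughly (5.0, 2.5, 1.5) for large n
    print(cloud.bounding_box)   # eight corners, all within the given bounds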
| from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from random import uniform
from compas.geometry import transform_points
from compas.geometry import centroid_points
from compas.geometry import bounding_box
from compas.geometry import Primitive
from compas.geometry import Point
__all__ = ['Pointcloud']
class Pointcloud(Primitive):
"""Class for working with pointclouds.
Parameters
----------
points : sequence[point]
A sequence of points to add to the cloud.
**kwargs : dict[str, Any], optional
Additional keyword arguments collected in a dict.
Attributes
----------
points : list[:class:`compas.geometry.Point`]
The points of the cloud.
Examples
--------
>>>
"""
def __init__(self, points, **kwargs):
super(Pointcloud, self).__init__(**kwargs)
self._points = None
self.points = points
@property
def DATASCHEMA(self):
from schema import Schema
from compas.data import is_float3
return Schema({
'points': lambda points: all(is_float3(point) for point in points)
})
@property
def JSONSCHEMANAME(self):
return 'pointcloud'
@property
def data(self):
return {'points': [point.data for point in self.points]}
@data.setter
def data(self, data):
self._points = [Point.from_data(point) for point in data['points']]
@classmethod
def from_data(cls, data):
return cls(data['points'])
# ==========================================================================
# properties
# ==========================================================================
@property
def points(self):
return self._points
@points.setter
def points(self, points):
self._points = [Point(*point) for point in points]
@property
def centroid(self):
return centroid_points(self.points)
@property
def bounding_box(self):
return bounding_box(self.points)
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Pointcloud({0!r})'.format(self.points)
def __len__(self):
return len(self.points)
def __getitem__(self, key):
if key > len(self) - 1:
raise KeyError
return self.points[key]
def __setitem__(self, key, value):
if key > len(self) - 1:
raise KeyError
self.points[key] = value
def __iter__(self):
return iter(self.points)
def __eq__(self, other):
"""Is this pointcloud equal to the other pointcloud?
Two pointclouds are considered equal if they have the same number of points
and if the XYZ coordinates of the corresponding points are identical.
Parameters
----------
other : :class:`compas.geometry.Pointcloud` | list[[float, float, float] | :class:`compas.geometry.Point`]
The pointcloud to compare.
Returns
-------
bool
True if the pointclouds are equal.
False otherwise.
"""
if len(self) != len(other):
return False
A = sorted(self, key=lambda point: (point[0], point[1], point[2]))
B = sorted(other, key=lambda point: (point[0], point[1], point[2]))
return all(a == b for a, b in zip(A, B))
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_ply(cls, filepath):
"""Construct a pointcloud from a PLY file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PLY file.
Returns
-------
:class:`compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_pcd(cls, filepath):
"""Construct a pointcloud from a PCD file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PCD file.
Returns
-------
:class:`compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_bounds(cls, x, y, z, n):
"""Construct a point cloud within a given box.
Parameters
----------
x : float | tuple[float, float]
Size of the cloud in the X direction.
If a single value, the size is (0, x).
If a pair of values, the size is (x[0], x[1]).
y : float | tuple[float, float]
Size of the cloud in the Y direction.
If a single value, the size is (0, y).
If a pair of values, the size is (y[0], y[1]).
z : float | tuple[float, float]
Size of the cloud in the Z direction.
If a single value, the size is (0, z).
If a pair of values, the size is (z[0], z[1]).
n : int
The number of points in the cloud.
Returns
-------
:class:`compas.geometry.Pointcloud`
Notes
-----
The XYZ coordinates of the `n` points are randomly chosen within the provided `x`, `y`, and `z` bounds.
Therefore, there is no guarantee that the bounds themselves are part of the resulting coordinates.
Examples
--------
>>>
"""
try:
len(x)
except TypeError:
xmin = 0
xmax = x
else:
xmin, xmax = x
try:
len(y)
except TypeError:
ymin = 0
ymax = y
else:
ymin, ymax = y
try:
len(z)
except TypeError:
zmin = 0
zmax = z
else:
zmin, zmax = z
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
@classmethod
def from_box(cls, box, n):
"""Construct a point cloud within a given box.
Parameters
----------
box: :class:`compas.geometry.Box`
The axis aligned bounding box of the cloud.
n: int
The number of points in the cloud.
Returns
-------
:class:`compas.geometry.Pointcloud`
Examples
--------
>>> from compas.geometry import Box
>>> cloud = Pointcloud.from_box(Box.from_width_height_depth(10, 3, 5), 100)
>>> all((-5 < x < +5) and (-2.5 < y < +2.5) and (-1.5 < z < +1.5) for x, y, z in cloud.points)
True
"""
points = box.points
x, y, z = zip(*points)
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
zmin, zmax = min(z), max(z)
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
# ==========================================================================
# methods
# ==========================================================================
def transform(self, T):
"""Apply a transformation to the pointcloud.
Parameters
----------
T : :class:`compas.geometry.Transformation`
The transformation.
Returns
-------
None
The cloud is modified in place.
"""
for index, point in enumerate(transform_points(self.points, T)):
self.points[index].x = point[0]
self.points[index].y = point[1]
self.points[index].z = point[2]
| ceb | 0.225904 | 2.555447 | 3 |
oa/regex.py | Worteks/OrangeAssassin | 0 | 14963 | """Handle regex conversions."""
from builtins import object
import re
import operator
from functools import reduce
import oa.errors
# Map of perl flags and the corresponding re ones.
FLAGS = {
"i": re.IGNORECASE,
"s": re.DOTALL,
"m": re.MULTILINE,
"x": re.VERBOSE,
}
DELIMS = {
"/": "/",
"{": "}",
"%": "%",
"<": ">",
"'": "'",
"~": "~",
",": ",",
"!": "!",
";": ";",
}
# Regex substitution for Perl -> Python compatibility
_CONVERTS = (
(re.compile(r"""
# Python does not support local extensions so remove those. For example:
# (?i:test) becomes (?:test)
(?<=\(\?) # Look-behind and match (?
(([adlupimsx-]*?)|(\^[?^alupimsx]*?)) # Capture the extension
(?=:) # Look-ahead and match the :
""", re.VERBOSE), r""),
(re.compile(r"""
# Python doesn't have support for expression such as \b?
# Replace it with (\b)?
(\\b) # Capture group that matches \b or \B
(?=\?) # Look-ahead that matches ?
""", re.VERBOSE | re.IGNORECASE), r"(\1)"),
(re.compile(r"""
# Python doesn't have support for "independent" subexpression (?>)
# Replace those with non capturing groups (?:)
(?<=\(\?) # Look-behind and match (?
(>) # Match >
""", re.VERBOSE), r":"),
)
class Pattern(object):
"""Abstract class for rule regex matching."""
def __init__(self, pattern):
self._pattern = pattern
def match(self, text):
raise NotImplementedError()
class MatchPattern(Pattern):
"""This pattern does a search on the text and returns either 1 or 0."""
def match(self, text):
return 1 if self._pattern.search(text) else 0
class NotMatchPattern(Pattern):
"""This pattern does a search on the text and returns either 1 or 0."""
def match(self, text):
return 0 if self._pattern.search(text) else 1
def perl2re(pattern, match_op="=~"):
"""Convert a Perl type regex to a Python one."""
# We don't need to consider the pre-flags
pattern = pattern.strip().lstrip("mgs")
delim = pattern[0]
try:
rev_delim = DELIMS[delim]
except KeyError:
raise oa.errors.InvalidRegex("Invalid regex delimiter %r in %r" %
(delim, pattern))
try:
pattern, flags_str = pattern.lstrip(delim).rsplit(rev_delim, 1)
except ValueError:
raise oa.errors.InvalidRegex("Invalid regex %r. Please make sure you "
"have escaped all the special characters "
"when you defined the regex in "
"configuration file" % pattern)
for conv_p, repl in _CONVERTS:
pattern = conv_p.sub(repl, pattern)
flags = reduce(operator.or_, (FLAGS.get(flag, 0) for flag in flags_str), 0)
try:
if match_op == "=~":
return MatchPattern(re.compile(pattern, flags))
elif match_op == "!~": return NotMatchPattern(re.compile(pattern, flags))
except re.error as e:
raise oa.errors.InvalidRegex("Invalid regex %r: %s" % (pattern, e))
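# --- Usage sketch (added for illustration; not part of the original module) ---
# perl2re() turns a Perl-style pattern such as "/viagra/i" into a MatchPattern
# (for "=~") or NotMatchPattern (for "!~") whose match() method returns 1 or 0,
# mirroring how SpamAssassin-style rules are evaluated.
def _perl2re_demo():
    rule = perl2re("/viagra/i")                   # case-insensitive match
    assert rule.match("Cheap VIAGRA here") == 1
    assert rule.match("nothing to see") == 0
    negated = perl2re("/unsubscribe/i", match_op="!~")
    assert negated.match("plain text body") == 1  # pattern absent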
class Regex(object):
"""Customised regex class to work in lazy mode"""
compiled = None
def __init__(self, pattern, flags=0):
self.pattern = pattern
self.flags = flags
def compile(self):
from oa.config import LAZY_MODE
if LAZY_MODE:
return re.compile(self.pattern, self.flags)
elif not self.compiled:
self.compiled = re.compile(self.pattern, self.flags)
return self.compiled
def search(self, string):
return self.compile().search(string)
def match(self, string):
return self.compile().match(string)
def fullmatch(self, string):
return self.compile().fullmatch(string)
def sub(self, repl, string, count=0):
return self.compile().sub(repl, string, count)
def subn(self, repl, string, count=0):
return self.compile().subn(repl, string, count)
def split(self, string, maxsplit=0):
return self.compile().split(string, maxsplit)
def findall(self, string):
return self.compile().findall(string)
def finditer(self, string):
return self.compile().finditer(string)
| """Handle regex conversions."""
from builtins import object
import re
import operator
from functools import reduce
import oa.errors
# Map of perl flags and the corresponding re ones.
FLAGS = {
"i": re.IGNORECASE,
"s": re.DOTALL,
"m": re.MULTILINE,
"x": re.VERBOSE,
}
DELIMS = {
"/": "/",
"{": "}",
"%": "%",
"<": ">",
"'": "'",
"~": "~",
",": ",",
"!": "!",
";": ";",
}
# Regex substitution for Perl -> Python compatibility
_CONVERTS = (
(re.compile(r"""
# Python does not support local extensions so remove those. For example:
# (?i:test) becomes (?:test)
(?<=\(\?) # Look-behind and match (?
(([adlupimsx-]*?)|(\^[?^alupimsx]*?)) # Capture the extension
(?=:) # Look-ahead and match the :
""", re.VERBOSE), r""),
(re.compile(r"""
# Python doesn't have support for expression such as \b?
# Replace it with (\b)?
(\\b) # Capture group that matches \b or \B
(?=\?) # Look-ahead that matches ?
""", re.VERBOSE | re.IGNORECASE), r"(\1)"),
(re.compile(r"""
# Python doesn't have support for "independent" subexpression (?>)
# Replace those with non capturing groups (?:)
(?<=\(\?) # Look-behind and match (?
(>) # Match >
""", re.VERBOSE), r":"),
)
class Pattern(object):
"""Abstract class for rule regex matching."""
def __init__(self, pattern):
self._pattern = pattern
def match(self, text):
raise NotImplementedError()
class MatchPattern(Pattern):
"""This pattern does a search on the text and returns either 1 or 0."""
def match(self, text):
return 1 if self._pattern.search(text) else 0
class NotMatchPattern(Pattern):
"""This pattern does a search on the text and returns either 1 or 0."""
def match(self, text):
return 0 if self._pattern.search(text) else 1
def perl2re(pattern, match_op="=~"):
"""Convert a Perl type regex to a Python one."""
# We don't need to consider the pre-flags
pattern = pattern.strip().lstrip("mgs")
delim = pattern[0]
try:
rev_delim = DELIMS[delim]
except KeyError:
raise oa.errors.InvalidRegex("Invalid regex delimiter %r in %r" %
(delim, pattern))
try:
pattern, flags_str = pattern.lstrip(delim).rsplit(rev_delim, 1)
except ValueError:
raise oa.errors.InvalidRegex("Invalid regex %r. Please make sure you "
"have escaped all the special characters "
"when you defined the regex in "
"configuration file" % pattern)
for conv_p, repl in _CONVERTS:
pattern = conv_p.sub(repl, pattern)
flags = reduce(operator.or_, (FLAGS.get(flag, 0) for flag in flags_str), 0)
try:
if match_op == "=~":
return MatchPattern(re.compile(pattern, flags))
elif match_op == "!~": return NotMatchPattern(re.compile(pattern, flags))
except re.error as e:
raise oa.errors.InvalidRegex("Invalid regex %r: %s" % (pattern, e))
class Regex(object):
"""Customised regex class to work in lazy mode"""
compiled = None
def __init__(self, pattern, flags=0):
self.pattern = pattern
self.flags = flags
def compile(self):
from oa.config import LAZY_MODE
if LAZY_MODE:
return re.compile(self.pattern, self.flags)
elif not self.compiled:
self.compiled = re.compile(self.pattern, self.flags)
return self.compiled
def search(self, string):
return self.compile().search(string)
def match(self, string):
return self.compile().match(string)
def fullmatch(self, string):
return self.compile().fullmatch(string)
def sub(self, repl, string, count=0):
return self.compile().sub(repl, string, count)
def subn(self, repl, string, count=0):
return self.compile().subn(repl, string, count)
def split(self, string, maxsplit=0):
return self.compile().split(string, maxsplit)
def findall(self, string):
return self.compile().findall(string)
def finditer(self, string):
return self.compile().finditer(string)
| pt | 0.15604 | 2.808977 | 3 |
test_data/barometer_kalman.py | theo-brown/ahrs | 1 | 14964 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from kalman_filter import KalmanFilter
raw_data = np.loadtxt("barometer_data.txt")
# Truncate raw data (it's super long)
raw_data = raw_data[:raw_data.size//4]
raw_data_step = np.loadtxt("barometer_data_step.txt")
t1 = np.arange(0, raw_data.size/12.5, 1/12.5)
t2 = np.arange(0, raw_data_step.size/12.5, 1/12.5)
fig1 = plt.figure("Data")
ax1 = fig1.add_subplot(121)
ax2 = fig1.add_subplot(122)
fig1.subplots_adjust(bottom=0.25)
[unfiltered_raw_line] = ax1.plot(t1, raw_data)
[unfiltered_step_line] = ax2.plot(t2, raw_data_step)
def filter_data(data, x0, P, Q, R):
filter1 = KalmanFilter(x0, P, 1, 0, 1, Q, R)
x_out = np.zeros(data.size)
P_out = np.zeros(data.size)
for k in np.arange(1, data.size):
x_out[k], P_out[k] = filter1.update(0, data[k])
return x_out, P_out
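# --- Illustrative check (added; reuses the filter_data helper defined above) ---
# filter_data() runs a scalar Kalman filter over a 1-D series: x0 and P are the
# initial state estimate and covariance, Q the process noise and R the
# measurement noise. On synthetic data (a constant level plus noise) the
# filtered output should settle close to the true level.
def _filter_data_demo(true_level=1000.0, n=200):
    noisy = true_level + np.random.randn(n) * 0.5
    filtered, _ = filter_data(noisy, x0=noisy[0], P=2, Q=1e-4, R=noisy.var())
    return filtered[-1]  # expected to end up near true_level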
P0 = 2
Q0 = 1e-4
[filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0])
[filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0])
P_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03])
Q_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03])
P_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0)
Q_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0)
def sliders_on_changed(val):
P = P_slider.val
Q = Q_slider.val
x_raw_new, P_raw_new = filter_data(raw_data, 0, P, Q, R=raw_data.var())
filtered_raw_line.set_ydata(x_raw_new)
x_step_new, P_step_new = filter_data(raw_data_step, 0, P, Q, R=raw_data.var())
filtered_step_line.set_ydata(x_step_new)
P_slider.on_changed(sliders_on_changed)
Q_slider.on_changed(sliders_on_changed)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from kalman_filter import KalmanFilter
raw_data = np.loadtxt("barometer_data.txt")
# Truncate raw data (it's super long)
raw_data = raw_data[:raw_data.size//4]
raw_data_step = np.loadtxt("barometer_data_step.txt")
t1 = np.arange(0, raw_data.size/12.5, 1/12.5)
t2 = np.arange(0, raw_data_step.size/12.5, 1/12.5)
fig1 = plt.figure("Data")
ax1 = fig1.add_subplot(121)
ax2 = fig1.add_subplot(122)
fig1.subplots_adjust(bottom=0.25)
[unfiltered_raw_line] = ax1.plot(t1, raw_data)
[unfiltered_step_line] = ax2.plot(t2, raw_data_step)
def filter_data(data, x0, P, Q, R):
filter1 = KalmanFilter(x0, P, 1, 0, 1, Q, R)
x_out = np.zeros(data.size)
P_out = np.zeros(data.size)
for k in np.arange(1, data.size):
x_out[k], P_out[k] = filter1.update(0, data[k])
return x_out, P_out
P0 = 2
Q0 = 1e-4
[filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0])
[filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0])
P_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03])
Q_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03])
P_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0)
Q_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0)
def sliders_on_changed(val):
P = P_slider.val
Q = Q_slider.val
x_raw_new, P_raw_new = filter_data(raw_data, 0, P, Q, R=raw_data.var())
filtered_raw_line.set_ydata(x_raw_new)
x_step_new, P_step_new = filter_data(raw_data_step, 0, P, Q, R=raw_data.var())
filtered_step_line.set_ydata(x_step_new)
P_slider.on_changed(sliders_on_changed)
Q_slider.on_changed(sliders_on_changed)
plt.show()
| en | 0.151844 | 2.903889 | 3 |
number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py | joaojunior/hackerrank | 0 | 14965 | import heapq
from typing import List
class Solution:
def get_number_of_backlog_orders(self, orders: List[List[int]]) -> int:
sell_backlog = []
buy_backlog = []
for price, amount, order_type in orders:
if order_type == 0:
while amount > 0:
if sell_backlog and sell_backlog[0][0] <= price:
sell_price, sell_amount = heapq.heappop(sell_backlog)
if sell_amount > amount:
heapq.heappush(sell_backlog,
(sell_price, sell_amount - amount))
amount = 0
else:
amount -= sell_amount
else:
heapq.heappush(buy_backlog, (-price, amount))
amount = 0
else:
while amount > 0:
if buy_backlog and -buy_backlog[0][0] >= price:
buy_price, buy_amount = heapq.heappop(buy_backlog)
if buy_amount > amount:
heapq.heappush(buy_backlog,
(buy_price, buy_amount - amount))
amount = 0
else:
amount -= buy_amount
else:
heapq.heappush(sell_backlog, (price, amount))
amount = 0
result = 0
for _, amount in sell_backlog:
result += amount
for _, amount in buy_backlog:
result += amount
return result % (10**9 + 7)
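# --- Usage sketch (added for illustration) ---
# Walk-through with the classic example for this problem:
#   [10,5,0] buy 5 @ 10  -> no sells yet, goes to the buy backlog
#   [15,2,1] sell 2 @ 15 -> best buy (10) is below 15, goes to the sell backlog
#   [25,1,1] sell 1 @ 25 -> still no matching buy, sell backlog grows
#   [30,4,0] buy 4 @ 30  -> consumes both sells (2 + 1), 1 share left over
# Remaining backlog: 5 + 1 buy orders = 6.
if __name__ == "__main__":
    orders = [[10, 5, 0], [15, 2, 1], [25, 1, 1], [30, 4, 0]]
    print(Solution().get_number_of_backlog_orders(orders))  # 6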
| import heapq
from typing import List
class Solution:
def get_number_of_backlog_orders(self, orders: List[List[int]]) -> int:
sell_backlog = []
buy_backlog = []
for price, amount, order_type in orders:
if order_type == 0:
while amount > 0:
if sell_backlog and sell_backlog[0][0] <= price:
sell_price, sell_amount = heapq.heappop(sell_backlog)
if sell_amount > amount:
heapq.heappush(sell_backlog,
(sell_price, sell_amount - amount))
amount = 0
else:
amount -= sell_amount
else:
heapq.heappush(buy_backlog, (-price, amount))
amount = 0
else:
while amount > 0:
if buy_backlog and -buy_backlog[0][0] >= price:
buy_price, buy_amount = heapq.heappop(buy_backlog)
if buy_amount > amount:
heapq.heappush(buy_backlog,
(buy_price, buy_amount - amount))
amount = 0
else:
amount -= buy_amount
else:
heapq.heappush(sell_backlog, (price, amount))
amount = 0
result = 0
for _, amount in sell_backlog:
result += amount
for _, amount in buy_backlog:
result += amount
return result % (10**9 + 7)
| none | 1 | 3.453799 | 3 |
scripts/plotresults.py | rafzi/DeepThings | 1 | 14966 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# 1: YOLOv2, 2: AlexNet, 3: VGG-16, 4: GoogLeNet
model = 4
LINEPLOT = True
dfs = pd.read_excel("t.xlsx", sheet_name=None, header=None)
if model == 1:
ms = "YOLOv2"
elif model == 2:
ms = "AlexNet"
elif model == 3:
ms = "VGG-16"
elif model == 4:
ms = "GoogLeNet"
sh = dfs[ms]
print(sh)
labels = ["1", "2", "3", "4", "5", "6"]
x = np.arange(len(labels))
plt.rcParams.update({"font.size": 11})
fig, ax = plt.subplots()
plt.subplots_adjust(top=0.95, right=0.95)
# Workaround for this: https://bugs.python.org/issue32790
def fmtFlt(f, digits):
s = ("{:#." + str(digits) + "g}").format(f)
sz = len(s) - 1
if sz < digits:
s += "0"
if s[-1] == ".":
s = s[:-1]
return s
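# --- Illustrative note (added) ---
# fmtFlt() pads "general"-format output back to the requested number of
# significant digits, working around the trailing-zero issue referenced above.
# For example (exact padding may differ slightly across Python versions):
#   fmtFlt(3.14159, 3) -> "3.14"
#   fmtFlt(12.0, 3)    -> "12.0"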
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate(fmtFlt(height, 3),
xy=(rect.get_x() + 1.2*rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=90, fontsize=9.5)
def addData(speed, fused):
y = []
lineindex = -4 + (speed)*(13+4)
addindex = 1 if fused else 0
for i in range(0, 6):
y.append(sh[5*2 + addindex][lineindex] / sh[i*2 + addindex][lineindex])
y = np.array(y)# / 1000
y = np.flip(y)
l = ("OWP @ " if fused else "LOP @ ") + \
("1 GBit/s" if speed == 1 else ("100 MBit/s" if speed == 2 else "10 MBit/s"))
color = "C1" if fused else "C0"
if LINEPLOT:
color = "C3" if speed == 1 else ("C4" if speed == 2 else "C1")
#line = "o" if speed == 1 else ("v" if speed == 2 else "s")
line = "o" if fused else "s"
line += "--" if fused else "-"
ax.plot(x, y, line, label=l, color=color)
else:
barw = 0.15
bars = 6
i = 2 * (-speed+4-1) + int(fused)
#patterns = ["\\\\", "//", "||", "--", "..", "OO"]
patterns = ["\\\\", "\\\\", "//", "//", "..", ".."]
g = ax.bar(x + barw/2 - bars/2*barw + i * barw, y, barw, label=l, color=color,
hatch=patterns[i], alpha=0.99)
#autolabel(g)
# 1: 1gbit, 2: 100mbit, 3: 10mbit
addData(1, True)
addData(1, False)
addData(2, True)
addData(2, False)
addData(3, True)
addData(3, False)
#plt.ylim(plt.ylim()*1.1)
ybot, ytop = plt.ylim()
plt.ylim(ybot, ytop*1.05)
ax.set_xlabel("Number of devices")
ax.set_ylabel("Run time speedup over one device")
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
plt.savefig("plot_runtime.pdf")
plt.show()
| import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# 1: YOLOv2, 2: AlexNet, 3: VGG-16, 4: GoogLeNet
model = 4
LINEPLOT = True
dfs = pd.read_excel("t.xlsx", sheet_name=None, header=None)
if model == 1:
ms = "YOLOv2"
elif model == 2:
ms = "AlexNet"
elif model == 3:
ms = "VGG-16"
elif model == 4:
ms = "GoogLeNet"
sh = dfs[ms]
print(sh)
labels = ["1", "2", "3", "4", "5", "6"]
x = np.arange(len(labels))
plt.rcParams.update({"font.size": 11})
fig, ax = plt.subplots()
plt.subplots_adjust(top=0.95, right=0.95)
# Workaround for this: https://bugs.python.org/issue32790
def fmtFlt(f, digits):
s = ("{:#." + str(digits) + "g}").format(f)
sz = len(s) - 1
if sz < digits:
s += "0"
if s[-1] == ".":
s = s[:-1]
return s
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate(fmtFlt(height, 3),
xy=(rect.get_x() + 1.2*rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=90, fontsize=9.5)
def addData(speed, fused):
y = []
lineindex = -4 + (speed)*(13+4)
addindex = 1 if fused else 0
for i in range(0, 6):
y.append(sh[5*2 + addindex][lineindex] / sh[i*2 + addindex][lineindex])
y = np.array(y)# / 1000
y = np.flip(y)
l = ("OWP @ " if fused else "LOP @ ") + \
("1 GBit/s" if speed == 1 else ("100 MBit/s" if speed == 2 else "10 MBit/s"))
color = "C1" if fused else "C0"
if LINEPLOT:
color = "C3" if speed == 1 else ("C4" if speed == 2 else "C1")
#line = "o" if speed == 1 else ("v" if speed == 2 else "s")
line = "o" if fused else "s"
line += "--" if fused else "-"
ax.plot(x, y, line, label=l, color=color)
else:
barw = 0.15
bars = 6
i = 2 * (-speed+4-1) + int(fused)
#patterns = ["\\\\", "//", "||", "--", "..", "OO"]
patterns = ["\\\\", "\\\\", "//", "//", "..", ".."]
g = ax.bar(x + barw/2 - bars/2*barw + i * barw, y, barw, label=l, color=color,
hatch=patterns[i], alpha=0.99)
#autolabel(g)
# 1: 1gbit, 2: 100mbit, 3: 10mbit
addData(1, True)
addData(1, False)
addData(2, True)
addData(2, False)
addData(3, True)
addData(3, False)
#plt.ylim(plt.ylim()*1.1)
ybot, ytop = plt.ylim()
plt.ylim(ybot, ytop*1.05)
ax.set_xlabel("Number of devices")
ax.set_ylabel("Run time speedup over one device")
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
plt.savefig("plot_runtime.pdf")
plt.show() | en | 0.135275 | 2.815777 | 3 |
setup.py | oubiwann/myriad-worlds | 3 | 14967 | from setuptools import setup, find_packages
from myriad import meta
from myriad.util import dist
setup(
name=meta.display_name,
version=meta.version,
description=meta.description,
long_description=meta.long_description,
author=meta.author,
author_email=meta.author_email,
url=meta.url,
license=meta.license,
packages=find_packages() + ["twisted.plugins"],
package_data={
"twisted": ['plugins/example_server.py']
},
install_requires=meta.requires,
zip_safe=False
)
dist.refresh_plugin_cache()
| from setuptools import setup, find_packages
from myriad import meta
from myriad.util import dist
setup(
name=meta.display_name,
version=meta.version,
description=meta.description,
long_description=meta.long_description,
author=meta.author,
author_email=meta.author_email,
url=meta.url,
license=meta.license,
packages=find_packages() + ["twisted.plugins"],
package_data={
"twisted": ['plugins/example_server.py']
},
install_requires=meta.requires,
zip_safe=False
)
dist.refresh_plugin_cache() | none | 1 | 1.292514 | 1 |
val_resnet.py | AlexKhakhlyuk/fixedconv | 1 | 14968 | from subprocess import run
# python -u val_resnet.py
cuda = 0 # which gpu to use
dataset = 'cifar10'
logs_path = 'logs_resnet' + '_' + dataset
manualSeed = 99
workers = 0
for model in ['resnet20', 'preact_resnet20']:
commands = [
'python', '-u', 'validate_resnet.py',
'--dataset=' + dataset,
'--model=' + model,
'-c=' + str(cuda),
'--workers=' + str(workers),
'--manualSeed=' + str(manualSeed),
'--logs_path=' + logs_path,
]
run(commands)
for model in ['resnet20', 'preact_resnet20']:
f = True
for k in [1, 3]:
for ff in [False, True]:
commands = [
'python', '-u', 'validate_resnet.py',
'--dataset=' + dataset,
'--model=' + model,
'-k=' + str(k),
'-c=' + str(cuda),
'--workers=' + str(workers),
'--manualSeed=' + str(manualSeed),
'--logs_path=' + logs_path,
]
if f: commands.append('-f')
if ff: commands.append('--ff')
run(commands)
| from subprocess import run
# python -u val_resnet.py
cuda = 0 # which gpu to use
dataset = 'cifar10'
logs_path = 'logs_resnet' + '_' + dataset
manualSeed = 99
workers = 0
for model in ['resnet20', 'preact_resnet20']:
commands = [
'python', '-u', 'validate_resnet.py',
'--dataset=' + dataset,
'--model=' + model,
'-c=' + str(cuda),
'--workers=' + str(workers),
'--manualSeed=' + str(manualSeed),
'--logs_path=' + logs_path,
]
run(commands)
for model in ['resnet20', 'preact_resnet20']:
f = True
for k in [1, 3]:
for ff in [False, True]:
commands = [
'python', '-u', 'validate_resnet.py',
'--dataset=' + dataset,
'--model=' + model,
'-k=' + str(k),
'-c=' + str(cuda),
'--workers=' + str(workers),
'--manualSeed=' + str(manualSeed),
'--logs_path=' + logs_path,
]
if f: commands.append('-f')
if ff: commands.append('--ff')
run(commands)
| es | 0.300136 | 1.915976 | 2 |
akagi/data_sources/spreadsheet_data_source.py | pauchan/akagi | 26 | 14969 | from akagi.data_source import DataSource
from akagi.data_file import DataFile
class SpreadsheetDataSource(DataSource):
'''SpreadsheetDataSource represents data on Google Spreadsheets
'''
def __init__(self, sheet_id, sheet_range='A:Z', no_cache=False):
self._sheet_id = sheet_id
self._sheet_range = sheet_range
@property
def data_files(self):
return [DataFile.spreadsheet(self._sheet_id, self._sheet_range)]
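# --- Usage sketch (added for illustration; the sheet ID below is a placeholder) ---
# A SpreadsheetDataSource simply wraps a Google Sheets ID and an A1-style range
# and exposes them as a single DataFile through data_files:
#
#   source = SpreadsheetDataSource("<spreadsheet-id>", sheet_range="A:D")
#   data_file = source.data_files[0]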
| from akagi.data_source import DataSource
from akagi.data_file import DataFile
class SpreadsheetDataSource(DataSource):
'''SpreadsheetDataSource represents data on Google Spreadsheets
'''
def __init__(self, sheet_id, sheet_range='A:Z', no_cache=False):
self._sheet_id = sheet_id
self._sheet_range = sheet_range
@property
def data_files(self):
return [DataFile.spreadsheet(self._sheet_id, self._sheet_range)]
| it | 0.339294 | 2.324613 | 2 |
stanCode_projects/my_drawing/my_drawing.py | kenhuang1204/stanCode_projects | 0 | 14970 | """
File: my_drawing.py
Name: 黃科諺
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
def main():
"""
Meet Snorlax (卡比獸) of stanCode! He dreams of Python when he sleeps. Be like Snorlax.
"""
window = GWindow(width=300, height=300)
face_outer = GOval(120, 75, x=(window.width-120)/2, y=50)
face_outer.filled = True
face_outer.fill_color = 'darkcyan'
face_outer.color = 'darkcyan'
window.add(face_outer)
face_inner = GOval(100, 65, x=(window.width-100)/2, y=60)
face_inner.filled = True
face_inner.fill_color = 'lightsalmon'
face_inner.color = 'lightsalmon'
window.add(face_inner)
forehead = GPolygon()
forehead.add_vertex((135, 60))
forehead.add_vertex((165, 60))
forehead.add_vertex((150, 68))
forehead.filled = True
forehead.fill_color = 'darkcyan'
forehead.color = 'darkcyan'
window.add(forehead)
r_ear = GPolygon()
r_ear.add_vertex((113, 35))
r_ear.add_vertex((95, 75))
r_ear.add_vertex((140, 50))
r_ear.filled = True
r_ear.fill_color = 'darkcyan'
r_ear.color = 'darkcyan'
window.add(r_ear)
l_ear = GPolygon()
l_ear.add_vertex((187, 35))
l_ear.add_vertex((205, 75))
l_ear.add_vertex((160, 50))
l_ear.filled = True
l_ear.fill_color = 'darkcyan'
l_ear.color = 'darkcyan'
window.add(l_ear)
r_eye = GLine (120, 75, 140, 75)
window.add(r_eye)
l_eye = GLine(180, 75, 160, 75)
window.add(l_eye)
mouth = GLine(135, 85, 165, 85)
window.add(mouth)
r_tooth = GPolygon()
r_tooth.add_vertex((135, 84))
r_tooth.add_vertex((139, 84))
r_tooth.add_vertex((137, 80))
r_tooth.filled = True
r_tooth.fill_color = 'white'
r_tooth.color = 'white'
window.add(r_tooth)
l_tooth = GPolygon()
l_tooth.add_vertex((165, 84))
l_tooth.add_vertex((161, 84))
l_tooth.add_vertex((163, 80))
l_tooth.filled = True
l_tooth.fill_color = 'white'
l_tooth.color = 'white'
window.add(l_tooth)
r_arm = GOval(100, 45, x=25, y=98)
r_arm.filled = True
r_arm.fill_color = 'darkcyan'
r_arm.color = 'darkcyan'
window.add(r_arm)
l_arm = GOval(100, 45, x=175, y=98)
l_arm.filled = True
l_arm.fill_color = 'darkcyan'
l_arm.color = 'darkcyan'
window.add(l_arm)
body = GOval(200, 160, x=(window.width - 200) / 2, y=95)
body.filled = True
body.fill_color = 'darkcyan'
body.color = 'darkcyan'
window.add(body)
belly = GOval(176, 120, x=(window.width - 176) / 2, y=95)
belly.filled = True
belly.fill_color = 'lightsalmon'
window.add(belly)
r_claw1 = GPolygon()
r_claw1.add_vertex((38, 100))
r_claw1.add_vertex((44, 102))
r_claw1.add_vertex((40, 106))
r_claw1.filled = True
r_claw1.fill_color = 'white'
window.add(r_claw1)
r_claw2 = GPolygon()
r_claw2.add_vertex((32, 102))
r_claw2.add_vertex((38, 104))
r_claw2.add_vertex((35, 108))
r_claw2.filled = True
r_claw2.fill_color = 'white'
window.add(r_claw2)
r_claw3 = GPolygon()
r_claw3.add_vertex((28, 104))
r_claw3.add_vertex((34, 106))
r_claw3.add_vertex((31, 110))
r_claw3.filled = True
r_claw3.fill_color = 'white'
window.add(r_claw3)
r_claw4 = GPolygon()
r_claw4.add_vertex((24, 109))
r_claw4.add_vertex((30, 111))
r_claw4.add_vertex((27, 115))
r_claw4.filled = True
r_claw4.fill_color = 'white'
window.add(r_claw4)
r_claw5 = GPolygon()
r_claw5.add_vertex((19, 122))
r_claw5.add_vertex((25, 121))
r_claw5.add_vertex((28, 127))
r_claw5.filled = True
r_claw5.fill_color = 'white'
window.add(r_claw5)
l_claw1 = GPolygon()
l_claw1.add_vertex((262, 100))
l_claw1.add_vertex((256, 102))
l_claw1.add_vertex((260, 106))
l_claw1.filled = True
l_claw1.fill_color = 'white'
window.add(l_claw1)
l_claw2 = GPolygon()
l_claw2.add_vertex((268, 102))
l_claw2.add_vertex((262, 104))
l_claw2.add_vertex((265, 108))
l_claw2.filled = True
l_claw2.fill_color = 'white'
window.add(l_claw2)
l_claw3 = GPolygon()
l_claw3.add_vertex((272, 104))
l_claw3.add_vertex((266, 106))
l_claw3.add_vertex((269, 110))
l_claw3.filled = True
l_claw3.fill_color = 'white'
window.add(l_claw3)
r_claw4 = GPolygon()
r_claw4.add_vertex((276, 109))
r_claw4.add_vertex((270, 111))
r_claw4.add_vertex((273, 115))
r_claw4.filled = True
r_claw4.fill_color = 'white'
window.add(r_claw4)
r_claw5 = GPolygon()
r_claw5.add_vertex((281, 122))
r_claw5.add_vertex((275, 121))
r_claw5.add_vertex((272, 127))
r_claw5.filled = True
r_claw5.fill_color = 'white'
window.add(r_claw5)
r_foot = GOval(65, 60, x=50, y=220)
r_foot.filled = True
r_foot.fill_color = 'lightsalmon'
r_foot.color = 'lightsalmon'
window.add(r_foot)
r_palm = GOval(45, 40, x=65, y=235)
r_palm.filled = True
r_palm.fill_color = 'Chocolate'
r_palm.color = 'Chocolate'
window.add(r_palm)
r_nail1 = GPolygon()
r_nail1.add_vertex((80, 210))
r_nail1.add_vertex((88, 223))
r_nail1.add_vertex((78, 224))
r_nail1.filled = True
r_nail1.fill_color = 'white'
window.add(r_nail1)
r_nail2 = GPolygon()
r_nail2.add_vertex((52, 220))
r_nail2.add_vertex((65, 228))
r_nail2.add_vertex((57, 235))
r_nail2.filled = True
r_nail2.fill_color = 'white'
window.add(r_nail2)
r_nail3 = GPolygon()
r_nail3.add_vertex((43, 250))
r_nail3.add_vertex((54, 248))
r_nail3.add_vertex((52, 258))
r_nail3.filled = True
r_nail3.fill_color = 'white'
window.add(r_nail3)
l_foot = GOval(65, 60, x=185, y=220)
l_foot.filled = True
l_foot.fill_color = 'lightsalmon'
l_foot.color = 'lightsalmon'
window.add(l_foot)
l_palm = GOval(45, 40, x=190, y=235)
l_palm.filled = True
l_palm.fill_color = 'Chocolate'
l_palm.color = 'Chocolate'
window.add(l_palm)
l_nail1 = GPolygon()
l_nail1.add_vertex((220, 210))
l_nail1.add_vertex((212, 223))
l_nail1.add_vertex((222, 224))
l_nail1.filled = True
l_nail1.fill_color = 'white'
window.add(l_nail1)
r_nail2 = GPolygon()
r_nail2.add_vertex((248, 220))
r_nail2.add_vertex((235, 228))
r_nail2.add_vertex((243, 235))
r_nail2.filled = True
r_nail2.fill_color = 'white'
window.add(r_nail2)
r_nail3 = GPolygon()
r_nail3.add_vertex((257, 250))
r_nail3.add_vertex((246, 248))
r_nail3.add_vertex((248, 258))
r_nail3.filled = True
r_nail3.fill_color = 'white'
window.add(r_nail3)
word = GLabel('stanCode', x=123, y=185)
word.font = '-8-bold'
window.add(word)
bubble1 = GOval(10, 10, x=140, y=35)
window.add(bubble1)
bubble2 = GOval(15, 15, x=155, y=23)
window.add(bubble2)
bubble3 = GOval(20, 20, x=175, y=12)
window.add(bubble3)
bubble4 = GOval(95, 85, x=200, y=5)
window.add(bubble4)
word2 = GLabel('Python', x=207, y=50)
word2.font = 'Courier-18'
window.add(word2)
word3 = GLabel('Python', x=220, y=80)
word3.font = 'Courier-13'
window.add(word3)
word4 = GLabel('Python', x=242, y=60)
word4.font = 'Courier-8'
window.add(word4)
if __name__ == '__main__':
main()
| """
File: my_drawing.py
Name: 黃科諺
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
def main():
"""
Meet Snorlax (卡比獸) of stanCode! He dreams of Python when he sleeps. Be like Snorlax.
"""
window = GWindow(width=300, height=300)
face_outer = GOval(120, 75, x=(window.width-120)/2, y=50)
face_outer.filled = True
face_outer.fill_color = 'darkcyan'
face_outer.color = 'darkcyan'
window.add(face_outer)
face_inner = GOval(100, 65, x=(window.width-100)/2, y=60)
face_inner.filled = True
face_inner.fill_color = 'lightsalmon'
face_inner.color = 'lightsalmon'
window.add(face_inner)
forehead = GPolygon()
forehead.add_vertex((135, 60))
forehead.add_vertex((165, 60))
forehead.add_vertex((150, 68))
forehead.filled = True
forehead.fill_color = 'darkcyan'
forehead.color = 'darkcyan'
window.add(forehead)
r_ear = GPolygon()
r_ear.add_vertex((113, 35))
r_ear.add_vertex((95, 75))
r_ear.add_vertex((140, 50))
r_ear.filled = True
r_ear.fill_color = 'darkcyan'
r_ear.color = 'darkcyan'
window.add(r_ear)
l_ear = GPolygon()
l_ear.add_vertex((187, 35))
l_ear.add_vertex((205, 75))
l_ear.add_vertex((160, 50))
l_ear.filled = True
l_ear.fill_color = 'darkcyan'
l_ear.color = 'darkcyan'
window.add(l_ear)
r_eye = GLine (120, 75, 140, 75)
window.add(r_eye)
l_eye = GLine(180, 75, 160, 75)
window.add(l_eye)
mouth = GLine(135, 85, 165, 85)
window.add(mouth)
r_tooth = GPolygon()
r_tooth.add_vertex((135, 84))
r_tooth.add_vertex((139, 84))
r_tooth.add_vertex((137, 80))
r_tooth.filled = True
r_tooth.fill_color = 'white'
r_tooth.color = 'white'
window.add(r_tooth)
l_tooth = GPolygon()
l_tooth.add_vertex((165, 84))
l_tooth.add_vertex((161, 84))
l_tooth.add_vertex((163, 80))
l_tooth.filled = True
l_tooth.fill_color = 'white'
l_tooth.color = 'white'
window.add(l_tooth)
r_arm = GOval(100, 45, x=25, y=98)
r_arm.filled = True
r_arm.fill_color = 'darkcyan'
r_arm.color = 'darkcyan'
window.add(r_arm)
l_arm = GOval(100, 45, x=175, y=98)
l_arm.filled = True
l_arm.fill_color = 'darkcyan'
l_arm.color = 'darkcyan'
window.add(l_arm)
body = GOval(200, 160, x=(window.width - 200) / 2, y=95)
body.filled = True
body.fill_color = 'darkcyan'
body.color = 'darkcyan'
window.add(body)
belly = GOval(176, 120, x=(window.width - 176) / 2, y=95)
belly.filled = True
belly.fill_color = 'lightsalmon'
window.add(belly)
r_claw1 = GPolygon()
r_claw1.add_vertex((38, 100))
r_claw1.add_vertex((44, 102))
r_claw1.add_vertex((40, 106))
r_claw1.filled = True
r_claw1.fill_color = 'white'
window.add(r_claw1)
r_claw2 = GPolygon()
r_claw2.add_vertex((32, 102))
r_claw2.add_vertex((38, 104))
r_claw2.add_vertex((35, 108))
r_claw2.filled = True
r_claw2.fill_color = 'white'
window.add(r_claw2)
r_claw3 = GPolygon()
r_claw3.add_vertex((28, 104))
r_claw3.add_vertex((34, 106))
r_claw3.add_vertex((31, 110))
r_claw3.filled = True
r_claw3.fill_color = 'white'
window.add(r_claw3)
r_claw4 = GPolygon()
r_claw4.add_vertex((24, 109))
r_claw4.add_vertex((30, 111))
r_claw4.add_vertex((27, 115))
r_claw4.filled = True
r_claw4.fill_color = 'white'
window.add(r_claw4)
r_claw5 = GPolygon()
r_claw5.add_vertex((19, 122))
r_claw5.add_vertex((25, 121))
r_claw5.add_vertex((28, 127))
r_claw5.filled = True
r_claw5.fill_color = 'white'
window.add(r_claw5)
l_claw1 = GPolygon()
l_claw1.add_vertex((262, 100))
l_claw1.add_vertex((256, 102))
l_claw1.add_vertex((260, 106))
l_claw1.filled = True
l_claw1.fill_color = 'white'
window.add(l_claw1)
l_claw2 = GPolygon()
l_claw2.add_vertex((268, 102))
l_claw2.add_vertex((262, 104))
l_claw2.add_vertex((265, 108))
l_claw2.filled = True
l_claw2.fill_color = 'white'
window.add(l_claw2)
l_claw3 = GPolygon()
l_claw3.add_vertex((272, 104))
l_claw3.add_vertex((266, 106))
l_claw3.add_vertex((269, 110))
l_claw3.filled = True
l_claw3.fill_color = 'white'
window.add(l_claw3)
r_claw4 = GPolygon()
r_claw4.add_vertex((276, 109))
r_claw4.add_vertex((270, 111))
r_claw4.add_vertex((273, 115))
r_claw4.filled = True
r_claw4.fill_color = 'white'
window.add(r_claw4)
r_claw5 = GPolygon()
r_claw5.add_vertex((281, 122))
r_claw5.add_vertex((275, 121))
r_claw5.add_vertex((272, 127))
r_claw5.filled = True
r_claw5.fill_color = 'white'
window.add(r_claw5)
r_foot = GOval(65, 60, x=50, y=220)
r_foot.filled = True
r_foot.fill_color = 'lightsalmon'
r_foot.color = 'lightsalmon'
window.add(r_foot)
r_palm = GOval(45, 40, x=65, y=235)
r_palm.filled = True
r_palm.fill_color = 'Chocolate'
r_palm.color = 'Chocolate'
window.add(r_palm)
r_nail1 = GPolygon()
r_nail1.add_vertex((80, 210))
r_nail1.add_vertex((88, 223))
r_nail1.add_vertex((78, 224))
r_nail1.filled = True
r_nail1.fill_color = 'white'
window.add(r_nail1)
r_nail2 = GPolygon()
r_nail2.add_vertex((52, 220))
r_nail2.add_vertex((65, 228))
r_nail2.add_vertex((57, 235))
r_nail2.filled = True
r_nail2.fill_color = 'white'
window.add(r_nail2)
r_nail3 = GPolygon()
r_nail3.add_vertex((43, 250))
r_nail3.add_vertex((54, 248))
r_nail3.add_vertex((52, 258))
r_nail3.filled = True
r_nail3.fill_color = 'white'
window.add(r_nail3)
l_foot = GOval(65, 60, x=185, y=220)
l_foot.filled = True
l_foot.fill_color = 'lightsalmon'
l_foot.color = 'lightsalmon'
window.add(l_foot)
l_palm = GOval(45, 40, x=190, y=235)
l_palm.filled = True
l_palm.fill_color = 'Chocolate'
l_palm.color = 'Chocolate'
window.add(l_palm)
l_nail1 = GPolygon()
l_nail1.add_vertex((220, 210))
l_nail1.add_vertex((212, 223))
l_nail1.add_vertex((222, 224))
l_nail1.filled = True
l_nail1.fill_color = 'white'
window.add(l_nail1)
r_nail2 = GPolygon()
r_nail2.add_vertex((248, 220))
r_nail2.add_vertex((235, 228))
r_nail2.add_vertex((243, 235))
r_nail2.filled = True
r_nail2.fill_color = 'white'
window.add(r_nail2)
r_nail3 = GPolygon()
r_nail3.add_vertex((257, 250))
r_nail3.add_vertex((246, 248))
r_nail3.add_vertex((248, 258))
r_nail3.filled = True
r_nail3.fill_color = 'white'
window.add(r_nail3)
word = GLabel('stanCode', x=123, y=185)
word.font = '-8-bold'
window.add(word)
bubble1 = GOval(10, 10, x=140, y=35)
window.add(bubble1)
bubble2 = GOval(15, 15, x=155, y=23)
window.add(bubble2)
bubble3 = GOval(20, 20, x=175, y=12)
window.add(bubble3)
bubble4 = GOval(95, 85, x=200, y=5)
window.add(bubble4)
word2 = GLabel('Python', x=207, y=50)
word2.font = 'Courier-18'
window.add(word2)
word3 = GLabel('Python', x=220, y=80)
word3.font = 'Courier-13'
window.add(word3)
word4 = GLabel('Python', x=242, y=60)
word4.font = 'Courier-8'
window.add(word4)
if __name__ == '__main__':
main()
| zh | 0.225849 | 2.859063 | 3 |
kivy/core/clipboard/clipboard_xsel.py | CharaD7/kivy | 2 | 14971 | '''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard._clipboard_ext import ClipboardExternalBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardExternalBase):
@staticmethod
def _clip(inout, selection):
pipe = {'std' + inout: subprocess.PIPE}
sel = 'b' if selection == 'clipboard' else selection[0]
io = inout[0]
return subprocess.Popen(
['xsel', '-' + sel + io], **pipe)
| '''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard._clipboard_ext import ClipboardExternalBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardExternalBase):
@staticmethod
def _clip(inout, selection):
pipe = {'std' + inout: subprocess.PIPE}
sel = 'b' if selection == 'clipboard' else selection[0]
io = inout[0]
return subprocess.Popen(
['xsel', '-' + sel + io], **pipe) | pt | 0.15587 | 2.785663 | 3 |
plugins/hashsum_download/girder_hashsum_download/settings.py | JKitok/girder | 395 | 14972 | from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
AUTO_COMPUTE = 'hashsum_download.auto_compute'
@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
return False
@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Auto-compute hash setting must be true or false.')
| from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
AUTO_COMPUTE = 'hashsum_download.auto_compute'
@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
return False
@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Auto-compute hash setting must be true or false.')
| none | 1 | 2.412325 | 2 |
forager_server/forager_server_api/views.py | jeremyephron/forager | 1 | 14973 | from collections import defaultdict, namedtuple
from dataclasses import dataclass
import distutils.util
import functools
import itertools
import json
import math
import operator
import os
import random
import uuid
import shutil
import logging
import time
from typing import List, Dict, NamedTuple, Optional
from django.db.models import Q
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, get_list_or_404
from django.conf import settings
from google.cloud import storage
from rest_framework.decorators import api_view
import requests
from expiringdict import ExpiringDict
from .models import (
Dataset,
DatasetItem,
Category,
Mode,
User,
Annotation,
DNNModel,
CategoryCount,
)
BUILTIN_MODES = ["POSITIVE", "NEGATIVE", "HARD_NEGATIVE", "UNSURE"]
logger = logging.getLogger(__name__)
@api_view(["POST"])
@csrf_exempt
def start_cluster(request):
# TODO(mihirg): Remove this setting from Django; it's now managed by Terraform
# (or figure out how to set it from the frontend if we need that)
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_cluster",
)
response_data = r.json()
return JsonResponse(
{
"status": "success",
"cluster_id": response_data["cluster_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_cluster_status(request, cluster_id):
params = {"cluster_id": cluster_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/cluster_status", params=params
)
response_data = r.json()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def stop_cluster(request, cluster_id):
params = {"cluster_id": cluster_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/stop_cluster",
json=params,
)
return JsonResponse(
{
"status": "success",
}
)
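# --- Client-side sketch (added for illustration; routes and fields are assumptions) ---
# The three views above form a simple lifecycle: POST to start a cluster, poll
# its status with the returned cluster_id, then POST to stop it. The URL conf
# is not shown here, so the paths below are placeholders, and the status
# response is whatever the embedding server returns.
#
#   import requests, time
#   base = "http://localhost:8000/api"
#   cluster_id = requests.post(f"{base}/start_cluster").json()["cluster_id"]
#   while not requests.get(f"{base}/cluster_status/{cluster_id}").json().get("ready"):
#       time.sleep(5)
#   requests.post(f"{base}/stop_cluster/{cluster_id}")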
@api_view(["POST"])
@csrf_exempt
def create_model(request, dataset_name, dataset=None):
payload = json.loads(request.body)
model_name = payload["model_name"]
cluster_id = payload["cluster_id"]
bucket_name = payload["bucket"]
index_id = payload["index_id"]
pos_tags = parse_tag_set_from_query_v2(payload["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(payload["neg_tags"])
val_pos_tags = parse_tag_set_from_query_v2(payload["val_pos_tags"])
val_neg_tags = parse_tag_set_from_query_v2(payload["val_neg_tags"])
augment_negs = bool(payload["augment_negs"])
model_kwargs = payload["model_kwargs"]
resume_model_id = payload.get("resume", None)
dataset = get_object_or_404(Dataset, name=dataset_name)
eligible_images = DatasetItem.objects.filter(dataset=dataset, is_val=False)
categories = Category.objects.filter(
tag_sets_to_query(pos_tags, neg_tags, val_pos_tags, val_neg_tags)
)
annotations = Annotation.objects.filter(
dataset_item__in=eligible_images,
category__in=categories,
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
val_pos_dataset_item_pks = []
val_neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags for t in tags):
neg_dataset_item_pks.append(pk)
elif any(t in val_pos_tags for t in tags):
val_pos_dataset_item_pks.append(pk)
elif any(t in val_neg_tags for t in tags):
val_neg_dataset_item_pks.append(pk)
# Augment with randomly sampled negatives if requested
num_extra_negs = settings.BGSPLIT_NUM_NEGS_MULTIPLIER * len(
pos_dataset_item_pks
) - len(neg_dataset_item_pks)
if augment_negs and num_extra_negs > 0:
# Uses "include" and "exclude" category sets from request
all_eligible_pks = filtered_images_v2(
request,
dataset,
exclude_pks=(
pos_dataset_item_pks
+ neg_dataset_item_pks
+ val_pos_dataset_item_pks
+ val_neg_dataset_item_pks
),
)
sampled_pks = random.sample(
all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
)
neg_dataset_item_pks.extend(sampled_pks)
pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
val_pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=val_pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
val_neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=val_neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
if resume_model_id:
resume_model = get_object_or_404(DNNModel, model_id=resume_model_id)
resume_model_path = resume_model.checkpoint_path
else:
resume_model = None
resume_model_path = None
params = {
"pos_identifiers": pos_dataset_item_internal_identifiers,
"neg_identifiers": neg_dataset_item_internal_identifiers,
"val_pos_identifiers": val_pos_dataset_item_internal_identifiers,
"val_neg_identifiers": val_neg_dataset_item_internal_identifiers,
"augment_negs": augment_negs,
"model_kwargs": model_kwargs,
"model_name": model_name,
"bucket": bucket_name,
"cluster_id": cluster_id,
"index_id": index_id,
"resume_from": resume_model_path,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_job",
json=params,
)
response_data = r.json()
if r.status_code != 200:
return JsonResponse(
{"status": "failure", "reason": response_data.get("reason", "")},
status=r.status_code,
)
m = DNNModel(
dataset=dataset,
name=model_name,
model_id=response_data["model_id"],
category_spec={
"augment_negs": augment_negs,
"pos_tags": payload["pos_tags"],
"neg_tags": payload["neg_tags"],
"augment_negs_include": payload.get("include", []) if augment_negs else [],
"augment_negs_exclude": payload.get("exclude", []) if augment_negs else [],
},
)
model_epoch = -1 + model_kwargs.get("epochs_to_run", 1)
if resume_model_id:
m.resume_model_id = resume_model_id
if model_kwargs.get("resume_training", False):
model_epoch += resume_model.epoch + 1
m.epoch = model_epoch
m.save()
return JsonResponse(
{
"status": "success",
"model_id": response_data["model_id"],
}
)
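# Once the training job reports a checkpoint (has_model), its path is persisted
# on the DNNModel row so it can later be used for inference
# (run_model_inference) and for resuming training (create_model's "resume").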
@api_view(["GET"])
@csrf_exempt
def get_model_status(request, model_id):
params = {"model_id": model_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_job_status", params=params
)
response_data = r.json()
if response_data["has_model"]:
        # Model checkpoint has been created & uploaded -> persist its path
m = get_object_or_404(DNNModel, model_id=model_id)
m.checkpoint_path = response_data["checkpoint_path"]
m.save()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def update_model_v2(request):
payload = json.loads(request.body)
# user = payload["user"]
old_model_name = payload["old_model_name"]
new_model_name = payload["new_model_name"]
models = get_list_or_404(DNNModel, name=old_model_name)
for m in models:
m.name = new_model_name
m.save()
return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def delete_model_v2(request):
payload = json.loads(request.body)
model_name = payload["model_name"]
# cluster_id = payload['cluster_id']
models = get_list_or_404(DNNModel, name=model_name)
for m in models:
# TODO(fpoms): delete model data stored on NFS?
# shutil.rmtree(os.path.join(m.checkpoint_path, '..'))
shutil.rmtree(m.output_directory, ignore_errors=True)
m.delete()
return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def run_model_inference(request, dataset_name, dataset=None):
payload = json.loads(request.body)
model_id = payload["model_id"]
cluster_id = payload["cluster_id"]
bucket_name = payload["bucket"]
index_id = payload["index_id"]
dataset = get_object_or_404(Dataset, name=dataset_name)
model_checkpoint_path = get_object_or_404(
DNNModel, model_id=model_id
).checkpoint_path
if model_checkpoint_path is None or len(model_checkpoint_path) == 0:
return JsonResponse(
{
"status": "failure",
"reason": f"Model {model_id} does not have a model checkpoint.",
},
status=400,
)
params = {
"bucket": bucket_name,
"model_id": model_id,
"checkpoint_path": model_checkpoint_path,
"cluster_id": cluster_id,
"index_id": index_id,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_inference_job",
json=params,
)
response_data = r.json()
return JsonResponse(
{
"status": "success",
"job_id": response_data["job_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_model_inference_status(request, job_id):
params = {"job_id": job_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_inference_job_status",
params=params,
)
response_data = r.json()
if response_data["has_output"]:
model_id = response_data["model_id"]
        # Inference output has been created & uploaded -> persist its directory
m = get_object_or_404(DNNModel, model_id=model_id)
m.output_directory = response_data["output_dir"]
m.save()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def stop_model_inference(request, job_id):
params = {"job_id": job_id}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/stop_bgsplit_inference_job", json=params
)
response_data = r.json()
return JsonResponse(response_data, status=r.status_code)
#
# V2 ENDPOINTS
# TODO(mihirg): Make these faster
#
Tag = namedtuple("Tag", "category value") # type: NamedTuple[str, str]
Box = namedtuple(
"Box", "category value x1 y1 x2 y2"
) # type: NamedTuple[str, str, float, float, float, float]
PkType = int
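# Query endpoints cache their full ranking server-side as a ResultSet in
# current_result_sets (an in-memory ExpiringDict keyed by a UUID) and return
# only the result-set id; get_results_v2 and add_annotations_to_result_set_v2
# then page into the cached ranking by offset.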
@dataclass
class ResultSet:
type: str
ranking: List[PkType]
distances: List[float]
model: Optional[str]
# TODO(fpoms): this needs to be wrapped in a lock so that
# updates are atomic across concurrent requests
current_result_sets = ExpiringDict(
max_age_seconds=30 * 60,
max_len=50,
) # type: Dict[str, ResultSet]
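# parse_tag_set_from_query_v2 accepts either a comma-separated string or a list
# of "category:mode" entries. For example (illustrative names only), the input
# "pedestrian:POSITIVE,car:NEGATIVE" parses to
# {Tag("pedestrian", "POSITIVE"), Tag("car", "NEGATIVE")}.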
def parse_tag_set_from_query_v2(s):
if isinstance(s, list):
parts = s
elif isinstance(s, str) and s:
parts = s.split(",")
else:
parts = []
ts = set()
for part in parts:
if not part:
continue
category, value_str = part.split(":")
ts.add(Tag(category, value_str))
return ts
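# tag_sets_to_query merges any number of tag sets into one Django Q filter:
# per-tag (category name, mode name) clauses are OR-ed together and wrapped in
# an annotation__in subquery; an empty union yields an empty Q() that matches
# everything.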
def tag_sets_to_query(*tagsets):
merged = set().union(*tagsets)
if not merged:
return Q()
return Q(
annotation__in=Annotation.objects.filter(
functools.reduce(
operator.or_,
[Q(category__name=t.category, mode__name=t.value) for t in merged],
)
)
)
def serialize_tag_set_for_client_v2(ts):
return [{"category": t.category, "value": t.value} for t in sorted(list(ts))]
def serialize_boxes_for_client_v2(bs):
return [
{
"category": b.category,
"value": b.value,
"x1": b.x1,
"y1": b.y1,
"x2": b.x2,
"y2": b.y2,
}
for b in sorted(list(bs))
]
def get_tags_from_annotations_v2(annotations):
tags_by_pk = defaultdict(list)
annotations = annotations.filter(is_box=False)
ann_dicts = annotations.values("dataset_item__pk", "category__name", "mode__name")
for ann in ann_dicts:
pk = ann["dataset_item__pk"]
category = ann["category__name"]
mode = ann["mode__name"]
tags_by_pk[pk].append(Tag(category, mode))
return tags_by_pk
def get_boxes_from_annotations_v2(annotations):
boxes_by_pk = defaultdict(list)
annotations = annotations.filter(is_box=True)
ann_dicts = annotations.values(
"dataset_item__pk",
"category__name",
"mode__name",
"bbox_x1",
"bbox_y1",
"bbox_x2",
"bbox_y2",
)
for ann in ann_dicts:
pk = ann["dataset_item__pk"]
category = ann["category__name"]
mode = ann["mode__name"]
box = (ann["bbox_x1"], ann["bbox_y1"], ann["bbox_x2"], ann["bbox_y2"])
boxes_by_pk[pk].append(Box(category, mode, *box))
return boxes_by_pk
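# filtered_images_v2 reads its filter parameters from either the POST body or
# the GET query string: an optional "subset" of pks, include/exclude tag sets,
# a train/val split, and offset/num pagination (num == -1 means no limit). The
# optional exclude_pks argument lets callers drop items they have already used,
# e.g. existing positives/negatives when sampling extra negatives.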
def filtered_images_v2(request, dataset, exclude_pks=None) -> List[PkType]:
filt_start = time.time()
if request.method == "POST":
payload = json.loads(request.body)
include_tags = parse_tag_set_from_query_v2(payload.get("include"))
exclude_tags = parse_tag_set_from_query_v2(payload.get("exclude"))
pks = [i for i in payload.get("subset", []) if i]
split = payload.get("split", "train")
offset_to_return = int(payload.get("offset", 0))
num_to_return = int(payload.get("num", -1))
else:
include_tags = parse_tag_set_from_query_v2(request.GET.get("include"))
exclude_tags = parse_tag_set_from_query_v2(request.GET.get("exclude"))
pks = [i for i in request.GET.get("subset", "").split(",") if i]
split = request.GET.get("split", "train")
offset_to_return = int(request.GET.get("offset", 0))
num_to_return = int(request.GET.get("num", -1))
end_to_return = None if num_to_return == -1 else offset_to_return + num_to_return
dataset_items = None
is_val = split == "val"
db_start = time.time()
# Get pks for dataset items of interest
if pks and exclude_pks:
# Get specific pks - excluded pks if requested
exclude_pks = set(exclude_pks)
pks = [pk for pk in pks if pk not in exclude_pks]
elif not pks:
# Otherwise get all dataset items - exclude pks
dataset_items = DatasetItem.objects.filter(dataset=dataset, is_val=is_val)
if exclude_pks:
dataset_items = dataset_items.exclude(pk__in=exclude_pks)
pks = dataset_items.values_list("pk", flat=True)
db_end = time.time()
result = None
db_tag_start = time.time()
if not include_tags and not exclude_tags:
# If no tags specified, just return retrieved pks
result = pks
else:
# Otherwise, filter using include and exclude tags
if dataset_items is None:
dataset_items = DatasetItem.objects.filter(pk__in=pks)
if include_tags:
dataset_items = dataset_items.filter(tag_sets_to_query(include_tags))
if exclude_tags:
dataset_items = dataset_items.exclude(tag_sets_to_query(exclude_tags))
result = dataset_items.values_list("pk", flat=True)
db_tag_end = time.time()
result = list(result[offset_to_return:end_to_return])
filt_end = time.time()
print(
f"filtered_images_v2: tot: {filt_end-filt_start}, "
f"db ({len(result)} items): {db_end-db_start}, db tag: {db_tag_end-db_tag_start}"
)
return result
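# process_image_query_results_v2 maps backend results (keyed by storage path in
# the "label" field) back onto DatasetItem pks, keeping only items that survive
# the user's current include/exclude filters while preserving the backend's
# ranking and distances.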
def process_image_query_results_v2(request, dataset, query_response):
filtered_pks = filtered_images_v2(request, dataset)
# TODO(mihirg): Eliminate this database call by directly returning pks from backend
dataset_items = DatasetItem.objects.filter(pk__in=filtered_pks)
dataset_items_by_path = {di.path: di for di in dataset_items}
distances = []
ordered_pks = []
for r in query_response["results"]:
if r["label"] in dataset_items_by_path:
ordered_pks.append(dataset_items_by_path[r["label"]].pk)
distances.append(r["dist"])
return dict(
pks=ordered_pks,
distances=distances,
)
def create_result_set_v2(results, type, model=None):
pks = results["pks"]
distances = results["distances"]
result_set_id = str(uuid.uuid4())
current_result_sets[result_set_id] = ResultSet(
type=type, ranking=pks, distances=distances, model=model
)
return {
"id": result_set_id,
"num_results": len(pks),
"type": type,
}
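# get_results_v2 pages through a cached ResultSet, asks the embedding server to
# cluster the returned page, and rewrites bare storage paths into public GCS
# URLs under the dataset's training bucket.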
@api_view(["GET"])
@csrf_exempt
def get_results_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
index_id = request.GET["index_id"]
result_set_id = request.GET["result_set_id"]
offset_to_return = int(request.GET.get("offset", 0))
num_to_return = int(request.GET.get("num", 500))
clustering_model = request.GET.get("clustering_model", None)
result_set = current_result_sets[result_set_id]
pks = result_set.ranking[offset_to_return : offset_to_return + num_to_return]
distances = result_set.distances[
offset_to_return : offset_to_return + num_to_return
]
dataset_items_by_pk = DatasetItem.objects.in_bulk(pks)
dataset_items = [dataset_items_by_pk[pk] for pk in pks] # preserve order
bucket_name = dataset.train_directory[len("gs://") :].split("/")[0]
path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}"
internal_identifiers = [di.identifier for di in dataset_items]
params = {
"index_id": index_id,
"identifiers": internal_identifiers,
}
if clustering_model:
params["model"] = clustering_model
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/perform_clustering",
json=params,
)
clustering_data = r.json()
dataset_item_paths = [
(di.path if di.path.find("http") != -1 else path_template.format(di.path))
for di in dataset_items
]
dataset_item_identifiers = [di.pk for di in dataset_items]
return JsonResponse(
{
"paths": dataset_item_paths,
"identifiers": dataset_item_identifiers,
"distances": distances,
"clustering": clustering_data["clustering"],
}
)
@api_view(["POST"])
@csrf_exempt
def keep_alive_v2(request):
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/keep_alive",
)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def generate_embedding_v2(request):
payload = json.loads(request.body)
image_id = payload.get("image_id")
if image_id:
payload["identifier"] = DatasetItem.objects.get(pk=image_id).identifier
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/generate_embedding",
json=payload,
)
return JsonResponse(r.json())
@api_view(["POST"])
@csrf_exempt
def generate_text_embedding_v2(request):
payload = json.loads(request.body)
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/generate_text_embedding",
json=payload,
)
return JsonResponse(r.json())
@api_view(["POST"])
@csrf_exempt
def query_knn_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
embeddings = payload["embeddings"]
use_full_image = bool(payload.get("use_full_image", True))
use_dot_product = bool(payload.get("use_dot_product", False))
model = payload.get("model", "imagenet")
dataset = get_object_or_404(Dataset, name=dataset_name)
query_knn_start = time.time()
params = {
"index_id": index_id,
"embeddings": embeddings,
"use_full_image": use_full_image,
"use_dot_product": use_dot_product,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_knn_v2",
json=params,
)
response_data = r.json()
query_knn_end = time.time()
logger.debug("query_knn_v2 time: {:f}".format(query_knn_end - query_knn_start))
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "knn", model=model))
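# train_svm_v2 gathers positive/negative training items from the given tag
# sets, optionally pads the negatives with randomly sampled unlabeled items (up
# to SVM_NUM_NEGS_MULTIPLIER times the number of positives), and asks the
# embedding server to fit an SVM, returning the encoded weight vector.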
@api_view(["GET"])
@csrf_exempt
def train_svm_v2(request, dataset_name):
index_id = request.GET["index_id"]
model = request.GET.get("model", "imagenet")
pos_tags = parse_tag_set_from_query_v2(request.GET["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(request.GET.get("neg_tags"))
augment_negs = bool(
distutils.util.strtobool(request.GET.get("augment_negs", "false"))
)
dataset = get_object_or_404(Dataset, name=dataset_name)
pos_dataset_items = DatasetItem.objects.filter(
tag_sets_to_query(pos_tags),
dataset=dataset,
is_val=False,
)
pos_dataset_item_pks = list(pos_dataset_items.values_list("pk", flat=True))
if neg_tags:
neg_dataset_items = DatasetItem.objects.filter(
tag_sets_to_query(neg_tags),
dataset=dataset,
is_val=False,
).difference(pos_dataset_items)
neg_dataset_item_pks = list(neg_dataset_items.values_list("pk", flat=True))
else:
neg_dataset_item_pks = []
# Augment with randomly sampled negatives if requested
num_extra_negs = settings.SVM_NUM_NEGS_MULTIPLIER * len(pos_dataset_item_pks) - len(
neg_dataset_item_pks
)
if augment_negs and num_extra_negs > 0:
# Uses "include" and "exclude" category sets from GET request
all_eligible_pks = filtered_images_v2(
request, dataset, exclude_pks=pos_dataset_item_pks + neg_dataset_item_pks
)
sampled_pks = random.sample(
all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
)
neg_dataset_item_pks.extend(sampled_pks)
pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
params = {
"index_id": index_id,
"pos_identifiers": pos_dataset_item_internal_identifiers,
"neg_identifiers": neg_dataset_item_internal_identifiers,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/train_svm_v2",
json=params,
)
return JsonResponse(r.json()) # {"svm_vector": base64-encoded string}
@api_view(["POST"])
@csrf_exempt
def query_svm_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
svm_vector = payload["svm_vector"]
score_min = float(payload.get("score_min", 0.0))
score_max = float(payload.get("score_max", 1.0))
model = payload.get("model", "imagenet")
dataset = get_object_or_404(Dataset, name=dataset_name)
params = {
"index_id": index_id,
"svm_vector": svm_vector,
"score_min": score_min,
"score_max": score_max,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_svm_v2",
json=params,
)
response_data = r.json()
# TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid
# running a separate query over the index every single time the user adjusts score
# thresholds
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "svm"))
@api_view(["POST"])
@csrf_exempt
def query_ranking_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
score_min = float(payload.get("score_min", 0.0))
score_max = float(payload.get("score_max", 1.0))
model = payload["model"]
dataset = get_object_or_404(Dataset, name=dataset_name)
params = {
"index_id": index_id,
"score_min": score_min,
"score_max": score_max,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_ranking_v2",
json=params,
)
response_data = r.json()
# TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid
# running a separate query over the index every single time the user adjusts score
# thresholds
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "ranking", model=model))
@api_view(["POST"])
@csrf_exempt
def query_images_v2(request, dataset_name):
query_start = time.time()
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
order = payload.get("order", "id")
filter_start = time.time()
result_pks = filtered_images_v2(request, dataset)
filter_end = time.time()
if order == "random":
random.shuffle(result_pks)
elif order == "id":
result_pks.sort()
results = {"pks": result_pks, "distances": [-1 for _ in result_pks]}
resp = JsonResponse(create_result_set_v2(results, "query"))
query_end = time.time()
print(
f"query_images_v2: tot: {query_end-query_start}, "
f"filter: {filter_end-filter_start}"
)
return resp
#
# ACTIVE VALIDATION
#
VAL_NEGATIVE_TYPE = "model_val_negative"
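# get_val_examples_v2 reconstructs the validation positives/negatives for a
# trained model from the tag sets stored in its category_spec, plus any extra
# per-model negatives recorded with label_type == VAL_NEGATIVE_TYPE.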
def get_val_examples_v2(dataset, model_id):
# Get positive and negative categories
model = get_object_or_404(DNNModel, model_id=model_id)
pos_tags = parse_tag_set_from_query_v2(model.category_spec["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(model.category_spec["neg_tags"])
augment_negs = model.category_spec.get("augment_negs", False)
augment_negs_include = (
parse_tag_set_from_query_v2(model.category_spec.get("augment_negs_include", []))
if augment_negs
else set()
)
# Limit to validation set
eligible_dataset_items = DatasetItem.objects.filter(
dataset=dataset,
is_val=True,
)
# Get positives and negatives matching these categories
categories = Category.objects.filter(
tag_sets_to_query(pos_tags, neg_tags, augment_negs_include)
)
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
category__in=categories,
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags or t in augment_negs_include for t in tags):
neg_dataset_item_pks.append(pk)
# Get extra negatives
if augment_negs:
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
label_category=model_id,
label_type=VAL_NEGATIVE_TYPE,
)
neg_dataset_item_pks.extend(ann.dataset_item.pk for ann in annotations)
return pos_dataset_item_pks, neg_dataset_item_pks
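# query_metrics_v2 weights each validation example by the client-supplied
# per-identifier weights (examples without a weight are skipped) and forwards
# identifiers, labels, and weights to the embedding server's /query_metrics
# endpoint.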
@api_view(["POST"])
def query_metrics_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
model_id = payload["model"]
index_id = payload["index_id"]
internal_identifiers_to_weights = payload["weights"] # type: Dict[str, int]
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id)
# Construct identifiers, labels, and weights
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
weights = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifier = di.identifier
weight = internal_identifiers_to_weights.get(identifier)
if weight is None:
continue
identifiers.append(identifier)
labels.append(label)
weights.append(weight)
# TODO(mihirg): Parse false positives and false negatives
params = {
"index_id": index_id,
"model": model_id,
"identifiers": identifiers,
"labels": labels,
"weights": weights,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_metrics",
json=params,
)
response_data = r.json()
return JsonResponse(response_data)
@api_view(["POST"])
def query_active_validation_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
model_id = payload["model"]
index_id = payload["index_id"]
current_f1 = payload.get("current_f1")
if current_f1 is None:
current_f1 = 0.5
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id)
# Construct paths, identifiers, and labels
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifiers.append(di.identifier)
labels.append(label)
params = {
"index_id": index_id,
"model": model_id,
"identifiers": identifiers,
"labels": labels,
"current_f1": current_f1,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_active_validation",
json=params,
)
response_data = r.json()
if response_data["identifiers"]:
pks_and_paths = list(
DatasetItem.objects.filter(
dataset=dataset,
identifier__in=response_data["identifiers"],
is_val=True,
).values_list("pk", "path")
)
random.shuffle(pks_and_paths)
pks, paths = zip(*pks_and_paths)
else:
pks, paths = [], []
bucket_name = dataset.val_directory[len("gs://") :].split("/")[0]
path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}"
paths = [path_template.format(p) for p in paths]
return JsonResponse(
{
"paths": paths,
"identifiers": pks,
"weights": response_data["weights"],
}
)
@api_view(["POST"])
def add_val_annotations_v2(request):
payload = json.loads(request.body)
annotations = payload["annotations"]
user_email = payload["user"]
model = payload["model"]
anns = []
cat_modes = defaultdict(int)
dataset = None
for ann_payload in annotations:
image_pk = ann_payload["identifier"]
is_other_negative = ann_payload.get("is_other_negative", False)
mode_str = "NEGATIVE" if is_other_negative else ann_payload["mode"]
category_name = (
"active:" + model if is_other_negative else ann_payload["category"]
)
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_str)
di = DatasetItem.objects.get(pk=image_pk)
dataset = di.dataset
assert di.is_val
ann = Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
misc_data={"created_by": "active_val"},
)
cat_modes[(category, mode)] += 1
anns.append(ann)
Annotation.objects.bulk_create(anns)
for (cat, mode), c in cat_modes.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset, category=cat, mode=mode
)
category_count.count += c
category_count.save()
return JsonResponse({"created": len(anns)})
# DATASET INFO
@api_view(["GET"])
@csrf_exempt
def get_datasets_v2(request):
datasets = Dataset.objects.filter(hidden=False)
dataset_names = list(datasets.values_list("name", flat=True))
return JsonResponse({"dataset_names": dataset_names})
@api_view(["GET"])
@csrf_exempt
def get_dataset_info_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
num_train = dataset.datasetitem_set.filter(is_val=False).count()
num_val = dataset.datasetitem_set.filter(is_val=True).count()
return JsonResponse(
{
"index_id": dataset.index_id,
"num_train": num_train,
"num_val": num_val,
}
)
@api_view(["GET"])
@csrf_exempt
def get_models_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
model_objs = DNNModel.objects.filter(
dataset=dataset,
checkpoint_path__isnull=False,
).order_by("-last_updated")
model_names = set()
latest = {}
with_output = {}
for model in model_objs:
model_names.add(model.name)
if model.name not in latest:
latest[model.name] = model
if model.output_directory and model.name not in with_output:
with_output[model.name] = model
models = [
{
"name": model_name,
"latest": model_info(latest[model_name]),
"with_output": model_info(with_output.get(model_name)),
}
for model_name in model_names
]
return JsonResponse({"models": models})
def model_info(model):
if model is None:
return None
pos_tags = parse_tag_set_from_query_v2(model.category_spec.get("pos_tags", []))
neg_tags = parse_tag_set_from_query_v2(model.category_spec.get("neg_tags", []))
augment_negs_include = parse_tag_set_from_query_v2(
model.category_spec.get("augment_negs_include", [])
)
return {
"model_id": model.model_id,
"timestamp": model.last_updated,
"has_checkpoint": model.checkpoint_path is not None,
"has_output": model.output_directory is not None,
"pos_tags": serialize_tag_set_for_client_v2(pos_tags),
"neg_tags": serialize_tag_set_for_client_v2(neg_tags | augment_negs_include),
"augment_negs": model.category_spec.get("augment_negs", False),
"epoch": model.epoch,
}
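# create_dataset_v2 expects gs:// paths for the train and val directories,
# triggers an index download on the embedding server, lists the image blobs in
# both buckets, and bulk-creates one DatasetItem per .jpg/.jpeg/.png file.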
@api_view(["POST"])
@csrf_exempt
def create_dataset_v2(request):
payload = json.loads(request.body)
name = payload["dataset"]
train_directory = payload["train_path"]
val_directory = payload["val_path"]
index_id = payload["index_id"]
assert all(d.startswith("gs://") for d in (train_directory, val_directory))
# Download index on index server
params = {"index_id": index_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/download_index",
json=params,
)
client = storage.Client()
all_blobs = []
for d, is_val in ((train_directory, False), (val_directory, True)):
split_dir = d[len("gs://") :].split("/")
bucket_name = split_dir[0]
bucket_path = "/".join(split_dir[1:])
all_blobs.extend(
(blob, is_val)
for blob in client.list_blobs(bucket_name, prefix=bucket_path)
)
dataset = Dataset(
name=name,
train_directory=train_directory,
val_directory=val_directory,
index_id=index_id,
)
dataset.save()
# Create all the DatasetItems for this dataset
items = [
DatasetItem(
dataset=dataset,
identifier=os.path.splitext(os.path.basename(blob.name))[0],
path=blob.name,
is_val=is_val,
)
for blob, is_val in all_blobs
if (
blob.name.endswith(".jpg")
or blob.name.endswith(".jpeg")
or blob.name.endswith(".png")
)
]
DatasetItem.objects.bulk_create(items, batch_size=10000)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def get_annotations_v2(request):
payload = json.loads(request.body)
image_pks = [i for i in payload["identifiers"] if i]
if not image_pks:
return JsonResponse({})
annotations = Annotation.objects.filter(
dataset_item__in=DatasetItem.objects.filter(pk__in=image_pks),
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
boxes_by_pk = get_boxes_from_annotations_v2(annotations)
annotations_by_pk = defaultdict(lambda: {"tags": [], "boxes": []})
for pk, tags in tags_by_pk.items():
annotations_by_pk[pk]["tags"] = serialize_tag_set_for_client_v2(tags)
for pk, boxes in boxes_by_pk.items():
annotations_by_pk[pk]["boxes"] = serialize_boxes_for_client_v2(boxes)
return JsonResponse(annotations_by_pk)
@api_view(["POST"])
@csrf_exempt
def add_annotations_v2(request):
payload = json.loads(request.body)
image_pks = payload["identifiers"]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_multi_v2(request):
payload = json.loads(request.body)
num_created = bulk_add_multi_annotations_v2(payload)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_by_internal_identifiers_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
image_identifiers = payload["identifiers"]
images = DatasetItem.objects.filter(
dataset=dataset, identifier__in=image_identifiers
)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_to_result_set_v2(request):
payload = json.loads(request.body)
result_set_id = payload["result_set_id"]
lower_bound = float(payload["from"])
upper_bound = float(payload["to"])
result_set = current_result_sets[result_set_id]
result_ranking = result_set.ranking
# e.g., lower_bound=0.0, upper_bound=0.5 -> second half of the result set
start_index = math.ceil(len(result_ranking) * (1.0 - upper_bound))
end_index = math.floor(len(result_ranking) * (1.0 - lower_bound))
image_pks = result_ranking[start_index:end_index]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
def bulk_add_single_tag_annotations_v2(payload, images):
'''Adds annotations for a single tag to many dataset items'''
if not images:
return 0
user_email = payload["user"]
category_name = payload["category"]
mode_name = payload["mode"]
created_by = payload.get("created_by",
"tag" if len(images) == 1 else "tag-bulk")
dataset = None
if len(images) > 0:
dataset = images[0].dataset
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_name)
Annotation.objects.filter(
dataset_item__in=images, category=category, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
annotations = [
Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
is_box=False,
misc_data={"created_by": created_by},
)
for di in images
]
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
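# Example payload for bulk_add_multi_annotations_v2 (illustrative values only);
# each annotation may reference an item by "identifier" or by "pk", and box
# annotations additionally carry is_box plus x1/y1/x2/y2:
# {
#   "dataset": "my-dataset",
#   "user": "user@example.com",
#   "annotations": [
#     {"identifier": "img_0001", "category": "pedestrian", "mode": "POSITIVE"},
#     {"pk": 1234, "category": "pedestrian", "mode": "POSITIVE",
#      "is_box": true, "x1": 0.1, "y1": 0.2, "x2": 0.4, "y2": 0.5}
#   ]
# }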
def bulk_add_multi_annotations_v2(payload: Dict):
'''Adds multiple annotations for the same dataset and user to the database
at once'''
dataset_name = payload["dataset"]
dataset = get_object_or_404(Dataset, name=dataset_name)
user_email = payload["user"]
user, _ = User.objects.get_or_create(email=user_email)
created_by = payload.get("created_by",
"tag" if len(payload["annotations"]) == 1 else
"tag-bulk")
# Get pks
idents = [ann['identifier'] for ann in payload["annotations"]
if 'identifier' in ann]
di_pks = list(DatasetItem.objects.filter(
dataset=dataset, identifier__in=idents
).values_list("pk", "identifier"))
ident_to_pk = {ident: pk for pk, ident in di_pks}
cats = {}
modes = {}
to_delete = defaultdict(set)
annotations = []
for ann in payload["annotations"]:
db_ann = Annotation()
category_name = ann["category"]
mode_name = ann["mode"]
if category_name not in cats:
cats[category_name] = Category.objects.get_or_create(
name=category_name)[0]
if mode_name not in modes:
modes[mode_name] = Mode.objects.get_or_create(
name=mode_name)[0]
if "identifier" in ann:
pk = ident_to_pk[ann["identifier"]]
else:
pk = ann["pk"]
db_ann.dataset_item_id = pk
db_ann.user = user
db_ann.category = cats[category_name]
db_ann.mode = modes[mode_name]
db_ann.is_box = ann.get("is_box", False)
if db_ann.is_box:
db_ann.bbox_x1 = ann["x1"]
db_ann.bbox_y1 = ann["y1"]
db_ann.bbox_x2 = ann["x2"]
db_ann.bbox_y2 = ann["y2"]
else:
to_delete[db_ann.category].add(pk)
        db_ann.misc_data = {"created_by": created_by}
annotations.append(db_ann)
for cat, pks in to_delete.items():
        # Delete per-frame annotations for the category if they exist since
        # we should only have one mode per image
Annotation.objects.filter(
category=cat, dataset_item_id__in=pks, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
def bulk_add_annotations_v2(dataset, annotations):
    '''Handles bookkeeping for adding many annotations at once'''
Annotation.objects.bulk_create(annotations)
counts = defaultdict(int)
for ann in annotations:
counts[(ann.category, ann.mode)] += 1
for (cat, mode), count in counts.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset,
category=cat,
mode=mode
)
category_count.count += count
category_count.save()
@api_view(["POST"])
@csrf_exempt
def delete_category_v2(request):
payload = json.loads(request.body)
category = payload["category"]
category = Category.objects.get(name=category)
category.delete()
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def update_category_v2(request):
payload = json.loads(request.body)
old_category_name = payload["oldCategory"]
new_category_name = payload["newCategory"]
category = Category.objects.get(name=old_category_name)
category.name = new_category_name
category.save()
return JsonResponse({"status": "success"})
@api_view(["GET"])
@csrf_exempt
def get_category_counts_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
counts = CategoryCount.objects.filter(dataset=dataset).values(
"category__name", "mode__name", "count"
)
n_labeled = defaultdict(dict)
for c in counts:
category = c["category__name"]
mode = c["mode__name"]
count = c["count"]
n_labeled[category][mode] = count
return JsonResponse(n_labeled)
is_other_negative = ann_payload.get("is_other_negative", False)
mode_str = "NEGATIVE" if is_other_negative else ann_payload["mode"]
category_name = (
"active:" + model if is_other_negative else ann_payload["category"]
)
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_str)
di = DatasetItem.objects.get(pk=image_pk)
dataset = di.dataset
assert di.is_val
ann = Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
misc_data={"created_by": "active_val"},
)
cat_modes[(category, mode)] += 1
anns.append(ann)
Annotation.objects.bulk_create(anns)
for (cat, mode), c in cat_modes.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset, category=cat, mode=mode
)
category_count.count += c
category_count.save()
return JsonResponse({"created": len(anns)})
# DATASET INFO
@api_view(["GET"])
@csrf_exempt
def get_datasets_v2(request):
datasets = Dataset.objects.filter(hidden=False)
dataset_names = list(datasets.values_list("name", flat=True))
return JsonResponse({"dataset_names": dataset_names})
@api_view(["GET"])
@csrf_exempt
def get_dataset_info_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
num_train = dataset.datasetitem_set.filter(is_val=False).count()
num_val = dataset.datasetitem_set.filter(is_val=True).count()
return JsonResponse(
{
"index_id": dataset.index_id,
"num_train": num_train,
"num_val": num_val,
}
)
@api_view(["GET"])
@csrf_exempt
def get_models_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
model_objs = DNNModel.objects.filter(
dataset=dataset,
checkpoint_path__isnull=False,
).order_by("-last_updated")
model_names = set()
latest = {}
with_output = {}
for model in model_objs:
model_names.add(model.name)
if model.name not in latest:
latest[model.name] = model
if model.output_directory and model.name not in with_output:
with_output[model.name] = model
models = [
{
"name": model_name,
"latest": model_info(latest[model_name]),
"with_output": model_info(with_output.get(model_name)),
}
for model_name in model_names
]
return JsonResponse({"models": models})
def model_info(model):
if model is None:
return None
pos_tags = parse_tag_set_from_query_v2(model.category_spec.get("pos_tags", []))
neg_tags = parse_tag_set_from_query_v2(model.category_spec.get("neg_tags", []))
augment_negs_include = parse_tag_set_from_query_v2(
model.category_spec.get("augment_negs_include", [])
)
return {
"model_id": model.model_id,
"timestamp": model.last_updated,
"has_checkpoint": model.checkpoint_path is not None,
"has_output": model.output_directory is not None,
"pos_tags": serialize_tag_set_for_client_v2(pos_tags),
"neg_tags": serialize_tag_set_for_client_v2(neg_tags | augment_negs_include),
"augment_negs": model.category_spec.get("augment_negs", False),
"epoch": model.epoch,
}
@api_view(["POST"])
@csrf_exempt
def create_dataset_v2(request):
payload = json.loads(request.body)
name = payload["dataset"]
train_directory = payload["train_path"]
val_directory = payload["val_path"]
index_id = payload["index_id"]
assert all(d.startswith("gs://") for d in (train_directory, val_directory))
# Download index on index server
params = {"index_id": index_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/download_index",
json=params,
)
client = storage.Client()
all_blobs = []
for d, is_val in ((train_directory, False), (val_directory, True)):
split_dir = d[len("gs://") :].split("/")
bucket_name = split_dir[0]
bucket_path = "/".join(split_dir[1:])
all_blobs.extend(
(blob, is_val)
for blob in client.list_blobs(bucket_name, prefix=bucket_path)
)
dataset = Dataset(
name=name,
train_directory=train_directory,
val_directory=val_directory,
index_id=index_id,
)
dataset.save()
# Create all the DatasetItems for this dataset
items = [
DatasetItem(
dataset=dataset,
identifier=os.path.splitext(os.path.basename(blob.name))[0],
path=blob.name,
is_val=is_val,
)
for blob, is_val in all_blobs
if (
blob.name.endswith(".jpg")
or blob.name.endswith(".jpeg")
or blob.name.endswith(".png")
)
]
DatasetItem.objects.bulk_create(items, batch_size=10000)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def get_annotations_v2(request):
payload = json.loads(request.body)
image_pks = [i for i in payload["identifiers"] if i]
if not image_pks:
return JsonResponse({})
annotations = Annotation.objects.filter(
dataset_item__in=DatasetItem.objects.filter(pk__in=image_pks),
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
boxes_by_pk = get_boxes_from_annotations_v2(annotations)
annotations_by_pk = defaultdict(lambda: {"tags": [], "boxes": []})
for pk, tags in tags_by_pk.items():
annotations_by_pk[pk]["tags"] = serialize_tag_set_for_client_v2(tags)
for pk, boxes in boxes_by_pk.items():
annotations_by_pk[pk]["boxes"] = serialize_boxes_for_client_v2(boxes)
return JsonResponse(annotations_by_pk)
@api_view(["POST"])
@csrf_exempt
def add_annotations_v2(request):
payload = json.loads(request.body)
image_pks = payload["identifiers"]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_multi_v2(request):
payload = json.loads(request.body)
num_created = bulk_add_multi_annotations_v2(payload)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_by_internal_identifiers_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
image_identifiers = payload["identifiers"]
images = DatasetItem.objects.filter(
dataset=dataset, identifier__in=image_identifiers
)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_to_result_set_v2(request):
payload = json.loads(request.body)
result_set_id = payload["result_set_id"]
lower_bound = float(payload["from"])
upper_bound = float(payload["to"])
result_set = current_result_sets[result_set_id]
result_ranking = result_set.ranking
# e.g., lower_bound=0.0, upper_bound=0.5 -> second half of the result set
start_index = math.ceil(len(result_ranking) * (1.0 - upper_bound))
end_index = math.floor(len(result_ranking) * (1.0 - lower_bound))
image_pks = result_ranking[start_index:end_index]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
def bulk_add_single_tag_annotations_v2(payload, images):
'''Adds annotations for a single tag to many dataset items'''
if not images:
return 0
user_email = payload["user"]
category_name = payload["category"]
mode_name = payload["mode"]
created_by = payload.get("created_by",
"tag" if len(images) == 1 else "tag-bulk")
dataset = None
if len(images) > 0:
dataset = images[0].dataset
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_name)
Annotation.objects.filter(
dataset_item__in=images, category=category, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
annotations = [
Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
is_box=False,
misc_data={"created_by": created_by},
)
for di in images
]
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
def bulk_add_multi_annotations_v2(payload: Dict):
'''Adds multiple annotations for the same dataset and user to the database
at once'''
dataset_name = payload["dataset"]
dataset = get_object_or_404(Dataset, name=dataset_name)
user_email = payload["user"]
user, _ = User.objects.get_or_create(email=user_email)
created_by = payload.get("created_by",
"tag" if len(payload["annotations"]) == 1 else
"tag-bulk")
# Get pks
idents = [ann['identifier'] for ann in payload["annotations"]
if 'identifier' in ann]
di_pks = list(DatasetItem.objects.filter(
dataset=dataset, identifier__in=idents
).values_list("pk", "identifier"))
ident_to_pk = {ident: pk for pk, ident in di_pks}
cats = {}
modes = {}
to_delete = defaultdict(set)
annotations = []
for ann in payload["annotations"]:
db_ann = Annotation()
category_name = ann["category"]
mode_name = ann["mode"]
if category_name not in cats:
cats[category_name] = Category.objects.get_or_create(
name=category_name)[0]
if mode_name not in modes:
modes[mode_name] = Mode.objects.get_or_create(
name=mode_name)[0]
if "identifier" in ann:
pk = ident_to_pk[ann["identifier"]]
else:
pk = ann["pk"]
db_ann.dataset_item_id = pk
db_ann.user = user
db_ann.category = cats[category_name]
db_ann.mode = modes[mode_name]
db_ann.is_box = ann.get("is_box", False)
if db_ann.is_box:
db_ann.bbox_x1 = ann["x1"]
db_ann.bbox_y1 = ann["y1"]
db_ann.bbox_x2 = ann["x2"]
db_ann.bbox_y2 = ann["y2"]
else:
to_delete[db_ann.category].add(pk)
db_ann.misc_data = {"created_by": created_by}
annotations.append(db_ann)
for cat, pks in to_delete.items():
# Delete per-frame annotations for the category if they exist since
# we should only have one mode per image
Annotation.objects.filter(
category=cat, dataset_item_id__in=pks, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
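# Illustrative sketch (not part of the original views): the JSON payload that
# bulk_add_multi_annotations_v2 expects, based on the keys read above. All
# concrete values below are hypothetical.
#
# example_payload = {
#     "dataset": "my-dataset",
#     "user": "user@example.com",
#     "annotations": [
#         # tag annotation addressed by external identifier
#         {"identifier": "img_0001", "category": "dog", "mode": "POSITIVE"},
#         # box annotation addressed by primary key, with normalized coordinates
#         {"pk": 42, "category": "dog", "mode": "POSITIVE", "is_box": True,
#          "x1": 0.1, "y1": 0.2, "x2": 0.5, "y2": 0.6},
#     ],
# }
# bulk_add_multi_annotations_v2(example_payload)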
def bulk_add_annotations_v2(dataset, annotations):
'''Handles bookkeeping for adding many annotations at once'''
Annotation.objects.bulk_create(annotations)
counts = defaultdict(int)
for ann in annotations:
counts[(ann.category, ann.mode)] += 1
for (cat, mode), count in counts.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset,
category=cat,
mode=mode
)
category_count.count += count
category_count.save()
@api_view(["POST"])
@csrf_exempt
def delete_category_v2(request):
payload = json.loads(request.body)
category = payload["category"]
category = Category.objects.get(name=category)
category.delete()
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def update_category_v2(request):
payload = json.loads(request.body)
old_category_name = payload["oldCategory"]
new_category_name = payload["newCategory"]
category = Category.objects.get(name=old_category_name)
category.name = new_category_name
category.save()
return JsonResponse({"status": "success"})
@api_view(["GET"])
@csrf_exempt
def get_category_counts_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
counts = CategoryCount.objects.filter(dataset=dataset).values(
"category__name", "mode__name", "count"
)
n_labeled = defaultdict(dict)
for c in counts:
category = c["category__name"]
mode = c["mode__name"]
count = c["count"]
n_labeled[category][mode] = count
return JsonResponse(n_labeled)
| pt | 0.218893 | 1.900871 | 2 |
ccmlib/cluster_factory.py | justinchuch/ccm | 626 | 14974 |
from __future__ import absolute_import
import os
import yaml
from ccmlib import common, extension, repository
from ccmlib.cluster import Cluster
from ccmlib.dse_cluster import DseCluster
from ccmlib.node import Node
from distutils.version import LooseVersion #pylint: disable=import-error, no-name-in-module
class ClusterFactory():
@staticmethod
def load(path, name):
cluster_path = os.path.join(path, name)
filename = os.path.join(cluster_path, 'cluster.conf')
with open(filename, 'r') as f:
data = yaml.safe_load(f)
try:
install_dir = None
if 'install_dir' in data:
install_dir = data['install_dir']
repository.validate(install_dir)
if install_dir is None and 'cassandra_dir' in data:
install_dir = data['cassandra_dir']
repository.validate(install_dir)
cassandra_version = None
if 'cassandra_version' in data:
cassandra_version = LooseVersion(data['cassandra_version'])
if common.isDse(install_dir):
cluster = DseCluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
else:
cluster = Cluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
node_list = data['nodes']
seed_list = data['seeds']
if 'partitioner' in data:
cluster.partitioner = data['partitioner']
if 'config_options' in data:
cluster._config_options = data['config_options']
if 'dse_config_options' in data:
cluster._dse_config_options = data['dse_config_options']
if 'misc_config_options' in data:
cluster._misc_config_options = data['misc_config_options']
if 'log_level' in data:
cluster.__log_level = data['log_level']
if 'use_vnodes' in data:
cluster.use_vnodes = data['use_vnodes']
if 'datadirs' in data:
cluster.data_dir_count = int(data['datadirs'])
extension.load_from_cluster_config(cluster, data)
except KeyError as k:
raise common.LoadError("Error Loading " + filename + ", missing property:" + k)
for node_name in node_list:
cluster.nodes[node_name] = Node.load(cluster_path, node_name, cluster)
for seed in seed_list:
cluster.seeds.append(seed)
return cluster
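# Usage sketch (illustrative, not part of the original module): loading a
# previously created cluster back from disk. Assumes a cluster named "test"
# already exists under ~/.ccm so that ~/.ccm/test/cluster.conf can be read.
#
# if __name__ == '__main__':
#     demo_cluster = ClusterFactory.load(os.path.expanduser('~/.ccm'), 'test')
#     print(demo_cluster.name, sorted(demo_cluster.nodes), demo_cluster.seeds)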
| pt | 0.167908 | 1.945101 | 2 |
causalnex/structure/pytorch/dist_type/_base.py | Rishab26/causalnex | 1,523 | 14975 | # Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior.
"""
import itertools
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy as np
import torch
from causalnex.structure.structuremodel import StructureModel
class DistTypeBase(metaclass=ABCMeta):
"""Base class defining the distribution default behavior and interface"""
def __init__(self, idx: int):
"""
Default constructor for the DistTypeBase class.
Unless overridden, provides default behavior to all subclasses.
Args:
idx: Positional index in data passed to the NOTEARS algorithm
which corresponds to this datatype.
"""
self.idx = idx
def get_columns(
self,
X: np.ndarray,
) -> np.ndarray:
"""
Gets the column(s) associated with the instantiated DistType.
Args:
X: Full dataset to be selected from.
Returns:
1d or 2d np.ndarray of columns.
"""
return X[:, self.idx]
# pylint: disable=no-self-use
# pylint: disable=unused-argument
def preprocess_X(self, X: np.ndarray, fit_transform: bool = True) -> np.ndarray:
"""
Overload this method to perform any required preprocessing of the data
matrix. This can include data conversion, column expansion etc.
Changes to the tabu parameters should also be done here.
**WARN** This preprocessing CANNOT reorder the columns of X.
Args:
X: The original passed-in data.
fit_transform: Whether the class first fits
then transforms the data, or just transforms.
Just transforming is used to preprocess new data after the
initial NOTEARS fit.
Returns:
Preprocessed X
"""
return X
# pylint: disable=no-self-use
def preprocess_tabu_edges(
self, tabu_edges: List[Tuple[int, int]]
) -> List[Tuple[int, int]]:
"""
Overload this method to perform any required preprocessing of the tabu_edges.
Args:
tabu_edges: The original tabu_edges.
Returns:
Preprocessed tabu_edges.
"""
return tabu_edges
# pylint: disable=no-self-use
def preprocess_tabu_nodes(self, tabu_nodes: List[int]) -> List[int]:
"""
Overload this method to perform any required preprocessing of the tabu_nodes.
Args:
tabu_nodes: The original tabu_nodes.
Returns:
Preprocessed tabu_nodes.
"""
return tabu_nodes
# pylint: disable=no-self-use
def update_idx_col(self, idx_col: Dict[int, str]) -> Dict[int, str]:
"""
Overload this method to update the idx_col dict with expanded colnames.
Args:
idx_col: The original index to column mapping.
Returns:
Updated index to column mapping.
"""
return idx_col
def add_to_node(self, sm: StructureModel) -> StructureModel:
"""
Adds self to a node of a structure model corresponding to self.idx.
Args:
sm: The input StructureModel
Returns:
Updated StructureModel
"""
sm.nodes[self.idx]["dist_type"] = self
return sm
# pylint: disable=no-self-use
def modify_h(self, square_weight_mat: torch.Tensor) -> torch.Tensor:
"""
Overload this method to apply updates to the W matrix in h(W).
Typically used to prevent spurious cycles when using expanded columns.
Args:
square_weight_mat: The weight matrix used in h(W).
Returns:
Updated weight matrix used in h(W).
"""
return square_weight_mat
# pylint: disable=no-self-use
def collapse_adj(self, adj: np.ndarray) -> np.ndarray:
"""
Overload this method to apply updates to collapse the W matrix
of a multi-parameter distribution.
Likely has the same impact as modify_h.
Args:
adj: The adjacency matrix.
Returns:
Updated adjacency matrix.
"""
return adj
@abstractmethod
def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor:
"""
Args:
X: The original data passed into NOTEARS (i.e. the reconstruction target).
X_hat: The reconstructed data.
Returns:
Scalar pytorch tensor of the reconstruction loss between X and X_hat.
"""
raise NotImplementedError("Must implement the loss() method")
@abstractmethod
def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor:
"""
Convert the transformed data from the latent space to the original dtype
using the inverse link function.
Args:
X_hat: Reconstructed data in the latent space.
Returns:
Modified X_hat.
MUST be same shape as passed in data.
Projects the self.idx column from the latent space to the dist_type space.
"""
raise NotImplementedError("Must implement the inverse_link_function() method")
class ExpandColumnsMixin:
"""
Mixin class providing convenience methods for column expansion.
"""
@staticmethod
def _expand_columns(X: np.ndarray, new_columns: np.ndarray) -> np.ndarray:
"""
Expands the data matrix columns without reordering the indices.
Args:
X: Base dataset to expand.
new_columns: The columns to expand the dataset by.
Returns:
Expanded dataset.
"""
return np.hstack([X, new_columns])
@staticmethod
def update_tabu_edges(
idx_group: List[int],
tabu_edges: List[Tuple[int, int]],
tabu_idx_group: bool,
) -> List[Tuple[int, int]]:
"""
Tabu edges are:
1. all user defined connections to original feature column
2. all inter-feature connections (optional)
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_edges: The list of tabu_edges to be updated.
tabu_idx_group: Whether inter-group edges should also be considered tabu.
I.e. as a result of a column expansion, we often want to prevent edges being learned
between parameters.
Returns:
Updated tabu_edges
"""
if tabu_edges is None:
tabu_edges = []
# copy to prevent mutations
tabu_edges = deepcopy(tabu_edges)
# handle 1.
new_tabu_edges = []
# for each original tabu pair
for (i, j) in tabu_edges:
# idx_group[0] is the original column index
if i == idx_group[0]:
new_tabu_edges += [(idx, j) for idx in idx_group[1:]]
elif j == idx_group[0]:
new_tabu_edges += [(i, idx) for idx in idx_group[1:]]
# all new edges added to tabu_edges
tabu_edges += new_tabu_edges
# handle 2.
if tabu_idx_group:
# add on all pairwise permutations of particular feature group
# NOTE: permutations are needed for edge directionality
tabu_edges += list(itertools.permutations(idx_group, 2))
return tabu_edges
@staticmethod
def update_tabu_nodes(
idx_group: List[int], tabu_nodes: List[int]
) -> List[int]:
"""
Tabu nodes are:
1. all user defined connections to original feature column
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_nodes: The list of tabu_nodes to be updated.
Returns:
Updated tabu_nodes
"""
if tabu_nodes is None:
return tabu_nodes
# copy to prevent mutations
tabu_nodes = deepcopy(tabu_nodes)
new_tabu_nodes = []
for i in tabu_nodes:
# NOTE: the first element in the idx_group is guaranteed as self.idx
if i == idx_group[0]:
new_tabu_nodes += idx_group[1:]
# add on the new tabu nodes
tabu_nodes += new_tabu_nodes
return tabu_nodes
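# Illustrative sketch only (not part of causalnex): a minimal concrete subclass
# showing how the abstract interface above could be satisfied for a continuous
# column, using a squared-error loss and an identity inverse link. The real
# distribution types live in the sibling modules of this package.
#
# class DistTypeContinuousSketch(DistTypeBase):
#     def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor:
#         # mean squared reconstruction error on this distribution's column
#         return (X[:, self.idx] - X_hat[:, self.idx]).pow(2).mean()
#
#     def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor:
#         # identity link: the latent space equals the data space here
#         return X_hat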
| pt | 0.199388 | 1.297901 | 1 |
src/cmdsh/utils.py | kotfu/cmdsh | 0 | 14976 | <gh_stars>0
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Utility functions (not classes)
"""
import inspect
import types
from typing import Callable
def validate_callable_param_count(func: Callable, count: int) -> None:
"""Ensure a function has the given number of parameters."""
signature = inspect.signature(func)
# validate that the callable has the right number of parameters
nparam = len(signature.parameters)
if nparam != count:
raise TypeError('{} has {} positional arguments, expected {}'.format(
func.__name__,
nparam,
count,
))
def validate_callable_argument(func, argnum, typ) -> None:
"""Validate that a certain argument of func is annotated for a specific type"""
signature = inspect.signature(func)
paramname = list(signature.parameters.keys())[argnum-1]
param = signature.parameters[paramname]
if param.annotation != typ:
raise TypeError('argument {} of {} has incompatible type {}, expected {}'.format(
argnum,
func.__name__,
param.annotation,
typ.__name__,
))
def validate_callable_return(func, typ) -> None:
"""Validate that func is annotated to return a specific type"""
signature = inspect.signature(func)
if typ:
typname = typ.__name__
else:
typname = 'None'
if signature.return_annotation != typ:
raise TypeError("{} must declare return a return type of '{}'".format(
func.__name__,
typname,
))
def rebind_method(method, obj) -> None:
"""Rebind method from one object to another
Call it something like this:
rebind_method(obj1, obj2.do_command)
This rebinds the ``do_command`` method from obj2 to obj1. Meaning
after this function call you can:
obj1.do_command()
This works only on instantiated objects, not on classes.
"""
#
# this is dark python magic
#
# if we were doing this in a hardcoded way, we might do:
#
# obj.method_name = types.MethodType(self.method_name.__func__, obj)
#
# TODO add force keyword parameter which defaults to false. If false, raise an
# exception if the method already exists on obj
method_name = method.__name__
setattr(obj, method_name, types.MethodType(method.__func__, obj))
def bind_function(func, obj) -> None:
"""Bind a function to an object
You must define func with a ``self`` parameter, which is gonna look weird:
def myfunc(self, param):
return param
shell = cmdsh.Shell()
utils.bind_function(myfunc, shell)
You can use this function to bind a function to a class, so that all future
objects of that class have the method:
cmdsh.utils.bind_function(cmdsh.parsers.SimpleParser.parse, cmdsh.Shell)
"""
#
# this is dark python magic
#
# if we were doing this in a hardcoded way, we would:
#
# obj.method_name = types.MethodType(func, obj)
#
func_name = func.__name__
setattr(obj, func_name, types.MethodType(func, obj))
# TODO write bind_attribute()
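# Usage sketch (illustrative, not part of the module): binding a free function
# onto an existing object so it can be called like a method. The class and
# function names below are hypothetical.
#
# class Shell:
#     pass
#
# def greet(self, name):
#     return 'hello ' + name
#
# shell = Shell()
# bind_function(greet, shell)
# assert shell.greet('world') == 'hello world'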
| pt | 0.207074 | 2.542173 | 3 |
tensorflow/python/tpu/tpu_outside_compilation_test.py | Arushacked/tensorflow | 78 | 14977 | <filename>tensorflow/python/tpu/tpu_outside_compilation_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU outside compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
zone=FLAGS.zone,
project=FLAGS.project,
)
return resolver
def get_tpu_strategy():
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
return tpu_lib.TPUStrategy(resolver)
class TpuOutsideCompilationTest(test.TestCase):
def testResourceVariableAssignOnHost(self):
strategy = get_tpu_strategy()
with strategy.scope():
v = variables.Variable(
0.0, aggregation=variables.VariableAggregation.MEAN)
v2 = variables.Variable(0.0, aggregation=variables.VariableAggregation.MEAN)
def assign_fn():
v2.assign_add(4.0)
@def_function.function
def train_step():
def assign_add():
v.assign_add(2.0)
tpu.outside_compilation(assign_fn)
v.assign_add(3.0)
strategy.run(assign_add)
return
train_step()
self.assertAllEqual(4.0 * strategy.num_replicas_in_sync, v2.numpy())
self.assertAllEqual(5.0, v.numpy())
def testHostInputOnly(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn, x2)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOutput(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output = tpu.outside_compilation(outside_fn, x2)
return output
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationControlFlowIf(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
if x < 50.0:
return tpu.outside_compilation(outside_fn, x2)
else:
return x2
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationControlFlowWhile(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
while x2 < 50.0:
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
if __name__ == "__main__":
test.main()
| it | 0.186161 | 2.037626 | 2 |
Source/Oyooni/Text Recognition/server.py | Oyooni5245/Oyooni | 0 | 14978 | <gh_stars>0
from flask import Flask, request
from flask_restful import Resource, Api
from test import get_models, getTextFromImage
from testDocument import getText
from time import time
app = Flask(__name__)
api = Api(app)
net, refine_net = get_models()
class TextRecognizerService(Resource):
def post(self):
try:
json = request.get_json()
image_path = json["ImagePath"]
isDocument = bool(json["IsDocument"])
if not isDocument:
start = time()
brand_name, texts, language = getTextFromImage(
image_path, net, refine_net)
end = time()
return {
"brand_name": brand_name,
"texts": texts,
"language": language,
"inference_time": end - start
}, 200
else:
text, language = getText(image_path, 'fullDocument.json')
return {
"text": text,
"language": language
}, 200
except Exception as e:
return {
'message': str(e)
}, 501
api.add_resource(TextRecognizerService, "/recognize-text")
if __name__ == "__main__":
port = 5006
app.run(debug=True, port=port)
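# Illustrative client sketch (not part of the service): how a caller might use
# the endpoint defined above, assuming the server is running locally on port
# 5006. The image path is hypothetical.
#
# import requests
# resp = requests.post(
#     "http://localhost:5006/recognize-text",
#     json={"ImagePath": "/tmp/sample.jpg", "IsDocument": False},
# )
# print(resp.status_code, resp.json())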
| none | 1 | 2.483673 | 2 |
varats/varats/plots/blame_interaction_graph_plots.py | Kaufi-Jonas/VaRA-Tool-Suite | 0 | 14979 | """Module for BlameInteractionGraph plots."""
import typing as tp
from datetime import datetime
from pathlib import Path
import click
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import plotly.offline as offply
from matplotlib import style
from varats.data.reports.blame_interaction_graph import (
create_blame_interaction_graph,
CIGNodeAttrs,
CIGEdgeAttrs,
AIGNodeAttrs,
CAIGNodeAttrs,
)
from varats.data.reports.blame_report import BlameReport
from varats.mapping.commit_map import get_commit_map
from varats.paper_mgmt.case_study import (
newest_processed_revision_for_case_study,
)
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import (
PlotGenerator,
REQUIRE_CASE_STUDY,
REQUIRE_REVISION,
)
from varats.plots.chord_plot_utils import (
make_chord_plot,
make_arc_plot,
NodeTy,
ChordPlotNodeInfo,
ChordPlotEdgeInfo,
ArcPlotEdgeInfo,
ArcPlotNodeInfo,
)
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.utils.git_util import (
CommitRepoPair,
create_commit_lookup_helper,
UNCOMMITTED_COMMIT_HASH,
FullCommitHash,
ShortCommitHash,
)
class CommitInteractionGraphPlot(Plot, plot_name='cig_plot'):
"""Creates a dot file for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
# Nothing to do here.
pass
def save(self, plot_dir: Path, filetype: str = 'svg') -> None:
project_name = self.plot_kwargs["project"]
revision = self.plot_kwargs["revision"]
cig = create_blame_interaction_graph(project_name, revision
).commit_interaction_graph()
nx.set_node_attributes(
cig, {node: cig.nodes[node]["commit_hash"] for node in cig.nodes},
"label"
)
# pylint: disable=import-outside-toplevel
from networkx.drawing.nx_agraph import write_dot
write_dot(cig, plot_dir / self.plot_file_name("dot"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CommitInteractionGraphPlotGenerator(
PlotGenerator,
generator_name="cig-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Plot a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphPlot(self.plot_config, **self.plot_kwargs)
]
NodeInfoTy = tp.TypeVar("NodeInfoTy", ChordPlotNodeInfo, ArcPlotNodeInfo)
EdgeInfoTy = tp.TypeVar("EdgeInfoTy", ChordPlotEdgeInfo, ArcPlotEdgeInfo)
def _prepare_cig_plotly(
project_name: str, revision: FullCommitHash,
create_node_info: tp.Callable[[NodeTy, CommitRepoPair, nx.DiGraph],
NodeInfoTy],
create_edge_info: tp.Callable[[CommitRepoPair, CommitRepoPair, int],
EdgeInfoTy]
) -> tp.Tuple[tp.List[tp.Tuple[NodeTy, NodeInfoTy]], tp.List[tp.Tuple[
NodeTy, NodeTy, EdgeInfoTy]]]:
commit_lookup = create_commit_lookup_helper(project_name)
cig = create_blame_interaction_graph(project_name,
revision).commit_interaction_graph()
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
commit = commit_lookup(node)
if not commit:
return False
# make filter configurable
return datetime.utcfromtimestamp(commit.commit_time
) >= datetime(2015, 1, 1)
nodes: tp.List[tp.Tuple[NodeTy, NodeInfoTy]] = []
node_meta: tp.Dict[NodeTy, CommitRepoPair] = {}
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
node_meta[node] = commit
nodes.append((node, create_node_info(node, commit, cig)))
nodes = sorted(
nodes, key=lambda x: int(commit_lookup(node_meta[x[0]]).commit_time)
)
edges: tp.List[tp.Tuple[NodeTy, NodeTy, EdgeInfoTy]] = []
for source, sink in cig.edges:
amount = tp.cast(CIGEdgeAttrs, cig[source][sink])["amount"]
source_commit = tp.cast(CIGNodeAttrs, cig.nodes[source])["commit"]
sink_commit = tp.cast(CIGNodeAttrs, cig.nodes[sink])["commit"]
if not filter_nodes(source_commit) or not filter_nodes(sink_commit):
continue
edges.append((
source, sink, create_edge_info(source_commit, sink_commit, amount)
))
return nodes, edges
class CommitInteractionGraphChordPlot(Plot, plot_name='cig_chord_plot'):
"""Chord plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ChordPlotNodeInfo:
del node
del cig
return {"info": commit.commit_hash.short_hash, "color": 1}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ChordPlotEdgeInfo:
return {
"size": amount,
"color": 1,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_chord_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGChordPlotGenerator(
PlotGenerator,
generator_name="cig-chord-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates a chord plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphChordPlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitInteractionGraphArcPlot(Plot, plot_name='cig_arc_plot'):
"""Arc plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ArcPlotNodeInfo:
return {
"info": commit.commit_hash.short_hash,
"size": cig.degree(node),
"fill_color": cig.out_degree(node),
"line_color": cig.in_degree(node)
}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ArcPlotEdgeInfo:
return {
"size": amount,
"color": amount,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_arc_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGArcPlotGenerator(
PlotGenerator,
generator_name="cig-arc-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates an arc plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphArcPlot(self.plot_config, **self.plot_kwargs)
]
OPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option(
"--sort-by",
type=click.Choice(["degree", "time"]),
default="degree",
required=False,
help="Sort method for commit interaction graph nodes."
)
class CommitInteractionGraphNodeDegreePlot(Plot, plot_name='cig_node_degrees'):
"""
Plot node degrees of a commit interaction graph.
Additional arguments:
- sort: criteria to sort the revisions [degree, time]
"""
def plot(self, view_mode: bool) -> None:
sort = self.plot_kwargs["sort"]
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
xlabel = ""
if sort == "time":
xlabel = "Time (old to new)"
elif sort == "degree":
xlabel = "Commits"
axes.set_xlabel(xlabel)
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
cig = create_blame_interaction_graph(case_study.project_name, revision
).commit_interaction_graph()
commit_lookup = create_commit_lookup_helper(case_study.project_name)
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
return bool(commit_lookup(node))
def commit_time(node: CommitRepoPair) -> datetime:
return datetime.utcfromtimestamp(commit_lookup(node).commit_time)
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
nodes.append(({
"commit_hash": commit.commit_hash,
"commit_time": commit_time(commit),
"node_degree": cig.degree(node),
"node_out_degree": cig.out_degree(node),
"node_in_degree": cig.in_degree(node),
}))
data = pd.DataFrame(nodes)
if sort == "time":
data.sort_values(by="commit_time", inplace=True)
node_degrees = data.loc[:, ["commit_hash", "node_degree"]]
node_out_degrees = data.loc[:, ["commit_hash", "node_out_degree"]]
node_in_degrees = data.loc[:, ["commit_hash", "node_in_degree"]]
if sort == "degree":
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="cig-node-degrees",
options=[REQUIRE_CASE_STUDY, OPTIONAL_SORT_METHOD]
):
"""Generates a plot of node degrees of a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class AuthorInteractionGraphNodeDegreePlot(Plot, plot_name='aig_node_degrees'):
"""Plot node degrees of a author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Author Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
axes.set_xlabel("Authors")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
aig = create_blame_interaction_graph(project_name, revision
).author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in aig.nodes:
node_attrs = tp.cast(AIGNodeAttrs, aig.nodes[node])
author = node_attrs["author"]
nodes.append(({
"author": author,
"node_degree": aig.degree(node),
"node_out_degree": aig.out_degree(node),
"node_in_degree": aig.in_degree(node),
}))
data = pd.DataFrame(nodes)
node_degrees = data.loc[:, ["author", "node_degree"]]
node_out_degrees = data.loc[:, ["author", "node_out_degree"]]
node_in_degrees = data.loc[:, ["author", "node_in_degree"]]
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class AIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="aig-node-degrees",
options=[REQUIRE_CASE_STUDY]
):
"""Generates a plot of node degrees of a author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
AuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitAuthorInteractionGraphNodeDegreePlot(
Plot, plot_name='caig_node_degrees'
):
"""Plot node degrees of commits in a commit-author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit-Author Interaction Graph - # Interacting Authors")
axes.set_title(case_study.project_name)
axes.set_ylabel("Authors")
axes.set_xlabel("Commits")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
caig = create_blame_interaction_graph(project_name, revision
).commit_author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in caig.nodes:
node_attrs = tp.cast(CAIGNodeAttrs, caig.nodes[node])
commit = node_attrs["commit"]
if commit:
nodes.append(({
"commit": commit.commit_hash,
"num_authors": caig.degree(node)
}))
data = pd.DataFrame(nodes)
num_authors = data.loc[:, ["commit", "num_authors"]]
num_authors.sort_values(by="num_authors", inplace=True)
axes.plot(num_authors["num_authors"].values)
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CAIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="caig-node-degrees",
options=[
REQUIRE_CASE_STUDY,
]
):
"""Generates a plot of node degrees of a commit-author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitAuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
| """Module for BlameInteractionGraph plots."""
import typing as tp
from datetime import datetime
from pathlib import Path
import click
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import plotly.offline as offply
from matplotlib import style
from varats.data.reports.blame_interaction_graph import (
create_blame_interaction_graph,
CIGNodeAttrs,
CIGEdgeAttrs,
AIGNodeAttrs,
CAIGNodeAttrs,
)
from varats.data.reports.blame_report import BlameReport
from varats.mapping.commit_map import get_commit_map
from varats.paper_mgmt.case_study import (
newest_processed_revision_for_case_study,
)
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import (
PlotGenerator,
REQUIRE_CASE_STUDY,
REQUIRE_REVISION,
)
from varats.plots.chord_plot_utils import (
make_chord_plot,
make_arc_plot,
NodeTy,
ChordPlotNodeInfo,
ChordPlotEdgeInfo,
ArcPlotEdgeInfo,
ArcPlotNodeInfo,
)
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.utils.git_util import (
CommitRepoPair,
create_commit_lookup_helper,
UNCOMMITTED_COMMIT_HASH,
FullCommitHash,
ShortCommitHash,
)
class CommitInteractionGraphPlot(Plot, plot_name='cig_plot'):
"""Creates a dot file for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
# Nothing to do here.
pass
def save(self, plot_dir: Path, filetype: str = 'svg') -> None:
project_name = self.plot_kwargs["project"]
revision = self.plot_kwargs["revision"]
cig = create_blame_interaction_graph(project_name, revision
).commit_interaction_graph()
nx.set_node_attributes(
cig, {node: cig.nodes[node]["commit_hash"] for node in cig.nodes},
"label"
)
# pylint: disable=import-outside-toplevel
from networkx.drawing.nx_agraph import write_dot
write_dot(cig, plot_dir / self.plot_file_name("dot"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CommitInteractionGraphPlotGenerator(
PlotGenerator,
generator_name="cig-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Plot a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphPlot(self.plot_config, **self.plot_kwargs)
]
NodeInfoTy = tp.TypeVar("NodeInfoTy", ChordPlotNodeInfo, ArcPlotNodeInfo)
EdgeInfoTy = tp.TypeVar("EdgeInfoTy", ChordPlotEdgeInfo, ArcPlotEdgeInfo)
def _prepare_cig_plotly(
project_name: str, revision: FullCommitHash,
create_node_info: tp.Callable[[NodeTy, CommitRepoPair, nx.DiGraph],
NodeInfoTy],
create_edge_info: tp.Callable[[CommitRepoPair, CommitRepoPair, int],
EdgeInfoTy]
) -> tp.Tuple[tp.List[tp.Tuple[NodeTy, NodeInfoTy]], tp.List[tp.Tuple[
NodeTy, NodeTy, EdgeInfoTy]]]:
commit_lookup = create_commit_lookup_helper(project_name)
cig = create_blame_interaction_graph(project_name,
revision).commit_interaction_graph()
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
commit = commit_lookup(node)
if not commit:
return False
# make filter configurable
return datetime.utcfromtimestamp(commit.commit_time
) >= datetime(2015, 1, 1)
nodes: tp.List[tp.Tuple[NodeTy, NodeInfoTy]] = []
node_meta: tp.Dict[NodeTy, CommitRepoPair] = {}
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
node_meta[node] = commit
nodes.append((node, create_node_info(node, commit, cig)))
nodes = sorted(
nodes, key=lambda x: int(commit_lookup(node_meta[x[0]]).commit_time)
)
edges: tp.List[tp.Tuple[NodeTy, NodeTy, EdgeInfoTy]] = []
for source, sink in cig.edges:
amount = tp.cast(CIGEdgeAttrs, cig[source][sink])["amount"]
source_commit = tp.cast(CIGNodeAttrs, cig.nodes[source])["commit"]
sink_commit = tp.cast(CIGNodeAttrs, cig.nodes[sink])["commit"]
if not filter_nodes(source_commit) or not filter_nodes(sink_commit):
continue
edges.append((
source, sink, create_edge_info(source_commit, sink_commit, amount)
))
return nodes, edges
class CommitInteractionGraphChordPlot(Plot, plot_name='cig_chord_plot'):
"""Chord plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ChordPlotNodeInfo:
del node
del cig
return {"info": commit.commit_hash.short_hash, "color": 1}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ChordPlotEdgeInfo:
return {
"size": amount,
"color": 1,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_chord_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGChordPlotGenerator(
PlotGenerator,
generator_name="cig-chord-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates a chord plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphChordPlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitInteractionGraphArcPlot(Plot, plot_name='cig_arc_plot'):
"""Arc plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ArcPlotNodeInfo:
return {
"info": commit.commit_hash.short_hash,
"size": cig.degree(node),
"fill_color": cig.out_degree(node),
"line_color": cig.in_degree(node)
}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ArcPlotEdgeInfo:
return {
"size": amount,
"color": amount,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_arc_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGArcPlotGenerator(
PlotGenerator,
generator_name="cig-arc-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates an arc plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphArcPlot(self.plot_config, **self.plot_kwargs)
]
OPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option(
"--sort-by",
type=click.Choice(["degree", "time"]),
default="degree",
required=False,
help="Sort method for commit interaction graph nodes."
)
class CommitInteractionGraphNodeDegreePlot(Plot, plot_name='cig_node_degrees'):
"""
Plot node degrees of a commit interaction graph.
Additional arguments:
- sort: criteria to sort the revisions [degree, time]
"""
def plot(self, view_mode: bool) -> None:
sort = self.plot_kwargs["sort"]
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
xlabel = ""
if sort == "time":
xlabel = "Time (old to new)"
elif sort == "degree":
xlabel = "Commits"
axes.set_xlabel(xlabel)
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
cig = create_blame_interaction_graph(case_study.project_name, revision
).commit_interaction_graph()
commit_lookup = create_commit_lookup_helper(case_study.project_name)
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
return bool(commit_lookup(node))
def commit_time(node: CommitRepoPair) -> datetime:
return datetime.utcfromtimestamp(commit_lookup(node).commit_time)
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
nodes.append(({
"commit_hash": commit.commit_hash,
"commit_time": commit_time(commit),
"node_degree": cig.degree(node),
"node_out_degree": cig.out_degree(node),
"node_in_degree": cig.in_degree(node),
}))
data = pd.DataFrame(nodes)
if sort == "time":
data.sort_values(by="commit_time", inplace=True)
node_degrees = data.loc[:, ["commit_hash", "node_degree"]]
node_out_degrees = data.loc[:, ["commit_hash", "node_out_degree"]]
node_in_degrees = data.loc[:, ["commit_hash", "node_in_degree"]]
if sort == "degree":
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="cig-node-degrees",
options=[REQUIRE_CASE_STUDY, OPTIONAL_SORT_METHOD]
):
"""Generates a plot of node degrees of a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class AuthorInteractionGraphNodeDegreePlot(Plot, plot_name='aig_node_degrees'):
"""Plot node degrees of a author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Author Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
axes.set_xlabel("Authors")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
aig = create_blame_interaction_graph(project_name, revision
).author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in aig.nodes:
node_attrs = tp.cast(AIGNodeAttrs, aig.nodes[node])
author = node_attrs["author"]
nodes.append(({
"author": author,
"node_degree": aig.degree(node),
"node_out_degree": aig.out_degree(node),
"node_in_degree": aig.in_degree(node),
}))
data = pd.DataFrame(nodes)
node_degrees = data.loc[:, ["author", "node_degree"]]
node_out_degrees = data.loc[:, ["author", "node_out_degree"]]
node_in_degrees = data.loc[:, ["author", "node_in_degree"]]
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class AIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="aig-node-degrees",
options=[REQUIRE_CASE_STUDY]
):
"""Generates a plot of node degrees of a author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
AuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitAuthorInteractionGraphNodeDegreePlot(
Plot, plot_name='caig_node_degrees'
):
"""Plot node degrees of commits in a commit-author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit-Author Interaction Graph - # Interacting Authors")
axes.set_title(case_study.project_name)
axes.set_ylabel("Authors")
axes.set_xlabel("Commits")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
caig = create_blame_interaction_graph(project_name, revision
).commit_author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in caig.nodes:
node_attrs = tp.cast(CAIGNodeAttrs, caig.nodes[node])
commit = node_attrs["commit"]
if commit:
nodes.append(({
"commit": commit.commit_hash,
"num_authors": caig.degree(node)
}))
data = pd.DataFrame(nodes)
num_authors = data.loc[:, ["commit", "num_authors"]]
num_authors.sort_values(by="num_authors", inplace=True)
axes.plot(num_authors["num_authors"].values)
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CAIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="caig-node-degrees",
options=[
REQUIRE_CASE_STUDY,
]
):
"""Generates a plot of node degrees of a commit-author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitAuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
| pt | 0.243368 | 2.127263 | 2 |
Day 7/Day 7.py | Dullstar/Advent-Of-Code-2020 | 0 | 14980 | import re
class Rule:
def __init__(self, line):
line = line.strip().split(" contain ")
line[1] = line[1].strip(".").split(", ")
self.contents = {}
for item in line[1]:
# Grab that number out in front
regex = re.compile(r"[0-9]+")
# If we didn't find one, that means there are no bags inside
if match := regex.match(item):
quantity = int(item[match.span()[0]:match.span()[1]])
# The +1 deals with the space
bag_type = item[match.span()[1] + 1:]
if quantity > 1:
# This gets rid of the s if it's plural
bag_type = bag_type[:-1]
self.contents[bag_type] = quantity
# The s makes things irritating so I want it gone
self.bag_type = line[0][:-1]
def contains_directly(self, bag_type: str):
return bag_type in self.contents
# Warning: recursive
def contains(self, bag_type: str, rule_dict: dict):
if self.contains_directly(bag_type):
return True
else:
for bag in self.contents:
if bag in rule_dict:
if rule_dict[bag].contains(bag_type, rule_dict):
return True
else:
print("An unexpected bag was discovered!")
return False
def count_internal_bags(self, rule_dict: dict):
internal_bags = 0
for bag in self.contents:
# count these bags...
internal_bags += self.contents[bag] # recall that this value represents the quantity
# ...and count the bags inside of it
internal_bags += rule_dict[bag].count_internal_bags(rule_dict) * self.contents[bag]
return internal_bags
def parse_input(filename: str):
with open(filename, "r") as file:
rules = {}
for line in file:
rule = Rule(line)
print(f"{rule.bag_type} contains {rule.contents}")
rules[rule.bag_type] = rule
return rules
def main():
rule_dict = parse_input("input.txt")
shiny_gold = 0
for rule_entry in rule_dict.keys():
rule = rule_dict[rule_entry]
if rule.contains("shiny gold bag", rule_dict):
print(f"Found {rule.contents} in {rule.bag_type}")
shiny_gold += 1
print("\n")
print(f"Found {shiny_gold} bags containing at least one shiny gold bag.")
print(f"A shiny gold bag contains {rule_dict['shiny gold bag'].count_internal_bags(rule_dict)} bags.")
if __name__ == "__main__":
main()
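# Worked example (added for illustration; not part of the original solution).
# Tracing Rule.__init__ on the AoC sample line
# "light red bags contain 1 bright white bag, 2 muted yellow bags."
# gives bag_type = "light red bag" and
# contents = {"bright white bag": 1, "muted yellow bag": 2}:
# the quantity regex matches "1" and "2", and the trailing "s" is stripped
# whenever the quantity is greater than one. A line ending in "no other bags."
# yields an empty contents dict because the regex never matches. With those
# rules loaded, count_internal_bags on "light red bag" returns 1 + 2 = 3,
# since both inner bag types are themselves empty.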
| pt | 0.117186 | 3.573402 | 4 |
sdk/python/pulumi_aws_native/apigateway/outputs.py | AaronFriel/pulumi-aws-native | 29 | 14981 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ApiKeyStageKey',
'ApiKeyTag',
'ClientCertificateTag',
'DeploymentAccessLogSetting',
'DeploymentCanarySetting',
'DeploymentCanarySettings',
'DeploymentMethodSetting',
'DeploymentStageDescription',
'DeploymentTag',
'DocumentationPartLocation',
'DomainNameEndpointConfiguration',
'DomainNameMutualTlsAuthentication',
'DomainNameTag',
'MethodIntegration',
'MethodIntegrationResponse',
'MethodResponse',
'RestApiEndpointConfiguration',
'RestApiS3Location',
'RestApiTag',
'StageAccessLogSetting',
'StageCanarySetting',
'StageMethodSetting',
'StageTag',
'UsagePlanApiStage',
'UsagePlanQuotaSettings',
'UsagePlanTag',
'UsagePlanThrottleSettings',
'VpcLinkTag',
]
@pulumi.output_type
class ApiKeyStageKey(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "restApiId":
suggest = "rest_api_id"
elif key == "stageName":
suggest = "stage_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApiKeyStageKey. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApiKeyStageKey.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApiKeyStageKey.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
rest_api_id: Optional[str] = None,
stage_name: Optional[str] = None):
"""
:param str rest_api_id: The ID of a RestApi resource that includes the stage with which you want to associate the API key.
:param str stage_name: The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property.
"""
if rest_api_id is not None:
pulumi.set(__self__, "rest_api_id", rest_api_id)
if stage_name is not None:
pulumi.set(__self__, "stage_name", stage_name)
@property
@pulumi.getter(name="restApiId")
def rest_api_id(self) -> Optional[str]:
"""
The ID of a RestApi resource that includes the stage with which you want to associate the API key.
"""
return pulumi.get(self, "rest_api_id")
@property
@pulumi.getter(name="stageName")
def stage_name(self) -> Optional[str]:
"""
The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property.
"""
return pulumi.get(self, "stage_name")
@pulumi.output_type
class ApiKeyTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ClientCertificateTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class DeploymentAccessLogSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "destinationArn":
suggest = "destination_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentAccessLogSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentAccessLogSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentAccessLogSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
destination_arn: Optional[str] = None,
format: Optional[str] = None):
"""
:param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.
:param str format: A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
"""
if destination_arn is not None:
pulumi.set(__self__, "destination_arn", destination_arn)
if format is not None:
pulumi.set(__self__, "format", format)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.
"""
return pulumi.get(self, "destination_arn")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
"""
return pulumi.get(self, "format")
@pulumi.output_type
class DeploymentCanarySetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentCanarySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentCanarySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
:param float percent_traffic: The percent (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache or not.
"""
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percent (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache or not.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class DeploymentCanarySettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentCanarySettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentCanarySettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
:param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache.
"""
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percentage (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class DeploymentMethodSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "httpMethod":
suggest = "http_method"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "resourcePath":
suggest = "resource_path"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentMethodSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentMethodSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentMethodSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
data_trace_enabled: Optional[bool] = None,
http_method: Optional[str] = None,
logging_level: Optional[str] = None,
metrics_enabled: Optional[bool] = None,
resource_path: Optional[str] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None):
"""
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str http_method: The HTTP method.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
The HTTP method.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[str]:
"""
The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash.
"""
return pulumi.get(self, "resource_path")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@pulumi.output_type
class DeploymentStageDescription(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessLogSetting":
suggest = "access_log_setting"
elif key == "cacheClusterEnabled":
suggest = "cache_cluster_enabled"
elif key == "cacheClusterSize":
suggest = "cache_cluster_size"
elif key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "canarySetting":
suggest = "canary_setting"
elif key == "clientCertificateId":
suggest = "client_certificate_id"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "documentationVersion":
suggest = "documentation_version"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "methodSettings":
suggest = "method_settings"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
elif key == "tracingEnabled":
suggest = "tracing_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentStageDescription. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentStageDescription.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentStageDescription.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_log_setting: Optional['outputs.DeploymentAccessLogSetting'] = None,
cache_cluster_enabled: Optional[bool] = None,
cache_cluster_size: Optional[str] = None,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
canary_setting: Optional['outputs.DeploymentCanarySetting'] = None,
client_certificate_id: Optional[str] = None,
data_trace_enabled: Optional[bool] = None,
description: Optional[str] = None,
documentation_version: Optional[str] = None,
logging_level: Optional[str] = None,
method_settings: Optional[Sequence['outputs.DeploymentMethodSetting']] = None,
metrics_enabled: Optional[bool] = None,
tags: Optional[Sequence['outputs.DeploymentTag']] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None,
tracing_enabled: Optional[bool] = None,
variables: Optional[Any] = None):
"""
:param 'DeploymentAccessLogSetting' access_log_setting: Specifies settings for logging access in this stage.
:param bool cache_cluster_enabled: Indicates whether cache clustering is enabled for the stage.
:param str cache_cluster_size: The size of the stage's cache cluster.
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param 'DeploymentCanarySetting' canary_setting: Specifies settings for the canary deployment in this stage.
:param str client_certificate_id: The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str description: A description of the purpose of the stage.
:param str documentation_version: The version identifier of the API documentation snapshot.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
:param Sequence['DeploymentMethodSetting'] method_settings: Configures settings for all of the stage's methods.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param Sequence['DeploymentTag'] tags: An array of arbitrary tags (key-value pairs) to associate with the stage.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param bool tracing_enabled: Specifies whether active tracing with X-ray is enabled for this stage.
:param Any variables: A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
if access_log_setting is not None:
pulumi.set(__self__, "access_log_setting", access_log_setting)
if cache_cluster_enabled is not None:
pulumi.set(__self__, "cache_cluster_enabled", cache_cluster_enabled)
if cache_cluster_size is not None:
pulumi.set(__self__, "cache_cluster_size", cache_cluster_size)
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if canary_setting is not None:
pulumi.set(__self__, "canary_setting", canary_setting)
if client_certificate_id is not None:
pulumi.set(__self__, "client_certificate_id", client_certificate_id)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if description is not None:
pulumi.set(__self__, "description", description)
if documentation_version is not None:
pulumi.set(__self__, "documentation_version", documentation_version)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if method_settings is not None:
pulumi.set(__self__, "method_settings", method_settings)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
if tracing_enabled is not None:
pulumi.set(__self__, "tracing_enabled", tracing_enabled)
if variables is not None:
pulumi.set(__self__, "variables", variables)
@property
@pulumi.getter(name="accessLogSetting")
def access_log_setting(self) -> Optional['outputs.DeploymentAccessLogSetting']:
"""
Specifies settings for logging access in this stage.
"""
return pulumi.get(self, "access_log_setting")
@property
@pulumi.getter(name="cacheClusterEnabled")
def cache_cluster_enabled(self) -> Optional[bool]:
"""
Indicates whether cache clustering is enabled for the stage.
"""
return pulumi.get(self, "cache_cluster_enabled")
@property
@pulumi.getter(name="cacheClusterSize")
def cache_cluster_size(self) -> Optional[str]:
"""
The size of the stage's cache cluster.
"""
return pulumi.get(self, "cache_cluster_size")
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="canarySetting")
def canary_setting(self) -> Optional['outputs.DeploymentCanarySetting']:
"""
Specifies settings for the canary deployment in this stage.
"""
return pulumi.get(self, "canary_setting")
@property
@pulumi.getter(name="clientCertificateId")
def client_certificate_id(self) -> Optional[str]:
"""
The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
"""
return pulumi.get(self, "client_certificate_id")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of the purpose of the stage.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="documentationVersion")
def documentation_version(self) -> Optional[str]:
"""
The version identifier of the API documentation snapshot.
"""
return pulumi.get(self, "documentation_version")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="methodSettings")
def method_settings(self) -> Optional[Sequence['outputs.DeploymentMethodSetting']]:
"""
Configures settings for all of the stage's methods.
"""
return pulumi.get(self, "method_settings")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.DeploymentTag']]:
"""
An array of arbitrary tags (key-value pairs) to associate with the stage.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@property
@pulumi.getter(name="tracingEnabled")
def tracing_enabled(self) -> Optional[bool]:
"""
Specifies whether active tracing with X-ray is enabled for this stage.
"""
return pulumi.get(self, "tracing_enabled")
@property
@pulumi.getter
def variables(self) -> Optional[Any]:
"""
A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
return pulumi.get(self, "variables")
@pulumi.output_type
class DeploymentTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag
:param str value: The value for the tag
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag
"""
return pulumi.get(self, "value")
@pulumi.output_type
class DocumentationPartLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DocumentationPartLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DocumentationPartLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DocumentationPartLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
method: Optional[str] = None,
name: Optional[str] = None,
path: Optional[str] = None,
status_code: Optional[str] = None,
type: Optional[str] = None):
if method is not None:
pulumi.set(__self__, "method", method)
if name is not None:
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
if status_code is not None:
pulumi.set(__self__, "status_code", status_code)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def method(self) -> Optional[str]:
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[str]:
return pulumi.get(self, "path")
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> Optional[str]:
return pulumi.get(self, "status_code")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
@pulumi.output_type
class DomainNameEndpointConfiguration(dict):
def __init__(__self__, *,
types: Optional[Sequence[str]] = None):
if types is not None:
pulumi.set(__self__, "types", types)
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
@pulumi.output_type
class DomainNameMutualTlsAuthentication(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "truststoreUri":
suggest = "truststore_uri"
elif key == "truststoreVersion":
suggest = "truststore_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainNameMutualTlsAuthentication. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainNameMutualTlsAuthentication.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainNameMutualTlsAuthentication.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
truststore_uri: Optional[str] = None,
truststore_version: Optional[str] = None):
if truststore_uri is not None:
pulumi.set(__self__, "truststore_uri", truststore_uri)
if truststore_version is not None:
pulumi.set(__self__, "truststore_version", truststore_version)
@property
@pulumi.getter(name="truststoreUri")
def truststore_uri(self) -> Optional[str]:
return pulumi.get(self, "truststore_uri")
@property
@pulumi.getter(name="truststoreVersion")
def truststore_version(self) -> Optional[str]:
return pulumi.get(self, "truststore_version")
@pulumi.output_type
class DomainNameTag(dict):
def __init__(__self__, *,
key: Optional[str] = None,
value: Optional[str] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> Optional[str]:
return pulumi.get(self, "value")
@pulumi.output_type
class MethodIntegration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheKeyParameters":
suggest = "cache_key_parameters"
elif key == "cacheNamespace":
suggest = "cache_namespace"
elif key == "connectionId":
suggest = "connection_id"
elif key == "connectionType":
suggest = "connection_type"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "integrationHttpMethod":
suggest = "integration_http_method"
elif key == "integrationResponses":
suggest = "integration_responses"
elif key == "passthroughBehavior":
suggest = "passthrough_behavior"
elif key == "requestParameters":
suggest = "request_parameters"
elif key == "requestTemplates":
suggest = "request_templates"
elif key == "timeoutInMillis":
suggest = "timeout_in_millis"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: 'MethodIntegrationType',
cache_key_parameters: Optional[Sequence[str]] = None,
cache_namespace: Optional[str] = None,
connection_id: Optional[str] = None,
connection_type: Optional['MethodIntegrationConnectionType'] = None,
content_handling: Optional['MethodIntegrationContentHandling'] = None,
credentials: Optional[str] = None,
integration_http_method: Optional[str] = None,
integration_responses: Optional[Sequence['outputs.MethodIntegrationResponse']] = None,
passthrough_behavior: Optional['MethodIntegrationPassthroughBehavior'] = None,
request_parameters: Optional[Any] = None,
request_templates: Optional[Any] = None,
timeout_in_millis: Optional[int] = None,
uri: Optional[str] = None):
"""
:param 'MethodIntegrationType' type: The type of backend that your method is running.
:param Sequence[str] cache_key_parameters: A list of request parameters whose values API Gateway caches.
:param str cache_namespace: An API-specific tag group of related cached parameters.
:param str connection_id: The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.
:param 'MethodIntegrationConnectionType' connection_type: The type of the network connection to the integration endpoint.
:param 'MethodIntegrationContentHandling' content_handling: Specifies how to handle request payload content type conversions.
:param str credentials: The credentials that are required for the integration.
:param str integration_http_method: The integration's HTTP method type.
:param Sequence['MethodIntegrationResponse'] integration_responses: The response that API Gateway provides after a method's backend completes processing a request.
:param 'MethodIntegrationPassthroughBehavior' passthrough_behavior: Indicates when API Gateway passes requests to the targeted backend.
:param Any request_parameters: The request parameters that API Gateway sends with the backend request.
:param Any request_templates: A map of Apache Velocity templates that are applied on the request payload.
:param int timeout_in_millis: Custom timeout between 50 and 29,000 milliseconds.
:param str uri: The Uniform Resource Identifier (URI) for the integration.
"""
pulumi.set(__self__, "type", type)
if cache_key_parameters is not None:
pulumi.set(__self__, "cache_key_parameters", cache_key_parameters)
if cache_namespace is not None:
pulumi.set(__self__, "cache_namespace", cache_namespace)
if connection_id is not None:
pulumi.set(__self__, "connection_id", connection_id)
if connection_type is not None:
pulumi.set(__self__, "connection_type", connection_type)
if content_handling is not None:
pulumi.set(__self__, "content_handling", content_handling)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if integration_http_method is not None:
pulumi.set(__self__, "integration_http_method", integration_http_method)
if integration_responses is not None:
pulumi.set(__self__, "integration_responses", integration_responses)
if passthrough_behavior is not None:
pulumi.set(__self__, "passthrough_behavior", passthrough_behavior)
if request_parameters is not None:
pulumi.set(__self__, "request_parameters", request_parameters)
if request_templates is not None:
pulumi.set(__self__, "request_templates", request_templates)
if timeout_in_millis is not None:
pulumi.set(__self__, "timeout_in_millis", timeout_in_millis)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def type(self) -> 'MethodIntegrationType':
"""
The type of backend that your method is running.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="cacheKeyParameters")
def cache_key_parameters(self) -> Optional[Sequence[str]]:
"""
A list of request parameters whose values API Gateway caches.
"""
return pulumi.get(self, "cache_key_parameters")
@property
@pulumi.getter(name="cacheNamespace")
def cache_namespace(self) -> Optional[str]:
"""
An API-specific tag group of related cached parameters.
"""
return pulumi.get(self, "cache_namespace")
@property
@pulumi.getter(name="connectionId")
def connection_id(self) -> Optional[str]:
"""
The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.
"""
return pulumi.get(self, "connection_id")
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> Optional['MethodIntegrationConnectionType']:
"""
The type of the network connection to the integration endpoint.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter(name="contentHandling")
def content_handling(self) -> Optional['MethodIntegrationContentHandling']:
"""
Specifies how to handle request payload content type conversions.
"""
return pulumi.get(self, "content_handling")
@property
@pulumi.getter
def credentials(self) -> Optional[str]:
"""
The credentials that are required for the integration.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter(name="integrationHttpMethod")
def integration_http_method(self) -> Optional[str]:
"""
The integration's HTTP method type.
"""
return pulumi.get(self, "integration_http_method")
@property
@pulumi.getter(name="integrationResponses")
def integration_responses(self) -> Optional[Sequence['outputs.MethodIntegrationResponse']]:
"""
The response that API Gateway provides after a method's backend completes processing a request.
"""
return pulumi.get(self, "integration_responses")
@property
@pulumi.getter(name="passthroughBehavior")
def passthrough_behavior(self) -> Optional['MethodIntegrationPassthroughBehavior']:
"""
Indicates when API Gateway passes requests to the targeted backend.
"""
return pulumi.get(self, "passthrough_behavior")
@property
@pulumi.getter(name="requestParameters")
def request_parameters(self) -> Optional[Any]:
"""
The request parameters that API Gateway sends with the backend request.
"""
return pulumi.get(self, "request_parameters")
@property
@pulumi.getter(name="requestTemplates")
def request_templates(self) -> Optional[Any]:
"""
A map of Apache Velocity templates that are applied on the request payload.
"""
return pulumi.get(self, "request_templates")
@property
@pulumi.getter(name="timeoutInMillis")
def timeout_in_millis(self) -> Optional[int]:
"""
Custom timeout between 50 and 29,000 milliseconds.
"""
return pulumi.get(self, "timeout_in_millis")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The Uniform Resource Identifier (URI) for the integration.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class MethodIntegrationResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "responseParameters":
suggest = "response_parameters"
elif key == "responseTemplates":
suggest = "response_templates"
elif key == "selectionPattern":
suggest = "selection_pattern"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegrationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegrationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegrationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
content_handling: Optional['MethodIntegrationResponseContentHandling'] = None,
response_parameters: Optional[Any] = None,
response_templates: Optional[Any] = None,
selection_pattern: Optional[str] = None):
"""
:param str status_code: The status code that API Gateway uses to map the integration response to a MethodResponse status code.
:param 'MethodIntegrationResponseContentHandling' content_handling: Specifies how to handle response payload content type conversions.
:param Any response_parameters: The response parameters from the backend response that API Gateway sends to the method response.
:param Any response_templates: The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.
:param str selection_pattern: A regular expression that specifies which error strings or status codes from the backend map to the integration response.
"""
pulumi.set(__self__, "status_code", status_code)
if content_handling is not None:
pulumi.set(__self__, "content_handling", content_handling)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
if response_templates is not None:
pulumi.set(__self__, "response_templates", response_templates)
if selection_pattern is not None:
pulumi.set(__self__, "selection_pattern", selection_pattern)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
"""
The status code that API Gateway uses to map the integration response to a MethodResponse status code.
"""
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="contentHandling")
def content_handling(self) -> Optional['MethodIntegrationResponseContentHandling']:
"""
Specifies how to handle response payload content type conversions.
"""
return pulumi.get(self, "content_handling")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
The response parameters from the backend response that API Gateway sends to the method response.
"""
return pulumi.get(self, "response_parameters")
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> Optional[Any]:
"""
The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.
"""
return pulumi.get(self, "response_templates")
@property
@pulumi.getter(name="selectionPattern")
def selection_pattern(self) -> Optional[str]:
"""
A regular expression that specifies which error strings or status codes from the backend map to the integration response.
"""
return pulumi.get(self, "selection_pattern")
@pulumi.output_type
class MethodResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "responseModels":
suggest = "response_models"
elif key == "responseParameters":
suggest = "response_parameters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
response_models: Optional[Any] = None,
response_parameters: Optional[Any] = None):
"""
:param str status_code: The method response's status code, which you map to an IntegrationResponse.
:param Any response_models: The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.
:param Any response_parameters: Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.
"""
pulumi.set(__self__, "status_code", status_code)
if response_models is not None:
pulumi.set(__self__, "response_models", response_models)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
"""
The method response's status code, which you map to an IntegrationResponse.
"""
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="responseModels")
def response_models(self) -> Optional[Any]:
"""
The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.
"""
return pulumi.get(self, "response_models")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.
"""
return pulumi.get(self, "response_parameters")
@pulumi.output_type
class RestApiEndpointConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "vpcEndpointIds":
suggest = "vpc_endpoint_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RestApiEndpointConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RestApiEndpointConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RestApiEndpointConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
types: Optional[Sequence[str]] = None,
vpc_endpoint_ids: Optional[Sequence[str]] = None):
if types is not None:
pulumi.set(__self__, "types", types)
if vpc_endpoint_ids is not None:
pulumi.set(__self__, "vpc_endpoint_ids", vpc_endpoint_ids)
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
@property
@pulumi.getter(name="vpcEndpointIds")
def vpc_endpoint_ids(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "vpc_endpoint_ids")
@pulumi.output_type
class RestApiS3Location(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eTag":
suggest = "e_tag"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RestApiS3Location. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RestApiS3Location.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RestApiS3Location.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket: Optional[str] = None,
e_tag: Optional[str] = None,
key: Optional[str] = None,
version: Optional[str] = None):
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if key is not None:
pulumi.set(__self__, "key", key)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> Optional[str]:
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def version(self) -> Optional[str]:
return pulumi.get(self, "version")
@pulumi.output_type
class RestApiTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class StageAccessLogSetting(dict):
"""
Specifies settings for logging access in this stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "destinationArn":
suggest = "destination_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageAccessLogSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageAccessLogSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageAccessLogSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
destination_arn: Optional[str] = None,
format: Optional[str] = None):
"""
Specifies settings for logging access in this stage.
:param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.
:param str format: A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.
"""
if destination_arn is not None:
pulumi.set(__self__, "destination_arn", destination_arn)
if format is not None:
pulumi.set(__self__, "format", format)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.
"""
return pulumi.get(self, "destination_arn")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.
"""
return pulumi.get(self, "format")
@pulumi.output_type
class StageCanarySetting(dict):
"""
Specifies settings for the canary deployment in this stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "deploymentId":
suggest = "deployment_id"
elif key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageCanarySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageCanarySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageCanarySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
deployment_id: Optional[str] = None,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
Specifies settings for the canary deployment in this stage.
:param str deployment_id: The identifier of the deployment that the stage points to.
:param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache or not.
"""
if deployment_id is not None:
pulumi.set(__self__, "deployment_id", deployment_id)
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="deploymentId")
def deployment_id(self) -> Optional[str]:
"""
The identifier of the deployment that the stage points to.
"""
return pulumi.get(self, "deployment_id")
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percentage (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache or not.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class StageMethodSetting(dict):
"""
Configures settings for all methods in a stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "httpMethod":
suggest = "http_method"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "resourcePath":
suggest = "resource_path"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageMethodSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageMethodSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageMethodSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
data_trace_enabled: Optional[bool] = None,
http_method: Optional[str] = None,
logging_level: Optional[str] = None,
metrics_enabled: Optional[bool] = None,
resource_path: Optional[str] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None):
"""
Configures settings for all methods in a stage.
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str http_method: The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[str]:
"""
The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
"""
return pulumi.get(self, "resource_path")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@pulumi.output_type
class StageTag(dict):
"""
Identify and categorize resources.
"""
def __init__(__self__, *,
key: str,
value: str):
"""
Identify and categorize resources.
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UsagePlanApiStage(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "apiId":
suggest = "api_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UsagePlanApiStage. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UsagePlanApiStage.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UsagePlanApiStage.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
api_id: Optional[str] = None,
stage: Optional[str] = None,
throttle: Optional[Any] = None):
"""
:param str api_id: The ID of an API that is in the specified Stage property that you want to associate with the usage plan.
:param str stage: The name of the stage to associate with the usage plan.
:param Any throttle: Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.
"""
if api_id is not None:
pulumi.set(__self__, "api_id", api_id)
if stage is not None:
pulumi.set(__self__, "stage", stage)
if throttle is not None:
pulumi.set(__self__, "throttle", throttle)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> Optional[str]:
"""
The ID of an API that is in the specified Stage property that you want to associate with the usage plan.
"""
return pulumi.get(self, "api_id")
@property
@pulumi.getter
def stage(self) -> Optional[str]:
"""
The name of the stage to associate with the usage plan.
"""
return pulumi.get(self, "stage")
@property
@pulumi.getter
def throttle(self) -> Optional[Any]:
"""
Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.
"""
return pulumi.get(self, "throttle")
@pulumi.output_type
class UsagePlanQuotaSettings(dict):
def __init__(__self__, *,
limit: Optional[int] = None,
offset: Optional[int] = None,
period: Optional[str] = None):
"""
:param int limit: The maximum number of requests that users can make within the specified time period.
:param int offset: For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period.
:param str period: The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference.
"""
if limit is not None:
pulumi.set(__self__, "limit", limit)
if offset is not None:
pulumi.set(__self__, "offset", offset)
if period is not None:
pulumi.set(__self__, "period", period)
@property
@pulumi.getter
def limit(self) -> Optional[int]:
"""
The maximum number of requests that users can make within the specified time period.
"""
return pulumi.get(self, "limit")
@property
@pulumi.getter
def offset(self) -> Optional[int]:
"""
For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period.
"""
return pulumi.get(self, "offset")
@property
@pulumi.getter
def period(self) -> Optional[str]:
"""
The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference.
"""
return pulumi.get(self, "period")
@pulumi.output_type
class UsagePlanTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UsagePlanThrottleSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "burstLimit":
suggest = "burst_limit"
elif key == "rateLimit":
suggest = "rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UsagePlanThrottleSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UsagePlanThrottleSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UsagePlanThrottleSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
burst_limit: Optional[int] = None,
rate_limit: Optional[float] = None):
"""
:param int burst_limit: The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity.
:param float rate_limit: The API request steady-state rate limit (average requests per second over an extended period of time).
"""
if burst_limit is not None:
pulumi.set(__self__, "burst_limit", burst_limit)
if rate_limit is not None:
pulumi.set(__self__, "rate_limit", rate_limit)
@property
@pulumi.getter(name="burstLimit")
def burst_limit(self) -> Optional[int]:
"""
The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity.
"""
return pulumi.get(self, "burst_limit")
@property
@pulumi.getter(name="rateLimit")
def rate_limit(self) -> Optional[float]:
"""
The API request steady-state rate limit (average requests per second over an extended period of time).
"""
return pulumi.get(self, "rate_limit")
@pulumi.output_type
class VpcLinkTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ApiKeyStageKey',
'ApiKeyTag',
'ClientCertificateTag',
'DeploymentAccessLogSetting',
'DeploymentCanarySetting',
'DeploymentCanarySettings',
'DeploymentMethodSetting',
'DeploymentStageDescription',
'DeploymentTag',
'DocumentationPartLocation',
'DomainNameEndpointConfiguration',
'DomainNameMutualTlsAuthentication',
'DomainNameTag',
'MethodIntegration',
'MethodIntegrationResponse',
'MethodResponse',
'RestApiEndpointConfiguration',
'RestApiS3Location',
'RestApiTag',
'StageAccessLogSetting',
'StageCanarySetting',
'StageMethodSetting',
'StageTag',
'UsagePlanApiStage',
'UsagePlanQuotaSettings',
'UsagePlanTag',
'UsagePlanThrottleSettings',
'VpcLinkTag',
]
@pulumi.output_type
class ApiKeyStageKey(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "restApiId":
suggest = "rest_api_id"
elif key == "stageName":
suggest = "stage_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApiKeyStageKey. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApiKeyStageKey.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApiKeyStageKey.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
rest_api_id: Optional[str] = None,
stage_name: Optional[str] = None):
"""
:param str rest_api_id: The ID of a RestApi resource that includes the stage with which you want to associate the API key.
:param str stage_name: The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property.
"""
if rest_api_id is not None:
pulumi.set(__self__, "rest_api_id", rest_api_id)
if stage_name is not None:
pulumi.set(__self__, "stage_name", stage_name)
@property
@pulumi.getter(name="restApiId")
def rest_api_id(self) -> Optional[str]:
"""
The ID of a RestApi resource that includes the stage with which you want to associate the API key.
"""
return pulumi.get(self, "rest_api_id")
@property
@pulumi.getter(name="stageName")
def stage_name(self) -> Optional[str]:
"""
The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property.
"""
return pulumi.get(self, "stage_name")
@pulumi.output_type
class ApiKeyTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ClientCertificateTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class DeploymentAccessLogSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "destinationArn":
suggest = "destination_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentAccessLogSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentAccessLogSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentAccessLogSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
destination_arn: Optional[str] = None,
format: Optional[str] = None):
"""
:param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.
:param str format: A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
"""
if destination_arn is not None:
pulumi.set(__self__, "destination_arn", destination_arn)
if format is not None:
pulumi.set(__self__, "format", format)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.
"""
return pulumi.get(self, "destination_arn")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
"""
return pulumi.get(self, "format")
@pulumi.output_type
class DeploymentCanarySetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentCanarySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentCanarySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
:param float percent_traffic: The percent (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache or not.
"""
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percent (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache or not.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class DeploymentCanarySettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentCanarySettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentCanarySettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
:param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache.
"""
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percentage (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class DeploymentMethodSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "httpMethod":
suggest = "http_method"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "resourcePath":
suggest = "resource_path"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentMethodSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentMethodSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentMethodSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
data_trace_enabled: Optional[bool] = None,
http_method: Optional[str] = None,
logging_level: Optional[str] = None,
metrics_enabled: Optional[bool] = None,
resource_path: Optional[str] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None):
"""
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str http_method: The HTTP method.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
The HTTP method.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[str]:
"""
The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash.
"""
return pulumi.get(self, "resource_path")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@pulumi.output_type
class DeploymentStageDescription(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessLogSetting":
suggest = "access_log_setting"
elif key == "cacheClusterEnabled":
suggest = "cache_cluster_enabled"
elif key == "cacheClusterSize":
suggest = "cache_cluster_size"
elif key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "canarySetting":
suggest = "canary_setting"
elif key == "clientCertificateId":
suggest = "client_certificate_id"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "documentationVersion":
suggest = "documentation_version"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "methodSettings":
suggest = "method_settings"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
elif key == "tracingEnabled":
suggest = "tracing_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DeploymentStageDescription. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DeploymentStageDescription.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DeploymentStageDescription.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_log_setting: Optional['outputs.DeploymentAccessLogSetting'] = None,
cache_cluster_enabled: Optional[bool] = None,
cache_cluster_size: Optional[str] = None,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
canary_setting: Optional['outputs.DeploymentCanarySetting'] = None,
client_certificate_id: Optional[str] = None,
data_trace_enabled: Optional[bool] = None,
description: Optional[str] = None,
documentation_version: Optional[str] = None,
logging_level: Optional[str] = None,
method_settings: Optional[Sequence['outputs.DeploymentMethodSetting']] = None,
metrics_enabled: Optional[bool] = None,
tags: Optional[Sequence['outputs.DeploymentTag']] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None,
tracing_enabled: Optional[bool] = None,
variables: Optional[Any] = None):
"""
:param 'DeploymentAccessLogSetting' access_log_setting: Specifies settings for logging access in this stage.
:param bool cache_cluster_enabled: Indicates whether cache clustering is enabled for the stage.
:param str cache_cluster_size: The size of the stage's cache cluster.
        :param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param 'DeploymentCanarySetting' canary_setting: Specifies settings for the canary deployment in this stage.
:param str client_certificate_id: The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str description: A description of the purpose of the stage.
:param str documentation_version: The version identifier of the API documentation snapshot.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
:param Sequence['DeploymentMethodSetting'] method_settings: Configures settings for all of the stage's methods.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param Sequence['DeploymentTag'] tags: An array of arbitrary tags (key-value pairs) to associate with the stage.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param bool tracing_enabled: Specifies whether active tracing with X-ray is enabled for this stage.
:param Any variables: A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
if access_log_setting is not None:
pulumi.set(__self__, "access_log_setting", access_log_setting)
if cache_cluster_enabled is not None:
pulumi.set(__self__, "cache_cluster_enabled", cache_cluster_enabled)
if cache_cluster_size is not None:
pulumi.set(__self__, "cache_cluster_size", cache_cluster_size)
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if canary_setting is not None:
pulumi.set(__self__, "canary_setting", canary_setting)
if client_certificate_id is not None:
pulumi.set(__self__, "client_certificate_id", client_certificate_id)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if description is not None:
pulumi.set(__self__, "description", description)
if documentation_version is not None:
pulumi.set(__self__, "documentation_version", documentation_version)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if method_settings is not None:
pulumi.set(__self__, "method_settings", method_settings)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
if tracing_enabled is not None:
pulumi.set(__self__, "tracing_enabled", tracing_enabled)
if variables is not None:
pulumi.set(__self__, "variables", variables)
@property
@pulumi.getter(name="accessLogSetting")
def access_log_setting(self) -> Optional['outputs.DeploymentAccessLogSetting']:
"""
Specifies settings for logging access in this stage.
"""
return pulumi.get(self, "access_log_setting")
@property
@pulumi.getter(name="cacheClusterEnabled")
def cache_cluster_enabled(self) -> Optional[bool]:
"""
Indicates whether cache clustering is enabled for the stage.
"""
return pulumi.get(self, "cache_cluster_enabled")
@property
@pulumi.getter(name="cacheClusterSize")
def cache_cluster_size(self) -> Optional[str]:
"""
The size of the stage's cache cluster.
"""
return pulumi.get(self, "cache_cluster_size")
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
        Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="canarySetting")
def canary_setting(self) -> Optional['outputs.DeploymentCanarySetting']:
"""
Specifies settings for the canary deployment in this stage.
"""
return pulumi.get(self, "canary_setting")
@property
@pulumi.getter(name="clientCertificateId")
def client_certificate_id(self) -> Optional[str]:
"""
The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage.
"""
return pulumi.get(self, "client_certificate_id")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of the purpose of the stage.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="documentationVersion")
def documentation_version(self) -> Optional[str]:
"""
The version identifier of the API documentation snapshot.
"""
return pulumi.get(self, "documentation_version")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="methodSettings")
def method_settings(self) -> Optional[Sequence['outputs.DeploymentMethodSetting']]:
"""
Configures settings for all of the stage's methods.
"""
return pulumi.get(self, "method_settings")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.DeploymentTag']]:
"""
An array of arbitrary tags (key-value pairs) to associate with the stage.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@property
@pulumi.getter(name="tracingEnabled")
def tracing_enabled(self) -> Optional[bool]:
"""
Specifies whether active tracing with X-ray is enabled for this stage.
"""
return pulumi.get(self, "tracing_enabled")
@property
@pulumi.getter
def variables(self) -> Optional[Any]:
"""
A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+.
"""
return pulumi.get(self, "variables")
@pulumi.output_type
class DeploymentTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag
:param str value: The value for the tag
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag
"""
return pulumi.get(self, "value")
@pulumi.output_type
class DocumentationPartLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DocumentationPartLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DocumentationPartLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DocumentationPartLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
method: Optional[str] = None,
name: Optional[str] = None,
path: Optional[str] = None,
status_code: Optional[str] = None,
type: Optional[str] = None):
if method is not None:
pulumi.set(__self__, "method", method)
if name is not None:
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
if status_code is not None:
pulumi.set(__self__, "status_code", status_code)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def method(self) -> Optional[str]:
return pulumi.get(self, "method")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[str]:
return pulumi.get(self, "path")
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> Optional[str]:
return pulumi.get(self, "status_code")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
@pulumi.output_type
class DomainNameEndpointConfiguration(dict):
def __init__(__self__, *,
types: Optional[Sequence[str]] = None):
if types is not None:
pulumi.set(__self__, "types", types)
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
@pulumi.output_type
class DomainNameMutualTlsAuthentication(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "truststoreUri":
suggest = "truststore_uri"
elif key == "truststoreVersion":
suggest = "truststore_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainNameMutualTlsAuthentication. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainNameMutualTlsAuthentication.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainNameMutualTlsAuthentication.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
truststore_uri: Optional[str] = None,
truststore_version: Optional[str] = None):
if truststore_uri is not None:
pulumi.set(__self__, "truststore_uri", truststore_uri)
if truststore_version is not None:
pulumi.set(__self__, "truststore_version", truststore_version)
@property
@pulumi.getter(name="truststoreUri")
def truststore_uri(self) -> Optional[str]:
return pulumi.get(self, "truststore_uri")
@property
@pulumi.getter(name="truststoreVersion")
def truststore_version(self) -> Optional[str]:
return pulumi.get(self, "truststore_version")
@pulumi.output_type
class DomainNameTag(dict):
def __init__(__self__, *,
key: Optional[str] = None,
value: Optional[str] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> Optional[str]:
return pulumi.get(self, "value")
@pulumi.output_type
class MethodIntegration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheKeyParameters":
suggest = "cache_key_parameters"
elif key == "cacheNamespace":
suggest = "cache_namespace"
elif key == "connectionId":
suggest = "connection_id"
elif key == "connectionType":
suggest = "connection_type"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "integrationHttpMethod":
suggest = "integration_http_method"
elif key == "integrationResponses":
suggest = "integration_responses"
elif key == "passthroughBehavior":
suggest = "passthrough_behavior"
elif key == "requestParameters":
suggest = "request_parameters"
elif key == "requestTemplates":
suggest = "request_templates"
elif key == "timeoutInMillis":
suggest = "timeout_in_millis"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: 'MethodIntegrationType',
cache_key_parameters: Optional[Sequence[str]] = None,
cache_namespace: Optional[str] = None,
connection_id: Optional[str] = None,
connection_type: Optional['MethodIntegrationConnectionType'] = None,
content_handling: Optional['MethodIntegrationContentHandling'] = None,
credentials: Optional[str] = None,
integration_http_method: Optional[str] = None,
integration_responses: Optional[Sequence['outputs.MethodIntegrationResponse']] = None,
passthrough_behavior: Optional['MethodIntegrationPassthroughBehavior'] = None,
request_parameters: Optional[Any] = None,
request_templates: Optional[Any] = None,
timeout_in_millis: Optional[int] = None,
uri: Optional[str] = None):
"""
:param 'MethodIntegrationType' type: The type of backend that your method is running.
:param Sequence[str] cache_key_parameters: A list of request parameters whose values API Gateway caches.
:param str cache_namespace: An API-specific tag group of related cached parameters.
:param str connection_id: The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.
:param 'MethodIntegrationConnectionType' connection_type: The type of the network connection to the integration endpoint.
:param 'MethodIntegrationContentHandling' content_handling: Specifies how to handle request payload content type conversions.
:param str credentials: The credentials that are required for the integration.
:param str integration_http_method: The integration's HTTP method type.
:param Sequence['MethodIntegrationResponse'] integration_responses: The response that API Gateway provides after a method's backend completes processing a request.
:param 'MethodIntegrationPassthroughBehavior' passthrough_behavior: Indicates when API Gateway passes requests to the targeted backend.
:param Any request_parameters: The request parameters that API Gateway sends with the backend request.
:param Any request_templates: A map of Apache Velocity templates that are applied on the request payload.
:param int timeout_in_millis: Custom timeout between 50 and 29,000 milliseconds.
:param str uri: The Uniform Resource Identifier (URI) for the integration.
"""
pulumi.set(__self__, "type", type)
if cache_key_parameters is not None:
pulumi.set(__self__, "cache_key_parameters", cache_key_parameters)
if cache_namespace is not None:
pulumi.set(__self__, "cache_namespace", cache_namespace)
if connection_id is not None:
pulumi.set(__self__, "connection_id", connection_id)
if connection_type is not None:
pulumi.set(__self__, "connection_type", connection_type)
if content_handling is not None:
pulumi.set(__self__, "content_handling", content_handling)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if integration_http_method is not None:
pulumi.set(__self__, "integration_http_method", integration_http_method)
if integration_responses is not None:
pulumi.set(__self__, "integration_responses", integration_responses)
if passthrough_behavior is not None:
pulumi.set(__self__, "passthrough_behavior", passthrough_behavior)
if request_parameters is not None:
pulumi.set(__self__, "request_parameters", request_parameters)
if request_templates is not None:
pulumi.set(__self__, "request_templates", request_templates)
if timeout_in_millis is not None:
pulumi.set(__self__, "timeout_in_millis", timeout_in_millis)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def type(self) -> 'MethodIntegrationType':
"""
The type of backend that your method is running.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="cacheKeyParameters")
def cache_key_parameters(self) -> Optional[Sequence[str]]:
"""
A list of request parameters whose values API Gateway caches.
"""
return pulumi.get(self, "cache_key_parameters")
@property
@pulumi.getter(name="cacheNamespace")
def cache_namespace(self) -> Optional[str]:
"""
An API-specific tag group of related cached parameters.
"""
return pulumi.get(self, "cache_namespace")
@property
@pulumi.getter(name="connectionId")
def connection_id(self) -> Optional[str]:
"""
The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.
"""
return pulumi.get(self, "connection_id")
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> Optional['MethodIntegrationConnectionType']:
"""
The type of the network connection to the integration endpoint.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter(name="contentHandling")
def content_handling(self) -> Optional['MethodIntegrationContentHandling']:
"""
Specifies how to handle request payload content type conversions.
"""
return pulumi.get(self, "content_handling")
@property
@pulumi.getter
def credentials(self) -> Optional[str]:
"""
The credentials that are required for the integration.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter(name="integrationHttpMethod")
def integration_http_method(self) -> Optional[str]:
"""
The integration's HTTP method type.
"""
return pulumi.get(self, "integration_http_method")
@property
@pulumi.getter(name="integrationResponses")
def integration_responses(self) -> Optional[Sequence['outputs.MethodIntegrationResponse']]:
"""
The response that API Gateway provides after a method's backend completes processing a request.
"""
return pulumi.get(self, "integration_responses")
@property
@pulumi.getter(name="passthroughBehavior")
def passthrough_behavior(self) -> Optional['MethodIntegrationPassthroughBehavior']:
"""
Indicates when API Gateway passes requests to the targeted backend.
"""
return pulumi.get(self, "passthrough_behavior")
@property
@pulumi.getter(name="requestParameters")
def request_parameters(self) -> Optional[Any]:
"""
The request parameters that API Gateway sends with the backend request.
"""
return pulumi.get(self, "request_parameters")
@property
@pulumi.getter(name="requestTemplates")
def request_templates(self) -> Optional[Any]:
"""
A map of Apache Velocity templates that are applied on the request payload.
"""
return pulumi.get(self, "request_templates")
@property
@pulumi.getter(name="timeoutInMillis")
def timeout_in_millis(self) -> Optional[int]:
"""
Custom timeout between 50 and 29,000 milliseconds.
"""
return pulumi.get(self, "timeout_in_millis")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The Uniform Resource Identifier (URI) for the integration.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class MethodIntegrationResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "contentHandling":
suggest = "content_handling"
elif key == "responseParameters":
suggest = "response_parameters"
elif key == "responseTemplates":
suggest = "response_templates"
elif key == "selectionPattern":
suggest = "selection_pattern"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodIntegrationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodIntegrationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodIntegrationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
content_handling: Optional['MethodIntegrationResponseContentHandling'] = None,
response_parameters: Optional[Any] = None,
response_templates: Optional[Any] = None,
selection_pattern: Optional[str] = None):
"""
:param str status_code: The status code that API Gateway uses to map the integration response to a MethodResponse status code.
:param 'MethodIntegrationResponseContentHandling' content_handling: Specifies how to handle request payload content type conversions.
:param Any response_parameters: The response parameters from the backend response that API Gateway sends to the method response.
:param Any response_templates: The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.
:param str selection_pattern: A regular expression that specifies which error strings or status codes from the backend map to the integration response.
"""
pulumi.set(__self__, "status_code", status_code)
if content_handling is not None:
pulumi.set(__self__, "content_handling", content_handling)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
if response_templates is not None:
pulumi.set(__self__, "response_templates", response_templates)
if selection_pattern is not None:
pulumi.set(__self__, "selection_pattern", selection_pattern)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
"""
The status code that API Gateway uses to map the integration response to a MethodResponse status code.
"""
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="contentHandling")
def content_handling(self) -> Optional['MethodIntegrationResponseContentHandling']:
"""
Specifies how to handle request payload content type conversions.
"""
return pulumi.get(self, "content_handling")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
The response parameters from the backend response that API Gateway sends to the method response.
"""
return pulumi.get(self, "response_parameters")
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> Optional[Any]:
"""
The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.
"""
return pulumi.get(self, "response_templates")
@property
@pulumi.getter(name="selectionPattern")
def selection_pattern(self) -> Optional[str]:
"""
A regular expression that specifies which error strings or status codes from the backend map to the integration response.
"""
return pulumi.get(self, "selection_pattern")
@pulumi.output_type
class MethodResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "responseModels":
suggest = "response_models"
elif key == "responseParameters":
suggest = "response_parameters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MethodResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MethodResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MethodResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
response_models: Optional[Any] = None,
response_parameters: Optional[Any] = None):
"""
:param str status_code: The method response's status code, which you map to an IntegrationResponse.
:param Any response_models: The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.
:param Any response_parameters: Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.
"""
pulumi.set(__self__, "status_code", status_code)
if response_models is not None:
pulumi.set(__self__, "response_models", response_models)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
"""
The method response's status code, which you map to an IntegrationResponse.
"""
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="responseModels")
def response_models(self) -> Optional[Any]:
"""
The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.
"""
return pulumi.get(self, "response_models")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.
"""
return pulumi.get(self, "response_parameters")
@pulumi.output_type
class RestApiEndpointConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "vpcEndpointIds":
suggest = "vpc_endpoint_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RestApiEndpointConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RestApiEndpointConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RestApiEndpointConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
types: Optional[Sequence[str]] = None,
vpc_endpoint_ids: Optional[Sequence[str]] = None):
if types is not None:
pulumi.set(__self__, "types", types)
if vpc_endpoint_ids is not None:
pulumi.set(__self__, "vpc_endpoint_ids", vpc_endpoint_ids)
@property
@pulumi.getter
def types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "types")
@property
@pulumi.getter(name="vpcEndpointIds")
def vpc_endpoint_ids(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "vpc_endpoint_ids")
@pulumi.output_type
class RestApiS3Location(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eTag":
suggest = "e_tag"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RestApiS3Location. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RestApiS3Location.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RestApiS3Location.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket: Optional[str] = None,
e_tag: Optional[str] = None,
key: Optional[str] = None,
version: Optional[str] = None):
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if key is not None:
pulumi.set(__self__, "key", key)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> Optional[str]:
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def version(self) -> Optional[str]:
return pulumi.get(self, "version")
@pulumi.output_type
class RestApiTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class StageAccessLogSetting(dict):
"""
Specifies settings for logging access in this stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "destinationArn":
suggest = "destination_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageAccessLogSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageAccessLogSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageAccessLogSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
destination_arn: Optional[str] = None,
format: Optional[str] = None):
"""
Specifies settings for logging access in this stage.
:param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.
:param str format: A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.
"""
if destination_arn is not None:
pulumi.set(__self__, "destination_arn", destination_arn)
if format is not None:
pulumi.set(__self__, "format", format)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging.
"""
return pulumi.get(self, "destination_arn")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.
"""
return pulumi.get(self, "format")
@pulumi.output_type
class StageCanarySetting(dict):
"""
Specifies settings for the canary deployment in this stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "deploymentId":
suggest = "deployment_id"
elif key == "percentTraffic":
suggest = "percent_traffic"
elif key == "stageVariableOverrides":
suggest = "stage_variable_overrides"
elif key == "useStageCache":
suggest = "use_stage_cache"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageCanarySetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageCanarySetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageCanarySetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
deployment_id: Optional[str] = None,
percent_traffic: Optional[float] = None,
stage_variable_overrides: Optional[Any] = None,
use_stage_cache: Optional[bool] = None):
"""
Specifies settings for the canary deployment in this stage.
:param str deployment_id: The identifier of the deployment that the stage points to.
:param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment.
:param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
:param bool use_stage_cache: Whether the canary deployment uses the stage cache or not.
"""
if deployment_id is not None:
pulumi.set(__self__, "deployment_id", deployment_id)
if percent_traffic is not None:
pulumi.set(__self__, "percent_traffic", percent_traffic)
if stage_variable_overrides is not None:
pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides)
if use_stage_cache is not None:
pulumi.set(__self__, "use_stage_cache", use_stage_cache)
@property
@pulumi.getter(name="deploymentId")
def deployment_id(self) -> Optional[str]:
"""
The identifier of the deployment that the stage points to.
"""
return pulumi.get(self, "deployment_id")
@property
@pulumi.getter(name="percentTraffic")
def percent_traffic(self) -> Optional[float]:
"""
The percentage (0-100) of traffic diverted to a canary deployment.
"""
return pulumi.get(self, "percent_traffic")
@property
@pulumi.getter(name="stageVariableOverrides")
def stage_variable_overrides(self) -> Optional[Any]:
"""
Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.
"""
return pulumi.get(self, "stage_variable_overrides")
@property
@pulumi.getter(name="useStageCache")
def use_stage_cache(self) -> Optional[bool]:
"""
Whether the canary deployment uses the stage cache or not.
"""
return pulumi.get(self, "use_stage_cache")
@pulumi.output_type
class StageMethodSetting(dict):
"""
Configures settings for all methods in a stage.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cacheDataEncrypted":
suggest = "cache_data_encrypted"
elif key == "cacheTtlInSeconds":
suggest = "cache_ttl_in_seconds"
elif key == "cachingEnabled":
suggest = "caching_enabled"
elif key == "dataTraceEnabled":
suggest = "data_trace_enabled"
elif key == "httpMethod":
suggest = "http_method"
elif key == "loggingLevel":
suggest = "logging_level"
elif key == "metricsEnabled":
suggest = "metrics_enabled"
elif key == "resourcePath":
suggest = "resource_path"
elif key == "throttlingBurstLimit":
suggest = "throttling_burst_limit"
elif key == "throttlingRateLimit":
suggest = "throttling_rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StageMethodSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StageMethodSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StageMethodSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cache_data_encrypted: Optional[bool] = None,
cache_ttl_in_seconds: Optional[int] = None,
caching_enabled: Optional[bool] = None,
data_trace_enabled: Optional[bool] = None,
http_method: Optional[str] = None,
logging_level: Optional[str] = None,
metrics_enabled: Optional[bool] = None,
resource_path: Optional[str] = None,
throttling_burst_limit: Optional[int] = None,
throttling_rate_limit: Optional[float] = None):
"""
Configures settings for all methods in a stage.
:param bool cache_data_encrypted: Indicates whether the cached responses are encrypted.
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
:param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
:param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
:param str http_method: The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
:param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.
:param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
:param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
:param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
if cache_data_encrypted is not None:
pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted)
if cache_ttl_in_seconds is not None:
pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds)
if caching_enabled is not None:
pulumi.set(__self__, "caching_enabled", caching_enabled)
if data_trace_enabled is not None:
pulumi.set(__self__, "data_trace_enabled", data_trace_enabled)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if logging_level is not None:
pulumi.set(__self__, "logging_level", logging_level)
if metrics_enabled is not None:
pulumi.set(__self__, "metrics_enabled", metrics_enabled)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if throttling_burst_limit is not None:
pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit)
if throttling_rate_limit is not None:
pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit)
@property
@pulumi.getter(name="cacheDataEncrypted")
def cache_data_encrypted(self) -> Optional[bool]:
"""
Indicates whether the cached responses are encrypted.
"""
return pulumi.get(self, "cache_data_encrypted")
@property
@pulumi.getter(name="cacheTtlInSeconds")
def cache_ttl_in_seconds(self) -> Optional[int]:
"""
The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.
"""
return pulumi.get(self, "cache_ttl_in_seconds")
@property
@pulumi.getter(name="cachingEnabled")
def caching_enabled(self) -> Optional[bool]:
"""
Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.
"""
return pulumi.get(self, "caching_enabled")
@property
@pulumi.getter(name="dataTraceEnabled")
def data_trace_enabled(self) -> Optional[bool]:
"""
Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "data_trace_enabled")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="loggingLevel")
def logging_level(self) -> Optional[str]:
"""
The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.
"""
return pulumi.get(self, "logging_level")
@property
@pulumi.getter(name="metricsEnabled")
def metrics_enabled(self) -> Optional[bool]:
"""
Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.
"""
return pulumi.get(self, "metrics_enabled")
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[str]:
"""
The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.
"""
return pulumi.get(self, "resource_path")
@property
@pulumi.getter(name="throttlingBurstLimit")
def throttling_burst_limit(self) -> Optional[int]:
"""
The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_burst_limit")
@property
@pulumi.getter(name="throttlingRateLimit")
def throttling_rate_limit(self) -> Optional[float]:
"""
The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.
"""
return pulumi.get(self, "throttling_rate_limit")
@pulumi.output_type
class StageTag(dict):
"""
Identify and categorize resources.
"""
def __init__(__self__, *,
key: str,
value: str):
"""
Identify and categorize resources.
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UsagePlanApiStage(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "apiId":
suggest = "api_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UsagePlanApiStage. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UsagePlanApiStage.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UsagePlanApiStage.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
api_id: Optional[str] = None,
stage: Optional[str] = None,
throttle: Optional[Any] = None):
"""
:param str api_id: The ID of an API that is in the specified Stage property that you want to associate with the usage plan.
:param str stage: The name of the stage to associate with the usage plan.
:param Any throttle: Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.
"""
if api_id is not None:
pulumi.set(__self__, "api_id", api_id)
if stage is not None:
pulumi.set(__self__, "stage", stage)
if throttle is not None:
pulumi.set(__self__, "throttle", throttle)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> Optional[str]:
"""
The ID of an API that is in the specified Stage property that you want to associate with the usage plan.
"""
return pulumi.get(self, "api_id")
@property
@pulumi.getter
def stage(self) -> Optional[str]:
"""
The name of the stage to associate with the usage plan.
"""
return pulumi.get(self, "stage")
@property
@pulumi.getter
def throttle(self) -> Optional[Any]:
"""
Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed.
"""
return pulumi.get(self, "throttle")
@pulumi.output_type
class UsagePlanQuotaSettings(dict):
def __init__(__self__, *,
limit: Optional[int] = None,
offset: Optional[int] = None,
period: Optional[str] = None):
"""
:param int limit: The maximum number of requests that users can make within the specified time period.
:param int offset: For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period.
:param str period: The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference.
"""
if limit is not None:
pulumi.set(__self__, "limit", limit)
if offset is not None:
pulumi.set(__self__, "offset", offset)
if period is not None:
pulumi.set(__self__, "period", period)
@property
@pulumi.getter
def limit(self) -> Optional[int]:
"""
The maximum number of requests that users can make within the specified time period.
"""
return pulumi.get(self, "limit")
@property
@pulumi.getter
def offset(self) -> Optional[int]:
"""
For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period.
"""
return pulumi.get(self, "offset")
@property
@pulumi.getter
def period(self) -> Optional[str]:
"""
The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference.
"""
return pulumi.get(self, "period")
@pulumi.output_type
class UsagePlanTag(dict):
def __init__(__self__, *,
key: str,
value: str):
"""
:param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class UsagePlanThrottleSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "burstLimit":
suggest = "burst_limit"
elif key == "rateLimit":
suggest = "rate_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UsagePlanThrottleSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UsagePlanThrottleSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UsagePlanThrottleSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
burst_limit: Optional[int] = None,
rate_limit: Optional[float] = None):
"""
:param int burst_limit: The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity.
:param float rate_limit: The API request steady-state rate limit (average requests per second over an extended period of time).
"""
if burst_limit is not None:
pulumi.set(__self__, "burst_limit", burst_limit)
if rate_limit is not None:
pulumi.set(__self__, "rate_limit", rate_limit)
@property
@pulumi.getter(name="burstLimit")
def burst_limit(self) -> Optional[int]:
"""
The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity.
"""
return pulumi.get(self, "burst_limit")
@property
@pulumi.getter(name="rateLimit")
def rate_limit(self) -> Optional[float]:
"""
The API request steady-state rate limit (average requests per second over an extended period of time).
"""
return pulumi.get(self, "rate_limit")
@pulumi.output_type
class VpcLinkTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
| pt | 0.178621 | 1.541016 | 2 |
bin/training_data/redmagic_ds_training_data.py | mclaughlin6464/pearce | 0 | 14982 | <filename>bin/training_data/redmagic_ds_training_data.py
#!/.conda/envs/hodemulator/bin/python
from pearce.emulator import make_training_data
from pearce.emulator import DEFAULT_PARAMS as ordered_params
ordered_params['f_c'] = (0.05, .5)
ordered_params['logMmin'] = (11.5, 13.0)#(13.0, 14.5)
ordered_params['sigma_logM'] = (0.05, 1.0)
ordered_params['logM1'] = (12.0, 15.0)
ordered_params['alpha'] = (0.8, 1.5)
ordered_params.update({'mean_occupation_centrals_assembias_param1':( -1.0, 1.0),
'mean_occupation_satellites_assembias_param1':( -1.0, 1.0)})
make_training_data('/u/ki/swmclau2/Git/pearce/bin/training_data/ds_redmagic.cfg',ordered_params)
| <filename>bin/training_data/redmagic_ds_training_data.py
#!/.conda/envs/hodemulator/bin/python
from pearce.emulator import make_training_data
from pearce.emulator import DEFAULT_PARAMS as ordered_params
ordered_params['f_c'] = (0.05, .5)
ordered_params['logMmin'] = (11.5, 13.0)#(13.0, 14.5)
ordered_params['sigma_logM'] = (0.05, 1.0)
ordered_params['logM1'] = (12.0, 15.0)
ordered_params['alpha'] = (0.8, 1.5)
ordered_params.update({'mean_occupation_centrals_assembias_param1':( -1.0, 1.0),
'mean_occupation_satellites_assembias_param1':( -1.0, 1.0)})
make_training_data('/u/ki/swmclau2/Git/pearce/bin/training_data/ds_redmagic.cfg',ordered_params)
| de | 0.091422 | 1.840184 | 2 |
src/legohdl/workspace.py | c-rus/legoHDL | 6 | 14983 | # ------------------------------------------------------------------------------
# Project: legohdl
# Script: workspace.py
# Author: <NAME>
# Description:
# The Workspace class. A Workspace object has a path and a list of available
# vendors. This is what the user keeps their work's scope within for a given
# "organization".
# ------------------------------------------------------------------------------
import os, shutil, glob
import logging as log
from datetime import datetime
from .vendor import Vendor
from .apparatus import Apparatus as apt
from .cfg import Cfg, Section, Key
from .map import Map
from .git import Git
from .block import Block
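# Illustrative usage sketch (the workspace name, path, and vendor below are
# hypothetical; they are not defined anywhere in legohdl itself):
#
#   ws = Workspace("lab", "~/develop/hdl/lab/", vendors=["open-ip"], ask=False)
#   ws.setPath("~/develop/hdl/lab2/")   # returns True if the new path is accepted
#   ws.setName("lab2")                  # returns True if the name is not taken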
class Workspace:
#store all workspaces in dictionary
Jar = Map()
#active-workspace is a workspace object
_ActiveWorkspace = None
DIR = apt.fs(apt.HIDDEN+"workspaces/")
LOG_FILE = "refresh.log"
MIN_RATE = -1
MAX_RATE = 1440
def __init__(self, name, path, vendors=[], ask=True):
'''
Create a workspace instance.
Parameters:
name (str): the identity for the workspace
path (str): the local path where blocks will be looked for
vendors ([str]): the list of vendors that are tied to this workspace
ask (bool): will ask user if wishing to enter workspace path
Returns:
None
'''
self._name = name
#do not create workspace if the name is already taken
if(self.getName().lower() in self.Jar.keys()):
log.error("Skipping workspace "+self.getName()+" due to duplicate naming conflict.")
return
#set the path
self._path = ''
self.setPath(path)
#do not create workspace if the path is empty
if(self.getPath() == ''):
if(ask == False):
log.error("Skipping workspace "+self.getName()+" due to empty local path.")
return
else:
#keep asking to set path until one is decided/input
try:
path = input("Enter path for workspace "+self.getName()+": ")
except KeyboardInterrupt:
apt.CFG.remove('workspace.'+self.getName())
Workspace.save(inc_active=False)
print()
exit(log.info("Workspace not created."))
while(self.setPath(path) == False):
try:
path = input("Enter path for workspace "+self.getName()+": ")
except KeyboardInterrupt:
apt.CFG.remove('workspace.'+self.getName())
Workspace.save(inc_active=False)
print()
exit(log.info("Workspace not created."))
self._ws_dir = apt.fs(self.DIR+self.getName()+"/")
#ensure all workspace hidden directories exist
if(os.path.isdir(self.getDir()) == False):
log.info("Setting up workspace "+self.getName()+"...")
os.makedirs(self.getDir(), exist_ok=True)
#create workspace's cache where installed blocks will be stored
os.makedirs(self.getDir()+"cache", exist_ok=True)
#create the refresh log if DNE
if(os.path.isfile(self.getDir()+self.LOG_FILE) == False):
open(self.getDir()+self.LOG_FILE, 'w').close()
self._vendors = []
#find all vendor objects by name and store in list
for vndr in vendors:
if(vndr.lower() in Vendor.Jar.keys()):
self._vendors += [Vendor.Jar[vndr]]
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
pass
#add to class Jar
self.Jar[self.getName()] = self
pass
def setPath(self, p):
'''
Set the workspace's local path to a new value. Will ask user if okay
to create the path if DNE.
Parameters:
p (str): the path string
Returns:
(bool): true if successfully changed the path attribute
'''
#cannot set an empty path
if(p == '' or p == None):
log.info("Local path for workspace "+self.getName()+" cannot be empty.")
return False
p = apt.fs(p)
#create the workspace's local path if it does not exist
if(os.path.exists(p) == False):
#prompt user
carry_on = apt.confirmation("Workspace "+self.getName()+"'s local path does not exist. Create "+p+"?")
if(carry_on):
os.makedirs(p, exist_ok=True)
self._path = p
return True
else:
log.info("Did not set "+p+" as local path.")
return False
else:
self._path = p
return True
def setName(self, n):
'''
Change the workspace's name if the name is not already taken.
Parameters:
n (str): new name for workspace
Returns:
(bool): true if name successfully altered and updated in Jar
'''
if(n == '' or n == None):
log.error("Workspace name cannot be empty.")
return False
if(n.lower() in self.Jar.keys()):
log.error("Cannot rename workspace to "+n+" due to name conflict.")
return False
else:
#remove old name from Jar
if(self.getName().lower() in self.Jar.keys()):
del self.Jar[self.getName()]
#rename hidden directory if exists
new_dir = apt.fs(self.DIR+n+"/")
if(hasattr(self, "_ws_dir")):
os.rename(self.getDir(), new_dir)
#set the hidden workspace directory
self._ws_dir = new_dir
#change to new name
self._name = n
#update the Jar
self.Jar[self.getName()] = self
return True
def remove(self):
'''
Removes the workspace object from the Jar and its hidden directory.
Parameters:
None
Returns:
None
'''
log.info("Removing workspace "+self.getName()+"...")
#delete the hidden workspace directory
shutil.rmtree(self.getDir(), onerror=apt.rmReadOnly)
#remove from class Jar
del self.Jar[self.getName()]
#remove from cfg file
apt.CFG.remove('workspace.'+self.getName())
apt.CFG.write()
pass
def linkVendor(self, vndr):
'''
Attempts to add a vendor to the workspace's vendor list.
Parameters:
vndr (str): name of the vendor to add
Returns:
(bool): true if the vendor list was modified (successful add)
'''
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
if(vndr_obj in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
return False
else:
log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
self._vendors += [vndr_obj]
return True
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
return False
def setVendors(self, vndrs):
'''
Overrides entire _vendors attr by setting it equal to 'vndrs'.
Parameters:
vndrs ([str]): list of vendors
Returns:
(bool): success if all vendors listed were added
'''
#reset vendors list
self._vendors = []
success = True
#iterate through every given vendor
for vndr in vndrs:
#verify the vendor exists
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
#check if the vendor has already been linked
if(vndr_obj in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
#link the vendor to this workspace
else:
log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
self._vendors += [vndr_obj]
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
success = False
return success
def unlinkVendor(self, vndr):
'''
Attempts to remove a vendor from the workspace's vendor list.
Parameters:
vndr (str): name of the vendor to remove
Returns:
(bool): true if the vendor list was modified (successful remove)
'''
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
if(vndr_obj not in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already unlinked from the workspace.")
return False
else:
log.info("Unlinking vendor "+vndr_obj.getName()+" from the workspace...")
self._vendors.remove(vndr_obj)
return True
else:
log.warning("Could not unlink unknown vendor "+vndr+" from "+self.getName()+".")
return False
def loadBlocks(self, id_dsgns=False):
'''
Loads all blocks found at all levels: dnld (workspace path), instl (workspace
cache), avail (workspace vendors).
When id_dsgns is True, this method uses the 'multi-develop' setting to
determine which level has precedence in loadHDL().
'multi-develop' set to False will only loadHDL() from cache. 'multi-develop'
set to True will first try to loadHDL() from dnld, and if DNE, then try
to loadHDL() from block's cache.
Either way, if inside a current block, that block's HDL will be loaded over
its cache.
Dynamically creates _visible_blocks ([Block]) attribute to be reused.
Parameters:
id_dsgns (bool): identify design units (loadHDL) from blocks
Returns:
_visible_blocks ([Block]): list of all block objects in cache or path
'''
if(hasattr(self, "_visible_blocks")):
return self._visible_blocks
self._visible_blocks = []
#read the setting for multi-develop
mult_dev = apt.getMultiDevelop()
#1. Search for downloaded blocks
#glob on the local workspace path
#print("Local Blocks on:",self.getPath())
marker_files = glob.glob(self.getPath()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found downloads
for mf in marker_files:
b = Block(mf, self, Block.Level.DNLD)
#if the user is within a current block, load the HDL from its DNLD level (not INSTL)
if(mult_dev == True or Block.getCurrent(bypass=True) == b):
self._visible_blocks += [b]
if(id_dsgns):
b.loadHDL()
pass
#2. Search for installed blocks
#glob on the workspace cache path
#print("Cache Blocks on:",self.getCachePath())
marker_files = glob.glob(self.getCachePath()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found installations
for mf in marker_files:
#the block must also have a valid git repository at its root
root,_ = os.path.split(mf)
#note: only the head installation has the git repository
if(Git.isValidRepo(root, remote=False)):
b = Block(mf, self, Block.Level.INSTL)
#get the spot for this block's download
dnld_b = Block.Inventory[b.M()][b.L()][b.N()][Block.Level.DNLD.value]
#add this block if a download DNE or the dnld does not match current when
#not in multi-develop mode
if(dnld_b == None or (mult_dev == False and Block.getCurrent(bypass=True) != dnld_b)):
self._visible_blocks += [b]
if(id_dsgns):
b.loadHDL()
pass
#3. Search for available blocks
#glob on each vendor path
marker_files = []
#find all marker files in each of the workspace's vendors
for vndr in self.getVendors():
marker_files += glob.glob(vndr.getVendorDir()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found availables
for mf in marker_files:
b = Block(mf, self, Block.Level.AVAIL)
#do not add this block to list of visible blocks because it has no
#units associated with it, only metadata
pass
#4. ID all specific version blocks if identifying designs (except current block)
spec_vers_blocks = []
for vis_block in self._visible_blocks:
if(vis_block == Block.getCurrent(bypass=True)):
continue
for spec_block in vis_block.getInstalls().values():
spec_vers_blocks += [spec_block]
if(id_dsgns):
spec_block.loadHDL()
pass
pass
self._visible_blocks += spec_vers_blocks
return self._visible_blocks
def shortcut(self, title, req_entity=False, visibility=True, ref_current=True):
'''
Returns the Block from a shortened title. If title is empty and
'ref_current' is set, then tries to refer to the current block.
Sometimes an entity is required for certain commands, so if only one
identifier is given it can be assumed to be an entity (instead of a block name).
Parameters:
title (str): partial or full M.L.N with optional E attached
req_entity (bool): determine if a lone identifier should be treated as an entity
visibility (bool): determine if to only look for visible blocks
ref_current (bool): determine if to try to assign empty title to current block
Returns:
(Block): the identified block from the shortened title
'''
if(title == None):
title = ''
#split into pieces
pieces = title.split('.')
sects = ['']*3
diff = 3 - len(pieces)
for i in range(len(pieces)-1, -1, -1):
sects[diff+i] = pieces[i]
#check final piece if it has an entity attached
entity = ''
if(sects[2].count(apt.ENTITY_DELIM)):
i = sects[2].find(apt.ENTITY_DELIM)
entity = sects[2][i+1:]
sects[2] = sects[2][:i]
#assume only name given is actually the entity
elif(req_entity):
entity = sects[2]
sects[2] = ''
# [!] load all necessary blocks before searching
blocks = self.loadBlocks()
#use all blocks when visibility is off :todo: is this design intent?
if(visibility == False):
blocks = Block.getAllBlocks()
#track list of possible blocks as moving up the chain
possible_blocks = []
#search for an entity
if(len(entity)):
#collect list of all entities
reg = Map()
reg[entity] = []
#iterate through every block and create a mapping for their entity names
for bk in blocks:
#get the entity names from this block
es = bk.loadHDL(returnnames=True)
#print(es)
#create mappings of entity names to their block owners
for e in es:
if(e.lower() not in reg.keys()):
reg[e] = []
reg[e] += [bk]
#see how many blocks were fit to entity name's mapping
num_blocks = len(reg[entity])
#algorithm only detected one possible solution
if(num_blocks == 1):
#make sure rest of sections are correct before returning result
potential = reg[entity][0]
title = potential.getTitle(index=2, dist=2)
#verify each part of block identifier matches what was requested
for i in range(len(sects)):
#print(sects[i])
if(len(sects[i]) and sects[i].lower() != title[i].lower()):
return None
pass
return potential
#algorithm detected multiple possible solutions (cannot infer)
elif(num_blocks > 1):
possible_blocks = reg[entity]
#only was given an entity name, algorithm cannot solve requested entity
if(len(sects[2]) == 0):
log.info("Ambiguous unit; conflicts with")
#display the units/titles that conflict with input
for bk in reg[entity]:
print('\t '+bk.getFull()+":"+entity)
print()
exit()
#no blocks matched the entity name being passed
else:
return None
pass
#search through all block names
for start in range(len(sects)-1, -1, -1):
term = sects[start]
#exit loop if next term is empty
if(len(term) == 0):
break
reg = Map()
reg[term] = []
for bk in blocks:
t = bk.getTitle(index=start, dist=0)[0]
#store the block under the given section name
if(t.lower() not in reg.keys()):
reg[t] = []
reg[t] += [bk]
#count how many blocks occupy this same name
num_blocks = len(reg[term])
#algorithm only detected one possible solution
if(num_blocks == 1):
#make sure rest of sections are correct before returning result
potential = reg[term][0]
title = potential.getTitle(index=2, dist=2)
#verify each part of block identifier matches what was requested
for i in range(len(sects)):
#print(sects[i])
if(len(sects[i]) and sects[i].lower() != title[i].lower()):
return None
pass
return potential
#algorithm detected multiple solutions (cannot infer on this step)
elif(num_blocks > 1):
#compare with blocks for a match and dwindle down choices
next_blocks = []
for bk in reg[term]:
if(bk in possible_blocks or (start == len(sects)-1 and entity == '')):
next_blocks += [bk]
#dwindled down to a single block
if(len(next_blocks) == 1):
#print("FOUND:",next_blocks[0].getTitle(index=2, dist=2))
return next_blocks[0]
#carry on to using next title section
if(len(sects[start-1])):
#continue to using next term
possible_blocks = next_blocks
continue
else:
#ran out of guesses...report the conflicting titles/units
if(req_entity):
log.info("Ambiguous unit; conflicts with")
else:
log.info("Ambiguous title; conflicts with")
for bk in reg[term]:
if(req_entity):
print('\t '+bk.getFull()+":"+entity)
else:
print('\t '+bk.getFull())
exit(print())
pass
#using the current block if title is empty string
if(ref_current and (title == '' or title == None)):
return Block.getCurrent()
#return None if all attempts have failed and not returned anything yet
return None
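#illustrative example (block names assumed): given visible blocks 'vndr.util.gates'
#and 'vndr.dsp.gates', shortcut("util.gates") resolves uniquely on the library
#section, while shortcut("gates") matches both and reports an ambiguous title
#before exiting.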
def decodeUnits(self):
'''
Decodes every available unit to get the complete graphing data structure.
Parameters:
None
Returns:
None
'''
blocks = self.loadBlocks()
#print(blocks)
log.info("Collecting all unit data...")
for b in blocks:
us = b.loadHDL()
for u in us.values():
u.getLanguageFile().decode(u, recursive=False)
log.info("done.")
pass
def listBlocks(self, title, alpha=False, instl=False, dnld=False, avail=False):
'''
Print a formatted table of the available blocks.
Parameters:
title (str): block title to be broken into parts for searching
alpha (bool): determine if to alphabetize the block list order (L.N.V)
instl (bool): determine if to capture only blocks that are installed
dnld (bool): determine if to capture only blocks that are downloaded
avail (bool): determine if to capture blocks available from vendor
Returns:
None
'''
#[!] load the necessary blocks
self.loadBlocks()
#collect if multi-develop is on
mult_dev = apt.getMultiDevelop()
#split the title into parts
M,L,N,_ = Block.snapTitle(title, inc_ent=False)
#get all blocks from the catalog
#store each block's text line in a map to sort keys for alpha flag
catalog = Map()
#iterate through every vendor
for vndr_k,vndrs in Block.Inventory.items():
if(vndr_k.startswith(M.lower()) == False):
continue
#iterate through every library
for lib_k,libs in vndrs.items():
if(lib_k.startswith(L.lower()) == False):
continue
#iterate through every block
for blk_k,lvls in libs.items():
if(blk_k.startswith(N.lower()) == False):
continue
downloaded = installed = available = ' '
disp_d = disp_i = disp_a = False
#if none were set on command-line default to display everything
if((dnld or instl or avail) == False):
dnld = instl = avail = True
#with each lower level, overwrite the block object to print
if(lvls[Block.Level.AVAIL.value] != None):
bk = lvls[Block.Level.AVAIL.value]
available = 'A'
disp_a = True
if(lvls[Block.Level.INSTL.value] != None):
bk = lvls[Block.Level.INSTL.value]
installed = 'I'
disp_i = True
if(lvls[Block.Level.DNLD.value] != None):
if(dnld):
bk = lvls[Block.Level.DNLD.value]
downloaded = 'D'
# if(mult_dev):
# downloaded = 'D'
# installed = installed.lower()
disp_d = True
#one condition pair must be true to display the block
if((disp_a and avail) or (disp_i and instl) or (disp_d and dnld)):
pass
else:
continue
#character to separate different status bits
spacer = ' '
#format the status column's data
sts = downloaded + spacer + installed + spacer + available
#leave version empty if it is unreleased
v = '' if(bk.getVersion() == '0.0.0') else bk.getVersion()
#check if can be updated
#prioritize installation level for checking updates
instllr = bk.getLvlBlock(Block.Level.INSTL)
cmp_v = instllr.getVersion() if(instllr != None and mult_dev == False) else bk.getVersion()
#a '^' is an update symbol indicating the latest referenced version (dnld or instl) is not actually the latest version found
if(Block.cmpVer(bk.getHighestAvailVersion(), cmp_v) != cmp_v):
sts = sts+' ^'
v = cmp_v
#format the data to print to the console and store in catalog (L.N.V str format)
catalog[bk.L()+'.'+bk.N()+'.'+bk.M()] = '{:<16}'.format(bk.L())+' '+'{:<20}'.format(bk.N())+' '+'{:<8}'.format(sts)+' '+'{:<10}'.format(v)+' '+'{:<16}'.format(bk.M())
pass
pass
keys = list(catalog.keys())
#check if to sort by alphabet
if(alpha):
keys.sort()
#print(keys)
print('{:<16}'.format("Library"),'{:<20}'.format("Block"),'{:<8}'.format("Status"+("*"*int(mult_dev))),'{:<10}'.format("Version"),'{:<16}'.format("Vendor"))
print("-"*16+" "+"-"*20+" "+"-"*8+" "+"-"*10+" "+"-"*16)
#iterate through catalog and print each textline
for k in keys:
print(catalog[k])
pass
def listUnits(self, title, alpha=False, usable=False, ignore_tb=False):
'''
Print a formatted table of all the design units.
Parameters:
title (str): block title to be broken into parts for searching
alpha (bool): determine if to alphabetize the block list order (E.V.L.N)
usable (bool): determine if to display units that can be used
ignore_tb (bool): determine if to ignore testbench files
Returns:
None
'''
#[!] load blocks into inventory
visible = self.loadBlocks()
#:todo: add flag to print 'variations' of an entity/unit (what specific version names exist)
#todo: print status of the unit and which status is usable (D or I)
M,L,N,V,E = Block.snapTitle(title, inc_ent=True)
#print(M,L,N,V,E)
#store each entity's print line in map (key = <unit>:<block-id>) to ensure uniqueness
catalog = Map()
for bk in Block.getAllBlocks():
#for lvl in Block.Inventory[bk.M()][bk.L()][bk.N()]:
block_title = bk.getFull(inc_ver=False)
if(bk.M().lower().startswith(M.lower()) == False):
continue
if(bk.L().lower().startswith(L.lower()) == False):
continue
if(bk.N().lower().startswith(N.lower()) == False):
continue
#collect all units
if(apt.getMultiDevelop() == False):
if(bk.getLvlBlock(Block.Level.INSTL) != None):
bk = bk.getLvlBlock(Block.Level.INSTL)
#skip this block if only displaying usable units and multi-develop off
elif(usable):
continue
units = bk.loadHDL(returnnames=False).values()
for u in units:
if(len(E) and u.E().lower().startswith(E.lower()) == False):
continue
if(ignore_tb and u.isTb()):
continue
#format if unit is visible/usable
vis = '-'
if(bk in visible):
vis = 'yes'
#format design unit name according to its natural language
dsgn = u.getDesign().name.lower()
if(u.getLang() == u.Language.VERILOG and dsgn == 'entity'):
dsgn = 'module'
catalog[u.E()+':'+block_title] = '{:<22}'.format(u.E())+' '+'{:<7}'.format(vis)+' '+'{:<10}'.format(dsgn)+' '+'{:<38}'.format(block_title)
pass
pass
keys = list(catalog.keys())
#check if to sort by alphabet
if(alpha):
keys.sort()
#print to console
print('{:<22}'.format("Unit"),'{:<7}'.format("Usable"),'{:<10}'.format("Type"),'{:<38}'.format("Block"))
print("-"*22+" "+"-"*7+" "+"-"*10+" "+"-"*38)
for k in keys:
print(catalog[k])
pass
pass
@classmethod
def tidy(cls):
'''
Removes any stale hidden workspace directories that aren't mapped to a
workspace found in the class Jar container.
Parameters:
None
Returns:
None
'''
#list all hidden workspace directories
hidden_dirs = os.listdir(cls.DIR)
for hd in hidden_dirs:
if(hd.lower() not in cls.Jar.keys()):
log.info("Removing stale workspace data for "+hd+"...")
if(os.path.isdir(cls.DIR+hd)):
shutil.rmtree(cls.DIR+hd, onerror=apt.rmReadOnly)
#remove all files from workspace directory
else:
os.remove(cls.DIR+hd)
pass
def autoRefresh(self, rate):
'''
Automatically refreshes all vendors for the given workspace. Reads its
log file to determine if past next interval for refresh.
Parameters:
rate (int): how often to ask a refresh within a 24-hour period
Returns:
None
'''
def timeToFloat(prt):
'''
Converts a time object into a float type.
Parameters:
prt (datetime): iso format of current time
Returns:
(float): 0.00 (inclusive) - 24.00 (exclusive)
'''
time_stamp = str(prt).split(' ')[1]
time_sects = time_stamp.split(':')
hrs = int(time_sects[0])
#convert to 'hours'.'minutes'
time_fmt = (float(hrs)+(float(float(time_sects[1])/60)))
return time_fmt
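#example: a punch time of 13:30 converts to 13.5, which is then compared
#against the evenly spaced checkpoints computed below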
refresh = False
last_punch = None
stage = 1
cur_time = datetime.now()
#do not perform refresh if the rate is 0
if(rate == 0):
return
#always refresh if the rate is set below 0 (-1)
elif(rate <= self.MIN_RATE):
refresh = True
#divide the 24 hour period into even checkpoints
max_hours = float(24)
spacing = float(max_hours / rate)
intervals = []
for i in range(rate):
intervals += [spacing*i]
#ensure log file exists
if(os.path.exists(self.getDir()+self.LOG_FILE) == False):
open(self.getDir()+self.LOG_FILE, 'w').close()
#read log file
#read when the last refresh time occurred
with open(self.getDir()+self.LOG_FILE, 'r') as log_file:
#read the latest date
data = log_file.readlines()
#no refreshes have occurred so automatically need a refresh
if(len(data) == 0):
last_punch = cur_time
refresh = True
else:
last_punch = datetime.fromisoformat(data[0])
#determine if it's time to refresh
#get latest time that was punched
last_time_fmt = timeToFloat(last_punch)
#determine the next checkpoint available for today
next_checkpoint = max_hours
for i in range(len(intervals)):
if(last_time_fmt < intervals[i]):
next_checkpoint = intervals[i]
stage = i + 1
break
#print('next checkpoint',next_checkpoint)
cur_time_fmt = timeToFloat(cur_time)
#check if the time has occurred on a previous day (automatically update because it's a new day)
next_day = cur_time.year > last_punch.year or cur_time.month > last_punch.month or cur_time.day > last_punch.day
#print(next_day)
#print("currently",cur_time_fmt)
#determine if the current time has passed the next checkpoint or if it's a new day
if(next_day or cur_time_fmt >= next_checkpoint):
last_punch = cur_time
refresh = True
log_file.close()
#determine if it's time to refresh
if(refresh):
#display what interval is being refreshed on the day
infoo = "("+str(stage)+"/"+str(rate)+")" if(rate > 0) else ''
log.info("Automatically refreshing workspace "+self.getName()+" vendors... "+infoo)
#refresh all vendors attached to this workspace
for vndr in self.getVendors():
vndr.refresh()
pass
#write updated time value to log file
with open(self.getDir()+self.LOG_FILE, 'w') as lf:
lf.write(str(cur_time))
pass
@classmethod
def load(cls):
'''Load all workspaces from settings.'''
wspcs = apt.CFG.get('workspace', dtype=Section)
for ws in wspcs.keys():
#skip over immediate keys
if(isinstance(wspcs[ws], Section) == False):
continue
path = ''
vendors = '()'
#verify that a path key and vendors key exists under each workspace
apt.CFG.set('workspace.'+ws+'.path', path, override=False)
apt.CFG.set('workspace.'+ws+'.vendors', vendors, override=False)
#retrieve path and vendors keys
if('path' in wspcs[ws].keys()):
path = wspcs[ws]['path']._val
if('vendors' in wspcs[ws].keys()):
vendors = Cfg.castList(wspcs[ws]['vendors']._val)
#create Workspace objects
Workspace(wspcs[ws]._name, path, vendors)
pass
#save if made any changes
if(apt.CFG._modified):
apt.CFG.write()
pass
@classmethod
def save(cls, inc_active=True):
'''
Serializes the Workspace objects and saves them to the settings dictionary.
Parameters:
inc_active (bool): determine if to save the active workspace to settings
Returns:
None
'''
serialized = {}
#serialize the Workspace objects into dictionary format for settings
for ws in cls.Jar.values():
#do not save any workspace that has no path
if(ws.getPath() == ''):
continue
serialized[ws.getName()] = {}
serialized[ws.getName()]['path'] = ws.getPath()
serialized[ws.getName()]['vendors'] = Cfg.castStr(ws.getVendors(returnnames=True, lowercase=False), tab_cnt=2, drop_list=False)
#update settings dictionary
apt.CFG.set('workspace', Section(serialized), override=True)
#update active workspace
if(inc_active):
if(cls.getActive() != None):
apt.CFG.set('general.active-workspace', cls.getActive().getName())
else:
apt.CFG.set('general.active-workspace', '')
apt.save()
pass
@classmethod
def inWorkspace(cls):
'''
Determine if an active workspace is selected.
Parameters:
None
Returns:
(bool): true if ActiveWorkspace is not None
'''
return cls._ActiveWorkspace != None
@classmethod
def setActiveWorkspace(cls, ws):
'''
Set the active workspace after initializing all workspaces into Jar. If
the input name is invalid, it will set the first workspace in the Jar as
active if one is not already assigned.
Parameters:
ws (str): workspace name
Returns:
(bool): true if active-workspace was set
'''
#properly set the active workspace from one found in Jar
if(ws != None and ws.lower() in cls.Jar.keys()):
re_assign = (cls._ActiveWorkspace != None)
#set the active workspace obj from found workspace
cls._ActiveWorkspace = cls.Jar[ws]
#only give prompt if reassigning the active-workspace
if(re_assign):
log.info("Assigning workspace "+cls._ActiveWorkspace.getName()+" as active workspace...")
return True
#fall back to assigning the first workspace in the Jar if none is set yet.
elif(len(cls.Jar.keys()) and cls._ActiveWorkspace == None):
random_ws = list(cls.Jar.keys())[0]
cls._ActiveWorkspace = cls.Jar[random_ws]
msgi = "No active workspace set."
if(ws != ''):
msgi = "Workspace "+ws+" does not exist."
log.info(msgi+" Auto-assigning active workspace to "+cls._ActiveWorkspace.getName()+"...")
return True
#still was not able to set the active workspace with the given argument
elif(cls._ActiveWorkspace != None):
log.info("Workspace "+ws+" does not exist. Keeping "+cls._ActiveWorkspace.getName()+" as active.")
else:
log.error("No workspace set as active.")
return False
def isLinked(self):
'''Returns whether any vendors are tied to this workspace (bool).'''
return len(self.getVendors())
def getPath(self):
'''Returns the local path where downloaded blocks are located (str).'''
return self._path
def getDir(self):
'''Returns the base hidden directory where the workspace data is kept (str).'''
return self._ws_dir
def getCachePath(self):
'''Returns the hidden directory where workspace installations are kept. (str).'''
return self.getDir()+"cache/"
def getName(self):
'''Returns the workspace's identifier (str).'''
return self._name
def isActive(self):
'''Returns whether this workspace is the active workspace (bool).'''
return self == self.getActive()
def getVendors(self, returnnames=False, lowercase=True):
'''
Return the vendor objects associated with the given workspace.
Parameters:
returnnames (bool): true will return vendor names
lowercase (bool): true will return lower-case names if returnnames is enabled
Returns:
([Vendor]) or ([str]): list of available vendors
'''
if(returnnames):
vndr_names = []
for vndr in self._vendors:
name = vndr.getName()
if(lowercase):
name = name.lower()
vndr_names += [name]
return vndr_names
else:
return self._vendors
@classmethod
def printList(cls):
'''
Prints formatted list for workspaces with vendor availability and which is active.
Parameters:
None
Returns:
None
'''
print('{:<16}'.format("Workspace"),'{:<6}'.format("Active"),'{:<40}'.format("Path"),'{:<14}'.format("Vendors"))
print("-"*16+" "+"-"*6+" "+"-"*40+" "+"-"*14+" ")
for ws in cls.Jar.values():
vndrs = apt.listToStr(ws.getVendors(returnnames=True))
act = 'yes' if(ws == cls.getActive()) else '-'
print('{:<16}'.format(ws.getName()),'{:<6}'.format(act),'{:<40}'.format(ws.getPath()),'{:<14}'.format(vndrs))
pass
pass
@classmethod
def printAll(cls):
for key,ws in cls.Jar.items():
print('key:',key)
print(ws)
@classmethod
def getActive(cls):
'''Returns the active workspace and will exit on error (Workspace).'''
if(cls._ActiveWorkspace == None):
exit(log.error("Not in a workspace!"))
return cls._ActiveWorkspace
# uncomment to use for debugging
# def __str__(self):
# return f'''
# ID: {hex(id(self))}
# Name: {self.getName()}
# Path: {self.getPath()}
# Active: {self.isActive()}
# Hidden directory: {self.getDir()}
# Linked to: {self.isLinked()}
# Vendors: {self.getVendors(returnnames=True)}
# '''
pass | # ------------------------------------------------------------------------------
# Project: legohdl
# Script: workspace.py
# Author: <NAME>
# Description:
# The Workspace class. A Workspace object has a path and a list of available
# vendors. This is what the user keeps their work's scope within for a given
# "organization".
# ------------------------------------------------------------------------------
import os, shutil, glob
import logging as log
from datetime import datetime
from .vendor import Vendor
from .apparatus import Apparatus as apt
from .cfg import Cfg, Section, Key
from .map import Map
from .git import Git
from .block import Block
class Workspace:
#store all workspaces in dictionary
Jar = Map()
#active-workspace is a workspace object
_ActiveWorkspace = None
DIR = apt.fs(apt.HIDDEN+"workspaces/")
LOG_FILE = "refresh.log"
MIN_RATE = -1
MAX_RATE = 1440
def __init__(self, name, path, vendors=[], ask=True):
'''
Create a workspace instance.
Parameters:
name (str): the identity for the workspace
path (str): the local path where blocks will be looked for
vendors ([str]): the list of vendors that are tied to this workspace
ask (bool): will ask user if wishing to enter workspace path
Returns:
None
'''
self._name = name
#do not create workspace if the name is already taken
if(self.getName().lower() in self.Jar.keys()):
log.error("Skipping workspace "+self.getName()+" due to duplicate naming conflict.")
return
#set the path
self._path = ''
self.setPath(path)
#do not create workspace if the path is empty
if(self.getPath() == ''):
if(ask == False):
log.error("Skipping workspace "+self.getName()+" due to empty local path.")
return
else:
#keep asking to set path until one is decided/input
try:
path = input("Enter path for workspace "+self.getName()+": ")
except KeyboardInterrupt:
apt.CFG.remove('workspace.'+self.getName())
Workspace.save(inc_active=False)
print()
exit(log.info("Workspace not created."))
while(self.setPath(path) == False):
try:
path = input("Enter path for workspace "+self.getName()+": ")
except KeyboardInterrupt:
apt.CFG.remove('workspace.'+self.getName())
Workspace.save(inc_active=False)
print()
exit(log.info("Workspace not created."))
self._ws_dir = apt.fs(self.DIR+self.getName()+"/")
#ensure all workspace hidden directories exist
if(os.path.isdir(self.getDir()) == False):
log.info("Setting up workspace "+self.getName()+"...")
os.makedirs(self.getDir(), exist_ok=True)
#create workspace's cache where installed blocks will be stored
os.makedirs(self.getDir()+"cache", exist_ok=True)
#create the refresh log if DNE
if(os.path.isfile(self.getDir()+self.LOG_FILE) == False):
open(self.getDir()+self.LOG_FILE, 'w').close()
self._vendors = []
#find all vendor objects by name and store in list
for vndr in vendors:
if(vndr.lower() in Vendor.Jar.keys()):
self._vendors += [Vendor.Jar[vndr]]
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
pass
#add to class Jar
self.Jar[self.getName()] = self
pass
def setPath(self, p):
'''
Set the workspace's local path to a new value. Will ask user if okay
to create the path if DNE.
Parameters:
p (str): the path string
Returns:
(bool): true if successfully changed the path attribute
'''
#cannot set an empty path
if(p == '' or p == None):
log.info("Local path for workspace "+self.getName()+" cannot be empty.")
return False
p = apt.fs(p)
#create the workspace's local path if it does not exist
if(os.path.exists(p) == False):
#prompt user
carry_on = apt.confirmation("Workspace "+self.getName()+"'s local path does not exist. Create "+p+"?")
if(carry_on):
os.makedirs(p, exist_ok=True)
self._path = p
return True
else:
log.info("Did not set "+p+" as local path.")
return False
else:
self._path = p
return True
def setName(self, n):
'''
Change the workspace's name if the name is not already taken.
Parameters:
n (str): new name for workspace
Returns:
(bool): true if name successfully altered and updated in Jar
'''
if(n == '' or n == None):
log.error("Workspace name cannot be empty.")
return False
if(n.lower() in self.Jar.keys()):
log.error("Cannot rename workspace to "+n+" due to name conflict.")
return False
else:
#remove old name from Jar
if(self.getName().lower() in self.Jar.keys()):
del self.Jar[self.getName()]
#rename hidden directory if exists
new_dir = apt.fs(self.DIR+n+"/")
if(hasattr(self, "_ws_dir")):
os.rename(self.getDir(), new_dir)
#set the hidden workspace directory
self._ws_dir = new_dir
#change to new name
self._name = n
#update the Jar
self.Jar[self.getName()] = self
return True
def remove(self):
'''
Removes the workspace object from the Jar and its hidden directory.
Parameters:
None
Returns:
None
'''
log.info("Removing workspace "+self.getName()+"...")
#delete the hidden workspace directory
shutil.rmtree(self.getDir(), onerror=apt.rmReadOnly)
#remove from class Jar
del self.Jar[self.getName()]
#remove from cfg file
apt.CFG.remove('workspace.'+self.getName())
apt.CFG.write()
pass
def linkVendor(self, vndr):
'''
Attempts to add a vendor to the workspace's vendor list.
Parameters:
vndr (str): name of the vendor to add
Returns:
(bool): true if the vendor list was modified (successful add)
'''
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
if(vndr_obj in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
return False
else:
log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
self._vendors += [vndr_obj]
return True
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
return False
def setVendors(self, vndrs):
'''
Overrides entire _vendors attr by setting it equal to 'vndrs'.
Parameters:
vndrs ([str]): list of vendors
Returns:
(bool): success if all vendors listed were added
'''
#reset vendors list
self._vendors = []
success = True
#iterate through every given vendor
for vndr in vndrs:
#verify the vendor exists
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
#check if the vendor has already been linked
if(vndr_obj in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
#link the vendor to this workspace
else:
log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
self._vendors += [vndr_obj]
else:
log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
sucess = False
return success
def unlinkVendor(self, vndr):
'''
Attempts to remove a vendor from the workspace's vendor list.
Parameters:
vndr (str): name of the vendor to remove
Returns:
(bool): true if the vendor list was modified (successful remove)
'''
if(vndr.lower() in Vendor.Jar.keys()):
vndr_obj = Vendor.Jar[vndr]
if(vndr_obj not in self.getVendors()):
log.info("Vendor "+vndr_obj.getName()+" is already unlinked from the workspace.")
return False
else:
log.info("Unlinking vendor "+vndr_obj.getName()+" from the workspace...")
self._vendors.remove(vndr_obj)
return True
else:
log.warning("Could not unlink unknown vendor "+vndr+" from "+self.getName()+".")
return False
def loadBlocks(self, id_dsgns=False):
'''
Loads all blocks found at all levels: dnld (workspace path), instl (workspace
cache), avail (workspace vendors).
When id_dsgns is True, this method uses the 'multi-develop' setting to
determine which level has precedence in loadHDL().
'multi-develop' set to False will only loadHDL() from cache. 'multi-develop'
set to True will first try to loadHDL() from dnld, and if DNE, then try
to loadHDL() from block's cache.
Either way, if inside a current block, that block's HDL will be loaded over
its cache.
Dynamically creates _visible_blocks ([Block]) attribute to be reused.
Parameters:
id_dsgns (bool): identify design units (loadHDL) from blocks
Returns:
_visible_blocks ([Block]): list of all block objects in cache or path
'''
if(hasattr(self, "_visible_blocks")):
return self._visible_blocks
self._visible_blocks = []
#read the setting for multi-develop
mult_dev = apt.getMultiDevelop()
#1. Search for downloaded blocks
#glob on the local workspace path
#print("Local Blocks on:",self.getPath())
marker_files = glob.glob(self.getPath()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found downloads
for mf in marker_files:
b = Block(mf, self, Block.Level.DNLD)
#if the user is within a current block, load the HDL from its DNLD level (not INSTL)
if(mult_dev == True or Block.getCurrent(bypass=True) == b):
self._visible_blocks += [b]
if(id_dsgns):
b.loadHDL()
pass
#2. Search for installed blocks
#glob on the workspace cache path
#print("Cache Blocks on:",self.getCachePath())
marker_files = glob.glob(self.getCachePath()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found installations
for mf in marker_files:
#the block must also have a valid git repository at its root
root,_ = os.path.split(mf)
#note: only the head installation has the git repository
if(Git.isValidRepo(root, remote=False)):
b = Block(mf, self, Block.Level.INSTL)
#get the spot for this block's download
dnld_b = Block.Inventory[b.M()][b.L()][b.N()][Block.Level.DNLD.value]
#add this block if a download DNE or the dnld does not match current when
#not in multi-develop mode
if(dnld_b == None or (mult_dev == False and Block.getCurrent(bypass=True) != dnld_b)):
self._visible_blocks += [b]
if(id_dsgns):
b.loadHDL()
pass
#3. Search for available blocks
#glob on each vendor path
marker_files = []
#find all marker files in each of the workspace's vendors
for vndr in self.getVendors():
marker_files += glob.glob(vndr.getVendorDir()+"**/*/"+apt.MARKER, recursive=True)
#iterate through all found availables
for mf in marker_files:
b = Block(mf, self, Block.Level.AVAIL)
#do not add this block to list of visible blocks because it has no
#units associated with it, only metadata
pass
#4. ID all specific version blocks if identifying designs (except current block)
spec_vers_blocks = []
for vis_block in self._visible_blocks:
if(vis_block == Block.getCurrent(bypass=True)):
continue
for spec_block in vis_block.getInstalls().values():
spec_vers_blocks += [spec_block]
if(id_dsgns):
spec_block.loadHDL()
pass
pass
self._visible_blocks += spec_vers_blocks
return self._visible_blocks
def shortcut(self, title, req_entity=False, visibility=True, ref_current=True):
'''
Returns the Block from a shortened title. If title is empty and
'ref_current' is set, then tries to refer to the current block.
Sometimes an entity is required for certain commands; so it can be
assumed entity (instead of block name) if only thing given.
Parameters:
title (str): partial or full M.L.N with optional E attached
req_entity (bool): determine if only thing given then it is an entity
visibility (bool): determine if to only look for visible blocks
ref_current (bool): determine if to try to assign empty title to current block
Returns:
(Block): the identified block from the shortened title
'''
if(title == None):
title = ''
#split into pieces
pieces = title.split('.')
sects = ['']*3
diff = 3 - len(pieces)
for i in range(len(pieces)-1, -1, -1):
sects[diff+i] = pieces[i]
#check final piece if it has an entity attached
entity = ''
if(sects[2].count(apt.ENTITY_DELIM)):
i = sects[2].find(apt.ENTITY_DELIM)
entity = sects[2][i+1:]
sects[2] = sects[2][:i]
#assume only name given is actually the entity
elif(req_entity):
entity = sects[2]
sects[2] = ''
# [!] load all necessary blocks before searching
blocks = self.loadBlocks()
#use all blocks when visibility is off :todo: is this design intent?
if(visibility == False):
blocks = Block.getAllBlocks()
#track list of possible blocks as moving up the chain
possible_blocks = []
#search for an entity
if(len(entity)):
#collect list of all entities
reg = Map()
reg[entity] = []
#iterate through every block and create a mapping for their entity names
for bk in blocks:
#get the entity names from this block
es = bk.loadHDL(returnnames=True)
#print(es)
#create mappings of entity names to their block owners
for e in es:
if(e.lower() not in reg.keys()):
reg[e] = []
reg[e] += [bk]
#see how many blocks were fit to entity name's mapping
num_blocks = len(reg[entity])
#algorithm only detected one possible solution
if(num_blocks == 1):
#make sure rest of sections are correct before returning result
potential = reg[entity][0]
title = potential.getTitle(index=2, dist=2)
#verify each part of block identifier matches what was requested
for i in range(len(sects)):
#print(sects[i])
if(len(sects[i]) and sects[i].lower() != title[i].lower()):
return None
pass
return potential
#algorithm detected multiple possible solutions (cannot infer)
elif(num_blocks > 1):
possible_blocks = reg[entity]
#only was given an entity name, algorithm cannot solve requested entity
if(len(sects[2]) == 0):
log.info("Ambiguous unit; conflicts with")
#display the units/titles that conflict with input
for bk in reg[entity]:
print('\t '+bk.getFull()+":"+entity)
print()
exit()
#no blocks matched the entity name being passed
else:
return None
pass
#search through all block names
for start in range(len(sects)-1, -1, -1):
term = sects[start]
#exit loop if next term is empty
if(len(term) == 0):
break
reg = Map()
reg[term] = []
for bk in blocks:
t = bk.getTitle(index=start, dist=0)[0]
#store the block under the given section name
if(t.lower() not in reg.keys()):
reg[t] = []
reg[t] += [bk]
#count how many blocks occupy this same name
num_blocks = len(reg[term])
#algorithm only detected one possible solution
if(num_blocks == 1):
#make sure rest of sections are correct before returning result
potential = reg[term][0]
title = potential.getTitle(index=2, dist=2)
#verify each part of block identifier matches what was requested
for i in range(len(sects)):
#print(sects[i])
if(len(sects[i]) and sects[i].lower() != title[i].lower()):
return None
pass
return potential
#algorithm detected multiple solutions (cannot infer on this step)
elif(num_blocks > 1):
#compare with blocks for a match and dwindle down choices
next_blocks = []
for bk in reg[term]:
if(bk in possible_blocks or (start == len(sects)-1 and entity == '')):
next_blocks += [bk]
#dwindled down to a single block
if(len(next_blocks) == 1):
#print("FOUND:",next_blocks[0].getTitle(index=2, dist=2))
return next_blocks[0]
#carry on to using next title section
if(len(sects[start-1])):
#continue to using next term
possible_blocks = next_blocks
continue
else:
#ran out of guesses...report the conflicting titles/units
if(req_entity):
log.info("Ambiguous unit; conflicts with")
else:
log.info("Ambiguous title; conflicts with")
for bk in reg[term]:
if(req_entity):
print('\t '+bk.getFull()+":"+entity)
else:
print('\t '+bk.getFull())
exit(print())
pass
#using the current block if title is empty string
if(ref_current and (title == '' or title == None)):
return Block.getCurrent()
#return None if all attempts have failed and not returned anything yet
return None
def decodeUnits(self):
'''
Decodes every available unit to get the complete graphing data structure.
Parameters:
None
Returns:
None
'''
blocks = self.loadBlocks()
#print(blocks)
log.info("Collecting all unit data...")
for b in blocks:
us = b.loadHDL()
for u in us.values():
u.getLanguageFile().decode(u, recursive=False)
log.info("done.")
pass
def listBlocks(self, title, alpha=False, instl=False, dnld=False, avail=False):
'''
Print a formatted table of the available blocks.
Parameters:
title (str): block title to be broken into parts for searching
alpha (bool): determine if to alphabetize the block list order (L.N.V)
instl (bool): determine if to capture only blocks that are installed
dnld (bool): determine if to capture only blocks that are downloaded
avail (bool): determine if to capture blocks available from vendor
Returns:
None
'''
#[!] load the necessary blocks
self.loadBlocks()
#collect if multi-develop is on
mult_dev = apt.getMultiDevelop()
#split the title into parts
M,L,N,_ = Block.snapTitle(title, inc_ent=False)
#get all blocks from the catalog
#store each block's text line in a map to sort keys for alpha flag
catalog = Map()
#iterate through every vendor
for vndr_k,vndrs in Block.Inventory.items():
if(vndr_k.startswith(M.lower()) == False):
continue
#iterate through every library
for lib_k,libs in vndrs.items():
if(lib_k.startswith(L.lower()) == False):
continue
#iterate through every block
for blk_k,lvls in libs.items():
if(blk_k.startswith(N.lower()) == False):
continue
downloaded = installed = available = ' '
disp_d = disp_i = disp_a = False
#if none were set on command-line default to display everything
if((dnld or instl or avail) == False):
dnld = instl = avail = True
#with each lower level, overwrite the block object to print
if(lvls[Block.Level.AVAIL.value] != None):
bk = lvls[Block.Level.AVAIL.value]
available = 'A'
disp_a = True
if(lvls[Block.Level.INSTL.value] != None):
bk = lvls[Block.Level.INSTL.value]
installed = 'I'
disp_i = True
if(lvls[Block.Level.DNLD.value] != None):
if(dnld):
bk = lvls[Block.Level.DNLD.value]
downloaded = 'D'
# if(mult_dev):
# downloaded = 'D'
# installed = installed.lower()
disp_d = True
#one condition pair must be true to display the block
if((disp_a and avail) or (disp_i and instl) or (disp_d and dnld)):
pass
else:
continue
#character to separate different status bits
spacer = ' '
#format the status column's data
sts = downloaded + spacer + installed + spacer + available
#leave version empty if its been unreleased
v = '' if(bk.getVersion() == '0.0.0') else bk.getVersion()
#check if can be updated
#prioritize installation level for checking updates
instllr = bk.getLvlBlock(Block.Level.INSTL)
cmp_v = instllr.getVersion() if(instllr != None and mult_dev == False) else bk.getVersion()
#a '^' is an update symbol indicating the latest referenced version (dnld or instl) is not the actually the latest version found
if(Block.cmpVer(bk.getHighestAvailVersion(), cmp_v) != cmp_v):
sts = sts+' ^'
v = cmp_v
#format the data to print to the console and store in catalog (L.N.V str format)
catalog[bk.L()+'.'+bk.N()+'.'+bk.M()] = '{:<16}'.format(bk.L())+' '+'{:<20}'.format(bk.N())+' '+'{:<8}'.format(sts)+' '+'{:<10}'.format(v)+' '+'{:<16}'.format(bk.M())
pass
pass
keys = list(catalog.keys())
#check if to sort by alphabet
if(alpha):
keys.sort()
#print(keys)
print('{:<16}'.format("Library"),'{:<20}'.format("Block"),'{:<8}'.format("Status"+("*"*int(mult_dev))),'{:<10}'.format("Version"),'{:<16}'.format("Vendor"))
print("-"*16+" "+"-"*20+" "+"-"*8+" "+"-"*10+" "+"-"*16)
#iterate through catalog and print each textline
for k in keys:
print(catalog[k])
pass
def listUnits(self, title, alpha=False, usable=False, ignore_tb=False):
'''
Print a formatted table of all the design units.
Parameters:
title (str): block title to be broken into parts for searching
alpha (bool): determine if to alphabetize the block list order (E.V.L.N)
usable (bool): determine if to display units that can be used
ignore_tb (bool): determine if to ignore testbench files
Returns:
None
'''
#[!] load blocks into inventory
visible = self.loadBlocks()
#:todo: add flag to print 'variations' of an entity/unit (what specific version names exist)
#todo: print status of the unit and which status is usable (D or I)
M,L,N,V,E = Block.snapTitle(title, inc_ent=True)
#print(M,L,N,V,E)
#store each entity's print line in map (key = <unit>:<block-id>) to ensure uniqueness
catalog = Map()
for bk in Block.getAllBlocks():
#for lvl in Block.Inventory[bk.M()][bk.L()][bk.N()]:
block_title = bk.getFull(inc_ver=False)
if(bk.M().lower().startswith(M.lower()) == False):
continue
if(bk.L().lower().startswith(L.lower()) == False):
continue
if(bk.N().lower().startswith(N.lower()) == False):
continue
#collect all units
if(apt.getMultiDevelop() == False):
if(bk.getLvlBlock(Block.Level.INSTL) != None):
bk = bk.getLvlBlock(Block.Level.INSTL)
#skip this block if only displaying usable units and multi-develop off
elif(usable):
continue
units = bk.loadHDL(returnnames=False).values()
for u in units:
if(len(E) and u.E().lower().startswith(E.lower()) == False):
continue
if(ignore_tb and u.isTb()):
continue
#format if unit is visible/usable
vis = '-'
if(bk in visible):
vis = 'yes'
#format design unit name according to its natural language
dsgn = u.getDesign().name.lower()
if(u.getLang() == u.Language.VERILOG and dsgn == 'entity'):
dsgn = 'module'
catalog[u.E()+':'+block_title] = '{:<22}'.format(u.E())+' '+'{:<7}'.format(vis)+' '+'{:<10}'.format(dsgn)+' '+'{:<38}'.format(block_title)
pass
pass
keys = list(catalog.keys())
#check if to sort by alphabet
if(alpha):
keys.sort()
#print to console
print('{:<22}'.format("Unit"),'{:<7}'.format("Usable"),'{:<10}'.format("Type"),'{:<38}'.format("Block"))
print("-"*22+" "+"-"*7+" "+"-"*10+" "+"-"*38)
for k in keys:
print(catalog[k])
pass
pass
@classmethod
def tidy(cls):
'''
Removes any stale hidden workspace directories that aren't mapped to a
workspace found in the class Jar container.
Parameters:
None
Returns:
None
'''
#list all hidden workspace directories
hidden_dirs = os.listdir(cls.DIR)
for hd in hidden_dirs:
if(hd.lower() not in cls.Jar.keys()):
log.info("Removing stale workspace data for "+hd+"...")
if(os.path.isdir(cls.DIR+hd)):
shutil.rmtree(cls.DIR+hd, onerror=apt.rmReadOnly)
#remove all files from workspace directory
else:
os.remove(cls.DIR+hd)
pass
def autoRefresh(self, rate):
'''
Automatically refreshes all vendors for the given workspace. Reads its
log file to determine if past next interval for refresh.
Parameters:
rate (int): how often to ask a refresh within a 24-hour period
Returns:
None
'''
def timeToFloat(prt):
'''
Converts a time object into a float type.
Parameters:
prt (datetime): iso format of current time
Returns:
(float): 0.00 (inclusive) - 24.00 (exclusive)
'''
time_stamp = str(prt).split(' ')[1]
time_sects = time_stamp.split(':')
hrs = int(time_sects[0])
#convert to 'hours'.'minutes'
time_fmt = (float(hrs)+(float(float(time_sects[1])/60)))
return time_fmt
refresh = False
last_punch = None
stage = 1
cur_time = datetime.now()
#do not perform refresh if the rate is 0
if(rate == 0):
return
#always refresh if the rate is set below 0 (-1)
elif(rate <= self.MIN_RATE):
refresh = True
#divide the 24 hour period into even checkpoints
max_hours = float(24)
spacing = float(max_hours / rate)
intervals = []
for i in range(rate):
intervals += [spacing*i]
#ensure log file exists
if(os.path.exists(self.getDir()+self.LOG_FILE) == False):
open(self.getDir()+self.LOG_FILE, 'w').close()
#read log file
#read when the last refresh time occurred
with open(self.getDir()+self.LOG_FILE, 'r') as log_file:
#read the latest date
data = log_file.readlines()
#no refreshes have occurred so automatically need a refresh
if(len(data) == 0):
last_punch = cur_time
refresh = True
else:
last_punch = datetime.fromisoformat(data[0])
#determine if its time to refresh
#get latest time that was punched
last_time_fmt = timeToFloat(last_punch)
#determine the next checkpoint available for today
next_checkpoint = max_hours
for i in range(len(intervals)):
if(last_time_fmt < intervals[i]):
next_checkpoint = intervals[i]
stage = i + 1
break
#print('next checkpoint',next_checkpoint)
cur_time_fmt = timeToFloat(cur_time)
#check if the time has occurred on a previous day, (automatically update because its a new day)
next_day = cur_time.year > last_punch.year or cur_time.month > last_punch.month or cur_time.day > last_punch.day
#print(next_day)
#print("currently",cur_time_fmt)
#determine if the current time has passed the next checkpoint or if its a new day
if(next_day or cur_time_fmt >= next_checkpoint):
last_punch = cur_time
refresh = True
log_file.close()
#determine if its time to refresh
if(refresh):
#display what interval is being refreshed on the day
infoo = "("+str(stage)+"/"+str(rate)+")" if(rate > 0) else ''
log.info("Automatically refreshing workspace "+self.getName()+" vendors... "+infoo)
#refresh all vendors attached to this workspace
for vndr in self.getVendors():
vndr.refresh()
pass
#write updated time value to log file
with open(self.getDir()+self.LOG_FILE, 'w') as lf:
lf.write(str(cur_time))
pass
@classmethod
def load(cls):
'''Load all workspaces from settings.'''
wspcs = apt.CFG.get('workspace', dtype=Section)
for ws in wspcs.keys():
#skip over immediate keys
if(isinstance(wspcs[ws], Section) == False):
continue
path = ''
vendors = '()'
#verify that a path key and vendors key exists under each workspace
apt.CFG.set('workspace.'+ws+'.path', path, override=False)
apt.CFG.set('workspace.'+ws+'.vendors', vendors, override=False)
#retrieve path and vendors keys
if('path' in wspcs[ws].keys()):
path = wspcs[ws]['path']._val
if('vendors' in wspcs[ws].keys()):
vendors = Cfg.castList(wspcs[ws]['vendors']._val)
#create Workspace objects
Workspace(wspcs[ws]._name, path, vendors)
pass
#save if made any changes
if(apt.CFG._modified):
apt.CFG.write()
pass
@classmethod
def save(cls, inc_active=True):
'''
Serializes the Workspace objects and saves them to the settings dictionary.
Parameters:
inc_active (bool): determine if to save the active workspace to settings
Returns:
None
'''
serialized = {}
#serialize the Workspace objects into dictionary format for settings
for ws in cls.Jar.values():
#do not save any workspace that has no path
if(ws.getPath() == ''):
continue
serialized[ws.getName()] = {}
serialized[ws.getName()]['path'] = ws.getPath()
serialized[ws.getName()]['vendors'] = Cfg.castStr(ws.getVendors(returnnames=True, lowercase=False), tab_cnt=2, drop_list=False)
#update settings dictionary
apt.CFG.set('workspace', Section(serialized), override=True)
#update active workspace
if(inc_active):
if(cls.getActive() != None):
apt.CFG.set('general.active-workspace', cls.getActive().getName())
else:
apt.CFG.set('general.active-workspace', '')
apt.save()
pass
@classmethod
def inWorkspace(cls):
'''
Determine if an active workspace is selected.
Parameters:
None
Returns:
(bool): true if ActiveWorkspace is not None
'''
return cls._ActiveWorkspace != None
@classmethod
def setActiveWorkspace(cls, ws):
'''
Set the active workspace after initializing all workspaces into Jar. If
the input name is invalid, it will set the first workspace in the Jar as
active if one is not already assigned.
Parameters:
ws (str): workspace name
Returns:
(bool): true if active-workspace was set
'''
#properly set the active workspace from one found in Jar
if(ws != None and ws.lower() in cls.Jar.keys()):
re_assign = (cls._ActiveWorkspace != None)
#set the active workspace obj from found workspace
cls._ActiveWorkspace = cls.Jar[ws]
#only give prompt if reassigning the active-workspace
if(re_assign):
log.info("Assigning workspace "+cls._ActiveWorkspace.getName()+" as active workspace...")
return True
#try to randomly assign active workspace if not already assigned.
elif(len(cls.Jar.keys()) and cls._ActiveWorkspace == None):
random_ws = list(cls.Jar.keys())[0]
cls._ActiveWorkspace = cls.Jar[random_ws]
msgi = "No active workspace set."
if(ws != ''):
msgi = "Workspace "+ws+" does not exist."
log.info(msgi+" Auto-assigning active workspace to "+cls._ActiveWorkspace.getName()+"...")
return True
#still was not able to set the active workspace with the given argument
elif(cls._ActiveWorkspace != None):
log.info("Workspace "+ws+" does not exist. Keeping "+cls._ActiveWorkspace.getName()+" as active.")
else:
log.error("No workspace set as active.")
return False
def isLinked(self):
'''Returns if any vendors are tied to this workspace (bool).'''
return len(self.getVendors())
def getPath(self):
'''Returns the local path where downloaded blocks are located (str).'''
return self._path
def getDir(self):
'''Returns the base hidden directory where the workspace data is kept (str).'''
return self._ws_dir
def getCachePath(self):
'''Returns the hidden directory where workspace installations are kept. (str).'''
return self.getDir()+"cache/"
def getName(self):
'''Returns the workspace's identifier (str).'''
return self._name
def isActive(self):
'''Returns is this workspace is the active workspace (bool).'''
return self == self.getActive()
def getVendors(self, returnnames=False, lowercase=True):
'''
Return the vendor objects associated with the given workspace.
Parameters:
returnnames (bool): true will return vendor names
lowercase (bool): true will return lower-case names if returnnames is enabled
Returns:
([Vendor]) or ([str]): list of available vendors
'''
if(returnnames):
vndr_names = []
for vndr in self._vendors:
name = vndr.getName()
if(lowercase):
name = name.lower()
vndr_names += [name]
return vndr_names
else:
return self._vendors
@classmethod
def printList(cls):
'''
Prints a formatted table of the workspaces, showing each one's path, vendor availability, and which is active.
Parameters:
None
Returns:
None
'''
print('{:<16}'.format("Workspace"),'{:<6}'.format("Active"),'{:<40}'.format("Path"),'{:<14}'.format("Vendors"))
print("-"*16+" "+"-"*6+" "+"-"*40+" "+"-"*14+" ")
for ws in cls.Jar.values():
vndrs = apt.listToStr(ws.getVendors(returnnames=True))
act = 'yes' if(ws == cls.getActive()) else '-'
print('{:<16}'.format(ws.getName()),'{:<6}'.format(act),'{:<40}'.format(ws.getPath()),'{:<14}'.format(vndrs))
pass
pass
@classmethod
def printAll(cls):
for key,ws in cls.Jar.items():
print('key:',key)
print(ws)
@classmethod
def getActive(cls):
'''Returns the active workspace and will exit on error (Workspace).'''
if(cls._ActiveWorkspace == None):
exit(log.error("Not in a workspace!"))
return cls._ActiveWorkspace
# uncomment to use for debugging
# def __str__(self):
# return f'''
# ID: {hex(id(self))}
# Name: {self.getName()}
# Path: {self.getPath()}
# Active: {self.isActive()}
# Hidden directory: {self.getDir()}
# Linked to: {self.isLinked()}
# Vendors: {self.getVendors(returnnames=True)}
# '''
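# ------------------------------------------------------------------------
# Illustrative usage sketch, assuming the Workspace class above has been
# initialized with at least one workspace in its Jar; the name 'lab' is a
# placeholder and not part of the original module.
#
#   >>> Workspace.setActiveWorkspace('lab')   # True if 'lab' exists in the Jar
#   >>> Workspace.getActive().getPath()       # local path of the active workspace
#   >>> Workspace.printList()                 # table of workspaces and vendors
#   >>> Workspace.save(inc_active=True)       # persist workspaces to apt.CFG
# ------------------------------------------------------------------------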
pass | pt | 0.159028 | 2.65667 | 3 |
ev_de.py | avinashmnit30/Electric-Vehicle-Optimal-Charging | 7 | 14984 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 18:01:24 2015
@author: Avinash
"""
import numpy as np
from numpy import *
import numpy
from math import *
import ev_charge_schedule_modification1 as ev
#import ev_charge_schedule.static as func1
#import ev_charge_schedule.dynamic as func2
import time
#from numba import double
from numba.decorators import autojit
func1=ev.static
func=autojit(func1)
mode=1
runs=1
maxiter=2000
F=0.5 # Mutation Factor between 0 to 2
CR=0.2 # Crossover probability: use 0.9 if parameters are dependent, 0.2 if they are independent (separable)
N=40 # population size (number of candidate solutions)
D=100*24 # problem dimension: 100 vehicles x 24 hourly charging decisions
ev.global_var(var_set=0,N_veh=int(D/float(24)))
# boundary constraints
ub=numpy.random.random(size=(1,D))[0]
lb=numpy.random.random(size=(1,D))[0]
i=0
while i<D:
ub[i]=8.8
lb[i]=2.2
i+=1
fitness_val=numpy.zeros(shape=(runs,maxiter))
best_pos=numpy.zeros(shape=(runs,D))
for run_no in range(runs):
# target vector initialization
x=numpy.random.uniform(size=(N,D))
i=0
while i<N:
j=0
while j<D:
x[i][j]=lb[j]+x[i][j]*(ub[j]-lb[j])
j+=1
i+=1
v=np.zeros_like(x) # donor vectors
u=np.zeros_like(x) # trial vectors
g=numpy.zeros(shape=(1,D))[0] # best vector found so far
# target vector initial fitness evaluation
x_fit=numpy.random.uniform(size=(1,N))[0]
i=0
while i<N:
x_fit[i]=func(x[i],mode=mode)
i+=1
u_fit=np.zeros_like(x_fit)
j=0
i=1
while i<N:
if x_fit[j]>x_fit[i]:
j=i
i+=1
g_fit=x_fit[j]
g=x[j].copy()
time1=time.time()
it=0
while it<maxiter:
# Mutation stage
for i in range(N):
r1=i
while r1==i:
r1=np.random.randint(low=0,high=N)
r2=i
while r2==i or r2==r1:
r2=np.random.randint(low=0,high=N)
r3=i
while r3==i or r3==r1 or r3==r2:
r3=np.random.randint(low=0,high=N)
v[i]=x[r1]+(x[r2]-x[r3])*F
for j in range(D):
# if v[i][j]>ub[j]:
# v[i][j]=v[i][j]-(1+numpy.random.rand())*(v[i][j]-ub[j])
# if v[i][j]<lb[j]:
# v[i][j]=v[i][j]-(1+numpy.random.rand())*(v[i][j]-lb[j])
# if v[i][j]>ub[j]:
# v[i][j]=ub[j]
# if v[i][j]<lb[j]:
# v[i][j]=lb[j]
if v[i][j]>ub[j]:
#v[i][j]=v[i][j]-1.1*(v[i][j]-ub[j])
v[i][j]=lb[j]+numpy.random.random()*(ub[j]-lb[j])
if v[i][j]<lb[j]:
v[i][j]=lb[j]+numpy.random.random()*(ub[j]-lb[j])
#v[i][j]=v[i][j]-1.1*(v[i][j]-lb[j])
# Recombination stage
for i in range(N):
for j in range(D):
if np.random.random()<=CR or j==numpy.random.randint(0,D):
u[i][j]=v[i][j]
else:
u[i][j]=x[i][j]
# Selection stage
for i in range(N):
u_fit[i]=func(u[i],mode=mode)
if u_fit[i]<x_fit[i]:
x[i]=u[i].copy()
x_fit[i]=u_fit[i]
if u_fit[i]<g_fit:
g=u[i].copy()
g_fit=u_fit[i]
fitness_val[run_no][it]=g_fit
print it,g_fit
it+=1
best_pos[run_no]=g.copy()
time2=time.time()
print time2-time1
run_no+=1
numpy.savetxt("DE_fitness_d1_m2"+str(mode)+str(D)+".csv",fitness_val,delimiter=",")
numpy.savetxt("DE_bestpos_d1_m2"+str(mode)+str(D)+".csv",best_pos,delimiter=",")
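# Illustrative sketch (assumes the NumPy setup above; the helper name
# `de_generation` is invented for demonstration): one generation of the
# DE/rand/1/bin scheme used by the loop above, written in a compact form.
def de_generation(pop, fits, fit_fn, lb, ub, F=0.5, CR=0.2):
    N, D = pop.shape
    idx = np.arange(N)
    new_pop, new_fits = pop.copy(), fits.copy()
    for i in range(N):
        # mutation: three distinct donors, none equal to i
        r1, r2, r3 = np.random.choice(idx[idx != i], 3, replace=False)
        mutant = pop[r1] + F * (pop[r2] - pop[r3])
        # re-sample out-of-bounds coordinates uniformly inside [lb, ub]
        bad = (mutant > ub) | (mutant < lb)
        mutant[bad] = lb[bad] + np.random.random(bad.sum()) * (ub[bad] - lb[bad])
        # binomial crossover, forcing at least one gene from the mutant
        cross = np.random.random(D) <= CR
        cross[np.random.randint(D)] = True
        trial = np.where(cross, mutant, pop[i])
        # greedy selection
        f = fit_fn(trial)
        if f < fits[i]:
            new_pop[i], new_fits[i] = trial, f
    return new_pop, new_fits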
| en | 0.084047 | 2.007106 | 2
var/spack/repos/builtin/packages/memaxes/package.py | xiki-tempula/spack | 9 | 14985 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Memaxes(Package):
"""MemAxes is a visualizer for sampled memory trace data."""
homepage = "https://github.com/llnl/MemAxes"
version('0.5', sha256='9858f0f675b50e347d0b88545558e5d6b4333347c762b15d399b8d8004d7b68b',
url='https://github.com/llnl/MemAxes/archive/v0.5.tar.gz')
depends_on('cmake@2.8.9:', type='build')
depends_on("qt@5:")
def install(self, spec, prefix):
with working_dir('spack-build', create=True):
cmake('..', *std_cmake_args)
make()
make("install")
| pt | 0.229065 | 1.477385 | 1 |
auth/decorators.py | dongboyan77/quay | 0 | 14986 | <filename>auth/decorators.py
import logging
from functools import wraps
from flask import request, session
from prometheus_client import Counter
from auth.basic import validate_basic_auth
from auth.oauth import validate_bearer_auth
from auth.cookie import validate_session_cookie
from auth.signedgrant import validate_signed_grant
from util.http import abort
logger = logging.getLogger(__name__)
authentication_count = Counter(
"quay_authentication_attempts_total",
"number of authentication attempts across the registry and API",
labelnames=["auth_kind", "success"],
)
def _auth_decorator(pass_result=False, handlers=None):
""" Builds an auth decorator that runs the given handlers and, if any return successfully,
sets up the auth context. The wrapped function will be invoked *regardless of success or
failure of the auth handler(s)*
"""
def processor(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth_header = request.headers.get("authorization", "")
result = None
for handler in handlers:
result = handler(auth_header)
# If the handler was missing the necessary information, skip it and try the next one.
if result.missing:
continue
# Check for a valid result.
if result.auth_valid:
logger.debug("Found valid auth result: %s", result.tuple())
# Set the various pieces of the auth context.
result.apply_to_context()
# Log the metric.
authentication_count.labels(result.kind, True).inc()
break
# Otherwise, report the error.
if result.error_message is not None:
# Log the failure.
authentication_count.labels(result.kind, False).inc()
break
if pass_result:
kwargs["auth_result"] = result
return func(*args, **kwargs)
return wrapper
return processor
process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie])
process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth])
process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie])
process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True)
process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])
def require_session_login(func):
""" Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If
a valid session cookie does exist, the authenticated user and identity are also set.
"""
@wraps(func)
def wrapper(*args, **kwargs):
result = validate_session_cookie()
if result.has_nonrobot_user:
result.apply_to_context()
authentication_count.labels(result.kind, True).inc()
return func(*args, **kwargs)
elif not result.missing:
authentication_count.labels(result.kind, False).inc()
abort(401, message="Method requires login and no valid login could be loaded.")
return wrapper
def extract_namespace_repo_from_session(func):
""" Extracts the namespace and repository name from the current session (which must exist)
and passes them into the decorated function as the first and second arguments. If the
session doesn't exist or does not contain these arguments, a 400 error is raised.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if "namespace" not in session or "repository" not in session:
logger.error("Unable to load namespace or repository from session: %s", session)
abort(400, message="Missing namespace in request")
return func(session["namespace"], session["repository"], *args, **kwargs)
return wrapper
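# Illustrative usage sketch (assumes a Flask app object; the route and view
# names are placeholders, not part of this module):
#
#   @app.route("/api/secret")
#   @require_session_login
#   def secret_view():
#       # only reached when a valid, non-robot session cookie was presented
#       return "ok"
#
#   @process_basic_auth
#   def registry_endpoint(auth_result=None):
#       # `auth_result` is injected because process_basic_auth sets pass_result=True
#       ...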
| pt | 0.166197 | 2.508037 | 3 |
ddpm_proteins/utils.py | lucidrains/ddpm-proteins | 61 | 14987 | import os
from PIL import Image
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sidechainnet.utils.sequence import ProteinVocabulary
from einops import rearrange
# general functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
# singleton msa transformer
msa_instances = None
def get_msa_transformer():
global msa_instances
if not exists(msa_instances):
msa_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S")
batch_converter = alphabet.get_batch_converter()
msa_instances = (msa_model, batch_converter)
return msa_instances
return msa_instances
# MSA embedding related functions
VOCAB = ProteinVocabulary()
def ids_to_aa_str(x):
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
is_char = lambda c: isinstance(c, str) and len(c) == 1
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_aa_str(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(is_char, out)):
return ''.join(out)
return out
def aa_str_to_embed_input(x):
assert isinstance(x, list), 'input must be a list'
out = []
for el in x:
if isinstance(el, list):
out.append(aa_str_to_embed_input(el))
elif isinstance(el, str):
out.append((None, el))
else:
raise TypeError('type must be either list or string')
return out
def apc(x):
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12)
normalized = x - avg
return normalized
def symmetrize(x):
return x + x.transpose(-1, -2)
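# Illustrative check (assumes torch as imported above; values are arbitrary):
# `symmetrize` mirrors a square map and `apc` subtracts the average-product
# term (row sum * column sum / grand total), so every row and column of the
# corrected map sums to zero.
#
#   m = torch.rand(1, 4, 16, 16)
#   corrected = apc(symmetrize(m))
#   corrected.sum(dim=-1).abs().max()   # ~0 up to floating-point error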
def pad_image_to(tensor, size, value = 0.):
remainder = size - tensor.shape[-1]
tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value)
return tensor
# getting a single MSA attention embedding, with caching
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))
FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))
os.makedirs(CACHE_PATH, exist_ok = True)
@torch.no_grad()
def get_msa_attention_embedding(
model,
batch_converter,
aa_str,
id,
fetch_msas_fn = lambda t: [],
cache = True
):
device = next(model.parameters()).device
cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt')
if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path):
try:
loaded = torch.load(cache_full_path).to(device)
except:
loaded = None
if exists(loaded):
return loaded
msas = default(fetch_msas_fn(aa_str), [])
seq_with_msas = [aa_str, *msas]
embed_inputs = aa_str_to_embed_input(seq_with_msas)
_, _, msa_batch_tokens = batch_converter(embed_inputs)
results = model(msa_batch_tokens.to(device), need_head_weights = True)
attentions = results['row_attentions']
attentions = attentions[..., 1:, 1:]
attentions = rearrange(attentions, 'b l h m n -> b (l h) m n')
attentions = apc(symmetrize(attentions))
if cache:
print(f'caching to {cache_full_path}')
torch.save(attentions, cache_full_path)
return attentions
def get_msa_attention_embeddings(
model,
batch_converter,
seqs,
ids,
fetch_msas_fn = lambda t: [],
cache = True
):
n = seqs.shape[1]
seqs = rearrange(seqs, 'b n -> b () n')
aa_strs = ids_to_aa_str(seqs.cpu().tolist())
embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)]
embeds_list = [pad_image_to(embed, n) for embed in embeds_list]
embeds = torch.cat(embeds_list, dim = 0)
return embeds
# training utils
def cycle(loader, thres = 256):
while True:
for data in loader:
if data.seqs.shape[1] <= thres:
yield data
def save_heatmap(tensor, filepath, dpi = 200, return_image = False):
heatmap = sn.heatmap(tensor.cpu().numpy())
figure = heatmap.get_figure()
figure.savefig(filepath, dpi = dpi)
plt.clf()
if not return_image:
return
return Image.open(filepath)
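# Illustrative usage sketch (the sequence string and id are placeholders;
# supplying real MSAs is left to fetch_msas_fn):
#
#   model, batch_converter = get_msa_transformer()
#   attn = get_msa_attention_embedding(model, batch_converter,
#                                      'MKTAYIAKQR', id = 'demo', cache = False)
#   # attn: (1, layers * heads, L, L) row-attention map, symmetrized and APC-corrected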
| it | 0.14552 | 2.2782 | 2 |
samples/python/efficientdet/create_onnx.py | L-Net-1992/TensorRT | 0 | 14988 | <filename>samples/python/efficientdet/create_onnx.py
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import tensorflow as tf
import onnx_graphsurgeon as gs
import numpy as np
import onnx
from onnx import shape_inference
from tf2onnx import tfonnx, optimizer, tf_loader
import onnx_utils
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetGraphSurgeon").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetGraphSurgeon")
class EfficientDetGraphSurgeon:
def __init__(self, saved_model_path):
"""
Constructor of the EfficientDet Graph Surgeon object, to do the conversion of an EfficientDet TF saved model
to an ONNX-TensorRT parsable model.
:param saved_model_path: The path pointing to the TensorFlow saved model to load.
"""
saved_model_path = os.path.realpath(saved_model_path)
assert os.path.exists(saved_model_path)
# Use tf2onnx to convert saved model to an initial ONNX graph.
graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, "serve",
["serving_default"])
log.info("Loaded saved model from {}".format(saved_model_path))
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
with tf_loader.tf_session(graph=tf_graph):
onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)
onnx_model = optimizer.optimize_graph(onnx_graph).make_model("Converted from {}".format(saved_model_path))
self.graph = gs.import_onnx(onnx_model)
assert self.graph
log.info("TF2ONNX graph created successfully")
# Fold constants via ONNX-GS that TF2ONNX may have missed
self.graph.fold_constants()
# Try to auto-detect by finding if nodes match a specific name pattern expected for either of the APIs.
self.api = None
if len([node for node in self.graph.nodes if "class_net/" in node.name]) > 0:
self.api = "AutoML"
elif len([node for node in self.graph.nodes if "/WeightSharedConvolutionalClassHead/" in node.name]) > 0:
self.api = "TFOD"
assert self.api
log.info("Graph was detected as {}".format(self.api))
def sanitize(self):
"""
Sanitize the graph by cleaning any unconnected nodes, do a topological resort, and fold constant inputs values.
When possible, run shape inference on the ONNX graph to determine tensor shapes.
"""
for i in range(3):
count_before = len(self.graph.nodes)
self.graph.cleanup().toposort()
try:
for node in self.graph.nodes:
for o in node.outputs:
o.shape = None
model = gs.export_onnx(self.graph)
model = shape_inference.infer_shapes(model)
self.graph = gs.import_onnx(model)
except Exception as e:
log.info("Shape inference could not be performed at this time:\n{}".format(e))
try:
self.graph.fold_constants(fold_shapes=True)
except TypeError as e:
log.error("This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your "
"onnx_graphsurgeon module. Error:\n{}".format(e))
raise
count_after = len(self.graph.nodes)
if count_before == count_after:
# No new folding occurred in this iteration, so we can stop for now.
break
def save(self, output_path):
"""
Save the ONNX model to the given location.
:param output_path: Path pointing to the location where to write out the updated ONNX model.
"""
self.graph.cleanup().toposort()
model = gs.export_onnx(self.graph)
output_path = os.path.realpath(output_path)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
onnx.save(model, output_path)
log.info("Saved ONNX model to {}".format(output_path))
def update_preprocessor(self, input_format, input_size, preprocessor="imagenet"):
"""
Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials.
:param input_format: The input data format, either "NCHW" or "NHWC".
:param input_size: The input size as a comma-separated string in H,W format, e.g. "512,512".
:param preprocessor: The preprocessor to use, either "imagenet" for imagenet mean and stdev normalization,
or "scale_range" for uniform [-1,+1] range normalization.
"""
# Update the input and output tensors shape
input_size = input_size.split(",")
assert len(input_size) == 2
for i in range(len(input_size)):
input_size[i] = int(input_size[i])
assert input_size[i] >= 1
assert input_format in ["NCHW", "NHWC"]
if input_format == "NCHW":
self.graph.inputs[0].shape = ['N', 3, input_size[0], input_size[1]]
if input_format == "NHWC":
self.graph.inputs[0].shape = ['N', input_size[0], input_size[1], 3]
self.graph.inputs[0].dtype = np.float32
self.graph.inputs[0].name = "input"
log.info("ONNX graph input shape: {} [{} format]".format(self.graph.inputs[0].shape, input_format))
self.sanitize()
# Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them
for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:
node.inputs.clear()
# Convert to NCHW format if needed
input_tensor = self.graph.inputs[0]
if input_format == "NHWC":
input_tensor = self.graph.transpose("preprocessor/transpose", input_tensor, [0, 3, 1, 2])
assert preprocessor in ["imagenet", "scale_range"]
preprocessed_tensor = None
if preprocessor == "imagenet":
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 1 / np.asarray([255], dtype=np.float32)
mean_val = -1 * np.expand_dims(np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3))
stddev_val = 1 / np.expand_dims(np.asarray([0.229, 0.224, 0.225], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val * stddev_val)
mean_out = self.graph.elt_const("Add", "preprocessor/mean", scale_out, mean_val * stddev_val)
preprocessed_tensor = mean_out[0]
if preprocessor == "scale_range":
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 2 / np.asarray([255], dtype=np.float32)
offset_val = np.expand_dims(np.asarray([-1, -1, -1], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val)
range_out = self.graph.elt_const("Add", "preprocessor/range", scale_out, offset_val)
preprocessed_tensor = range_out[0]
# Find the first stem conv node of the graph, and connect the normalizer directly to it
stem_name = None
if self.api == "AutoML":
stem_name = "/stem/"
if self.api == "TFOD":
stem_name = "/stem_conv2d/"
stem = [node for node in self.graph.nodes if node.op == "Conv" and stem_name in node.name][0]
log.info("Found {} node '{}' as stem entry".format(stem.op, stem.name))
stem.inputs[0] = preprocessed_tensor
self.sanitize()
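# Quick numeric check of the folding used above, i.e. that
# (x * scale + mean) * stddev == x * (scale * stddev) + mean * stddev
# (illustrative values, not taken from the graph):
#
#   x = np.random.rand(1, 3, 4, 4).astype(np.float32)
#   s, m, d = np.float32(1 / 255), np.float32(-0.45), np.float32(1 / 0.225)
#   np.allclose((x * s + m) * d, x * (s * d) + m * d)   # -> True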
def update_shapes(self):
# Reshape nodes have the batch dimension as a fixed value of 1, they should use the batch size instead
# Output-Head reshapes use [1, -1, C], corrected reshape value should be [-1, V, C]
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
shape_in = node.inputs[0].shape
if shape_in is None or len(shape_in) not in [4,5]: # TFOD graphs have 5-dim inputs on this Reshape
continue
if type(node.inputs[1]) != gs.Constant:
continue
shape_out = node.inputs[1].values
if len(shape_out) != 3 or shape_out[0] != 1 or shape_out[1] != -1:
continue
volume = shape_in[1] * shape_in[2] * shape_in[3] / shape_out[2]
if len(shape_in) == 5:
volume *= shape_in[4]
shape_corrected = np.asarray([-1, volume, shape_out[2]], dtype=np.int64)
node.inputs[1] = gs.Constant("{}_shape".format(node.name), values=shape_corrected)
log.info("Updating Output-Head Reshape node {} to {}".format(node.name, node.inputs[1].values))
# Other Reshapes only need to change the first dim to -1, as long as there are no -1's already
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
if type(node.inputs[1]) != gs.Constant or node.inputs[1].values[0] != 1 or -1 in node.inputs[1].values:
continue
node.inputs[1].values[0] = -1
log.info("Updating Reshape node {} to {}".format(node.name, node.inputs[1].values))
# Resize nodes try to calculate the output shape dynamically, it's more optimal to pre-compute the shape
if self.api == "AutoML":
# Resize on a BiFPN will always be 2x, but grab it from the graph just in case
for node in [node for node in self.graph.nodes if node.op == "Resize"]:
if len(node.inputs) < 4 or node.inputs[0].shape is None:
continue
scale_h, scale_w = None, None
if type(node.inputs[3]) == gs.Constant:
# The sizes input is already folded
if len(node.inputs[3].values) != 4:
continue
scale_h = node.inputs[3].values[2] / node.inputs[0].shape[2]
scale_w = node.inputs[3].values[3] / node.inputs[0].shape[3]
if type(node.inputs[3]) == gs.Variable:
# The sizes input comes from Shape+Slice+Concat
concat = node.i(3)
if concat.op != "Concat":
continue
if type(concat.inputs[1]) != gs.Constant or len(concat.inputs[1].values) != 2:
continue
scale_h = concat.inputs[1].values[0] / node.inputs[0].shape[2]
scale_w = concat.inputs[1].values[1] / node.inputs[0].shape[3]
scales = np.asarray([1, 1, scale_h, scale_w], dtype=np.float32)
del node.inputs[3]
node.inputs[2] = gs.Constant(name="{}_scales".format(node.name), values=scales)
log.info("Updating Resize node {} to {}".format(node.name, scales))
self.sanitize()
def update_network(self):
"""
Updates the graph to replace certain nodes in the main EfficientDet network:
- the global average pooling nodes are optimized when running for TFOD models.
"""
if self.api == "TFOD":
for reduce in [node for node in self.graph.nodes if node.op == "ReduceMean"]:
# TFOD models have their ReduceMean nodes applied with some redundant transposes that can be
# optimized away for better performance
# Make sure the correct subgraph is being replaced, basically search for this:
# X > Transpose (0,2,3,1) > ReduceMean (1,2) > Reshape (?,1,1,?) > Reshape (?,?,1,1) > Conv > Y
# And change to this:
# X > ReduceMean (2,3) > Conv > Y
transpose = reduce.i()
if transpose.op != "Transpose" or transpose.attrs['perm'] != [0, 2, 3, 1]:
continue
if len(reduce.attrs['axes']) != 2 or reduce.attrs['axes'] != [1, 2]:
continue
reshape1 = reduce.o()
if reshape1.op != "Reshape" or len(reshape1.inputs[1].values) != 4:
continue
if reshape1.inputs[1].values[1] != 1 or reshape1.inputs[1].values[2] != 1:
continue
reshape2 = reshape1.o()
if reshape2.op != "Reshape" or len(reshape2.inputs[1].values) != 4:
continue
if reshape2.inputs[1].values[2] != 1 or reshape2.inputs[1].values[3] != 1:
continue
conv = reshape2.o()
if conv.op != "Conv":
continue
# If all the checks above pass, then this node sequence can be optimized by just the ReduceMean itself
# operating on a different set of axes
input_tensor = transpose.inputs[0] # Input tensor to the Transpose
reduce.inputs[0] = input_tensor # Forward the Transpose input to the ReduceMean node
output_tensor = reduce.outputs[0] # Output tensor of the ReduceMean
conv.inputs[0] = output_tensor # Forward the ReduceMean output to the Conv node
reduce.attrs['axes'] = [2, 3] # Update the axes that ReduceMean operates on
reduce.attrs['keepdims'] = 1 # Keep the reduced dimensions
log.info("Optimized subgraph around ReduceMean node '{}'".format(reduce.name))
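# Quick sanity sketch of the rewrite above: a spatial mean is layout
# independent, so ReduceMean over axes (1, 2) of an NHWC tensor equals
# ReduceMean over axes (2, 3) of its NCHW transpose (illustrative only):
#
#   x_nchw = np.random.rand(1, 8, 7, 7).astype(np.float32)
#   x_nhwc = x_nchw.transpose(0, 2, 3, 1)
#   np.allclose(x_nhwc.mean(axis=(1, 2)), x_nchw.mean(axis=(2, 3)))   # -> True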
def update_nms(self, threshold=None, detections=None):
"""
Updates the graph to replace the NMS op with an EfficientNMS_TRT TensorRT plugin node.
:param threshold: Override the score threshold attribute. If set to None, use the value in the graph.
:param detections: Override the max detections attribute. If set to None, use the value in the graph.
"""
def find_head_concat(name_scope):
# This will find the concatenation node at the end of either Class Net or Box Net. These concatenation nodes
# bring together prediction data for each of 5 scales.
# The concatenated Class Net node will have shape [batch_size, num_anchors, num_classes],
# and the concatenated Box Net node has the shape [batch_size, num_anchors, 4].
# These concatenation nodes can be found by searching for all Concat's and checking if the node two
# steps above in the graph has a name that begins with either "box_net/..." or "class_net/...".
for node in [node for node in self.graph.nodes if node.op == "Transpose" and name_scope in node.name]:
concat = self.graph.find_descendant_by_op(node, "Concat")
assert concat and len(concat.inputs) == 5
log.info("Found {} node '{}' as the tip of {}".format(concat.op, concat.name, name_scope))
return concat
def extract_anchors_tensor(split):
# This will find the anchors that have been hardcoded somewhere within the ONNX graph.
# The function will return a gs.Constant that can be directly used as an input to the NMS plugin.
# The anchor tensor shape will be [1, num_anchors, 4]. Note that '1' is kept as first dim, regardless of
# batch size, as it's not necessary to replicate the anchors for all images in the batch.
# The anchors are available (one per coordinate) hardcoded as constants within certain box decoder nodes.
# Each of these four constants have shape [1, num_anchors], so some numpy operations are used to expand the
# dims and concatenate them as needed.
# These constants can be found by starting from the Box Net's split operation, and for each coordinate,
# walking down in the graph until either an Add or Mul node is found. The second input on these nodes will
# be the anchor data required.
def get_anchor_np(output_idx, op):
node = self.graph.find_descendant_by_op(split.o(0, output_idx), op)
assert node
val = np.squeeze(node.inputs[1].values)
return np.expand_dims(val.flatten(), axis=(0, 2))
anchors_y = get_anchor_np(0, "Add")
anchors_x = get_anchor_np(1, "Add")
anchors_h = get_anchor_np(2, "Mul")
anchors_w = get_anchor_np(3, "Mul")
anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2)
return gs.Constant(name="nms/anchors:0", values=anchors)
self.sanitize()
head_names = []
if self.api == "AutoML":
head_names = ["class_net/", "box_net/"]
if self.api == "TFOD":
head_names = ["/WeightSharedConvolutionalClassHead/", "/WeightSharedConvolutionalBoxHead/"]
# There are five nodes at the bottom of the graph that provide important connection points:
# 1. Find the concat node at the end of the class net (multi-scale class predictor)
class_net = find_head_concat(head_names[0])
class_net_tensor = class_net.outputs[0]
# 2. Find the concat node at the end of the box net (multi-scale localization predictor)
box_net = find_head_concat(head_names[1])
box_net_tensor = box_net.outputs[0]
# 3. Find the split node that separates the box net coordinates and feeds them into the box decoder.
box_net_split = self.graph.find_descendant_by_op(box_net, "Split")
assert box_net_split and len(box_net_split.outputs) == 4
# 4. Find the concat node at the end of the box decoder.
box_decoder = self.graph.find_descendant_by_op(box_net_split, "Concat")
assert box_decoder and len(box_decoder.inputs) == 4
box_decoder_tensor = box_decoder.outputs[0]
# 5. Find the NMS node.
nms_node = self.graph.find_node_by_op("NonMaxSuppression")
# Extract NMS Configuration
num_detections = int(nms_node.inputs[2].values) if detections is None else detections
iou_threshold = float(nms_node.inputs[3].values)
score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold
num_classes = class_net.i().inputs[1].values[-1]
normalized = True if self.api == "TFOD" else False
# NMS Inputs and Attributes
# NMS expects these shapes for its input tensors:
# box_net: [batch_size, number_boxes, 4]
# class_net: [batch_size, number_boxes, number_classes]
# anchors: [1, number_boxes, 4] (if used)
nms_op = None
nms_attrs = None
nms_inputs = None
# EfficientNMS TensorRT Plugin
# Fusing the decoder will always be faster, so this is the default NMS method supported. In this case,
# three inputs are given to the NMS TensorRT node:
# - The box predictions (from the Box Net node found above)
# - The class predictions (from the Class Net node found above)
# - The default anchor coordinates (from the extracted anchor constants)
# As the original tensors from EfficientDet will be used, the NMS code type is set to 1 (Center+Size),
# because this is the internal box coding format used by the network.
anchors_tensor = extract_anchors_tensor(box_net_split)
nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor]
nms_op = "EfficientNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'background_class': -1,
'max_output_boxes': num_detections,
'score_threshold': max(0.01, score_threshold), # Keep threshold to at least 0.01 for better efficiency
'iou_threshold': iou_threshold,
'score_activation': True,
'box_coding': 1,
}
nms_output_classes_dtype = np.int32
# NMS Outputs
nms_output_num_detections = gs.Variable(name="num_detections", dtype=np.int32, shape=['N', 1])
nms_output_boxes = gs.Variable(name="detection_boxes", dtype=np.float32,
shape=['N', num_detections, 4])
nms_output_scores = gs.Variable(name="detection_scores", dtype=np.float32,
shape=['N', num_detections])
nms_output_classes = gs.Variable(name="detection_classes", dtype=nms_output_classes_dtype,
shape=['N', num_detections])
nms_outputs = [nms_output_num_detections, nms_output_boxes, nms_output_scores, nms_output_classes]
# Create the NMS Plugin node with the selected inputs. The outputs of the node will also become the final
# outputs of the graph.
self.graph.plugin(
op=nms_op,
name="nms/non_maximum_suppression",
inputs=nms_inputs,
outputs=nms_outputs,
attrs=nms_attrs)
log.info("Created NMS plugin '{}' with attributes: {}".format(nms_op, nms_attrs))
self.graph.outputs = nms_outputs
self.sanitize()
def main(args):
effdet_gs = EfficientDetGraphSurgeon(args.saved_model)
if args.tf2onnx:
effdet_gs.save(args.tf2onnx)
effdet_gs.update_preprocessor(args.input_format, args.input_size, args.preprocessor)
effdet_gs.update_shapes()
effdet_gs.update_network()
effdet_gs.update_nms(args.nms_threshold, args.nms_detections)
effdet_gs.save(args.onnx)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--saved_model", required=True,
help="The TensorFlow saved model directory to load")
parser.add_argument("-o", "--onnx", required=True,
help="The output ONNX model file to write")
parser.add_argument("-f", "--input_format", default="NHWC", choices=["NHWC", "NCHW"],
help="Set the input data format of the graph, either NCHW or NHWC, default: NHWC")
parser.add_argument("-i", "--input_size", default="512,512",
help="Set the input shape of the graph, as a comma-separated dimensions in H,W format, "
"default: 512,512")
parser.add_argument("-p", "--preprocessor", default="imagenet", choices=["imagenet", "scale_range"],
help="Set the preprocessor to apply on the graph, either 'imagenet' for standard mean "
"subtraction and stdev normalization, or 'scale_range' for uniform [-1,+1] "
"normalization as is used in the AdvProp models, default: imagenet")
parser.add_argument("-t", "--nms_threshold", type=float,
help="Override the NMS score threshold, default: use the original value in the model")
parser.add_argument("-d", "--nms_detections", type=int,
help="Override the NMS max detections, default: use the original value in the model")
parser.add_argument("--tf2onnx",
help="The path where to save the intermediate ONNX graph generated by tf2onnx, useful"
"for graph debugging purposes, default: not saved")
args = parser.parse_args()
main(args)
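# Example invocation (the paths are placeholders):
#
#   python create_onnx.py \
#       --saved_model /path/to/efficientdet_saved_model \
#       --onnx /path/to/model.onnx \
#       --input_format NHWC --input_size 512,512 --preprocessor imagenet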
| <filename>samples/python/efficientdet/create_onnx.py
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import tensorflow as tf
import onnx_graphsurgeon as gs
import numpy as np
import onnx
from onnx import shape_inference
from tf2onnx import tfonnx, optimizer, tf_loader
import onnx_utils
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetGraphSurgeon").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetGraphSurgeon")
class EfficientDetGraphSurgeon:
def __init__(self, saved_model_path):
"""
Constructor of the EfficientDet Graph Surgeon object, to do the conversion of an EfficientDet TF saved model
to an ONNX-TensorRT parsable model.
:param saved_model_path: The path pointing to the TensorFlow saved model to load.
"""
saved_model_path = os.path.realpath(saved_model_path)
assert os.path.exists(saved_model_path)
# Use tf2onnx to convert saved model to an initial ONNX graph.
graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, "serve",
["serving_default"])
log.info("Loaded saved model from {}".format(saved_model_path))
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
with tf_loader.tf_session(graph=tf_graph):
onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)
onnx_model = optimizer.optimize_graph(onnx_graph).make_model("Converted from {}".format(saved_model_path))
self.graph = gs.import_onnx(onnx_model)
assert self.graph
log.info("TF2ONNX graph created successfully")
# Fold constants via ONNX-GS that TF2ONNX may have missed
self.graph.fold_constants()
# Try to auto-detect by finding if nodes match a specific name pattern expected for either of the APIs.
self.api = None
if len([node for node in self.graph.nodes if "class_net/" in node.name]) > 0:
self.api = "AutoML"
elif len([node for node in self.graph.nodes if "/WeightSharedConvolutionalClassHead/" in node.name]) > 0:
self.api = "TFOD"
assert self.api
log.info("Graph was detected as {}".format(self.api))
def sanitize(self):
"""
Sanitize the graph by cleaning any unconnected nodes, do a topological resort, and fold constant inputs values.
When possible, run shape inference on the ONNX graph to determine tensor shapes.
"""
for i in range(3):
count_before = len(self.graph.nodes)
self.graph.cleanup().toposort()
try:
for node in self.graph.nodes:
for o in node.outputs:
o.shape = None
model = gs.export_onnx(self.graph)
model = shape_inference.infer_shapes(model)
self.graph = gs.import_onnx(model)
except Exception as e:
log.info("Shape inference could not be performed at this time:\n{}".format(e))
try:
self.graph.fold_constants(fold_shapes=True)
except TypeError as e:
log.error("This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your "
"onnx_graphsurgeon module. Error:\n{}".format(e))
raise
count_after = len(self.graph.nodes)
if count_before == count_after:
# No new folding occurred in this iteration, so we can stop for now.
break
def save(self, output_path):
"""
Save the ONNX model to the given location.
:param output_path: Path pointing to the location where to write out the updated ONNX model.
"""
self.graph.cleanup().toposort()
model = gs.export_onnx(self.graph)
output_path = os.path.realpath(output_path)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
onnx.save(model, output_path)
log.info("Saved ONNX model to {}".format(output_path))
def update_preprocessor(self, input_format, input_size, preprocessor="imagenet"):
"""
Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials.
:param input_format: The input data format, either "NCHW" or "NHWC".
:param input_size: The input size as a comma-separated string in H,W format, e.g. "512,512".
:param preprocessor: The preprocessor to use, either "imagenet" for imagenet mean and stdev normalization,
or "scale_range" for uniform [-1,+1] range normalization.
"""
# Update the input and output tensors shape
input_size = input_size.split(",")
assert len(input_size) == 2
for i in range(len(input_size)):
input_size[i] = int(input_size[i])
assert input_size[i] >= 1
assert input_format in ["NCHW", "NHWC"]
if input_format == "NCHW":
self.graph.inputs[0].shape = ['N', 3, input_size[0], input_size[1]]
if input_format == "NHWC":
self.graph.inputs[0].shape = ['N', input_size[0], input_size[1], 3]
self.graph.inputs[0].dtype = np.float32
self.graph.inputs[0].name = "input"
log.info("ONNX graph input shape: {} [{} format]".format(self.graph.inputs[0].shape, input_format))
self.sanitize()
# Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them
for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:
node.inputs.clear()
# Convert to NCHW format if needed
input_tensor = self.graph.inputs[0]
if input_format == "NHWC":
input_tensor = self.graph.transpose("preprocessor/transpose", input_tensor, [0, 3, 1, 2])
assert preprocessor in ["imagenet", "scale_range"]
preprocessed_tensor = None
if preprocessor == "imagenet":
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 1 / np.asarray([255], dtype=np.float32)
mean_val = -1 * np.expand_dims(np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3))
stddev_val = 1 / np.expand_dims(np.asarray([0.229, 0.224, 0.225], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val * stddev_val)
mean_out = self.graph.elt_const("Add", "preprocessor/mean", scale_out, mean_val * stddev_val)
preprocessed_tensor = mean_out[0]
if preprocessor == "scale_range":
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 2 / np.asarray([255], dtype=np.float32)
offset_val = np.expand_dims(np.asarray([-1, -1, -1], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val)
range_out = self.graph.elt_const("Add", "preprocessor/range", scale_out, offset_val)
preprocessed_tensor = range_out[0]
# Find the first stem conv node of the graph, and connect the normalizer directly to it
stem_name = None
if self.api == "AutoML":
stem_name = "/stem/"
if self.api == "TFOD":
stem_name = "/stem_conv2d/"
stem = [node for node in self.graph.nodes if node.op == "Conv" and stem_name in node.name][0]
log.info("Found {} node '{}' as stem entry".format(stem.op, stem.name))
stem.inputs[0] = preprocessed_tensor
self.sanitize()
def update_shapes(self):
# Reshape nodes have the batch dimension as a fixed value of 1, they should use the batch size instead
# Output-Head reshapes use [1, -1, C], corrected reshape value should be [-1, V, C]
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
shape_in = node.inputs[0].shape
if shape_in is None or len(shape_in) not in [4,5]: # TFOD graphs have 5-dim inputs on this Reshape
continue
if type(node.inputs[1]) != gs.Constant:
continue
shape_out = node.inputs[1].values
if len(shape_out) != 3 or shape_out[0] != 1 or shape_out[1] != -1:
continue
volume = shape_in[1] * shape_in[2] * shape_in[3] / shape_out[2]
if len(shape_in) == 5:
volume *= shape_in[4]
shape_corrected = np.asarray([-1, volume, shape_out[2]], dtype=np.int64)
node.inputs[1] = gs.Constant("{}_shape".format(node.name), values=shape_corrected)
log.info("Updating Output-Head Reshape node {} to {}".format(node.name, node.inputs[1].values))
# Other Reshapes only need to change the first dim to -1, as long as there are no -1's already
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
if type(node.inputs[1]) != gs.Constant or node.inputs[1].values[0] != 1 or -1 in node.inputs[1].values:
continue
node.inputs[1].values[0] = -1
log.info("Updating Reshape node {} to {}".format(node.name, node.inputs[1].values))
# Resize nodes try to calculate the output shape dynamically, it's more optimal to pre-compute the shape
if self.api == "AutoML":
# Resize on a BiFPN will always be 2x, but grab it from the graph just in case
for node in [node for node in self.graph.nodes if node.op == "Resize"]:
if len(node.inputs) < 4 or node.inputs[0].shape is None:
continue
scale_h, scale_w = None, None
if type(node.inputs[3]) == gs.Constant:
# The sizes input is already folded
if len(node.inputs[3].values) != 4:
continue
scale_h = node.inputs[3].values[2] / node.inputs[0].shape[2]
scale_w = node.inputs[3].values[3] / node.inputs[0].shape[3]
if type(node.inputs[3]) == gs.Variable:
# The sizes input comes from Shape+Slice+Concat
concat = node.i(3)
if concat.op != "Concat":
continue
if type(concat.inputs[1]) != gs.Constant or len(concat.inputs[1].values) != 2:
continue
scale_h = concat.inputs[1].values[0] / node.inputs[0].shape[2]
scale_w = concat.inputs[1].values[1] / node.inputs[0].shape[3]
scales = np.asarray([1, 1, scale_h, scale_w], dtype=np.float32)
del node.inputs[3]
node.inputs[2] = gs.Constant(name="{}_scales".format(node.name), values=scales)
log.info("Updating Resize node {} to {}".format(node.name, scales))
self.sanitize()
def update_network(self):
"""
Updates the graph to replace certain nodes in the main EfficientDet network:
- the global average pooling nodes are optimized when running for TFOD models.
"""
if self.api == "TFOD":
for reduce in [node for node in self.graph.nodes if node.op == "ReduceMean"]:
# TFOD models have their ReduceMean nodes applied with some redundant transposes that can be
# optimized away for better performance
# Make sure the correct subgraph is being replaced, basically search for this:
# X > Transpose (0,2,3,1) > ReduceMean (1,2) > Reshape (?,1,1,?) > Reshape (?,?,1,1) > Conv > Y
# And change to this:
# X > ReduceMean (2,3) > Conv > Y
transpose = reduce.i()
if transpose.op != "Transpose" or transpose.attrs['perm'] != [0, 2, 3, 1]:
continue
if len(reduce.attrs['axes']) != 2 or reduce.attrs['axes'] != [1, 2]:
continue
reshape1 = reduce.o()
if reshape1.op != "Reshape" or len(reshape1.inputs[1].values) != 4:
continue
if reshape1.inputs[1].values[1] != 1 or reshape1.inputs[1].values[2] != 1:
continue
reshape2 = reshape1.o()
if reshape2.op != "Reshape" or len(reshape2.inputs[1].values) != 4:
continue
if reshape2.inputs[1].values[2] != 1 or reshape2.inputs[1].values[3] != 1:
continue
conv = reshape2.o()
if conv.op != "Conv":
continue
# If all the checks above pass, then this node sequence can be optimized by just the ReduceMean itself
# operating on a different set of axes
input_tensor = transpose.inputs[0] # Input tensor to the Transpose
reduce.inputs[0] = input_tensor # Forward the Transpose input to the ReduceMean node
output_tensor = reduce.outputs[0] # Output tensor of the ReduceMean
conv.inputs[0] = output_tensor # Forward the ReduceMean output to the Conv node
reduce.attrs['axes'] = [2, 3] # Update the axes that ReduceMean operates on
reduce.attrs['keepdims'] = 1 # Keep the reduced dimensions
log.info("Optimized subgraph around ReduceMean node '{}'".format(reduce.name))
def update_nms(self, threshold=None, detections=None):
"""
        Updates the graph to replace the NMS op with the EfficientNMS_TRT TensorRT plugin node.
:param threshold: Override the score threshold attribute. If set to None, use the value in the graph.
:param detections: Override the max detections attribute. If set to None, use the value in the graph.
"""
def find_head_concat(name_scope):
# This will find the concatenation node at the end of either Class Net or Box Net. These concatenation nodes
# bring together prediction data for each of 5 scales.
# The concatenated Class Net node will have shape [batch_size, num_anchors, num_classes],
# and the concatenated Box Net node has the shape [batch_size, num_anchors, 4].
            # These concatenation nodes can be found by searching for all Concat's and checking if the node two
# steps above in the graph has a name that begins with either "box_net/..." or "class_net/...".
for node in [node for node in self.graph.nodes if node.op == "Transpose" and name_scope in node.name]:
concat = self.graph.find_descendant_by_op(node, "Concat")
assert concat and len(concat.inputs) == 5
log.info("Found {} node '{}' as the tip of {}".format(concat.op, concat.name, name_scope))
return concat
def extract_anchors_tensor(split):
# This will find the anchors that have been hardcoded somewhere within the ONNX graph.
# The function will return a gs.Constant that can be directly used as an input to the NMS plugin.
# The anchor tensor shape will be [1, num_anchors, 4]. Note that '1' is kept as first dim, regardless of
# batch size, as it's not necessary to replicate the anchors for all images in the batch.
# The anchors are available (one per coordinate) hardcoded as constants within certain box decoder nodes.
# Each of these four constants have shape [1, num_anchors], so some numpy operations are used to expand the
# dims and concatenate them as needed.
            # These constants can be found by starting from the Box Net's split operation, and for each coordinate,
            # walking down the graph until either an Add or Mul node is found. The second input of these nodes will
# be the anchor data required.
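            # Editor's note on shapes: each per-coordinate constant is [1, num_anchors]; squeeze/flatten gives
            # [num_anchors], np.expand_dims(..., axis=(0, 2)) gives [1, num_anchors, 1], and concatenating the
            # four results on axis 2 yields the final [1, num_anchors, 4] anchor tensor.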
def get_anchor_np(output_idx, op):
node = self.graph.find_descendant_by_op(split.o(0, output_idx), op)
assert node
val = np.squeeze(node.inputs[1].values)
return np.expand_dims(val.flatten(), axis=(0, 2))
anchors_y = get_anchor_np(0, "Add")
anchors_x = get_anchor_np(1, "Add")
anchors_h = get_anchor_np(2, "Mul")
anchors_w = get_anchor_np(3, "Mul")
anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2)
return gs.Constant(name="nms/anchors:0", values=anchors)
self.sanitize()
head_names = []
if self.api == "AutoML":
head_names = ["class_net/", "box_net/"]
if self.api == "TFOD":
head_names = ["/WeightSharedConvolutionalClassHead/", "/WeightSharedConvolutionalBoxHead/"]
# There are five nodes at the bottom of the graph that provide important connection points:
# 1. Find the concat node at the end of the class net (multi-scale class predictor)
class_net = find_head_concat(head_names[0])
class_net_tensor = class_net.outputs[0]
# 2. Find the concat node at the end of the box net (multi-scale localization predictor)
box_net = find_head_concat(head_names[1])
box_net_tensor = box_net.outputs[0]
# 3. Find the split node that separates the box net coordinates and feeds them into the box decoder.
box_net_split = self.graph.find_descendant_by_op(box_net, "Split")
assert box_net_split and len(box_net_split.outputs) == 4
# 4. Find the concat node at the end of the box decoder.
box_decoder = self.graph.find_descendant_by_op(box_net_split, "Concat")
assert box_decoder and len(box_decoder.inputs) == 4
box_decoder_tensor = box_decoder.outputs[0]
# 5. Find the NMS node.
nms_node = self.graph.find_node_by_op("NonMaxSuppression")
# Extract NMS Configuration
num_detections = int(nms_node.inputs[2].values) if detections is None else detections
iou_threshold = float(nms_node.inputs[3].values)
score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold
num_classes = class_net.i().inputs[1].values[-1]
normalized = True if self.api == "TFOD" else False
# NMS Inputs and Attributes
# NMS expects these shapes for its input tensors:
# box_net: [batch_size, number_boxes, 4]
# class_net: [batch_size, number_boxes, number_classes]
# anchors: [1, number_boxes, 4] (if used)
nms_op = None
nms_attrs = None
nms_inputs = None
# EfficientNMS TensorRT Plugin
# Fusing the decoder will always be faster, so this is the default NMS method supported. In this case,
# three inputs are given to the NMS TensorRT node:
# - The box predictions (from the Box Net node found above)
# - The class predictions (from the Class Net node found above)
# - The default anchor coordinates (from the extracted anchor constants)
# As the original tensors from EfficientDet will be used, the NMS code type is set to 1 (Center+Size),
# because this is the internal box coding format used by the network.
anchors_tensor = extract_anchors_tensor(box_net_split)
nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor]
nms_op = "EfficientNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'background_class': -1,
'max_output_boxes': num_detections,
'score_threshold': max(0.01, score_threshold), # Keep threshold to at least 0.01 for better efficiency
'iou_threshold': iou_threshold,
'score_activation': True,
'box_coding': 1,
}
nms_output_classes_dtype = np.int32
# NMS Outputs
nms_output_num_detections = gs.Variable(name="num_detections", dtype=np.int32, shape=['N', 1])
nms_output_boxes = gs.Variable(name="detection_boxes", dtype=np.float32,
shape=['N', num_detections, 4])
nms_output_scores = gs.Variable(name="detection_scores", dtype=np.float32,
shape=['N', num_detections])
nms_output_classes = gs.Variable(name="detection_classes", dtype=nms_output_classes_dtype,
shape=['N', num_detections])
nms_outputs = [nms_output_num_detections, nms_output_boxes, nms_output_scores, nms_output_classes]
# Create the NMS Plugin node with the selected inputs. The outputs of the node will also become the final
# outputs of the graph.
self.graph.plugin(
op=nms_op,
name="nms/non_maximum_suppression",
inputs=nms_inputs,
outputs=nms_outputs,
attrs=nms_attrs)
log.info("Created NMS plugin '{}' with attributes: {}".format(nms_op, nms_attrs))
self.graph.outputs = nms_outputs
self.sanitize()
def main(args):
effdet_gs = EfficientDetGraphSurgeon(args.saved_model)
if args.tf2onnx:
effdet_gs.save(args.tf2onnx)
effdet_gs.update_preprocessor(args.input_format, args.input_size, args.preprocessor)
effdet_gs.update_shapes()
effdet_gs.update_network()
effdet_gs.update_nms(args.nms_threshold, args.nms_detections)
effdet_gs.save(args.onnx)
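# Example invocation (editor's note; the script file name and paths below are placeholders):
#   python <this_script>.py -m /path/to/saved_model -o model.onnx -i 512,512 -p imagenet
# The flags correspond to the argparse options defined below.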
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--saved_model", required=True,
help="The TensorFlow saved model directory to load")
parser.add_argument("-o", "--onnx", required=True,
help="The output ONNX model file to write")
parser.add_argument("-f", "--input_format", default="NHWC", choices=["NHWC", "NCHW"],
help="Set the input data format of the graph, either NCHW or NHWC, default: NHWC")
parser.add_argument("-i", "--input_size", default="512,512",
                        help="Set the input shape of the graph, as comma-separated dimensions in H,W format, "
"default: 512,512")
parser.add_argument("-p", "--preprocessor", default="imagenet", choices=["imagenet", "scale_range"],
help="Set the preprocessor to apply on the graph, either 'imagenet' for standard mean "
"subtraction and stdev normalization, or 'scale_range' for uniform [-1,+1] "
"normalization as is used in the AdvProp models, default: imagenet")
parser.add_argument("-t", "--nms_threshold", type=float,
help="Override the NMS score threshold, default: use the original value in the model")
parser.add_argument("-d", "--nms_detections", type=int,
help="Override the NMS max detections, default: use the original value in the model")
parser.add_argument("--tf2onnx",
                        help="The path where to save the intermediate ONNX graph generated by tf2onnx, useful "
                             "for graph debugging purposes, default: not saved")
args = parser.parse_args()
main(args)
| pt | 0.19674 | 2.335084 | 2 |
easy/867-transpose-matrix.py | wanglongjiang/leetcode | 2 | 14989 | <reponame>wanglongjiang/leetcode
'''
Transpose Matrix
Given a 2-D integer array matrix, return the transpose of matrix.
The transpose of a matrix is the matrix flipped over its main diagonal, switching the matrix's row and column indices.
'''
from typing import List
'''
Approach: a simple problem. The original matrix is m*n; create a new n*m matrix and copy the old matrix's data over with row and column indices swapped.
'''
class Solution:
def transpose(self, matrix: List[List[int]]) -> List[List[int]]:
m = len(matrix)
n = len(matrix[0])
newMatrix = [[]] * n
for i in range(n):
newMatrix[i] = [0] * m
for j in range(m):
newMatrix[i][j] = matrix[j][i]
return newMatrix
s = Solution()
print(s.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
print(s.transpose([[1, 2, 3], [4, 5, 6]]))
| '''
Transpose Matrix
Given a 2-D integer array matrix, return the transpose of matrix.
The transpose of a matrix is the matrix flipped over its main diagonal, switching the matrix's row and column indices.
'''
from typing import List
'''
Approach: a simple problem. The original matrix is m*n; create a new n*m matrix and copy the old matrix's data over with row and column indices swapped.
'''
class Solution:
def transpose(self, matrix: List[List[int]]) -> List[List[int]]:
m = len(matrix)
n = len(matrix[0])
newMatrix = [[]] * n
for i in range(n):
newMatrix[i] = [0] * m
for j in range(m):
newMatrix[i][j] = matrix[j][i]
return newMatrix
s = Solution()
print(s.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
print(s.transpose([[1, 2, 3], [4, 5, 6]])) | zh | 1.000057 | 3.997648 | 4 |
adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 1 | 14990 | from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
class HeadGazeEstimator:
""" A class for estimating gaze ray from facial landmarks """
def __init__(self, write_video=False):
# 3D model points.
self.model_points = np.array(
[
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
                (225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0), # Right mouth corner
]
)
self.dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
"""
Parameters
----------
write_video : bool, optional
Write the resulting OpenCV video
"""
self.write_video = write_video
self.landmark_detector = LandmarkDetector(write_video=False)
def get_gaze_rays(self, filename, bbox_history=None, show=True):
"""
Get the gaze rays for the given video file
"""
# Get the landmarks for the entire video
landmark_map = self.landmark_detector.detect(filename, show=False)
# Capture the video
cap = cv2.VideoCapture(filename)
frame_no = 0
gaze_angles = {}
# Loop over the frames from the video stream
while True:
success, frame = cap.read()
if not success:
if frame_no == 0:
print("Failed to read video")
sys.exit(1)
else:
break
if frame_no == 0:
# Camera internals
size = frame.shape
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[
[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1],
],
dtype="double",
)
if self.write_video:
# Initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
par_path = os.path.abspath(os.path.join(filename, os.pardir))
dir_path = par_path + "_pnp"
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
video_path = os.path.join(dir_path, os.path.basename(filename))
writer = cv2.VideoWriter(
video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True
)
if frame_no in landmark_map:
# 2D image points.
image_points = np.array(
[
landmark_map[frame_no][33], # Nose tip
landmark_map[frame_no][8], # Chin
landmark_map[frame_no][36], # Left eye left corner
                        landmark_map[frame_no][45], # Right eye right corner
landmark_map[frame_no][48], # Left Mouth corner
landmark_map[frame_no][54], # Right mouth corner
],
dtype="double",
)
# We use this to draw a line sticking out of the nose
success, rotation_vector, translation_vector = cv2.solvePnP(
self.model_points,
image_points,
camera_matrix,
self.dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE,
)
nose_end_point2D, jacobian = cv2.projectPoints(
np.array([(0.0, 0.0, 1000.0)]),
rotation_vector,
translation_vector,
camera_matrix,
self.dist_coeffs,
)
for p in image_points:
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1)
for p in landmark_map[frame_no]:
if p in image_points:
continue
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
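                # Editor's note: the lines below extend the projected nose-direction segment p1->p2 by three
                # times its own length beyond p2, so the drawn gaze ray (ending at C_x, C_y) is easier to see.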
lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
length = lenAB * 3
C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length)
C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length)
cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2)
if bbox_history is not None and (self.write_video or show):
bboxes = bbox_history[frame_no]
for i, bbox in enumerate(bboxes):
x, y = int(bbox[0]), int(bbox[1])
w, h = int(bbox[2]), int(bbox[3])
cv2.circle(
frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1
)
# Store in the return dictionary
gaze_angles[frame_no] = (p1, p2)
# Show the frame if the flag is on
if show:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# Write the video if the flag is on
if self.write_video:
writer.write(frame)
frame_no += 1
# Cleanup
cv2.destroyAllWindows()
if self.write_video:
writer.release()
return gaze_angles
| from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
class HeadGazeEstimator:
""" A class for estimating gaze ray from facial landmarks """
def __init__(self, write_video=False):
# 3D model points.
self.model_points = np.array(
[
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
                (225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0), # Right mouth corner
]
)
self.dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
"""
Parameters
----------
write_video : bool, optional
Write the resulting OpenCV video
"""
self.write_video = write_video
self.landmark_detector = LandmarkDetector(write_video=False)
def get_gaze_rays(self, filename, bbox_history=None, show=True):
"""
Get the gaze rays for the given video file
"""
# Get the landmarks for the entire video
landmark_map = self.landmark_detector.detect(filename, show=False)
# Capture the video
cap = cv2.VideoCapture(filename)
frame_no = 0
gaze_angles = {}
# Loop over the frames from the video stream
while True:
success, frame = cap.read()
if not success:
if frame_no == 0:
print("Failed to read video")
sys.exit(1)
else:
break
if frame_no == 0:
# Camera internals
size = frame.shape
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[
[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1],
],
dtype="double",
)
if self.write_video:
# Initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
par_path = os.path.abspath(os.path.join(filename, os.pardir))
dir_path = par_path + "_pnp"
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
video_path = os.path.join(dir_path, os.path.basename(filename))
writer = cv2.VideoWriter(
video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True
)
if frame_no in landmark_map:
# 2D image points.
image_points = np.array(
[
landmark_map[frame_no][33], # Nose tip
landmark_map[frame_no][8], # Chin
landmark_map[frame_no][36], # Left eye left corner
                        landmark_map[frame_no][45], # Right eye right corner
landmark_map[frame_no][48], # Left Mouth corner
landmark_map[frame_no][54], # Right mouth corner
],
dtype="double",
)
# We use this to draw a line sticking out of the nose
success, rotation_vector, translation_vector = cv2.solvePnP(
self.model_points,
image_points,
camera_matrix,
self.dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE,
)
nose_end_point2D, jacobian = cv2.projectPoints(
np.array([(0.0, 0.0, 1000.0)]),
rotation_vector,
translation_vector,
camera_matrix,
self.dist_coeffs,
)
for p in image_points:
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1)
for p in landmark_map[frame_no]:
if p in image_points:
continue
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
length = lenAB * 3
C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length)
C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length)
cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2)
if bbox_history is not None and (self.write_video or show):
bboxes = bbox_history[frame_no]
for i, bbox in enumerate(bboxes):
x, y = int(bbox[0]), int(bbox[1])
w, h = int(bbox[2]), int(bbox[3])
cv2.circle(
frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1
)
# Store in the return dictionary
gaze_angles[frame_no] = (p1, p2)
# Show the frame if the flag is on
if show:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# Write the video if the flag is on
if self.write_video:
writer.write(frame)
frame_no += 1
# Cleanup
cv2.destroyAllWindows()
if self.write_video:
writer.release()
return gaze_angles
| pt | 0.210294 | 3.002684 | 3 |
pypoca/cogs/general.py | leandcesar/PyPoca | 1 | 14991 | # -*- coding: utf-8 -*-
import disnake
from disnake.ext import commands
from pypoca.config import COLOR, URLS
from pypoca.database import Server
from pypoca.ext import ALL, DEFAULT, Choice, Option
class General(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.slash_command(name="ping", description=DEFAULT["COMMAND_PING_DESC"])
async def slash_ping(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide):
server = Server.get_by_id(inter.guild.id)
locale = ALL[server.language] if server else DEFAULT
latency = int(self.bot.latency * 1000)
description = locale["COMMAND_PING_REPLY"] + f": {latency}ms"
embed = disnake.Embed(description=description, color=COLOR)
await inter.send(embed=embed, ephemeral=hide)
@commands.slash_command(name="help", description=DEFAULT["COMMAND_HELP_DESC"])
async def slash_help(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide):
server = Server.get_by_id(inter.guild.id)
locale = ALL[server.language] if server else DEFAULT
BLANK = "<:blank:914183315056111627>"
description = f"""
**/movie**
{BLANK} **discover** {locale["COMMAND_MOVIE_DISCOVER_DESC"]}
{BLANK} **find** {locale["COMMAND_MOVIE_FIND_DESC"]}
{BLANK} **popular** {locale["COMMAND_MOVIE_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_MOVIE_SEARCH_DESC"]}
{BLANK} **top** {locale["COMMAND_MOVIE_TOP_DESC"]}
{BLANK} **trending** {locale["COMMAND_MOVIE_TRENDING_DESC"]}
{BLANK} **upcoming** {locale["COMMAND_MOVIE_UPCOMING_DESC"]}
**/tv**
{BLANK} **discover** {locale["COMMAND_TV_DISCOVER_DESC"]}
{BLANK} **popular** {locale["COMMAND_TV_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_TV_SEARCH_DESC"]}
{BLANK} **top** {locale["COMMAND_TV_TOP_DESC"]}
{BLANK} **trending** {locale["COMMAND_TV_TRENDING_DESC"]}
{BLANK} **upcoming** {locale["COMMAND_TV_UPCOMING_DESC"]}
**/people**
{BLANK} **popular** {locale["COMMAND_PERSON_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_PERSON_SEARCH_DESC"]}
{BLANK} **trending** {locale["COMMAND_PERSON_TRENDING_DESC"]}
**/game**
{BLANK} **frame** {locale["COMMAND_GAME_FRAME_DESC"]}
{BLANK} **higher** {locale["COMMAND_GAME_HIGHER_DESC"]}
**/setting**
{BLANK} **language** {locale["COMMAND_LANGUAGE_DESC"]}
"""
buttons = [
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_INVITE"], "url": URLS["invite"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_VOTE"], "url": URLS["vote"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_SERVER"], "url": URLS["server"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_SITE"], "url": URLS["site"]},
]
embed = disnake.Embed(description=description, color=COLOR)
view = disnake.ui.View()
[view.add_item(disnake.ui.Button(**button)) for button in buttons]
await inter.send(embed=embed, view=view, ephemeral=hide)
def setup(bot: commands.Bot) -> None:
bot.add_cog(General(bot))
| # -*- coding: utf-8 -*-
import disnake
from disnake.ext import commands
from pypoca.config import COLOR, URLS
from pypoca.database import Server
from pypoca.ext import ALL, DEFAULT, Choice, Option
class General(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.slash_command(name="ping", description=DEFAULT["COMMAND_PING_DESC"])
async def slash_ping(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide):
server = Server.get_by_id(inter.guild.id)
locale = ALL[server.language] if server else DEFAULT
latency = int(self.bot.latency * 1000)
description = locale["COMMAND_PING_REPLY"] + f": {latency}ms"
embed = disnake.Embed(description=description, color=COLOR)
await inter.send(embed=embed, ephemeral=hide)
@commands.slash_command(name="help", description=DEFAULT["COMMAND_HELP_DESC"])
async def slash_help(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide):
server = Server.get_by_id(inter.guild.id)
locale = ALL[server.language] if server else DEFAULT
BLANK = "<:blank:914183315056111627>"
description = f"""
**/movie**
{BLANK} **discover** {locale["COMMAND_MOVIE_DISCOVER_DESC"]}
{BLANK} **find** {locale["COMMAND_MOVIE_FIND_DESC"]}
{BLANK} **popular** {locale["COMMAND_MOVIE_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_MOVIE_SEARCH_DESC"]}
{BLANK} **top** {locale["COMMAND_MOVIE_TOP_DESC"]}
{BLANK} **trending** {locale["COMMAND_MOVIE_TRENDING_DESC"]}
{BLANK} **upcoming** {locale["COMMAND_MOVIE_UPCOMING_DESC"]}
**/tv**
{BLANK} **discover** {locale["COMMAND_TV_DISCOVER_DESC"]}
{BLANK} **popular** {locale["COMMAND_TV_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_TV_SEARCH_DESC"]}
{BLANK} **top** {locale["COMMAND_TV_TOP_DESC"]}
{BLANK} **trending** {locale["COMMAND_TV_TRENDING_DESC"]}
{BLANK} **upcoming** {locale["COMMAND_TV_UPCOMING_DESC"]}
**/people**
{BLANK} **popular** {locale["COMMAND_PERSON_POPULAR_DESC"]}
{BLANK} **search** {locale["COMMAND_PERSON_SEARCH_DESC"]}
{BLANK} **trending** {locale["COMMAND_PERSON_TRENDING_DESC"]}
**/game**
{BLANK} **frame** {locale["COMMAND_GAME_FRAME_DESC"]}
{BLANK} **higher** {locale["COMMAND_GAME_HIGHER_DESC"]}
**/setting**
{BLANK} **language** {locale["COMMAND_LANGUAGE_DESC"]}
"""
buttons = [
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_INVITE"], "url": URLS["invite"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_VOTE"], "url": URLS["vote"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_SERVER"], "url": URLS["server"]},
{"style": 5, "label": locale["COMMAND_HELP_BUTTON_SITE"], "url": URLS["site"]},
]
embed = disnake.Embed(description=description, color=COLOR)
view = disnake.ui.View()
[view.add_item(disnake.ui.Button(**button)) for button in buttons]
await inter.send(embed=embed, view=view, ephemeral=hide)
def setup(bot: commands.Bot) -> None:
bot.add_cog(General(bot))
| pt | 0.346553 | 2.126184 | 2 |
costor_server/storage/api/views/authcheck.py | rphi/costor | 2 | 14992 | <gh_stars>1-10
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.exceptions import APIException
from rest_framework.decorators import parser_classes
from django.shortcuts import get_object_or_404
from manager.models import Agent
@api_view(['GET'])
@permission_classes([permissions.AllowAny])
def auth_check(request):
if not request.user.is_authenticated:
raise APIException(
detail="You aren't authenticated.",
code=403
)
#print(request.GET)
if 'agent' not in request.GET:
return Response(f'Authenticated as {request.user.username} with no agent')
agent = Agent.objects.filter(name=request.GET['agent'])
if not agent.exists():
raise APIException(
detail="Can't find that agent",
code=404
)
agent = agent.first()
if request.user not in agent.users.all():
raise APIException(
detail=f'Authenticated as {request.user.username} but you don\'t have permission for agent {agent.name}',
code=403
)
return Response(f'Authenticated as {request.user.username} for agent {agent.name}')
| from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.exceptions import APIException
from rest_framework.decorators import parser_classes
from django.shortcuts import get_object_or_404
from manager.models import Agent
@api_view(['GET'])
@permission_classes([permissions.AllowAny])
def auth_check(request):
if not request.user.is_authenticated:
raise APIException(
detail="You aren't authenticated.",
code=403
)
#print(request.GET)
if 'agent' not in request.GET:
return Response(f'Authenticated as {request.user.username} with no agent')
agent = Agent.objects.filter(name=request.GET['agent'])
if not agent.exists():
raise APIException(
detail="Can't find that agent",
code=404
)
agent = agent.first()
if request.user not in agent.users.all():
raise APIException(
detail=f'Authenticated as {request.user.username} but you don\'t have permission for agent {agent.name}',
code=403
)
return Response(f'Authenticated as {request.user.username} for agent {agent.name}') | en | 0.232893 | 2.188733 | 2 |
tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 0 | 14993 | <gh_stars>0
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Drivers for streaming reductions framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib
from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel
from tensorflow_probability.python.experimental.mcmc import tracing_reducer
from tensorflow_probability.python.experimental.mcmc import with_reductions
from tensorflow_probability.python.mcmc import sample
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'sample_chain',
'sample_fold',
]
def sample_fold(
num_steps,
current_state,
previous_kernel_results=None,
kernel=None,
reducer=None,
num_burnin_steps=0,
num_steps_between_results=0,
parallel_iterations=10,
seed=None,
name=None,
):
"""Computes the requested reductions over the `kernel`'s samples.
To wit, runs the given `kernel` for `num_steps` steps, and consumes
the stream of samples with the given `Reducer`s' `one_step` method(s).
This runs in constant memory (unless a given `Reducer` builds a
large structure).
The driver internally composes the correct onion of `WithReductions`
and `SampleDiscardingKernel` to implement the requested optionally
thinned reduction; however, the kernel results of those applied
Transition Kernels will not be returned. Hence, if warm-restarting
reductions is desired, one should manually build the Transition Kernel
onion and use `tfp.experimental.mcmc.step_kernel`.
An arbitrary collection of `reducer` can be provided, and the resulting
finalized statistic(s) will be returned in an identical structure.
Args:
num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
steps.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
Warm-start for the auxiliary state needed by the given `kernel`.
If not supplied, `sample_fold` will cold-start with
`kernel.bootstrap_results`.
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
reducer: A (possibly nested) structure of `Reducer`s to be evaluated
on the `kernel`'s samples. If no reducers are given (`reducer=None`),
then `None` will be returned in place of streaming calculations.
num_burnin_steps: Integer or scalar `Tensor` representing the number
of chain steps to take before starting to collect results.
Defaults to 0 (i.e., no burn-in).
num_steps_between_results: Integer or scalar `Tensor` representing
the number of chain steps between collecting a result. Only one out
of every `num_steps_between_samples + 1` steps is included in the
returned results. Defaults to 0 (i.e., no thinning).
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: Optional seed for reproducible sampling.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mcmc_sample_fold').
Returns:
reduction_results: A (possibly nested) structure of finalized reducer
statistics. The structure identically mimics that of `reducer`.
end_state: The final state of the Markov chain(s).
final_kernel_results: `collections.namedtuple` of internal calculations
used to advance the supplied `kernel`. These results do not include
the kernel results of `WithReductions` or `SampleDiscardingKernel`.
"""
with tf.name_scope(name or 'mcmc_sample_fold'):
num_steps = tf.convert_to_tensor(
num_steps, dtype=tf.int32, name='num_steps')
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(x, name='current_state'),
current_state)
reducer_was_none = False
if reducer is None:
reducer = []
reducer_was_none = True
reduction_kernel = with_reductions.WithReductions(
inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(
inner_kernel=kernel,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results),
reducer=reducer,
)
end_state, final_kernel_results = exp_sample_lib.step_kernel(
num_steps=num_steps,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=reduction_kernel,
return_final_kernel_results=True,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
reduction_results = nest.map_structure_up_to(
reducer,
lambda r, s: r.finalize(s),
reducer,
final_kernel_results.streaming_calculations,
check_types=False)
if reducer_was_none:
reduction_results = None
return (reduction_results,
end_state,
final_kernel_results.inner_results.inner_results)
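# Illustrative usage sketch (editor's addition, not part of the original module). It assumes a calibrated
# `tfp.mcmc.TransitionKernel` instance `my_kernel`, an initial chain state `init_state`, and reuses the
# `tracing_reducer` module imported above:
#
#   reducer = tracing_reducer.TracingReducer(trace_fn=lambda state, kr: state, size=1000)
#   reductions, end_state, final_kr = sample_fold(
#       num_steps=1000, current_state=init_state, kernel=my_kernel, reducer=reducer)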
def _trace_kernel_results(current_state, kernel_results):
del current_state
return kernel_results
def sample_chain(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=_trace_kernel_results,
return_final_kernel_results=False,
parallel_iterations=10,
seed=None,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain at `current_state` whose
stationary distribution is governed by the supplied `TransitionKernel`
instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning'
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized, and thus do not
increase memory requirements.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_samples + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and return a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: Optional, a seed for reproducible sampling.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'experimental_mcmc_sample_chain').
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### References
  [1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
with tf.name_scope(name or 'experimental_mcmc_sample_chain'):
if not kernel.is_calibrated:
warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '
'chain may not converge to intended target distribution.')
if trace_fn is None:
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if trace_fn is sample_chain.__defaults__[4]:
warnings.warn('Tracing all kernel results by default is deprecated. Set '
'the `trace_fn` argument to None (the future default '
'value) or an explicit callback that traces the values '
'you are interested in.')
# `WithReductions` assumes all its reducers want to reduce over the
# immediate inner results of its kernel results. However,
# We don't care about the kernel results of `SampleDiscardingKernel`; hence,
# we evaluate the `trace_fn` on a deeper level of inner results.
def real_trace_fn(curr_state, kr):
return curr_state, trace_fn(curr_state, kr.inner_results)
trace_reducer = tracing_reducer.TracingReducer(
trace_fn=real_trace_fn,
size=num_results
)
trace_results, _, final_kernel_results = sample_fold(
num_steps=num_results,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=kernel,
reducer=trace_reducer,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
all_states, trace = trace_results
if return_final_kernel_results:
return sample.CheckpointableStatesAndTrace(
all_states=all_states,
trace=trace,
final_kernel_results=final_kernel_results)
else:
if no_trace:
return all_states
else:
return sample.StatesAndTrace(all_states=all_states, trace=trace)
| # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Drivers for streaming reductions framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib
from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel
from tensorflow_probability.python.experimental.mcmc import tracing_reducer
from tensorflow_probability.python.experimental.mcmc import with_reductions
from tensorflow_probability.python.mcmc import sample
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'sample_chain',
'sample_fold',
]
def sample_fold(
num_steps,
current_state,
previous_kernel_results=None,
kernel=None,
reducer=None,
num_burnin_steps=0,
num_steps_between_results=0,
parallel_iterations=10,
seed=None,
name=None,
):
"""Computes the requested reductions over the `kernel`'s samples.
To wit, runs the given `kernel` for `num_steps` steps, and consumes
the stream of samples with the given `Reducer`s' `one_step` method(s).
This runs in constant memory (unless a given `Reducer` builds a
large structure).
The driver internally composes the correct onion of `WithReductions`
and `SampleDiscardingKernel` to implement the requested optionally
thinned reduction; however, the kernel results of those applied
Transition Kernels will not be returned. Hence, if warm-restarting
reductions is desired, one should manually build the Transition Kernel
onion and use `tfp.experimental.mcmc.step_kernel`.
An arbitrary collection of `reducer` can be provided, and the resulting
finalized statistic(s) will be returned in an identical structure.
Args:
num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
steps.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
Warm-start for the auxiliary state needed by the given `kernel`.
If not supplied, `sample_fold` will cold-start with
`kernel.bootstrap_results`.
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
reducer: A (possibly nested) structure of `Reducer`s to be evaluated
on the `kernel`'s samples. If no reducers are given (`reducer=None`),
then `None` will be returned in place of streaming calculations.
num_burnin_steps: Integer or scalar `Tensor` representing the number
of chain steps to take before starting to collect results.
Defaults to 0 (i.e., no burn-in).
num_steps_between_results: Integer or scalar `Tensor` representing
the number of chain steps between collecting a result. Only one out
of every `num_steps_between_samples + 1` steps is included in the
returned results. Defaults to 0 (i.e., no thinning).
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: Optional seed for reproducible sampling.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mcmc_sample_fold').
Returns:
reduction_results: A (possibly nested) structure of finalized reducer
statistics. The structure identically mimics that of `reducer`.
end_state: The final state of the Markov chain(s).
final_kernel_results: `collections.namedtuple` of internal calculations
used to advance the supplied `kernel`. These results do not include
the kernel results of `WithReductions` or `SampleDiscardingKernel`.
"""
with tf.name_scope(name or 'mcmc_sample_fold'):
num_steps = tf.convert_to_tensor(
num_steps, dtype=tf.int32, name='num_steps')
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(x, name='current_state'),
current_state)
reducer_was_none = False
if reducer is None:
reducer = []
reducer_was_none = True
reduction_kernel = with_reductions.WithReductions(
inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(
inner_kernel=kernel,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results),
reducer=reducer,
)
end_state, final_kernel_results = exp_sample_lib.step_kernel(
num_steps=num_steps,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=reduction_kernel,
return_final_kernel_results=True,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
reduction_results = nest.map_structure_up_to(
reducer,
lambda r, s: r.finalize(s),
reducer,
final_kernel_results.streaming_calculations,
check_types=False)
if reducer_was_none:
reduction_results = None
return (reduction_results,
end_state,
final_kernel_results.inner_results.inner_results)
def _trace_kernel_results(current_state, kernel_results):
del current_state
return kernel_results
def sample_chain(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=_trace_kernel_results,
return_final_kernel_results=False,
parallel_iterations=10,
seed=None,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain at `current_state` whose
stationary distribution is governed by the supplied `TransitionKernel`
instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning'
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized, and thus do not
increase memory requirements.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_samples + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and return a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: Optional, a seed for reproducible sampling.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'experimental_mcmc_sample_chain').
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### References
  [1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
with tf.name_scope(name or 'experimental_mcmc_sample_chain'):
if not kernel.is_calibrated:
warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '
'chain may not converge to intended target distribution.')
if trace_fn is None:
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if trace_fn is sample_chain.__defaults__[4]:
warnings.warn('Tracing all kernel results by default is deprecated. Set '
'the `trace_fn` argument to None (the future default '
'value) or an explicit callback that traces the values '
'you are interested in.')
# `WithReductions` assumes all its reducers want to reduce over the
# immediate inner results of its kernel results. However,
# We don't care about the kernel results of `SampleDiscardingKernel`; hence,
# we evaluate the `trace_fn` on a deeper level of inner results.
def real_trace_fn(curr_state, kr):
return curr_state, trace_fn(curr_state, kr.inner_results)
trace_reducer = tracing_reducer.TracingReducer(
trace_fn=real_trace_fn,
size=num_results
)
trace_results, _, final_kernel_results = sample_fold(
num_steps=num_results,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=kernel,
reducer=trace_reducer,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
all_states, trace = trace_results
if return_final_kernel_results:
return sample.CheckpointableStatesAndTrace(
all_states=all_states,
trace=trace,
final_kernel_results=final_kernel_results)
else:
if no_trace:
return all_states
else:
return sample.StatesAndTrace(all_states=all_states, trace=trace) | pt | 0.201899 | 1.840547 | 2 |
algos/custom_ppo2.py | Ottawa-Autonomous-Vehicle-Group/learning-to-drive-in-5-minutes | 1 | 14994 | import time
from collections import deque
import gym
import numpy as np
from stable_baselines import logger, PPO2
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.common import explained_variance, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten
class PPO2WithVAE(PPO2):
"""
Custom PPO2 version.
Notable changes:
- optimization is done after each episode and not after n steps
"""
def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2"):
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
self.cliprange = get_schedule_fn(self.cliprange)
with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
self._setup_learn()
runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
self.episode_reward = np.zeros((self.n_envs,))
ep_info_buf = deque(maxlen=100)
t_first_start = time.time()
n_timesteps = 0
# nupdates = total_timesteps // self.n_batch
for timestep in range(1, total_timesteps + 1):
assert self.n_batch % self.nminibatches == 0
batch_size = self.n_batch // self.nminibatches
t_start = time.time()
frac = 1.0 - timestep / total_timesteps
lr_now = self.learning_rate(frac)
cliprangenow = self.cliprange(frac)
# true_reward is the reward without discount
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
n_timesteps += len(obs)
ep_info_buf.extend(ep_infos)
mb_loss_vals = []
if states is None: # nonrecurrent version
inds = np.arange(self.n_batch)
for epoch_num in range(self.noptepochs):
np.random.shuffle(inds)
for start in range(0, self.n_batch, batch_size):
# timestep = ((update * self.noptepochs * self.n_batch + epoch_num * self.n_batch + start) //
# batch_size)
end = start + batch_size
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
update=n_timesteps))
else: # recurrent version
assert self.n_envs % self.nminibatches == 0
env_indices = np.arange(self.n_envs)
flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
envs_per_batch = batch_size // self.n_steps
for epoch_num in range(self.noptepochs):
np.random.shuffle(env_indices)
                        for start in range(0, self.n_envs, envs_per_batch):
# timestep = ((update * self.noptepochs * self.n_envs + epoch_num * self.n_envs + start) //
# envs_per_batch)
end = start + envs_per_batch
mb_env_inds = env_indices[start:end]
mb_flat_inds = flat_indices[mb_env_inds].ravel()
slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_states = states[mb_env_inds]
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=n_timesteps,
writer=writer, states=mb_states))
loss_vals = np.mean(mb_loss_vals, axis=0)
t_now = time.time()
fps = int(self.n_batch / (t_now - t_start))
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, n_timesteps)
if self.verbose >= 1 and (timestep % log_interval == 0 or timestep == 1):
explained_var = explained_variance(values, returns)
logger.logkv("total_timesteps", n_timesteps)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(explained_var))
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.logkv('time_elapsed', t_start - t_first_start)
for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
logger.logkv(loss_name, loss_val)
logger.dumpkvs()
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if n_timesteps > total_timesteps:
break
return self
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
def run(self):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
while True:
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
mb_rewards.append(rewards)
if self.dones:
print("Episode finished. Reward: {:.2f} {} Steps".format(np.sum(mb_rewards), len(mb_rewards)))
if len(mb_rewards) >= self.n_steps:
break
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
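        # Editor's note: the loop below implements standard GAE (Schulman et al., 2015):
        #   delta_t = r_t + gamma * V_{t+1} * (1 - done_{t+1}) - V_t
        #   A_t     = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
        # and the training targets are returns_t = A_t + V_t (mb_returns = mb_advs + mb_values).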
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
last_gae_lam = 0
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
| pt | 0.142028 | 2.04681 | 2 |
craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | 19 | 14995 | <filename>craw/modules/trail/trails/feeds/urlvir.py<gh_stars>10-100
#!/usr/bin/env python2
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from craw.modules.trail.plugins.util import wget_content
__url__ = "http://www.urlvir.com/export-hosts/"
__check__ = "Updated on"
__info__ = "malware"
__reference__ = "urlvir.com"
maintainer_url = __reference__
maintainer = "urlvir"
list_source_url = __url__
category = __info__
def fetch():
retval = {}
content = wget_content(__url__)
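# __check__ is a marker string expected in a valid feed page; it guards against
# error pages or empty downloads before the host list is parsed.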
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line.strip()] = (__info__, __reference__)
return retval
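# Illustrative shape of the returned mapping (the host below is a made-up example):
#   {"bad-host.example.com": ("malware", "urlvir.com")}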
| en | 0.089912 | 2.073721 | 2 |
tests/continuous_integration.py | kfaRabi/online-judge-tools | 0 | 14996 | <filename>tests/continuous_integration.py
import os
import subprocess
import sys
import unittest
# TODO: these commands should be defined in one place only: either .travis.yml or here
paths = ['oj', 'onlinejudge', 'setup.py', 'tests']
class ContinuousIntegrationTest(unittest.TestCase):
"""A dummy test to run the commands same to CI on local environments"""
@unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml')
def test_isort(self):
subprocess.check_call(['isort', '--check-only', '--diff', '--recursive'] + paths, stdout=sys.stdout, stderr=sys.stderr)
@unittest.skipIf('CI' in os.environ, 'the same command is called from .travis.yml')
def test_yapf(self):
output = subprocess.check_output(['yapf', '--diff', '--recursive'] + paths, stderr=sys.stderr)
self.assertEqual(output, b'')
@unittest.skipIf('CI' in os.environ, 'the same command is called from .travis.yml')
def test_mypy(self):
subprocess.check_call(['mypy', '--show-traceback'] + paths, stdout=sys.stdout, stderr=sys.stderr)
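# To run these checks locally (outside CI), something along these lines should work
# (assuming `tests` is importable as a package):
#   python -m unittest tests.continuous_integration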
| pt | 0.197175 | 2.273943 | 2 |
open-hackathon-client/src/client/config_sample.py | overbest/open-hackathon | 0 | 14997 | # -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# NOTE: all of the following keys/secrets are for test purposes only.
HOSTNAME = "http://localhost" # host name of the UI site
# hacking.kaiyuanshe.cn is used for wechat oauth login
# HOSTNAME = "http://hacking.kaiyuanshe.cn"
# HOSTNAME = "http://open-hackathon-dev.chinacloudapp.cn" # host name of the UI site
# HOSTNAME = "http://hacking.kaiyuanshe.cn"
QQ_OAUTH_STATE = "openhackathon" # todo state should be constant. Actually it should be unguessable to prevent CSFA
HACKATHON_API_ENDPOINT = "http://localhost:15000"
# HACKATHON_API_ENDPOINT = "http://open-hackathon-dev.chinacloudapp.cn:15000"
# HACKATHON_API_ENDPOINT = "http://hacking.kaiyuanshe.cn:15000"
# github key for `localhost`
GITHUB_CLIENT_ID = "b44f3d47bdeb26b9c4e6"
GITHUB_CLIENT_SECRET = "98de14161c4b2ed3ea7a19787d62cda73b8e292c"
# github oauth key for `open-hackathon-dev.chinacloudapp.cn`
# GITHUB_CLIENT_ID = "b8e407813350f26bf537"
# GITHUB_CLIENT_SECRET = "<KEY>"
QQ_CLIENT_ID = "101200890"
QQ_CLIENT_SECRET = "<KEY>"
QQ_META_CONTENT = "274307566465013314076545663016134754100636"
WECHAT_APP_ID = "wxe75b8aef71c2059f"
WECHAT_SECRET = "<KEY>"
WECHAT_OAUTH_STATE = "openhackathon" # NOTE: maybe this should be the same as QQ_OAUTH_STATE?
WEIBO_CLIENT_ID = "479757037"
WEIBO_CLIENT_SECRET = "efc5e75ff8891be37d90b4eaec5c02de"
WEIBO_META_CONTENT = "ae884e09bc02b700"
LIVE_CLIENT_ID = "000000004414E0A6"
LIVE_CLIENT_SECRET = "<KEY>"
ALAUDA_CLIENT_ID = "4VR9kzNZVyWcnk9OnAwMuSus7xOOcozJIpic6W6y"
ALAUDA_CLIENT_SECRET = "<KEY>"
Config = {
"environment": "local",
"app": {
"secret_key": "secret_key"
},
"login": {
"github": {
"client_id": GITHUB_CLIENT_ID,
"access_token_url": 'https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&redirect_uri=%s/github&code=' % (
GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, HOSTNAME),
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"client_id": QQ_CLIENT_ID,
"meta_content": QQ_META_CONTENT,
"access_token_url": 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=%s&client_secret=%s&redirect_uri=%s/qq&code=' % (
QQ_CLIENT_ID, QQ_CLIENT_SECRET, HOSTNAME),
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"wechat": {
"client_id": WECHAT_APP_ID,
"access_token_url": "https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%%s&grant_type=authorization_code" % (
WECHAT_APP_ID, WECHAT_SECRET),
"user_info_url": "https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s"
},
"weibo": {
"client_id": WEIBO_CLIENT_ID,
"meta_content": WEIBO_META_CONTENT,
"user_info_url": 'https://api.weibo.com/2/users/show.json?access_token=',
"email_info_url": 'https://api.weibo.com/2/account/profile/email.json?access_token=',
"access_token_url": 'https://api.weibo.com/oauth2/access_token?client_id=%s&client_secret=%s&grant_type=authorization_code&redirect_uri=%s/weibo&code=' % (
WEIBO_CLIENT_ID, WEIBO_CLIENT_SECRET, HOSTNAME)
},
"live": {
"client_id": LIVE_CLIENT_ID,
"client_secret": LIVE_CLIENT_SECRET,
"redirect_uri": '%s/live' % HOSTNAME,
"access_token_url": 'https://login.live.com/oauth20_token.srf',
"user_info_url": 'https://apis.live.net/v5.0/me?access_token='
},
"alauda": {
"client_id": ALAUDA_CLIENT_ID,
"client_secret": ALAUDA_CLIENT_SECRET,
"redirect_uri": '%s/alauda' % HOSTNAME,
"access_token_url": 'http://console.int.alauda.io/oauth/token'
},
"provider_enabled": ["github", "wechat"],
"session_valid_time_minutes": 60
},
"hackathon-api": {
"endpoint": HACKATHON_API_ENDPOINT
},
"javascript": {
"github": {
"authorize_url": "https://github.com/login/oauth/authorize?client_id=%s&redirect_uri=%s/github&scope=user" % (
GITHUB_CLIENT_ID, HOSTNAME)
},
"weibo": {
"authorize_url": "https://api.weibo.com/oauth2/authorize?client_id=%s&redirect_uri=%s/weibo&scope=all" % (
WEIBO_CLIENT_ID, HOSTNAME)
},
"qq": {
"authorize_url": "https://graph.qq.com/oauth2.0/authorize?client_id=%s&redirect_uri=%s/qq&scope=get_user_info&state=%s&response_type=code" % (
QQ_CLIENT_ID, HOSTNAME, QQ_OAUTH_STATE)
},
"wechat": {
"authorize_url": "https://open.weixin.qq.com/connect/qrconnect?appid=%s&redirect_uri=%s/wechat&response_type=code&scope=snsapi_login&state=%s#wechat_redirect" % (
WECHAT_APP_ID, HOSTNAME, WECHAT_OAUTH_STATE)
},
"live": {
"authorize_url": "https://login.live.com/oauth20_authorize.srf?client_id=%s&scope=wl.basic+,wl.emails&response_type=code&redirect_uri=%s/live" % (
LIVE_CLIENT_ID, HOSTNAME)
},
"alauda": {
"authorize_url": "http://console.int.alauda.io/oauth/authorize?response_type=code&client_id=%s&state=state&redirect_uri=%s/alauda" % (
ALAUDA_CLIENT_ID, HOSTNAME)
},
"hackathon": {
"endpoint": HACKATHON_API_ENDPOINT
},
"apiconfig": {
"proxy": HACKATHON_API_ENDPOINT,
"api": {
"admin": {
"hackathon": {
"": ["get", "post", "put", "delete"],
"checkname": ["get"],
"list": ["get"],
"online": ["post"],
"applyonline": ["post"],
"offline": ["post"],
"tags": ["get", "post", "put", "delete"],
"config": ["get", "post", "put", "delete"],
"administrator": {
"": ["put", "post", "delete"],
"list": ["get"]
},
"template": {
"": ["post", "delete"],
"list": ["get"],
"check": ["get"]
},
"organizer": {
"": ["get", "post", "put", "delete"]
},
"award": {
"": ["get", "post", "put", "delete"],
"list": ["get"]
},
"notice": {
"": ["get", "post", "put", "delete"]
}
},
"registration": {
"": ["get", "post", "delete", "put"],
"list": ["get"]
},
"azure": {
"": ["get", "post", "delete", "put"],
"checksubid": ["post"]
},
"experiment": {
"list": ["get"],
"": ["post", "put"]
},
"team": {
"list": ["get"],
"score": {
"list": ["get"]
},
"award": ["get", "post", "delete"]
},
"user": {
"list": ["get"]
},
"hostserver": {
"": ["get", "post", "delete", "put"],
"list": ["get"]
}
},
"template": {
"": ["get", "post", "delete", "put"],
"file": ["post"],
"list": ["get"],
"check": ["get"]
},
"user": {
"": ["get"],
"login": ["post", "delete"],
"experiment": {
"": ["get", "post", "delete", "put"]
},
"registration": {
"": ["put", "post", "get"],
"checkemail": ["get"],
"list": ["get"]
},
"profile": {
"": ["post", "put"]
},
"picture": {
"": ["put"]
},
"team": {
"member": ["get"]
},
"hackathon": {
"like": ["get", "post", "delete"]
},
"notice": {
"read": ["put"]
},
"show": {
"list": ["get"]
},
"file": {
"": ["post"]
}
},
"hackathon": {
"": ["get"],
"list": ["get"],
"stat": ["get"],
"template": ["get"],
"team": {
"list": ["get"]
},
"registration": {
"list": ["get"]
},
"show": {
"list": ["get"]
},
"grantedawards": ["get"],
"notice": {
"list": ["get"]
}
},
"team": {
"": ["get", "post", "put", "delete"],
"score": ["get", "post", "put"],
"member": {
"": ["post", "put", "delete"],
"list": ["get"]
},
"show": ["get", "post", "delete"],
"template": ["post", "delete"]
},
"talent": {
"list": ["get"]
},
"grantedawards": ["get"]
}
}
}
}
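# --- Hedged usage sketch (not part of the original file) ---
# The web client presumably reads settings from this nested dict, for example:
#   api_endpoint = Config["hackathon-api"]["endpoint"]
#   enabled_providers = Config["login"]["provider_enabled"]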
| pt | 0.28494 | 1.128867 | 1 |
processing_provider/Rast_fillRasterwithPatches.py | geodourados/lftools | 1 | 14998 | # -*- coding: utf-8 -*-
"""
fillRasterwithPatches.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2020-09-01'
__copyright__ = '(C) 2020, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
from osgeo import osr, gdal_array, gdal #https://gdal.org/python/
from math import floor, ceil
import numpy as np
from lftools.geocapt.dip import Interpolar
from lftools.geocapt.imgs import Imgs
import os
from qgis.PyQt.QtGui import QIcon
class FillRasterwithPatches(QgsProcessingAlgorithm):
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
# Translation helper: arg[0] - English (run through Qt translate), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return FillRasterwithPatches()
def name(self):
return 'fillrasterwithpatches'
def displayName(self):
return self.tr('Fill with patches', 'Remendar vazios de raster')
def group(self):
return self.tr('Raster')
def groupId(self):
return 'raster'
def tags(self):
return self.tr('fill,hole,raster,cloud,remove,drone,patch').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png'))
txt_en = 'Fills Raster null pixels (no data) with data obtained from other smaller raster layers (Patches).'
txt_pt = 'Preenche vazios de Raster (pixels nulos) com dados obtidos de outras camadas raster menores (Remendos).'
figure = 'images/tutorial/raster_fill_holes.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<p align="right">
<b>'''+self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
RasterIN ='RasterIN'
PATCHES = 'PATCHES'
RESAMPLING = 'RESAMPLING'
RasterOUT = 'RasterOUT'
OPEN = 'OPEN'
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterRasterLayer(
self.RasterIN,
self.tr('Input Raster', 'Raster de Entrada'),
[QgsProcessing.TypeRaster]
)
)
self.addParameter(
QgsProcessingParameterMultipleLayers(
self.PATCHES,
self.tr('Patch Layers', 'Rasters de Remendo'),
layerType = QgsProcessing.TypeRaster
)
)
interp = [self.tr('Nearest neighbor', 'Vizinho mais próximo'),
self.tr('Bilinear'),
self.tr('Bicubic', 'Bicúbica')]
self.addParameter(
QgsProcessingParameterEnum(
self.RESAMPLING,
self.tr('Interpolation', 'Interpolação'),
options = interp,
defaultValue= 0
)
)
# OUTPUT
self.addParameter(
QgsProcessingParameterFileDestination(
self.RasterOUT,
self.tr('Patched Image', 'Imagem Remendada'),
fileFilter = 'GeoTIFF (*.tif)'
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.OPEN,
self.tr('Load patched Image', 'Carregar Imagem Remendada'),
defaultValue= True
)
)
def processAlgorithm(self, parameters, context, feedback):
RasterIN = self.parameterAsRasterLayer(
parameters,
self.RasterIN,
context
)
if RasterIN is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.RasterIN))
RasterIN = RasterIN.dataProvider().dataSourceUri()
PatchesLayers = self.parameterAsLayerList(
parameters,
self.PATCHES,
context
)
reamostragem = self.parameterAsEnum(
parameters,
self.RESAMPLING,
context
)
reamostragem = ['nearest','bilinear','bicubic'][reamostragem]
RGB_Output = self.parameterAsFileOutput(
parameters,
self.RasterOUT,
context
)
Carregar = self.parameterAsBool(
parameters,
self.OPEN,
context
)
limiar = 240
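# 'limiar' (threshold): besides no-data pixels, pixels brighter than this value are
# also replaced (presumably clouds or overexposed areas in the mosaic).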
# Open the raster layer as an array
image = gdal.Open(RasterIN)
prj=image.GetProjection()
CRS=osr.SpatialReference(wkt=prj)
geotransform = image.GetGeoTransform()
n_bands = image.RasterCount # Number of bands
cols = image.RasterXSize # Number of columns
rows = image.RasterYSize # Number of rows
# Image origin and resolution
ulx, xres, xskew, uly, yskew, yres = geotransform
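# GDAL geotransform: (upper-left X, pixel width, row rotation, upper-left Y, column rotation, pixel height).
# Pixel height is negative for north-up rasters, hence the abs() calls below.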
origem = (ulx, uly)
resol_X = abs(xres)
resol_Y = abs(yres)
if n_bands ==1:
feedback.pushInfo(self.tr('Opening raster band...', 'Abrindo banda do raster...'))
band1 = image.GetRasterBand(1).ReadAsArray()
if n_bands >=3:
feedback.pushInfo(self.tr('Opening Band R...', 'Abrindo Banda R...'))
band1 = image.GetRasterBand(1).ReadAsArray()
feedback.pushInfo(self.tr('Opening Band G...', 'Abrindo Banda G...'))
band2 = image.GetRasterBand(2).ReadAsArray()
feedback.pushInfo(self.tr('Opening Band B...', 'Abrindo Banda B...'))
band3 = image.GetRasterBand(3).ReadAsArray()
# Transparency (alpha band)
if n_bands == 4:
feedback.pushInfo(self.tr('Opening Band Alpha...', 'Abrindo Banda Alfa...'))
band4 = image.GetRasterBand(4).ReadAsArray()
Pixel_Nulo = image.GetRasterBand(1).GetNoDataValue()
if Pixel_Nulo is None:
Pixel_Nulo = 0
image = None # Close the image
# Number of pixels to be processed
TAM = 0
for Remendo in PatchesLayers:
Rem_Path = Remendo.dataProvider().dataSourceUri()
Rem = gdal.Open(Rem_Path)
# Rem_cols = Rem.RasterXSize # Number of columns
Rem_rows = Rem.RasterYSize # Number of rows
TAM += Rem_rows
# Patches
total = 100.0 / TAM
cont = 0
for Remendo in PatchesLayers:
feedback.pushInfo((self.tr('Processing Layer: {}', 'Processando Camada: {}')).format(Remendo))
Rem_Path = Remendo.dataProvider().dataSourceUri()
Rem = gdal.Open(Rem_Path)
ulx, xres, xskew, uly, yskew, yres = Rem.GetGeoTransform()
Rem_origem = (ulx, uly)
Rem_resol_X = abs(xres)
Rem_resol_Y = abs(yres)
Rem_cols = Rem.RasterXSize # Number of columns
Rem_rows = Rem.RasterYSize # Number of rows
lrx = ulx + (Rem_cols * xres)
lry = uly + (Rem_rows * yres)
bbox = [ulx, lrx, lry, uly]
Rem_nulo = Rem.GetRasterBand(1).GetNoDataValue()
if Rem_nulo is None:
Rem_nulo = 0
Rem_band1 = Rem.GetRasterBand(1).ReadAsArray()
if n_bands >1:
Rem_band2 = Rem.GetRasterBand(2).ReadAsArray()
Rem_band3 = Rem.GetRasterBand(3).ReadAsArray()
# Scan limits
row_ini = int(round((origem[1]-uly)/resol_Y - 0.5))
row_fim = int(round((origem[1]-lry)/resol_Y - 0.5))
col_ini = int(round((ulx - origem[0])/resol_X - 0.5))
col_fim = int(round((lrx - origem[0])/resol_X - 0.5))
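# The patch bounding box is converted to row/column indices of the base raster using the
# shared origin and pixel size, so only the overlapping window needs to be scanned.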
# Scan the raster
if n_bands == 4:
for lin in range(row_ini, row_fim):
for col in range(col_ini, col_fim):
px_value = band4[lin][col]
if px_value == 0 or band1[lin][col] > limiar: # Transparent pixel or above the brightness threshold
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
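# (X, Y) is the map coordinate of the pixel centre; the patch is sampled there with the
# selected resampling method (nearest, bilinear or bicubic) via Interpolar.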
band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band2[lin][col] = Interpolar(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band3[lin][col] = Interpolar(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
cont += 1
feedback.setProgress(int(cont * total))
if feedback.isCanceled():
break
elif n_bands == 3:
for lin in range(row_ini, row_fim):
for col in range(col_ini, col_fim):
px_value = band1[lin][col]
if px_value == Pixel_Nulo or band1[lin][col] > limiar: # No-data pixel or above the brightness threshold
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band2[lin][col] = Interpolar(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
band3[lin][col] = Interpolar(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
cont += 1
feedback.setProgress(int(cont * total))
if feedback.isCanceled():
break
elif n_bands == 1:
for lin in range(row_ini, row_fim):
for col in range(col_ini, col_fim):
px_value = band1[lin][col]
if px_value == Pixel_Nulo or band1[lin][col] > limiar: # No-data pixel or above the brightness threshold
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo)
cont += 1
feedback.setProgress(int(cont * total))
if feedback.isCanceled():
break
Rem = None # Close the patch image
# Create the output RGB image
feedback.pushInfo(self.tr('Saving Raster...', 'Salvando Raster...'))
GDT = gdal_array.NumericTypeCodeToGDALTypeCode(band1.dtype)
if n_bands ==1:
RASTER = gdal.GetDriverByName('GTiff').Create(RGB_Output, cols, rows, 1, GDT)
else:
RASTER = gdal.GetDriverByName('GTiff').Create(RGB_Output, cols, rows, 3, GDT)
RASTER.SetGeoTransform(geotransform) # specify coords
RASTER.SetProjection(CRS.ExportToWkt()) # export coords to file
if n_bands ==1:
feedback.pushInfo(self.tr('Writing raster band...', 'Escrevendo banda do raster...'))
banda = RASTER.GetRasterBand(1)
banda.WriteArray(band1)
banda.SetNoDataValue(Pixel_Nulo)
else:
feedback.pushInfo(self.tr('Writing Band R...', 'Escrevendo Banda R...'))
bandaR = RASTER.GetRasterBand(1)
bandaR.WriteArray(band1)
feedback.pushInfo(self.tr('Writing Band G...', 'Escrevendo Banda G...'))
bandaG = RASTER.GetRasterBand(2)
bandaG.WriteArray(band2)
feedback.pushInfo(self.tr('Writing Band B...', 'Escrevendo Banda B...'))
bandaB = RASTER.GetRasterBand(3)
bandaB.WriteArray(band3)
feedback.pushInfo(self.tr('Saving raster...', 'Salvando raster...'))
RASTER.FlushCache() # Write to disk
RASTER = None # Save and close
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
self.CAMINHO = RGB_Output
self.CARREGAR = Carregar
return {self.RasterOUT: RGB_Output}
# Loading of the output file
CAMINHO = ''
CARREGAR = True
def postProcessAlgorithm(self, context, feedback):
if self.CARREGAR:
rlayer = QgsRasterLayer(self.CAMINHO, self.tr('Patched Image', 'Imagem Remendada'))
QgsProject.instance().addMapLayer(rlayer)
return {}
| pt | 0.118485 | 1.454131 | 1 |
quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 6 | 14999 | <filename>quantlab/COCO/utils/inference.py<gh_stars>1-10
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
def view_instance(img, gt_label, pr_label=None):
img = img.cpu()
# gt_label = gt_label.cpu()
# pr_label = pr_label.cpu()
# c, h, w = img.shape
# with open('/home/spmatteo/MSDocuments/QuantLab/COCO/coco.names', 'r') as f:
# classes = [line.strip() for line in f.read().splitlines()]
# cmap = plt.get_cmap('tab20b')
# colors = [cmap(i) for i in np.linspace(0, 1, len(classes)-1)]
# fig, ax = plt.subplots(1, figsize=(12, 9))
# ax.imshow(img.permute(1, 2, 0)) # h, w, c
# # browse annotations and draw bounding boxes
# bboxes = []
# if gt_label is not None:
# for i, annotation in enumerate(gt_label):
# cls = annotation[6]
# if i < 6:
# print(annotation, classes[int(cls)])
# color = colors[int(cls)]
# bbox = patches.Rectangle((annotation[0], annotation[1]), annotation[2]-annotation[0], annotation[3]-annotation[1],
# linewidth=2, edgecolor=color, facecolor='none', label=classes[int(cls)])
# ax.add_patch(bbox)
# bboxes.append((bbox, classes[int(cls)], color))
# for bbox in bboxes:
# ax.annotate(bbox[1], bbox[0].get_xy(), weight='bold', fontsize=10, color=bbox[2])
# plt.axis('off')
# plt.show()
| en | 0.099438 | 1.900678 | 2 |